771 files changed, 12941 insertions, 4521 deletions
@@ -666,11 +666,6 @@ S: Tamsui town, Taipei county, S: Taiwan 251 S: Republic of China -N: Reinette Chatre -D: WiMedia Link Protocol implementation -D: UWB stack bits and pieces - N: Michael Elizabeth Chastain D: Configure, Menuconfig, xconfig @@ -3023,12 +3018,6 @@ S: Demonstratsii 8-382 S: Tula 300000 S: Russia -N: Inaky Perez-Gonzalez -D: UWB stack, HWA-RC driver and HWA-HC drivers -D: Wireless USB additions to the USB stack -D: WiMedia Link Protocol bits and pieces - N: Gordon Peters D: Isochronous receive for IEEE 1394 driver (OHCI module). diff --git a/Documentation/ABI/testing/configfs-usb-gadget-midi2 b/Documentation/ABI/testing/configfs-usb-gadget-midi2 new file mode 100644 index 000000000000..0eac3aaba137 --- /dev/null +++ b/Documentation/ABI/testing/configfs-usb-gadget-midi2 @@ -0,0 +1,54 @@ +What: /config/usb-gadget/gadget/functions/midi2.name +Date: Jul 2023 +KernelVersion: 6.6 +Description: + The attributes: + + ============ =============================================== + process_ump Flag to process UMP Stream messages (0 or 1) + static_block Flag for static blocks (0 or 1) + iface_name MIDI interface name string + ============ =============================================== + +What: /config/usb-gadget/gadget/functions/midi2.name/ep.number +Date: Jul 2023 +KernelVersion: 6.6 +Description: + This group contains a UMP Endpoint configuration. + A new Endpoint starts from 0, and can be up to 3. + + The attributes: + + ============= =============================================== + protocol_caps MIDI protocol capabilities (1, 2 or 3 for both) + protocol Default MIDI protocol (1 or 2) + ep_name UMP Endpoint name string + product_id Product ID string + manufacturer Manufacture ID (24 bit) + family Device family ID (16 bit) + model Device model ID (16 bit) + sw_revision Software Revision (32 bit) + ============= =============================================== + +What: /config/usb-gadget/gadget/functions/midi2.name/ep.number/block.number +Date: Jul 2023 +KernelVersion: 6.6 +Description: + This group contains a UMP Function Block configuration. + A new block starts from 0, and can be up to 31. + + The attributes: + + ================= ============================================== + name Function Block name string + direction 1: input, 2: output, 3: bidirectional + first_group The first UMP Group number (0-15) + num_groups The number of groups in this FB (1-16) + midi1_first_group The first UMP Group number for MIDI 1.0 (0-15) + midi1_num_groups The number of groups for MIDI 1.0 (0-16) + ui_hint 0: unknown, 1: receiver, 2: sender, 3: both + midi_ci_verison Supported MIDI-CI version number (8 bit) + is_midi1 Legacy MIDI 1.0 device (0, 1 or 2) + sysex8_streams Max number of SysEx8 streams (8 bit) + active Active FB flag (0 or 1) + ================= ============================================== diff --git a/Documentation/ABI/testing/sysfs-bus-cxl b/Documentation/ABI/testing/sysfs-bus-cxl index 6350dd82b9a9..087f762ebfd5 100644 --- a/Documentation/ABI/testing/sysfs-bus-cxl +++ b/Documentation/ABI/testing/sysfs-bus-cxl @@ -82,7 +82,12 @@ Description: whether it resides in persistent capacity, volatile capacity, or the LSA, is made permanently unavailable by whatever means is appropriate for the media type. This functionality requires - the device to be not be actively decoding any HPA ranges. + the device to be disabled, that is, not actively decoding any + HPA ranges. 
This permits avoiding explicit global CPU cache + management, relying instead for it to be done when a region + transitions between software programmed and hardware committed + states. If this file is not present, then there is no hardware + support for the operation. What /sys/bus/cxl/devices/memX/security/erase @@ -92,7 +97,13 @@ Contact: [email protected] Description: (WO) Write a boolean 'true' string value to this attribute to secure erase user data by changing the media encryption keys for - all user data areas of the device. + all user data areas of the device. This functionality requires + the device to be disabled, that is, not actively decoding any + HPA ranges. This permits avoiding explicit global CPU cache + management, relying instead for it to be done when a region + transitions between software programmed and hardware committed + states. If this file is not present, then there is no hardware + support for the operation. What: /sys/bus/cxl/devices/memX/firmware/ diff --git a/Documentation/ABI/testing/sysfs-bus-umc b/Documentation/ABI/testing/sysfs-bus-umc deleted file mode 100644 index 948fec412446..000000000000 --- a/Documentation/ABI/testing/sysfs-bus-umc +++ /dev/null @@ -1,28 +0,0 @@ -What: /sys/bus/umc/ -Date: July 2008 -KernelVersion: 2.6.27 -Contact: David Vrabel <[email protected]> -Description: - The Wireless Host Controller Interface (WHCI) - specification describes a PCI-based device with - multiple capabilities; the UWB Multi-interface - Controller (UMC). - - The umc bus presents each of the individual - capabilties as a device. - -What: /sys/bus/umc/devices/.../capability_id -Date: July 2008 -KernelVersion: 2.6.27 -Contact: David Vrabel <[email protected]> -Description: - The ID of this capability, with 0 being the radio - controller capability. - -What: /sys/bus/umc/devices/.../version -Date: July 2008 -KernelVersion: 2.6.27 -Contact: David Vrabel <[email protected]> -Description: - The specification version this capability's hardware - interface complies with. diff --git a/Documentation/ABI/testing/sysfs-bus-usb b/Documentation/ABI/testing/sysfs-bus-usb index be663258b9b7..a44bfe020061 100644 --- a/Documentation/ABI/testing/sysfs-bus-usb +++ b/Documentation/ABI/testing/sysfs-bus-usb @@ -28,40 +28,6 @@ Description: drivers, non-authorized one are not. By default, wired USB devices are authorized. - Certified Wireless USB devices are not authorized - initially and should be (by writing 1) after the - device has been authenticated. - -What: /sys/bus/usb/device/.../wusb_cdid -Date: July 2008 -KernelVersion: 2.6.27 -Contact: David Vrabel <[email protected]> -Description: - For Certified Wireless USB devices only. - - A devices's CDID, as 16 space-separated hex octets. - -What: /sys/bus/usb/device/.../wusb_ck -Date: July 2008 -KernelVersion: 2.6.27 -Contact: David Vrabel <[email protected]> -Description: - For Certified Wireless USB devices only. - - Write the device's connection key (CK) to start the - authentication of the device. The CK is 16 - space-separated hex octets. - -What: /sys/bus/usb/device/.../wusb_disconnect -Date: July 2008 -KernelVersion: 2.6.27 -Contact: David Vrabel <[email protected]> -Description: - For Certified Wireless USB devices only. - - Write a 1 to force the device to disconnect - (equivalent to unplugging a wired USB device). 
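Looking back at the sysfs-bus-cxl change above: both the sanitize and erase attributes are write-only booleans that only work while the memdev is disabled, i.e. not decoding any HPA ranges. Kicking off a secure erase from userspace is therefore a single write; a minimal sketch, with an illustrative mem0 path and only the error handling shown::

  #include <fcntl.h>
  #include <stdio.h>
  #include <unistd.h>

  int main(void)
  {
      /* Illustrative path; substitute the real memdev instance. */
      const char *path = "/sys/bus/cxl/devices/mem0/security/erase";
      int fd = open(path, O_WRONLY);

      if (fd < 0) {
          perror("open");   /* absent file means no hardware support */
          return 1;
      }
      /* The attribute accepts a boolean 'true' string value. */
      if (write(fd, "1", 1) != 1)
          perror("write");
      close(fd);
      return 0;
  }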
- What: /sys/bus/usb/drivers/.../new_id Date: October 2011 Contact: [email protected] diff --git a/Documentation/ABI/testing/sysfs-class-uwb_rc b/Documentation/ABI/testing/sysfs-class-uwb_rc deleted file mode 100644 index a7ea169dc4eb..000000000000 --- a/Documentation/ABI/testing/sysfs-class-uwb_rc +++ /dev/null @@ -1,156 +0,0 @@ -What: /sys/class/uwb_rc -Date: July 2008 -KernelVersion: 2.6.27 -Contact: [email protected] -Description: - Interfaces for WiMedia Ultra Wideband Common Radio - Platform (UWB) radio controllers. - - Familiarity with the ECMA-368 'High Rate Ultra - Wideband MAC and PHY Specification' is assumed. - -What: /sys/class/uwb_rc/beacon_timeout_ms -Date: July 2008 -KernelVersion: 2.6.27 -Description: - If no beacons are received from a device for at least - this time, the device will be considered to have gone - and it will be removed. The default is 3 superframes - (~197 ms) as required by the specification. - -What: /sys/class/uwb_rc/uwb<N>/ -Date: July 2008 -KernelVersion: 2.6.27 -Contact: [email protected] -Description: - An individual UWB radio controller. - -What: /sys/class/uwb_rc/uwb<N>/beacon -Date: July 2008 -KernelVersion: 2.6.27 -Contact: [email protected] -Description: - Write: - - <channel> - - to force a specific channel to be used when beaconing, - or, if <channel> is -1, to prohibit beaconing. If - <channel> is 0, then the default channel selection - algorithm will be used. Valid channels depends on the - radio controller's supported band groups. - - Reading returns the currently active channel, or -1 if - the radio controller is not beaconing. - -What: /sys/class/uwb_rc/uwb<N>/ASIE -Date: August 2014 -KernelVersion: 3.18 -Contact: [email protected] -Description: - - The application-specific information element (ASIE) - included in this device's beacon, in space separated - hex octets. - - Reading returns the current ASIE. Writing replaces - the current ASIE with the one written. - -What: /sys/class/uwb_rc/uwb<N>/scan -Date: July 2008 -KernelVersion: 2.6.27 -Contact: [email protected] -Description: - Write: - - <channel> <type> [<bpst offset>] - - to start (or stop) scanning on a channel. <type> is one of: - - == ======================================= - 0 scan - 1 scan outside BP - 2 scan while inactive - 3 scanning disabled - 4 scan (with start time of <bpst offset>) - == ======================================= - -What: /sys/class/uwb_rc/uwb<N>/mac_address -Date: July 2008 -KernelVersion: 2.6.27 -Contact: [email protected] -Description: - The EUI-48, in colon-separated hex octets, for this - radio controller. A write will change the radio - controller's EUI-48 but only do so while the device is - not beaconing or scanning. - -What: /sys/class/uwb_rc/uwb<N>/wusbhc -Date: July 2008 -KernelVersion: 2.6.27 -Contact: [email protected] -Description: - A symlink to the device (if any) of the WUSB Host - Controller PAL using this radio controller. - -What: /sys/class/uwb_rc/uwb<N>/<EUI-48>/ -Date: July 2008 -KernelVersion: 2.6.27 -Contact: [email protected] -Description: - A neighbour UWB device that has either been detected - as part of a scan or is a member of the radio - controllers beacon group. - -What: /sys/class/uwb_rc/uwb<N>/<EUI-48>/BPST -Date: July 2008 -KernelVersion: 2.6.27 -Contact: [email protected] -Description: - The time (using the radio controllers internal 1 ms - interval superframe timer) of the last beacon from - this device was received. 
- -What: /sys/class/uwb_rc/uwb<N>/<EUI-48>/DevAddr -Date: July 2008 -KernelVersion: 2.6.27 -Contact: [email protected] -Description: - The current DevAddr of this device in colon separated - hex octets. - -What: /sys/class/uwb_rc/uwb<N>/<EUI-48>/EUI_48 -Date: July 2008 -KernelVersion: 2.6.27 -Contact: [email protected] -Description: - - The EUI-48 of this device in colon separated hex - octets. - -What: /sys/class/uwb_rc/uwb<N>/<EUI-48>/IEs -Date: July 2008 -KernelVersion: 2.6.27 -Contact: [email protected] -Description: - The latest IEs included in this device's beacon, in - space separated hex octets with one IE per line. - -What: /sys/class/uwb_rc/uwb<N>/<EUI-48>/LQE -Date: July 2008 -KernelVersion: 2.6.27 -Contact: [email protected] -Description: - Link Quality Estimate - the Signal to Noise Ratio - (SNR) of all packets received from this device in dB. - This gives an estimate on a suitable PHY rate. Refer - to [ECMA-368] section 13.3 for more details. - -What: /sys/class/uwb_rc/uwb<N>/<EUI-48>/RSSI -Date: July 2008 -KernelVersion: 2.6.27 -Contact: [email protected] -Description: - Received Signal Strength Indication - the strength of - the received signal in dB. LQE is a more useful - measure of the radio link quality. diff --git a/Documentation/ABI/testing/sysfs-class-uwb_rc-wusbhc b/Documentation/ABI/testing/sysfs-class-uwb_rc-wusbhc deleted file mode 100644 index 55eb55cac92e..000000000000 --- a/Documentation/ABI/testing/sysfs-class-uwb_rc-wusbhc +++ /dev/null @@ -1,57 +0,0 @@ -What: /sys/class/uwb_rc/uwb<N>/wusbhc/wusb_chid -Date: July 2008 -KernelVersion: 2.6.27 -Contact: David Vrabel <[email protected]> -Description: - Write the CHID (16 space-separated hex octets) for this host controller. - This starts the host controller, allowing it to accept connection from - WUSB devices. - - Set an all zero CHID to stop the host controller. - -What: /sys/class/uwb_rc/uwb<N>/wusbhc/wusb_trust_timeout -Date: July 2008 -KernelVersion: 2.6.27 -Contact: David Vrabel <[email protected]> -Description: - Devices that haven't sent a WUSB packet to the host - within 'wusb_trust_timeout' ms are considered to have - disconnected and are removed. The default value of - 4000 ms is the value required by the WUSB - specification. - - Since this relates to security (specifically, the - lifetime of PTKs and GTKs) it should not be changed - from the default. - -What: /sys/class/uwb_rc/uwb<N>/wusbhc/wusb_phy_rate -Date: August 2009 -KernelVersion: 2.6.32 -Contact: David Vrabel <[email protected]> -Description: - The maximum PHY rate to use for all connected devices. - This is only of limited use for testing and - development as the hardware's automatic rate - adaptation is better then this simple control. - - Refer to [ECMA-368] section 10.3.1.1 for the value to - use. - -What: /sys/class/uwb_rc/uwb<N>/wusbhc/wusb_dnts -Date: June 2013 -KernelVersion: 3.11 -Contact: Thomas Pugliese <[email protected]> -Description: - The device notification time slot (DNTS) count and inverval in - milliseconds that the WUSB host should use. This controls how - often the devices will have the opportunity to send - notifications to the host. - -What: /sys/class/uwb_rc/uwb<N>/wusbhc/wusb_retry_count -Date: June 2013 -KernelVersion: 3.11 -Contact: Thomas Pugliese <[email protected]> -Description: - The number of retries that the WUSB host should attempt - before reporting an error for a bus transaction. The range of - valid values is [0..15], where 0 indicates infinite retries. 
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu index ecd585ca2d50..77942eedf4f6 100644 --- a/Documentation/ABI/testing/sysfs-devices-system-cpu +++ b/Documentation/ABI/testing/sysfs-devices-system-cpu @@ -513,17 +513,18 @@ Description: information about CPUs heterogeneity. cpu_capacity: capacity of cpuX. What: /sys/devices/system/cpu/vulnerabilities + /sys/devices/system/cpu/vulnerabilities/gather_data_sampling + /sys/devices/system/cpu/vulnerabilities/itlb_multihit + /sys/devices/system/cpu/vulnerabilities/l1tf + /sys/devices/system/cpu/vulnerabilities/mds /sys/devices/system/cpu/vulnerabilities/meltdown + /sys/devices/system/cpu/vulnerabilities/mmio_stale_data + /sys/devices/system/cpu/vulnerabilities/retbleed + /sys/devices/system/cpu/vulnerabilities/spec_store_bypass /sys/devices/system/cpu/vulnerabilities/spectre_v1 /sys/devices/system/cpu/vulnerabilities/spectre_v2 - /sys/devices/system/cpu/vulnerabilities/spec_store_bypass - /sys/devices/system/cpu/vulnerabilities/l1tf - /sys/devices/system/cpu/vulnerabilities/mds /sys/devices/system/cpu/vulnerabilities/srbds /sys/devices/system/cpu/vulnerabilities/tsx_async_abort - /sys/devices/system/cpu/vulnerabilities/itlb_multihit - /sys/devices/system/cpu/vulnerabilities/mmio_stale_data - /sys/devices/system/cpu/vulnerabilities/retbleed Date: January 2018 Contact: Linux kernel mailing list <[email protected]> Description: Information about CPU vulnerabilities diff --git a/Documentation/ABI/testing/sysfs-platform-hidma b/Documentation/ABI/testing/sysfs-platform-hidma index fca40a54df59..a80aeda85ef6 100644 --- a/Documentation/ABI/testing/sysfs-platform-hidma +++ b/Documentation/ABI/testing/sysfs-platform-hidma @@ -2,7 +2,7 @@ What: /sys/devices/platform/hidma-*/chid /sys/devices/platform/QCOM8061:*/chid Date: Dec 2015 KernelVersion: 4.4 -Contact: "Sinan Kaya <[email protected]>" +Contact: "Sinan Kaya <[email protected]>" Description: Contains the ID of the channel within the HIDMA instance. It is used to associate a given HIDMA channel with the diff --git a/Documentation/ABI/testing/sysfs-platform-hidma-mgmt b/Documentation/ABI/testing/sysfs-platform-hidma-mgmt index 3b6c5c9eabdc..0373745b4e18 100644 --- a/Documentation/ABI/testing/sysfs-platform-hidma-mgmt +++ b/Documentation/ABI/testing/sysfs-platform-hidma-mgmt @@ -2,7 +2,7 @@ What: /sys/devices/platform/hidma-mgmt*/chanops/chan*/priority /sys/devices/platform/QCOM8060:*/chanops/chan*/priority Date: Nov 2015 KernelVersion: 4.4 -Contact: "Sinan Kaya <[email protected]>" +Contact: "Sinan Kaya <[email protected]>" Description: Contains either 0 or 1 and indicates if the DMA channel is a low priority (0) or high priority (1) channel. @@ -11,7 +11,7 @@ What: /sys/devices/platform/hidma-mgmt*/chanops/chan*/weight /sys/devices/platform/QCOM8060:*/chanops/chan*/weight Date: Nov 2015 KernelVersion: 4.4 -Contact: "Sinan Kaya <[email protected]>" +Contact: "Sinan Kaya <[email protected]>" Description: Contains 0..15 and indicates the weight of the channel among equal priority channels during round robin scheduling. @@ -20,7 +20,7 @@ What: /sys/devices/platform/hidma-mgmt*/chreset_timeout_cycles /sys/devices/platform/QCOM8060:*/chreset_timeout_cycles Date: Nov 2015 KernelVersion: 4.4 -Contact: "Sinan Kaya <[email protected]>" +Contact: "Sinan Kaya <[email protected]>" Description: Contains the platform specific cycle value to wait after a reset command is issued. 
If the value is chosen too short, @@ -32,7 +32,7 @@ What: /sys/devices/platform/hidma-mgmt*/dma_channels /sys/devices/platform/QCOM8060:*/dma_channels Date: Nov 2015 KernelVersion: 4.4 -Contact: "Sinan Kaya <[email protected]>" +Contact: "Sinan Kaya <[email protected]>" Description: Contains the number of dma channels supported by one instance of HIDMA hardware. The value may change from chip to chip. @@ -41,7 +41,7 @@ What: /sys/devices/platform/hidma-mgmt*/hw_version_major /sys/devices/platform/QCOM8060:*/hw_version_major Date: Nov 2015 KernelVersion: 4.4 -Contact: "Sinan Kaya <[email protected]>" +Contact: "Sinan Kaya <[email protected]>" Description: Version number major for the hardware. @@ -49,7 +49,7 @@ What: /sys/devices/platform/hidma-mgmt*/hw_version_minor /sys/devices/platform/QCOM8060:*/hw_version_minor Date: Nov 2015 KernelVersion: 4.4 -Contact: "Sinan Kaya <[email protected]>" +Contact: "Sinan Kaya <[email protected]>" Description: Version number minor for the hardware. @@ -57,7 +57,7 @@ What: /sys/devices/platform/hidma-mgmt*/max_rd_xactions /sys/devices/platform/QCOM8060:*/max_rd_xactions Date: Nov 2015 KernelVersion: 4.4 -Contact: "Sinan Kaya <[email protected]>" +Contact: "Sinan Kaya <[email protected]>" Description: Contains a value between 0 and 31. Maximum number of read transactions that can be issued back to back. @@ -69,7 +69,7 @@ What: /sys/devices/platform/hidma-mgmt*/max_read_request /sys/devices/platform/QCOM8060:*/max_read_request Date: Nov 2015 KernelVersion: 4.4 -Contact: "Sinan Kaya <[email protected]>" +Contact: "Sinan Kaya <[email protected]>" Description: Size of each read request. The value needs to be a power of two and can be between 128 and 1024. @@ -78,7 +78,7 @@ What: /sys/devices/platform/hidma-mgmt*/max_wr_xactions /sys/devices/platform/QCOM8060:*/max_wr_xactions Date: Nov 2015 KernelVersion: 4.4 -Contact: "Sinan Kaya <[email protected]>" +Contact: "Sinan Kaya <[email protected]>" Description: Contains a value between 0 and 31. Maximum number of write transactions that can be issued back to back. @@ -91,7 +91,7 @@ What: /sys/devices/platform/hidma-mgmt*/max_write_request /sys/devices/platform/QCOM8060:*/max_write_request Date: Nov 2015 KernelVersion: 4.4 -Contact: "Sinan Kaya <[email protected]>" +Contact: "Sinan Kaya <[email protected]>" Description: Size of each write request. The value needs to be a power of two and can be between 128 and 1024. diff --git a/Documentation/ABI/testing/sysfs-wusb_cbaf b/Documentation/ABI/testing/sysfs-wusb_cbaf deleted file mode 100644 index 2969d3694ec0..000000000000 --- a/Documentation/ABI/testing/sysfs-wusb_cbaf +++ /dev/null @@ -1,101 +0,0 @@ -What: /sys/bus/usb/drivers/wusb_cbaf/.../wusb_* -Date: August 2008 -KernelVersion: 2.6.27 -Contact: David Vrabel <[email protected]> -Description: - Various files for managing Cable Based Association of - (wireless) USB devices. - - The sequence of operations should be: - - 1. Device is plugged in. - - 2. The connection manager (CM) sees a device with CBA capability. - (the wusb_chid etc. files in /sys/devices/blah/OURDEVICE). - - 3. The CM writes the host name, supported band groups, - and the CHID (host ID) into the wusb_host_name, - wusb_host_band_groups and wusb_chid files. These - get sent to the device and the CDID (if any) for - this host is requested. - - 4. The CM can verify that the device's supported band - groups (wusb_device_band_groups) are compatible - with the host. - - 5. The CM reads the wusb_cdid file. - - 6. The CM looks it up its database. 
- - - If it has a matching CHID,CDID entry, the device - has been authorized before and nothing further - needs to be done. - - - If the CDID is zero (or the CM doesn't find a - matching CDID in its database), the device is - assumed to be not known. The CM may associate - the host with device by: writing a randomly - generated CDID to wusb_cdid and then a random CK - to wusb_ck (this uploads the new CC to the - device). - - CMD may choose to prompt the user before - associating with a new device. - - 7. Device is unplugged. - - References: - [WUSB-AM] - Association Models Supplement to the - Certified Wireless Universal Serial Bus - Specification, version 1.0. - -What: /sys/bus/usb/drivers/wusb_cbaf/.../wusb_chid -Date: August 2008 -KernelVersion: 2.6.27 -Contact: David Vrabel <[email protected]> -Description: - The CHID of the host formatted as 16 space-separated - hex octets. - - Writes fetches device's supported band groups and the - the CDID for any existing association with this host. - -What: /sys/bus/usb/drivers/wusb_cbaf/.../wusb_host_name -Date: August 2008 -KernelVersion: 2.6.27 -Contact: David Vrabel <[email protected]> -Description: - A friendly name for the host as a UTF-8 encoded string. - -What: /sys/bus/usb/drivers/wusb_cbaf/.../wusb_host_band_groups -Date: August 2008 -KernelVersion: 2.6.27 -Contact: David Vrabel <[email protected]> -Description: - The band groups supported by the host, in the format - defined in [WUSB-AM]. - -What: /sys/bus/usb/drivers/wusb_cbaf/.../wusb_device_band_groups -Date: August 2008 -KernelVersion: 2.6.27 -Contact: David Vrabel <[email protected]> -Description: - The band groups supported by the device, in the format - defined in [WUSB-AM]. - -What: /sys/bus/usb/drivers/wusb_cbaf/.../wusb_cdid -Date: August 2008 -KernelVersion: 2.6.27 -Contact: David Vrabel <[email protected]> -Description: - The device's CDID formatted as 16 space-separated hex - octets. - -What: /sys/bus/usb/drivers/wusb_cbaf/.../wusb_ck -Date: August 2008 -KernelVersion: 2.6.27 -Contact: David Vrabel <[email protected]> -Description: - Write 16 space-separated random, hex octets to - associate with the device. diff --git a/Documentation/admin-guide/hw-vuln/gather_data_sampling.rst b/Documentation/admin-guide/hw-vuln/gather_data_sampling.rst new file mode 100644 index 000000000000..264bfa937f7d --- /dev/null +++ b/Documentation/admin-guide/hw-vuln/gather_data_sampling.rst @@ -0,0 +1,109 @@ +.. SPDX-License-Identifier: GPL-2.0 + +GDS - Gather Data Sampling +========================== + +Gather Data Sampling is a hardware vulnerability which allows unprivileged +speculative access to data which was previously stored in vector registers. + +Problem +------- +When a gather instruction performs loads from memory, different data elements +are merged into the destination vector register. However, when a gather +instruction that is transiently executed encounters a fault, stale data from +architectural or internal vector registers may get transiently forwarded to the +destination vector register instead. This will allow a malicious attacker to +infer stale data using typical side channel techniques like cache timing +attacks. GDS is a purely sampling-based attack. + +The attacker uses gather instructions to infer the stale vector register data. +The victim does not need to do anything special other than use the vector +registers. The victim does not need to use gather instructions to be +vulnerable. 
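As a purely illustrative aside on what "gather instruction" means here, the C snippet below uses the AVX2 gather intrinsic (build with -mavx2). It only demonstrates the merge-of-elements behaviour described above; it is not an exploit::

  #include <immintrin.h>
  #include <stdio.h>

  int main(void)
  {
      int table[16] = { 0, 10, 20, 30, 40, 50, 60, 70,
                        80, 90, 100, 110, 120, 130, 140, 150 };
      /* Per-lane indices: lane i loads table[idx[i]]. */
      __m256i idx = _mm256_setr_epi32(0, 2, 4, 6, 8, 10, 12, 14);
      /* vpgatherdd: eight independent loads merged into one register. */
      __m256i v = _mm256_i32gather_epi32(table, idx, 4);
      int out[8];

      _mm256_storeu_si256((__m256i *)out, v);
      for (int i = 0; i < 8; i++)
          printf("%d ", out[i]);
      printf("\n");
      return 0;
  }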
+ +Because the buffers are shared between Hyper-Threads cross Hyper-Thread attacks +are possible. + +Attack scenarios +---------------- +Without mitigation, GDS can infer stale data across virtually all +permission boundaries: + + Non-enclaves can infer SGX enclave data + Userspace can infer kernel data + Guests can infer data from hosts + Guest can infer guest from other guests + Users can infer data from other users + +Because of this, it is important to ensure that the mitigation stays enabled in +lower-privilege contexts like guests and when running outside SGX enclaves. + +The hardware enforces the mitigation for SGX. Likewise, VMMs should ensure +that guests are not allowed to disable the GDS mitigation. If a host erred and +allowed this, a guest could theoretically disable GDS mitigation, mount an +attack, and re-enable it. + +Mitigation mechanism +-------------------- +This issue is mitigated in microcode. The microcode defines the following new +bits: + + ================================ === ============================ + IA32_ARCH_CAPABILITIES[GDS_CTRL] R/O Enumerates GDS vulnerability + and mitigation support. + IA32_ARCH_CAPABILITIES[GDS_NO] R/O Processor is not vulnerable. + IA32_MCU_OPT_CTRL[GDS_MITG_DIS] R/W Disables the mitigation + 0 by default. + IA32_MCU_OPT_CTRL[GDS_MITG_LOCK] R/W Locks GDS_MITG_DIS=0. Writes + to GDS_MITG_DIS are ignored + Can't be cleared once set. + ================================ === ============================ + +GDS can also be mitigated on systems that don't have updated microcode by +disabling AVX. This can be done by setting gather_data_sampling="force" or +"clearcpuid=avx" on the kernel command-line. + +If used, these options will disable AVX use by turning off XSAVE YMM support. +However, the processor will still enumerate AVX support. Userspace that +does not follow proper AVX enumeration to check both AVX *and* XSAVE YMM +support will break. + +Mitigation control on the kernel command line +--------------------------------------------- +The mitigation can be disabled by setting "gather_data_sampling=off" or +"mitigations=off" on the kernel command line. Not specifying either will default +to the mitigation being enabled. Specifying "gather_data_sampling=force" will +use the microcode mitigation when available or disable AVX on affected systems +where the microcode hasn't been updated to include the mitigation. + +GDS System Information +------------------------ +The kernel provides vulnerability status information through sysfs. For +GDS this can be accessed by the following sysfs file: + +/sys/devices/system/cpu/vulnerabilities/gather_data_sampling + +The possible values contained in this file are: + + ============================== ============================================= + Not affected Processor not vulnerable. + Vulnerable Processor vulnerable and mitigation disabled. + Vulnerable: No microcode Processor vulnerable and microcode is missing + mitigation. + Mitigation: AVX disabled, + no microcode Processor is vulnerable and microcode is missing + mitigation. AVX disabled as mitigation. + Mitigation: Microcode Processor is vulnerable and mitigation is in + effect. + Mitigation: Microcode (locked) Processor is vulnerable and mitigation is in + effect and cannot be disabled. + Unknown: Dependent on + hypervisor status Running on a virtual guest processor that is + affected but with no way to know if host + processor is mitigated or vulnerable. 
+ ============================== ============================================= + +GDS Default mitigation +---------------------- +The updated microcode will enable the mitigation by default. The kernel's +default action is to leave the mitigation enabled. diff --git a/Documentation/admin-guide/hw-vuln/index.rst b/Documentation/admin-guide/hw-vuln/index.rst index e0614760a99e..de99caabf65a 100644 --- a/Documentation/admin-guide/hw-vuln/index.rst +++ b/Documentation/admin-guide/hw-vuln/index.rst @@ -13,9 +13,11 @@ are configurable at compile, boot or run time. l1tf mds tsx_async_abort - multihit.rst - special-register-buffer-data-sampling.rst - core-scheduling.rst - l1d_flush.rst - processor_mmio_stale_data.rst - cross-thread-rsb.rst + multihit + special-register-buffer-data-sampling + core-scheduling + l1d_flush + processor_mmio_stale_data + cross-thread-rsb + srso + gather_data_sampling diff --git a/Documentation/admin-guide/hw-vuln/srso.rst b/Documentation/admin-guide/hw-vuln/srso.rst new file mode 100644 index 000000000000..af59a9395662 --- /dev/null +++ b/Documentation/admin-guide/hw-vuln/srso.rst @@ -0,0 +1,150 @@ +.. SPDX-License-Identifier: GPL-2.0 + +Speculative Return Stack Overflow (SRSO) +======================================== + +This is a mitigation for the speculative return stack overflow (SRSO) +vulnerability found on AMD processors. The mechanism is by now the well +known scenario of poisoning CPU functional units - the Branch Target +Buffer (BTB) and Return Address Predictor (RAP) in this case - and then +tricking the elevated privilege domain (the kernel) into leaking +sensitive data. + +AMD CPUs predict RET instructions using a Return Address Predictor (aka +Return Address Stack/Return Stack Buffer). In some cases, a non-architectural +CALL instruction (i.e., an instruction predicted to be a CALL but is +not actually a CALL) can create an entry in the RAP which may be used +to predict the target of a subsequent RET instruction. + +The specific circumstances that lead to this varies by microarchitecture +but the concern is that an attacker can mis-train the CPU BTB to predict +non-architectural CALL instructions in kernel space and use this to +control the speculative target of a subsequent kernel RET, potentially +leading to information disclosure via a speculative side-channel. + +The issue is tracked under CVE-2023-20569. + +Affected processors +------------------- + +AMD Zen, generations 1-4. That is, all families 0x17 and 0x19. Older +processors have not been investigated. + +System information and options +------------------------------ + +First of all, it is required that the latest microcode be loaded for +mitigations to be effective. + +The sysfs file showing SRSO mitigation status is: + + /sys/devices/system/cpu/vulnerabilities/spec_rstack_overflow + +The possible values in this file are: + + * 'Not affected': + + The processor is not vulnerable + + * 'Vulnerable: no microcode': + + The processor is vulnerable, no microcode extending IBPB + functionality to address the vulnerability has been applied. + + * 'Mitigation: microcode': + + Extended IBPB functionality microcode patch has been applied. It does + not address User->Kernel and Guest->Host transitions protection but it + does address User->User and VM->VM attack vectors. 
+ + Note that User->User mitigation is controlled by how the IBPB aspect in + the Spectre v2 mitigation is selected: + + * conditional IBPB: + + where each process can select whether it needs an IBPB issued + around it PR_SPEC_DISABLE/_ENABLE etc, see :doc:`spectre` + + * strict: + + i.e., always on - by supplying spectre_v2_user=on on the kernel + command line + + (spec_rstack_overflow=microcode) + + * 'Mitigation: safe RET': + + Software-only mitigation. It complements the extended IBPB microcode + patch functionality by addressing User->Kernel and Guest->Host + transitions protection. + + Selected by default or by spec_rstack_overflow=safe-ret + + * 'Mitigation: IBPB': + + Similar protection as "safe RET" above but employs an IBPB barrier on + privilege domain crossings (User->Kernel, Guest->Host). + + (spec_rstack_overflow=ibpb) + + * 'Mitigation: IBPB on VMEXIT': + + Mitigation addressing the cloud provider scenario - the Guest->Host + transitions only. + + (spec_rstack_overflow=ibpb-vmexit) + + + +In order to exploit vulnerability, an attacker needs to: + + - gain local access on the machine + + - break kASLR + + - find gadgets in the running kernel in order to use them in the exploit + + - potentially create and pin an additional workload on the sibling + thread, depending on the microarchitecture (not necessary on fam 0x19) + + - run the exploit + +Considering the performance implications of each mitigation type, the +default one is 'Mitigation: safe RET' which should take care of most +attack vectors, including the local User->Kernel one. + +As always, the user is advised to keep her/his system up-to-date by +applying software updates regularly. + +The default setting will be reevaluated when needed and especially when +new attack vectors appear. + +As one can surmise, 'Mitigation: safe RET' does come at the cost of some +performance depending on the workload. If one trusts her/his userspace +and does not want to suffer the performance impact, one can always +disable the mitigation with spec_rstack_overflow=off. + +Similarly, 'Mitigation: IBPB' is another full mitigation type employing +an indrect branch prediction barrier after having applied the required +microcode patch for one's system. This mitigation comes also at +a performance cost. + +Mitigation: safe RET +-------------------- + +The mitigation works by ensuring all RET instructions speculate to +a controlled location, similar to how speculation is controlled in the +retpoline sequence. To accomplish this, the __x86_return_thunk forces +the CPU to mispredict every function return using a 'safe return' +sequence. + +To ensure the safety of this mitigation, the kernel must ensure that the +safe return sequence is itself free from attacker interference. In Zen3 +and Zen4, this is accomplished by creating a BTB alias between the +untraining function srso_untrain_ret_alias() and the safe return +function srso_safe_ret_alias() which results in evicting a potentially +poisoned BTB entry and using that safe one for all function returns. + +In older Zen1 and Zen2, this is accomplished using a reinterpretation +technique similar to Retbleed one: srso_untrain_ret() and +srso_safe_ret(). 
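Since both GDS and SRSO are reported through the same vulnerabilities directory, checking a system's status boils down to reading the two new sysfs files documented above. A small sketch::

  #include <stdio.h>

  static void show(const char *path)
  {
      char buf[256];
      FILE *f = fopen(path, "r");

      if (!f) {
          printf("%s: not present\n", path);
          return;
      }
      if (fgets(buf, sizeof(buf), f))
          printf("%s: %s", path, buf);
      fclose(f);
  }

  int main(void)
  {
      show("/sys/devices/system/cpu/vulnerabilities/gather_data_sampling");
      show("/sys/devices/system/cpu/vulnerabilities/spec_rstack_overflow");
      return 0;
  }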
diff --git a/Documentation/admin-guide/kdump/vmcoreinfo.rst b/Documentation/admin-guide/kdump/vmcoreinfo.rst index c18d94fa6470..f8ebb63b6c5d 100644 --- a/Documentation/admin-guide/kdump/vmcoreinfo.rst +++ b/Documentation/admin-guide/kdump/vmcoreinfo.rst @@ -624,3 +624,9 @@ Used to get the correct ranges: * VMALLOC_START ~ VMALLOC_END : vmalloc() / ioremap() space. * VMEMMAP_START ~ VMEMMAP_END : vmemmap space, used for struct page array. * KERNEL_LINK_ADDR : start address of Kernel link and BPF + +va_kernel_pa_offset +------------------- + +Indicates the offset between the kernel virtual and physical mappings. +Used to translate virtual to physical addresses. diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index a1457995fd41..50fc93684423 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -1623,6 +1623,26 @@ Format: off | on default: on + gather_data_sampling= + [X86,INTEL] Control the Gather Data Sampling (GDS) + mitigation. + + Gather Data Sampling is a hardware vulnerability which + allows unprivileged speculative access to data which was + previously stored in vector registers. + + This issue is mitigated by default in updated microcode. + The mitigation may have a performance impact but can be + disabled. On systems without the microcode mitigation + disabling AVX serves as a mitigation. + + force: Disable AVX to mitigate systems without + microcode mitigation. No effect if the microcode + mitigation is present. Known to cause crashes in + userspace with buggy AVX enumeration. + + off: Disable GDS mitigation. + gcov_persist= [GCOV] When non-zero (default), profiling data for kernel modules is saved and remains accessible via debugfs, even when the module is unloaded/reloaded. @@ -3273,24 +3293,25 @@ Disable all optional CPU mitigations. This improves system performance, but it may also expose users to several CPU vulnerabilities. - Equivalent to: nopti [X86,PPC] - if nokaslr then kpti=0 [ARM64] - nospectre_v1 [X86,PPC] - nobp=0 [S390] - nospectre_v2 [X86,PPC,S390,ARM64] - spectre_v2_user=off [X86] - spec_store_bypass_disable=off [X86,PPC] - ssbd=force-off [ARM64] - nospectre_bhb [ARM64] + Equivalent to: if nokaslr then kpti=0 [ARM64] + gather_data_sampling=off [X86] + kvm.nx_huge_pages=off [X86] l1tf=off [X86] mds=off [X86] - tsx_async_abort=off [X86] - kvm.nx_huge_pages=off [X86] - srbds=off [X86,INTEL] + mmio_stale_data=off [X86] no_entry_flush [PPC] no_uaccess_flush [PPC] - mmio_stale_data=off [X86] + nobp=0 [S390] + nopti [X86,PPC] + nospectre_bhb [ARM64] + nospectre_v1 [X86,PPC] + nospectre_v2 [X86,PPC,S390,ARM64] retbleed=off [X86] + spec_store_bypass_disable=off [X86,PPC] + spectre_v2_user=off [X86] + srbds=off [X86,INTEL] + ssbd=force-off [ARM64] + tsx_async_abort=off [X86] Exceptions: This does not have any effect on @@ -5875,6 +5896,17 @@ Not specifying this option is equivalent to spectre_v2_user=auto. 
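Circling back to the vmcoreinfo.rst hunk earlier in this patch: assuming va_kernel_pa_offset is defined as kernel virtual address minus physical address (as the name suggests; treat this as an assumption), a dump tool would translate a kernel-mapping address roughly like this::

  #include <stdio.h>

  int main(void)
  {
      /* Hypothetical numbers; the real offset comes from the VMCOREINFO note. */
      unsigned long long va_kernel_pa_offset = 0xffffffff00000000ULL;
      unsigned long long virt = 0xffffffff80201000ULL;  /* kernel text address */
      unsigned long long phys = virt - va_kernel_pa_offset;

      printf("virt 0x%llx -> phys 0x%llx\n", virt, phys);
      return 0;
  }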
+ spec_rstack_overflow= + [X86] Control RAS overflow mitigation on AMD Zen CPUs + + off - Disable mitigation + microcode - Enable microcode mitigation only + safe-ret - Enable sw-only safe RET mitigation (default) + ibpb - Enable mitigation by issuing IBPB on + kernel entry + ibpb-vmexit - Issue IBPB only on VMEXIT + (cloud-specific mitigation) + spec_store_bypass_disable= [HW] Control Speculative Store Bypass (SSB) Disable mitigation (Speculative Store Bypass vulnerability) @@ -6607,7 +6639,7 @@ usbcore.authorized_default= [USB] Default USB device authorization: - (default -1 = authorized except for wireless USB, + (default -1 = authorized (same as 1), 0 = not authorized, 1 = authorized, 2 = authorized if device connected to internal port) diff --git a/Documentation/devicetree/bindings/iio/addac/adi,ad74115.yaml b/Documentation/devicetree/bindings/iio/addac/adi,ad74115.yaml index 72d2e910f206..2594fa192f93 100644 --- a/Documentation/devicetree/bindings/iio/addac/adi,ad74115.yaml +++ b/Documentation/devicetree/bindings/iio/addac/adi,ad74115.yaml @@ -216,7 +216,6 @@ properties: description: Whether to enable burnout current for EXT1. adi,ext1-burnout-current-nanoamp: - $ref: /schemas/types.yaml#/definitions/uint32 description: Burnout current in nanoamps to be applied to EXT1. enum: [0, 50, 500, 1000, 10000] @@ -233,7 +232,6 @@ properties: description: Whether to enable burnout current for EXT2. adi,ext2-burnout-current-nanoamp: - $ref: /schemas/types.yaml#/definitions/uint32 description: Burnout current in nanoamps to be applied to EXT2. enum: [0, 50, 500, 1000, 10000] default: 0 @@ -249,7 +247,6 @@ properties: description: Whether to enable burnout current for VIOUT. adi,viout-burnout-current-nanoamp: - $ref: /schemas/types.yaml#/definitions/uint32 description: Burnout current in nanoamps to be applied to VIOUT. enum: [0, 1000, 10000] default: 0 diff --git a/Documentation/devicetree/bindings/net/mediatek,net.yaml b/Documentation/devicetree/bindings/net/mediatek,net.yaml index acb2b2ac4fe1..31cc0c412805 100644 --- a/Documentation/devicetree/bindings/net/mediatek,net.yaml +++ b/Documentation/devicetree/bindings/net/mediatek,net.yaml @@ -293,7 +293,7 @@ allOf: patternProperties: "^mac@[0-1]$": type: object - additionalProperties: false + unevaluatedProperties: false allOf: - $ref: ethernet-controller.yaml# description: @@ -305,14 +305,9 @@ patternProperties: reg: maxItems: 1 - phy-handle: true - - phy-mode: true - required: - reg - compatible - - phy-handle required: - compatible diff --git a/Documentation/devicetree/bindings/net/rockchip-dwmac.yaml b/Documentation/devicetree/bindings/net/rockchip-dwmac.yaml index 176ea5f90251..7f324c6da915 100644 --- a/Documentation/devicetree/bindings/net/rockchip-dwmac.yaml +++ b/Documentation/devicetree/bindings/net/rockchip-dwmac.yaml @@ -91,12 +91,18 @@ properties: $ref: /schemas/types.yaml#/definitions/phandle tx_delay: - description: Delay value for TXD timing. Range value is 0~0x7F, 0x30 as default. + description: Delay value for TXD timing. $ref: /schemas/types.yaml#/definitions/uint32 + minimum: 0 + maximum: 0x7F + default: 0x30 rx_delay: - description: Delay value for RXD timing. Range value is 0~0x7F, 0x10 as default. + description: Delay value for RXD timing. 
$ref: /schemas/types.yaml#/definitions/uint32 + minimum: 0 + maximum: 0x7F + default: 0x10 phy-supply: description: PHY regulator diff --git a/Documentation/devicetree/bindings/phy/realtek,usb2phy.yaml b/Documentation/devicetree/bindings/phy/realtek,usb2phy.yaml new file mode 100644 index 000000000000..9911ada39ee7 --- /dev/null +++ b/Documentation/devicetree/bindings/phy/realtek,usb2phy.yaml @@ -0,0 +1,175 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +# Copyright 2023 Realtek Semiconductor Corporation +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/phy/realtek,usb2phy.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Realtek DHC SoCs USB 2.0 PHY + +maintainers: + - Stanley Chang <[email protected]> + +description: | + Realtek USB 2.0 PHY support the digital home center (DHC) RTD series SoCs. + The USB 2.0 PHY driver is designed to support the XHCI controller. The SoCs + support multiple XHCI controllers. One PHY device node maps to one XHCI + controller. + + RTD1295/RTD1619 SoCs USB + The USB architecture includes three XHCI controllers. + Each XHCI maps to one USB 2.0 PHY and map one USB 3.0 PHY on some + controllers. + XHCI controller#0 -- usb2phy -- phy#0 + |- usb3phy -- phy#0 + XHCI controller#1 -- usb2phy -- phy#0 + XHCI controller#2 -- usb2phy -- phy#0 + |- usb3phy -- phy#0 + + RTD1395 SoCs USB + The USB architecture includes two XHCI controllers. + The controller#0 has one USB 2.0 PHY. The controller#1 includes two USB 2.0 + PHY. + XHCI controller#0 -- usb2phy -- phy#0 + XHCI controller#1 -- usb2phy -- phy#0 + |- phy#1 + + RTD1319/RTD1619b SoCs USB + The USB architecture includes three XHCI controllers. + Each XHCI maps to one USB 2.0 PHY and map one USB 3.0 PHY on controllers#2. + XHCI controller#0 -- usb2phy -- phy#0 + XHCI controller#1 -- usb2phy -- phy#0 + XHCI controller#2 -- usb2phy -- phy#0 + |- usb3phy -- phy#0 + + RTD1319d SoCs USB + The USB architecture includes three XHCI controllers. + Each xhci maps to one USB 2.0 PHY and map one USB 3.0 PHY on controllers#0. + XHCI controller#0 -- usb2phy -- phy#0 + |- usb3phy -- phy#0 + XHCI controller#1 -- usb2phy -- phy#0 + XHCI controller#2 -- usb2phy -- phy#0 + + RTD1312c/RTD1315e SoCs USB + The USB architecture includes three XHCI controllers. + Each XHCI maps to one USB 2.0 PHY. + XHCI controller#0 -- usb2phy -- phy#0 + XHCI controller#1 -- usb2phy -- phy#0 + XHCI controller#2 -- usb2phy -- phy#0 + +properties: + compatible: + enum: + - realtek,rtd1295-usb2phy + - realtek,rtd1312c-usb2phy + - realtek,rtd1315e-usb2phy + - realtek,rtd1319-usb2phy + - realtek,rtd1319d-usb2phy + - realtek,rtd1395-usb2phy + - realtek,rtd1395-usb2phy-2port + - realtek,rtd1619-usb2phy + - realtek,rtd1619b-usb2phy + + reg: + items: + - description: PHY data registers + - description: PHY control registers + + "#phy-cells": + const: 0 + + nvmem-cells: + maxItems: 2 + description: + Phandles to nvmem cell that contains the trimming data. + If unspecified, default value is used. + + nvmem-cell-names: + items: + - const: usb-dc-cal + - const: usb-dc-dis + description: + The following names, which correspond to each nvmem-cells. + usb-dc-cal is the driving level for each phy specified via efuse. + usb-dc-dis is the disconnection level for each phy specified via efuse. + + realtek,inverse-hstx-sync-clock: + description: + For one of the phys of RTD1619b SoC, the synchronous clock of the + high-speed tx must be inverted. 
+ type: boolean + + realtek,driving-level: + description: + Control the magnitude of High speed Dp/Dm output swing (mV). + For a different board or port, the original magnitude maybe not meet + the specification. In this situation we can adjust the value to meet + the specification. + $ref: /schemas/types.yaml#/definitions/uint32 + default: 8 + minimum: 0 + maximum: 31 + + realtek,driving-level-compensate: + description: + For RTD1315e SoC, the driving level can be adjusted by reading the + efuse table. This property provides drive compensation. + If the magnitude of High speed Dp/Dm output swing still not meet the + specification, then we can set this value to meet the specification. + $ref: /schemas/types.yaml#/definitions/int32 + default: 0 + minimum: -8 + maximum: 8 + + realtek,disconnection-compensate: + description: + This adjusts the disconnection level compensation for the different + boards with different disconnection level. + $ref: /schemas/types.yaml#/definitions/int32 + default: 0 + minimum: -8 + maximum: 8 + +required: + - compatible + - reg + - "#phy-cells" + +allOf: + - if: + not: + properties: + compatible: + contains: + enum: + - realtek,rtd1619b-usb2phy + then: + properties: + realtek,inverse-hstx-sync-clock: false + + - if: + not: + properties: + compatible: + contains: + enum: + - realtek,rtd1315e-usb2phy + then: + properties: + realtek,driving-level-compensate: false + +additionalProperties: false + +examples: + - | + usb-phy@13214 { + compatible = "realtek,rtd1619b-usb2phy"; + reg = <0x13214 0x4>, <0x28280 0x4>; + #phy-cells = <0>; + nvmem-cells = <&otp_usb_port0_dc_cal>, <&otp_usb_port0_dc_dis>; + nvmem-cell-names = "usb-dc-cal", "usb-dc-dis"; + + realtek,inverse-hstx-sync-clock; + realtek,driving-level = <0xa>; + realtek,disconnection-compensate = <(-1)>; + }; diff --git a/Documentation/devicetree/bindings/phy/realtek,usb3phy.yaml b/Documentation/devicetree/bindings/phy/realtek,usb3phy.yaml new file mode 100644 index 000000000000..dfe2bb4e59e7 --- /dev/null +++ b/Documentation/devicetree/bindings/phy/realtek,usb3phy.yaml @@ -0,0 +1,107 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +# Copyright 2023 Realtek Semiconductor Corporation +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/phy/realtek,usb3phy.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Realtek DHC SoCs USB 3.0 PHY + +maintainers: + - Stanley Chang <[email protected]> + +description: | + Realtek USB 3.0 PHY support the digital home center (DHC) RTD series SoCs. + The USB 3.0 PHY driver is designed to support the XHCI controller. The SoCs + support multiple XHCI controllers. One PHY device node maps to one XHCI + controller. + + RTD1295/RTD1619 SoCs USB + The USB architecture includes three XHCI controllers. + Each XHCI maps to one USB 2.0 PHY and map one USB 3.0 PHY on some + controllers. + XHCI controller#0 -- usb2phy -- phy#0 + |- usb3phy -- phy#0 + XHCI controller#1 -- usb2phy -- phy#0 + XHCI controller#2 -- usb2phy -- phy#0 + |- usb3phy -- phy#0 + + RTD1319/RTD1619b SoCs USB + The USB architecture includes three XHCI controllers. + Each XHCI maps to one USB 2.0 PHY and map one USB 3.0 PHY on controllers#2. + XHCI controller#0 -- usb2phy -- phy#0 + XHCI controller#1 -- usb2phy -- phy#0 + XHCI controller#2 -- usb2phy -- phy#0 + |- usb3phy -- phy#0 + + RTD1319d SoCs USB + The USB architecture includes three XHCI controllers. + Each xhci maps to one USB 2.0 PHY and map one USB 3.0 PHY on controllers#0. 
+ XHCI controller#0 -- usb2phy -- phy#0 + |- usb3phy -- phy#0 + XHCI controller#1 -- usb2phy -- phy#0 + XHCI controller#2 -- usb2phy -- phy#0 + +properties: + compatible: + enum: + - realtek,rtd1295-usb3phy + - realtek,rtd1319-usb3phy + - realtek,rtd1319d-usb3phy + - realtek,rtd1619-usb3phy + - realtek,rtd1619b-usb3phy + + reg: + maxItems: 1 + + "#phy-cells": + const: 0 + + nvmem-cells: + maxItems: 1 + description: A phandle to the tx lfps swing trim data provided by + a nvmem device, if unspecified, default values shall be used. + + nvmem-cell-names: + items: + - const: usb_u3_tx_lfps_swing_trim + + realtek,amplitude-control-coarse-tuning: + description: + This adjusts the signal amplitude for normal operation and beacon LFPS. + This value is a parameter for coarse tuning. + For different boards, if the default value is inappropriate, this + property can be assigned to adjust. + $ref: /schemas/types.yaml#/definitions/uint32 + default: 255 + minimum: 0 + maximum: 255 + + realtek,amplitude-control-fine-tuning: + description: + This adjusts the signal amplitude for normal operation and beacon LFPS. + This value is used for fine-tuning parameters. + $ref: /schemas/types.yaml#/definitions/uint32 + default: 65535 + minimum: 0 + maximum: 65535 + +required: + - compatible + - reg + - "#phy-cells" + +additionalProperties: false + +examples: + - | + usb-phy@13e10 { + compatible = "realtek,rtd1319d-usb3phy"; + reg = <0x13e10 0x4>; + #phy-cells = <0>; + + nvmem-cells = <&otp_usb_u3_tx_lfps_swing_trim>; + nvmem-cell-names = "usb_u3_tx_lfps_swing_trim"; + + realtek,amplitude-control-coarse-tuning = <0x77>; + }; diff --git a/Documentation/devicetree/bindings/serial/atmel,at91-usart.yaml b/Documentation/devicetree/bindings/serial/atmel,at91-usart.yaml index 30b2131b5860..65cb2e5c5eee 100644 --- a/Documentation/devicetree/bindings/serial/atmel,at91-usart.yaml +++ b/Documentation/devicetree/bindings/serial/atmel,at91-usart.yaml @@ -16,7 +16,6 @@ properties: - enum: - atmel,at91rm9200-usart - atmel,at91sam9260-usart - - microchip,sam9x60-usart - items: - const: atmel,at91rm9200-dbgu - const: atmel,at91rm9200-usart @@ -24,6 +23,9 @@ properties: - const: atmel,at91sam9260-dbgu - const: atmel,at91sam9260-usart - items: + - const: microchip,sam9x60-usart + - const: atmel,at91sam9260-usart + - items: - const: microchip,sam9x60-dbgu - const: microchip,sam9x60-usart - const: atmel,at91sam9260-dbgu diff --git a/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.yaml b/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.yaml index 782402800d4a..d2303f9a638c 100644 --- a/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.yaml +++ b/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.yaml @@ -34,6 +34,7 @@ properties: - fsl,imx23-usb - fsl,imx25-usb - fsl,imx28-usb + - fsl,imx35-usb - fsl,imx50-usb - fsl,imx51-usb - fsl,imx53-usb @@ -76,11 +77,11 @@ properties: clocks: minItems: 1 - maxItems: 2 + maxItems: 3 clock-names: minItems: 1 - maxItems: 2 + maxItems: 3 dr_mode: true @@ -292,6 +293,18 @@ properties: minimum: 0x0 maximum: 0xf + fsl,picophy-rise-fall-time-adjust: + description: + HS Transmitter Rise/Fall Time Adjustment. Adjust the rise/fall times + of the high-speed transmitter waveform. It has no unit. The rise/fall + time will be increased or decreased by a certain percentage relative + to design default time. (0:-10%; 1:design default; 2:+15%; 3:+20%) + Details can refer to TXRISETUNE0 bit of USBNC_n_PHY_CFG1. 
+ $ref: /schemas/types.yaml#/definitions/uint32 + minimum: 0 + maximum: 3 + default: 1 + usb-phy: description: phandle for the PHY device. Use "phys" instead. $ref: /schemas/types.yaml#/definitions/phandle diff --git a/Documentation/devicetree/bindings/usb/cypress,hx3.yaml b/Documentation/devicetree/bindings/usb/cypress,hx3.yaml new file mode 100644 index 000000000000..47add0d85fb8 --- /dev/null +++ b/Documentation/devicetree/bindings/usb/cypress,hx3.yaml @@ -0,0 +1,77 @@ +# SPDX-License-Identifier: GPL-2.0-only or BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/usb/cypress,hx3.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Cypress HX3 USB 3.0 hub controller family + +maintainers: + - Benjamin Bara <[email protected]> + +allOf: + - $ref: usb-device.yaml# + +properties: + compatible: + enum: + - usb4b4,6504 + - usb4b4,6506 + + reg: true + + reset-gpios: + items: + - description: GPIO specifier for RESETN pin. + + vdd-supply: + description: + 1V2 power supply (VDD_EFUSE, AVDD12, DVDD12). + + vdd2-supply: + description: + 3V3 power supply (AVDD33, VDD_IO). + + peer-hub: + $ref: /schemas/types.yaml#/definitions/phandle + description: + phandle to the peer hub on the controller. + +required: + - compatible + - reg + - peer-hub + - vdd-supply + - vdd2-supply + +additionalProperties: false + +examples: + - | + #include <dt-bindings/gpio/gpio.h> + + usb { + dr_mode = "host"; + #address-cells = <1>; + #size-cells = <0>; + + /* 2.0 hub on port 1 */ + hub_2_0: hub@1 { + compatible = "usb4b4,6504"; + reg = <1>; + peer-hub = <&hub_3_0>; + reset-gpios = <&gpio1 11 GPIO_ACTIVE_LOW>; + vdd-supply = <®_1v2_usb>; + vdd2-supply = <®_3v3_usb>; + }; + + /* 3.0 hub on port 2 */ + hub_3_0: hub@2 { + compatible = "usb4b4,6506"; + reg = <2>; + peer-hub = <&hub_2_0>; + reset-gpios = <&gpio1 11 GPIO_ACTIVE_LOW>; + vdd-supply = <®_1v2_usb>; + vdd2-supply = <®_3v3_usb>; + }; + }; diff --git a/Documentation/devicetree/bindings/usb/generic-ehci.yaml b/Documentation/devicetree/bindings/usb/generic-ehci.yaml index b956bb5fada7..e5c8f4e085de 100644 --- a/Documentation/devicetree/bindings/usb/generic-ehci.yaml +++ b/Documentation/devicetree/bindings/usb/generic-ehci.yaml @@ -67,6 +67,7 @@ properties: - const: generic-ehci - items: - enum: + - atmel,at91sam9g45-ehci - cavium,octeon-6335-ehci - ibm,usb-ehci-440epx - ibm,usb-ehci-460ex diff --git a/Documentation/devicetree/bindings/usb/genesys,gl850g.yaml b/Documentation/devicetree/bindings/usb/genesys,gl850g.yaml index cc4cf92b70d1..383625c2ef00 100644 --- a/Documentation/devicetree/bindings/usb/genesys,gl850g.yaml +++ b/Documentation/devicetree/bindings/usb/genesys,gl850g.yaml @@ -17,6 +17,7 @@ properties: enum: - usb5e3,608 - usb5e3,610 + - usb5e3,620 reg: true diff --git a/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml b/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml index ae24dac78d9a..67591057f234 100644 --- a/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml +++ b/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml @@ -14,6 +14,7 @@ properties: items: - enum: - qcom,ipq4019-dwc3 + - qcom,ipq5332-dwc3 - qcom,ipq6018-dwc3 - qcom,ipq8064-dwc3 - qcom,ipq8074-dwc3 @@ -82,15 +83,6 @@ properties: minItems: 1 maxItems: 9 - assigned-clocks: - items: - - description: Phandle and clock specifier of MOCK_UTMI_CLK. - - description: Phandle and clock specifoer of MASTER_CLK. - - assigned-clock-rates: - items: - - description: Must be 19.2MHz (19200000). - - description: Must be >= 60 MHz in HS mode, >= 125 MHz in SS mode. 
resets: maxItems: 1 @@ -246,6 +238,7 @@ allOf: compatible: contains: enum: + - qcom,ipq5332-dwc3 - qcom,msm8994-dwc3 - qcom,qcs404-dwc3 then: @@ -290,15 +283,23 @@ allOf: then: properties: clocks: - minItems: 6 + minItems: 5 + maxItems: 6 clock-names: - items: - - const: cfg_noc - - const: core - - const: iface - - const: sleep - - const: mock_utmi - - const: bus + oneOf: + - items: + - const: cfg_noc + - const: core + - const: iface + - const: sleep + - const: mock_utmi + - const: bus + - items: + - const: cfg_noc + - const: core + - const: sleep + - const: mock_utmi + - const: bus - if: properties: @@ -410,6 +411,7 @@ allOf: compatible: contains: enum: + - qcom,ipq5332-dwc3 - qcom,sdm660-dwc3 then: properties: diff --git a/Documentation/driver-api/usb/usb.rst b/Documentation/driver-api/usb/usb.rst index 2c94ff2f4385..fb41768696ec 100644 --- a/Documentation/driver-api/usb/usb.rst +++ b/Documentation/driver-api/usb/usb.rst @@ -420,6 +420,12 @@ USBDEVFS_CONNECTINFO know the devnum value already, it's the DDD value of the device file name. +USBDEVFS_GET_SPEED + Returns the speed of the device. The speed is returned as a + nummerical value in accordance with enum usb_device_speed + + File modification time is not updated by this request. + USBDEVFS_GETDRIVER Returns the name of the kernel driver bound to a given interface (a string). Parameter is a pointer to this structure, which is @@ -771,8 +777,7 @@ Speed may be: ======= ====================================================== 1.5 Mbit/s for low speed USB 12 Mbit/s for full speed USB - 480 Mbit/s for high speed USB (added for USB 2.0); - also used for Wireless USB, which has no fixed speed + 480 Mbit/s for high speed USB (added for USB 2.0) 5000 Mbit/s for SuperSpeed USB (added for USB 3.0) ======= ====================================================== diff --git a/Documentation/filesystems/locking.rst b/Documentation/filesystems/locking.rst index ed148919e11a..0ca479dbb1cd 100644 --- a/Documentation/filesystems/locking.rst +++ b/Documentation/filesystems/locking.rst @@ -551,9 +551,8 @@ mutex or just to use i_size_read() instead. Note: this does not protect the file->f_pos against concurrent modifications since this is something the userspace has to take care about. -->iterate() is called with i_rwsem exclusive. - -->iterate_shared() is called with i_rwsem at least shared. +->iterate_shared() is called with i_rwsem held for reading, and with the +file f_pos_lock held exclusively ->fasync() is responsible for maintaining the FASYNC bit in filp->f_flags. Most instances call fasync_helper(), which does that maintenance, so it's diff --git a/Documentation/filesystems/porting.rst b/Documentation/filesystems/porting.rst index d2d684ae7798..0f5da78ef4f9 100644 --- a/Documentation/filesystems/porting.rst +++ b/Documentation/filesystems/porting.rst @@ -537,7 +537,7 @@ vfs_readdir() is gone; switch to iterate_dir() instead **mandatory** -->readdir() is gone now; switch to ->iterate() +->readdir() is gone now; switch to ->iterate_shared() **mandatory** @@ -693,24 +693,19 @@ parallel now. --- -**recommended** +**mandatory** -->iterate_shared() is added; it's a parallel variant of ->iterate(). +->iterate_shared() is added. Exclusion on struct file level is still provided (as well as that between it and lseek on the same struct file), but if your directory has been opened several times, you can get these called in parallel. Exclusion between that method and all directory-modifying ones is still provided, of course. 
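To make the ->iterate_shared() expectations above concrete, here is a bare-bones sketch of a directory iterator. The entry it emits is entirely made up; the point is only the callback shape, the bool-returning dir_emit() helpers, and the locking noted in locking.rst::

  #include <linux/fs.h>

  /* Runs with i_rwsem held shared and file->f_pos_lock held exclusively. */
  static int example_iterate_shared(struct file *file, struct dir_context *ctx)
  {
      if (!dir_emit_dots(file, ctx))
          return 0;
      if (ctx->pos == 2) {
          /* dir_emit() returns bool: false means "stop iterating". */
          if (!dir_emit(ctx, "example", 7, 1234, DT_REG))
              return 0;
          ctx->pos++;
      }
      return 0;
  }

  static const struct file_operations example_dir_fops = {
      .llseek         = generic_file_llseek,
      .read           = generic_read_dir,
      .iterate_shared = example_iterate_shared,
  };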
-Often enough ->iterate() can serve as ->iterate_shared() without any -changes - it is a read-only operation, after all. If you have any -per-inode or per-dentry in-core data structures modified by ->iterate(), -you might need something to serialize the access to them. If you -do dcache pre-seeding, you'll need to switch to d_alloc_parallel() for -that; look for in-tree examples. - -Old method is only used if the new one is absent; eventually it will -be removed. Switch while you still can; the old one won't stay. +If you have any per-inode or per-dentry in-core data structures modified +by ->iterate_shared(), you might need something to serialize the access +to them. If you do dcache pre-seeding, you'll need to switch to +d_alloc_parallel() for that; look for in-tree examples. --- @@ -930,9 +925,9 @@ should be done by looking at FMODE_LSEEK in file->f_mode. filldir_t (readdir callbacks) calling conventions have changed. Instead of returning 0 or -E... it returns bool now. false means "no more" (as -E... used to) and true - "keep going" (as 0 in old calling conventions). Rationale: -callers never looked at specific -E... values anyway. ->iterate() and -->iterate_shared() instance require no changes at all, all filldir_t ones in -the tree converted. +callers never looked at specific -E... values anyway. -> iterate_shared() +instances require no changes at all, all filldir_t ones in the tree +converted. --- diff --git a/Documentation/usb/authorization.rst b/Documentation/usb/authorization.rst index 9e53909d04c2..150a14970e95 100644 --- a/Documentation/usb/authorization.rst +++ b/Documentation/usb/authorization.rst @@ -33,12 +33,9 @@ Remove the lock down:: $ echo 1 > /sys/bus/usb/devices/usbX/authorized_default -By default, Wired USB devices are authorized by default to -connect. Wireless USB hosts deauthorize by default all new connected -devices (this is so because we need to do an authentication phase -before authorizing). Writing "2" to the authorized_default attribute -causes kernel to only authorize by default devices connected to internal -USB ports. +By default, all USB devices are authorized. Writing "2" to the +authorized_default attribute causes the kernel to authorize by default +only devices connected to internal USB ports. Example system lockdown (lame) diff --git a/Documentation/usb/gadget-testing.rst b/Documentation/usb/gadget-testing.rst index 2fca40443dc9..394cd226bfae 100644 --- a/Documentation/usb/gadget-testing.rst +++ b/Documentation/usb/gadget-testing.rst @@ -27,6 +27,7 @@ provided by gadgets. 18. UVC function 19. PRINTER function 20. UAC1 function (new API) + 21. MIDI2 function 1. ACM function @@ -965,3 +966,156 @@ e.g.:: $ arecord -f dat -t wav -D hw:CARD=UAC1Gadget,DEV=0 | \ aplay -D default:CARD=OdroidU3 + + +21. MIDI2 function +================== + +The function is provided by usb_f_midi2.ko module. +It will create a virtual ALSA card containing a UMP rawmidi device +where the UMP packet is looped back. In addition, a legacy rawmidi +device is created. The UMP rawmidi is bound with ALSA sequencer +clients, too. + +Function-specific configfs interface +------------------------------------ + +The function name to use when creating the function directory is "midi2". 
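A minimal configfs sketch of how such a "midi2" function directory might be created and bound into a gadget, assuming configfs is mounted at the usual /sys/kernel/config; the gadget name "g1", the instance name "usb0", and the UDC picked below are illustrative only, and device-level attributes such as idVendor/idProduct are omitted for brevity::

  $ cd /sys/kernel/config/usb_gadget
  $ mkdir g1 && cd g1                              # create a gadget skeleton
  $ mkdir configs/c.1                              # one configuration
  $ mkdir functions/midi2.usb0                     # instantiate the midi2 function
  $ echo 1 > functions/midi2.usb0/process_ump      # handle UMP v1.1 stream messages
  $ echo "MIDI 2.0 Gadget" > functions/midi2.usb0/ep.0/ep_name
  $ ln -s functions/midi2.usb0 configs/c.1         # add the function to the config
  $ echo "$(ls /sys/class/udc | head -n1)" > UDC   # bind to the first available UDC

Tearing the gadget down is the reverse: clear the UDC attribute, remove the symlink, then remove the directories.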
+The midi2 function provides these attributes in its function directory
+as the card top-level information:
+
+  ============= =================================================
+  process_ump   Bool flag to process UMP Stream messages (0 or 1)
+  static_block  Bool flag for static blocks (0 or 1)
+  iface_name    Optional interface name string
+  ============= =================================================
+
+The directory contains a subdirectory "ep.0", and this provides the
+attributes for a UMP Endpoint (which is a pair of USB MIDI Endpoints):
+
+  ============= =================================================
+  protocol_caps MIDI protocol capabilities;
+                1: MIDI 1.0, 2: MIDI 2.0, or 3: both protocols
+  protocol      Default MIDI protocol (either 1 or 2)
+  ep_name       UMP Endpoint name string
+  product_id    Product ID string
+  manufacturer  Manufacturer ID number (24 bit)
+  family        Device family ID number (16 bit)
+  model         Device model ID number (16 bit)
+  sw_revision   Software revision (32 bit)
+  ============= =================================================
+
+Each Endpoint subdirectory contains a subdirectory "block.0", which
+represents the Function Block information for Block 0.
+Its attributes are:
+
+  ================= ===============================================
+  name              Function Block name string
+  direction         Direction of this FB
+                    1: input, 2: output, or 3: bidirectional
+  first_group       The first UMP Group number (0-15)
+  num_groups        The number of groups in this FB (1-16)
+  midi1_first_group The first UMP Group number for MIDI 1.0 (0-15)
+  midi1_num_groups  The number of groups for MIDI 1.0 (0-16)
+  ui_hint           UI-hint of this FB
+                    0: unknown, 1: receiver, 2: sender, 3: both
+  midi_ci_verison   Supported MIDI-CI version number (8 bit)
+  is_midi1          Legacy MIDI 1.0 device (0-2)
+                    0: MIDI 2.0 device,
+                    1: MIDI 1.0 without restriction, or
+                    2: MIDI 1.0 with low speed
+  sysex8_streams    Max number of SysEx8 streams (8 bit)
+  active            Bool flag for FB activity (0 or 1)
+  ================= ===============================================
+
+If multiple Function Blocks are required, you can add more Function
+Blocks by creating subdirectories "block.<num>" with the corresponding
+Function Block number (1, 2, ....). The FB subdirectories can be
+dynamically removed, too. Note that the Function Block numbers must be
+continuous.
+
+Similarly, if multiple UMP Endpoints are required, you can add more
+Endpoints by creating subdirectories "ep.<num>". The numbers must be
+continuous.
+
+To emulate an old MIDI 2.0 device without UMP v1.1 support, pass 0 to
+the `process_ump` flag. All UMP v1.1 requests are then ignored.
+
+Testing the MIDI2 function
+--------------------------
+
+On the device, run the gadget; then running::
+
+  $ cat /proc/asound/cards
+
+will show a new sound card containing a MIDI2 device.
+
+OTOH, on the host::
+
+  $ cat /proc/asound/cards
+
+will show a new sound card containing either a MIDI1 or a MIDI2 device,
+depending on the USB audio driver configuration.
+
+On both sides, when the ALSA sequencer is enabled, you can find a
+UMP MIDI client such as "MIDI 2.0 Gadget".
+
+As the driver simply loops back the data, there is no need for a real
+device just for testing.
+
+For testing a MIDI input from the gadget to the host (e.g. emulating a
+MIDI keyboard), you can send a MIDI stream like the following.
+
+On the gadget::
+
+  $ aconnect -o
+  ....
+ client 20: 'MIDI 2.0 Gadget' [type=kernel,card=1] + 0 'MIDI 2.0 ' + 1 'Group 1 (MIDI 2.0 Gadget I/O)' + $ aplaymidi -p 20:1 to_host.mid + +On the host:: + + $ aconnect -i + .... + client 24: 'MIDI 2.0 Gadget' [type=kernel,card=2] + 0 'MIDI 2.0 ' + 1 'Group 1 (MIDI 2.0 Gadget I/O)' + $ arecordmidi -p 24:1 from_gadget.mid + +If you have a UMP-capable application, you can use the UMP port to +send/receive the raw UMP packets, too. For example, aseqdump program +with UMP support can receive from UMP port. On the host:: + + $ aseqdump -u 2 -p 24:1 + Waiting for data. Press Ctrl+C to end. + Source Group Event Ch Data + 24:1 Group 0, Program change 0, program 0, Bank select 0:0 + 24:1 Group 0, Channel pressure 0, value 0x80000000 + +For testing a MIDI output to the gadget to the host (e.g. emulating a +MIDI synth), it'll be just other way round. + +On the gadget:: + + $ arecordmidi -p 20:1 from_host.mid + +On the host:: + + $ aplaymidi -p 24:1 to_gadget.mid + +The access to MIDI 1.0 on altset 0 on the host is supported, and it's +translated from/to UMP packets on the gadget. It's bound to only +Function Block 0. + +The current operation mode can be observed in ALSA control element +"Operation Mode" for SND_CTL_IFACE_RAWMIDI. For example:: + + $ amixer -c1 contents + numid=1,iface=RAWMIDI,name='Operation Mode' + ; type=INTEGER,access=r--v----,values=1,min=0,max=2,step=0 + : values=2 + +where 0 = unused, 1 = MIDI 1.0 (altset 0), 2 = MIDI 2.0 (altset 1). +The example above shows it's running in 2, i.e. MIDI 2.0. diff --git a/MAINTAINERS b/MAINTAINERS index 53b7ca804465..0903d87b17cb 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2339,7 +2339,7 @@ F: drivers/phy/mediatek/ ARM/MICROCHIP (ARM64) SoC support M: Conor Dooley <[email protected]> M: Nicolas Ferre <[email protected]> -M: Claudiu Beznea <[email protected]> +M: Claudiu Beznea <[email protected]> L: [email protected] (moderated for non-subscribers) S: Supported T: git https://git.kernel.org/pub/scm/linux/kernel/git/at91/linux.git @@ -2348,7 +2348,7 @@ F: arch/arm64/boot/dts/microchip/ ARM/Microchip (AT91) SoC support M: Nicolas Ferre <[email protected]> M: Alexandre Belloni <[email protected]> -M: Claudiu Beznea <[email protected]> +M: Claudiu Beznea <[email protected]> L: [email protected] (moderated for non-subscribers) S: Supported W: http://www.linux4sam.org @@ -3250,7 +3250,7 @@ F: include/uapi/linux/atm* ATMEL MACB ETHERNET DRIVER M: Nicolas Ferre <[email protected]> -M: Claudiu Beznea <[email protected]> +M: Claudiu Beznea <[email protected]> S: Supported F: drivers/net/ethernet/cadence/ @@ -3262,9 +3262,8 @@ F: Documentation/devicetree/bindings/input/atmel,maxtouch.yaml F: drivers/input/touchscreen/atmel_mxt_ts.c ATMEL WIRELESS DRIVER -M: Simon Kelley <[email protected]> -S: Maintained +S: Orphan W: http://www.thekelleys.org.uk/atmel W: http://atmelwlandriver.sourceforge.net/ F: drivers/net/wireless/atmel/atmel* @@ -3394,7 +3393,7 @@ F: drivers/media/radio/radio-aztech* B43 WIRELESS DRIVER -S: Odd Fixes +S: Orphan W: https://wireless.wiki.kernel.org/en/users/Drivers/b43 F: drivers/net/wireless/broadcom/b43/ @@ -5462,8 +5461,7 @@ F: Documentation/devicetree/bindings/net/can/ctu,ctucanfd.yaml F: drivers/net/can/ctucanfd/ CW1200 WLAN driver -M: Solomon Peachy <[email protected]> -S: Maintained +S: Orphan F: drivers/net/wireless/st/cw1200/ CX18 VIDEO4LINUX DRIVER @@ -9377,7 +9375,6 @@ F: drivers/crypto/hisilicon/sgl.c F: include/linux/hisi_acc_qm.h HISILICON ROCE DRIVER -M: Haoyue Xu <[email protected]> M: Junxian Huang <[email 
protected]> S: Maintained @@ -9662,6 +9659,7 @@ F: tools/hv/ HYPERBUS SUPPORT M: Vignesh Raghavendra <[email protected]> +R: Tudor Ambarus <[email protected]> S: Supported Q: http://patchwork.ozlabs.org/project/linux-mtd/list/ @@ -12481,6 +12479,7 @@ F: net/mctp/ MAPLE TREE M: Liam R. Howlett <[email protected]> S: Supported F: Documentation/core-api/maple_tree.rst @@ -12592,18 +12591,14 @@ F: Documentation/devicetree/bindings/net/marvell,pp2.yaml F: drivers/net/ethernet/marvell/mvpp2/ MARVELL MWIFIEX WIRELESS DRIVER -M: Amitkumar Karwar <[email protected]> -M: Ganapathi Bhat <[email protected]> -M: Sharvari Harisangam <[email protected]> -M: Xinming Hu <[email protected]> +M: Brian Norris <[email protected]> -S: Maintained +S: Odd Fixes F: drivers/net/wireless/marvell/mwifiex/ MARVELL MWL8K WIRELESS DRIVER -M: Lennert Buytenhek <[email protected]> -S: Odd Fixes +S: Orphan F: drivers/net/wireless/marvell/mwl8k.c MARVELL NAND CONTROLLER DRIVER @@ -13791,7 +13786,7 @@ F: Documentation/devicetree/bindings/serial/atmel,at91-usart.yaml F: drivers/spi/spi-at91-usart.c MICROCHIP AUDIO ASOC DRIVERS -M: Claudiu Beznea <[email protected]> +M: Claudiu Beznea <[email protected]> L: [email protected] (moderated for non-subscribers) S: Supported F: Documentation/devicetree/bindings/sound/atmel* @@ -13814,7 +13809,7 @@ S: Maintained F: drivers/crypto/atmel-ecc.* MICROCHIP EIC DRIVER -M: Claudiu Beznea <[email protected]> +M: Claudiu Beznea <[email protected]> L: [email protected] (moderated for non-subscribers) S: Supported F: Documentation/devicetree/bindings/interrupt-controller/microchip,sama7g5-eic.yaml @@ -13887,7 +13882,7 @@ F: drivers/video/fbdev/atmel_lcdfb.c F: include/video/atmel_lcdc.h MICROCHIP MCP16502 PMIC DRIVER -M: Claudiu Beznea <[email protected]> +M: Claudiu Beznea <[email protected]> L: [email protected] (moderated for non-subscribers) S: Supported F: Documentation/devicetree/bindings/regulator/mcp16502-regulator.txt @@ -13914,7 +13909,7 @@ F: Documentation/devicetree/bindings/mtd/atmel-nand.txt F: drivers/mtd/nand/raw/atmel/* MICROCHIP OTPC DRIVER -M: Claudiu Beznea <[email protected]> +M: Claudiu Beznea <[email protected]> L: [email protected] (moderated for non-subscribers) S: Supported F: Documentation/devicetree/bindings/nvmem/microchip,sama7g5-otpc.yaml @@ -13953,7 +13948,7 @@ F: Documentation/devicetree/bindings/fpga/microchip,mpf-spi-fpga-mgr.yaml F: drivers/fpga/microchip-spi.c MICROCHIP PWM DRIVER -M: Claudiu Beznea <[email protected]> +M: Claudiu Beznea <[email protected]> L: [email protected] (moderated for non-subscribers) S: Supported @@ -13969,7 +13964,7 @@ F: drivers/iio/adc/at91-sama5d2_adc.c F: include/dt-bindings/iio/adc/at91-sama5d2_adc.h MICROCHIP SAMA5D2-COMPATIBLE SHUTDOWN CONTROLLER -M: Claudiu Beznea <[email protected]> +M: Claudiu Beznea <[email protected]> S: Supported F: Documentation/devicetree/bindings/power/reset/atmel,sama5d2-shdwc.yaml F: drivers/power/reset/at91-sama5d2_shdwc.c @@ -13986,7 +13981,7 @@ S: Supported F: drivers/spi/spi-atmel.* MICROCHIP SSC DRIVER -M: Claudiu Beznea <[email protected]> +M: Claudiu Beznea <[email protected]> L: [email protected] (moderated for non-subscribers) S: Supported F: Documentation/devicetree/bindings/misc/atmel-ssc.txt @@ -14015,7 +14010,7 @@ F: drivers/usb/gadget/udc/atmel_usba_udc.* MICROCHIP WILC1000 WIFI DRIVER M: Ajay Singh <[email protected]> -M: Claudiu Beznea <[email protected]> +M: Claudiu Beznea <[email protected]> S: Supported F: drivers/net/wireless/microchip/wilc1000/ @@ -16298,6 +16293,7 @@ F: 
drivers/pci/controller/dwc/pci-exynos.c PCI DRIVER FOR SYNOPSYS DESIGNWARE M: Jingoo Han <[email protected]> M: Gustavo Pimentel <[email protected]> +M: Manivannan Sadhasivam <[email protected]> S: Maintained F: Documentation/devicetree/bindings/pci/snps,dw-pcie-ep.yaml @@ -17449,6 +17445,7 @@ F: drivers/media/tuners/qt1010* QUALCOMM ATH12K WIRELESS DRIVER M: Kalle Valo <[email protected]> +M: Jeff Johnson <[email protected]> S: Supported T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git @@ -17456,6 +17453,7 @@ F: drivers/net/wireless/ath/ath12k/ QUALCOMM ATHEROS ATH10K WIRELESS DRIVER M: Kalle Valo <[email protected]> +M: Jeff Johnson <[email protected]> S: Supported W: https://wireless.wiki.kernel.org/en/users/Drivers/ath10k @@ -17465,6 +17463,7 @@ F: drivers/net/wireless/ath/ath10k/ QUALCOMM ATHEROS ATH11K WIRELESS DRIVER M: Kalle Valo <[email protected]> +M: Jeff Johnson <[email protected]> S: Supported W: https://wireless.wiki.kernel.org/en/users/Drivers/ath11k @@ -17985,7 +17984,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.g F: drivers/net/wireless/realtek/rtlwifi/ REALTEK WIRELESS DRIVER (rtw88) -M: Yan-Hsuan Chuang <[email protected]> +M: Ping-Ke Shih <[email protected]> S: Maintained F: drivers/net/wireless/realtek/rtw88/ @@ -18510,17 +18509,14 @@ RTL8180 WIRELESS DRIVER S: Orphan W: https://wireless.wiki.kernel.org/ -T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git F: drivers/net/wireless/realtek/rtl818x/rtl8180/ RTL8187 WIRELESS DRIVER -M: Herton Ronaldo Krzesinski <[email protected]> -M: Hin-Tak Leung <[email protected]> +M: Hin-Tak Leung <[email protected]> M: Larry Finger <[email protected]> S: Maintained W: https://wireless.wiki.kernel.org/ -T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git F: drivers/net/wireless/realtek/rtl818x/rtl8187/ RTL8XXXU WIRELESS DRIVER (rtl8xxxu) @@ -20404,7 +20400,6 @@ F: drivers/pwm/pwm-stm32* F: include/linux/*/stm32-*tim* STMMAC ETHERNET DRIVER -M: Giuseppe Cavallaro <[email protected]> M: Alexandre Torgue <[email protected]> M: Jose Abreu <[email protected]> @@ -21681,11 +21676,14 @@ S: Orphan F: drivers/net/ethernet/dec/tulip/ TUN/TAP driver -M: Maxim Krasnyansky <[email protected]> +M: Willem de Bruijn <[email protected]> +M: Jason Wang <[email protected]> S: Maintained W: http://vtun.sourceforge.net/tun F: Documentation/networking/tuntap.rst F: arch/um/os-Linux/drivers/ +F: drivers/net/tap.c +F: drivers/net/tun.c TURBOCHANNEL SUBSYSTEM M: "Maciej W. 
Rozycki" <[email protected]> @@ -21908,9 +21906,8 @@ S: Maintained F: drivers/usb/misc/apple-mfi-fastcharge.c USB AR5523 WIRELESS DRIVER -M: Pontus Fuchs <[email protected]> -S: Maintained +S: Orphan F: drivers/net/wireless/ath/ar5523/ USB ATTACHED SCSI @@ -22187,9 +22184,8 @@ F: drivers/usb/gadget/legacy/webcam.c F: include/uapi/linux/usb/g_uvc.h USB WIRELESS RNDIS DRIVER (rndis_wlan) -M: Jussi Kivilinna <[email protected]> -S: Maintained +S: Orphan F: drivers/net/wireless/legacy/rndis_wlan.c USB XHCI DRIVER @@ -22964,7 +22960,7 @@ F: drivers/input/misc/wistron_btns.c WL3501 WIRELESS PCMCIA CARD DRIVER -S: Odd fixes +S: Orphan F: drivers/net/wireless/legacy/wl3501* WMI BINARY MOF DRIVER @@ -23535,11 +23531,8 @@ S: Maintained F: mm/zbud.c ZD1211RW WIRELESS DRIVER -M: Ulrich Kunitz <[email protected]> -L: [email protected] (subscribers-only) -S: Maintained -W: http://zd1211.ath.cx/wiki/DriverRewrite +S: Orphan F: drivers/net/wireless/zydas/zd1211rw/ ZD1301 MEDIA DRIVER @@ -2,7 +2,7 @@ VERSION = 6 PATCHLEVEL = 5 SUBLEVEL = 0 -EXTRAVERSION = -rc4 +EXTRAVERSION = -rc6 NAME = Hurr durr I'ma ninja sloth # *DOCUMENTATION* diff --git a/arch/alpha/include/asm/processor.h b/arch/alpha/include/asm/processor.h index 714abe494e5f..55bb1c09fd39 100644 --- a/arch/alpha/include/asm/processor.h +++ b/arch/alpha/include/asm/processor.h @@ -47,12 +47,6 @@ unsigned long __get_wchan(struct task_struct *p); #define ARCH_HAS_PREFETCH #define ARCH_HAS_PREFETCHW -#define ARCH_HAS_SPINLOCK_PREFETCH - -#ifndef CONFIG_SMP -/* Nothing to prefetch. */ -#define spin_lock_prefetch(lock) do { } while (0) -#endif extern inline void prefetch(const void *ptr) { @@ -64,11 +58,4 @@ extern inline void prefetchw(const void *ptr) __builtin_prefetch(ptr, 1, 3); } -#ifdef CONFIG_SMP -extern inline void spin_lock_prefetch(const void *ptr) -{ - __builtin_prefetch(ptr, 1, 3); -} -#endif - #endif /* __ASM_ALPHA_PROCESSOR_H */ diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c index b650ff1cb022..3d7473531ab1 100644 --- a/arch/alpha/kernel/setup.c +++ b/arch/alpha/kernel/setup.c @@ -385,8 +385,7 @@ setup_memory(void *kernel_end) #endif /* CONFIG_BLK_DEV_INITRD */ } -int __init -page_is_ram(unsigned long pfn) +int page_is_ram(unsigned long pfn) { struct memclust_struct * cluster; struct memdesc_struct * memdesc; diff --git a/arch/arm/boot/dts/microchip/sam9x60.dtsi b/arch/arm/boot/dts/microchip/sam9x60.dtsi index 8b53997675e7..73d570a17269 100644 --- a/arch/arm/boot/dts/microchip/sam9x60.dtsi +++ b/arch/arm/boot/dts/microchip/sam9x60.dtsi @@ -172,7 +172,7 @@ status = "disabled"; uart4: serial@200 { - compatible = "microchip,sam9x60-dbgu", "microchip,sam9x60-usart", "atmel,at91sam9260-dbgu", "atmel,at91sam9260-usart"; + compatible = "microchip,sam9x60-usart", "atmel,at91sam9260-usart"; reg = <0x200 0x200>; interrupts = <13 IRQ_TYPE_LEVEL_HIGH 7>; dmas = <&dma0 @@ -240,7 +240,7 @@ status = "disabled"; uart5: serial@200 { - compatible = "microchip,sam9x60-dbgu", "microchip,sam9x60-usart", "atmel,at91sam9260-dbgu", "atmel,at91sam9260-usart"; + compatible = "microchip,sam9x60-usart", "atmel,at91sam9260-usart"; reg = <0x200 0x200>; atmel,usart-mode = <AT91_USART_MODE_SERIAL>; interrupts = <14 IRQ_TYPE_LEVEL_HIGH 7>; @@ -370,7 +370,7 @@ status = "disabled"; uart11: serial@200 { - compatible = "microchip,sam9x60-dbgu", "microchip,sam9x60-usart", "atmel,at91sam9260-dbgu", "atmel,at91sam9260-usart"; + compatible = "microchip,sam9x60-usart", "atmel,at91sam9260-usart"; reg = <0x200 0x200>; interrupts = <32 IRQ_TYPE_LEVEL_HIGH 7>; 
dmas = <&dma0 @@ -419,7 +419,7 @@ status = "disabled"; uart12: serial@200 { - compatible = "microchip,sam9x60-dbgu", "microchip,sam9x60-usart", "atmel,at91sam9260-dbgu", "atmel,at91sam9260-usart"; + compatible = "microchip,sam9x60-usart", "atmel,at91sam9260-usart"; reg = <0x200 0x200>; interrupts = <33 IRQ_TYPE_LEVEL_HIGH 7>; dmas = <&dma0 @@ -576,7 +576,7 @@ status = "disabled"; uart6: serial@200 { - compatible = "microchip,sam9x60-dbgu", "microchip,sam9x60-usart", "atmel,at91sam9260-dbgu", "atmel,at91sam9260-usart"; + compatible = "microchip,sam9x60-usart", "atmel,at91sam9260-usart"; reg = <0x200 0x200>; interrupts = <9 IRQ_TYPE_LEVEL_HIGH 7>; dmas = <&dma0 @@ -625,7 +625,7 @@ status = "disabled"; uart7: serial@200 { - compatible = "microchip,sam9x60-dbgu", "microchip,sam9x60-usart", "atmel,at91sam9260-dbgu", "atmel,at91sam9260-usart"; + compatible = "microchip,sam9x60-usart", "atmel,at91sam9260-usart"; reg = <0x200 0x200>; interrupts = <10 IRQ_TYPE_LEVEL_HIGH 7>; dmas = <&dma0 @@ -674,7 +674,7 @@ status = "disabled"; uart8: serial@200 { - compatible = "microchip,sam9x60-dbgu", "microchip,sam9x60-usart", "atmel,at91sam9260-dbgu", "atmel,at91sam9260-usart"; + compatible = "microchip,sam9x60-usart", "atmel,at91sam9260-usart"; reg = <0x200 0x200>; interrupts = <11 IRQ_TYPE_LEVEL_HIGH 7>; dmas = <&dma0 @@ -723,7 +723,7 @@ status = "disabled"; uart0: serial@200 { - compatible = "microchip,sam9x60-dbgu", "microchip,sam9x60-usart", "atmel,at91sam9260-dbgu", "atmel,at91sam9260-usart"; + compatible = "microchip,sam9x60-usart", "atmel,at91sam9260-usart"; reg = <0x200 0x200>; interrupts = <5 IRQ_TYPE_LEVEL_HIGH 7>; dmas = <&dma0 @@ -791,7 +791,7 @@ status = "disabled"; uart1: serial@200 { - compatible = "microchip,sam9x60-dbgu", "microchip,sam9x60-usart", "atmel,at91sam9260-dbgu", "atmel,at91sam9260-usart"; + compatible = "microchip,sam9x60-usart", "atmel,at91sam9260-usart"; reg = <0x200 0x200>; interrupts = <6 IRQ_TYPE_LEVEL_HIGH 7>; dmas = <&dma0 @@ -859,7 +859,7 @@ status = "disabled"; uart2: serial@200 { - compatible = "microchip,sam9x60-dbgu", "microchip,sam9x60-usart", "atmel,at91sam9260-dbgu", "atmel,at91sam9260-usart"; + compatible = "microchip,sam9x60-usart", "atmel,at91sam9260-usart"; reg = <0x200 0x200>; interrupts = <7 IRQ_TYPE_LEVEL_HIGH 7>; dmas = <&dma0 @@ -927,7 +927,7 @@ status = "disabled"; uart3: serial@200 { - compatible = "microchip,sam9x60-dbgu", "microchip,sam9x60-usart", "atmel,at91sam9260-dbgu", "atmel,at91sam9260-usart"; + compatible = "microchip,sam9x60-usart", "atmel,at91sam9260-usart"; reg = <0x200 0x200>; interrupts = <8 IRQ_TYPE_LEVEL_HIGH 7>; dmas = <&dma0 @@ -1050,7 +1050,7 @@ status = "disabled"; uart9: serial@200 { - compatible = "microchip,sam9x60-dbgu", "microchip,sam9x60-usart", "atmel,at91sam9260-dbgu", "atmel,at91sam9260-usart"; + compatible = "microchip,sam9x60-usart", "atmel,at91sam9260-usart"; reg = <0x200 0x200>; interrupts = <15 IRQ_TYPE_LEVEL_HIGH 7>; dmas = <&dma0 @@ -1099,7 +1099,7 @@ status = "disabled"; uart10: serial@200 { - compatible = "microchip,sam9x60-dbgu", "microchip,sam9x60-usart", "atmel,at91sam9260-dbgu", "atmel,at91sam9260-usart"; + compatible = "microchip,sam9x60-usart", "atmel,at91sam9260-usart"; reg = <0x200 0x200>; interrupts = <16 IRQ_TYPE_LEVEL_HIGH 7>; dmas = <&dma0 diff --git a/arch/arm/boot/dts/nspire/nspire.dtsi b/arch/arm/boot/dts/nspire/nspire.dtsi index bb240e6a3a6f..088bcc38589f 100644 --- a/arch/arm/boot/dts/nspire/nspire.dtsi +++ b/arch/arm/boot/dts/nspire/nspire.dtsi @@ -161,7 +161,7 @@ }; watchdog: watchdog@90060000 { - 
compatible = "arm,amba-primecell"; + compatible = "arm,primecell"; reg = <0x90060000 0x1000>; interrupts = <3>; }; diff --git a/arch/arm/boot/dts/nxp/imx/imx53-sk-imx53.dts b/arch/arm/boot/dts/nxp/imx/imx53-sk-imx53.dts index 103e73176e47..1a00d290092a 100644 --- a/arch/arm/boot/dts/nxp/imx/imx53-sk-imx53.dts +++ b/arch/arm/boot/dts/nxp/imx/imx53-sk-imx53.dts @@ -60,6 +60,16 @@ status = "okay"; }; +&cpu0 { + /* CPU rated to 800 MHz, not the default 1.2GHz. */ + operating-points = < + /* kHz uV */ + 166666 850000 + 400000 900000 + 800000 1050000 + >; +}; + &ecspi1 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_ecspi1>; diff --git a/arch/arm/boot/dts/nxp/imx/imx6sll.dtsi b/arch/arm/boot/dts/nxp/imx/imx6sll.dtsi index 2873369a57c0..3659fd5ecfa6 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6sll.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx6sll.dtsi @@ -552,7 +552,7 @@ reg = <0x020ca000 0x1000>; interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clks IMX6SLL_CLK_USBPHY2>; - phy-reg_3p0-supply = <®_3p0>; + phy-3p0-supply = <®_3p0>; fsl,anatop = <&anatop>; }; diff --git a/arch/arm/mach-pxa/sharpsl_pm.h b/arch/arm/mach-pxa/sharpsl_pm.h index 20e4cab64d85..623167f30ec2 100644 --- a/arch/arm/mach-pxa/sharpsl_pm.h +++ b/arch/arm/mach-pxa/sharpsl_pm.h @@ -105,5 +105,4 @@ void sharpsl_pm_led(int val); #define MAX1111_ACIN_VOLT 6u int sharpsl_pm_pxa_read_max1111(int channel); -void corgi_lcd_limit_intensity(int limit); #endif diff --git a/arch/arm/mach-pxa/spitz_pm.c b/arch/arm/mach-pxa/spitz_pm.c index 1c021cef965f..8bc4ea51a0c1 100644 --- a/arch/arm/mach-pxa/spitz_pm.c +++ b/arch/arm/mach-pxa/spitz_pm.c @@ -15,6 +15,7 @@ #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/apm-emulation.h> +#include <linux/spi/corgi_lcd.h> #include <asm/irq.h> #include <asm/mach-types.h> diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts index 38ae674f2f02..3037f58057c9 100644 --- a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts +++ b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts @@ -145,7 +145,7 @@ status = "okay"; clock-frequency = <100000>; i2c-sda-falling-time-ns = <890>; /* hcnt */ - i2c-sdl-falling-time-ns = <890>; /* lcnt */ + i2c-scl-falling-time-ns = <890>; /* lcnt */ pinctrl-names = "default", "gpio"; pinctrl-0 = <&i2c1_pmx_func>; diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts index ede99dcc0558..f4cf30bac557 100644 --- a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts +++ b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts @@ -141,7 +141,7 @@ status = "okay"; clock-frequency = <100000>; i2c-sda-falling-time-ns = <890>; /* hcnt */ - i2c-sdl-falling-time-ns = <890>; /* lcnt */ + i2c-scl-falling-time-ns = <890>; /* lcnt */ adc@14 { compatible = "lltc,ltc2497"; diff --git a/arch/arm64/boot/dts/arm/vexpress-v2m-rs1.dtsi b/arch/arm64/boot/dts/arm/vexpress-v2m-rs1.dtsi deleted file mode 120000 index 68fd0f8f1dee..000000000000 --- a/arch/arm64/boot/dts/arm/vexpress-v2m-rs1.dtsi +++ /dev/null @@ -1 +0,0 @@ -../../../../arm/boot/dts/vexpress-v2m-rs1.dtsi
\ No newline at end of file diff --git a/arch/arm64/boot/dts/freescale/imx8mm-phyboard-polis-rdk.dts b/arch/arm64/boot/dts/freescale/imx8mm-phyboard-polis-rdk.dts index 03e7679217b2..479948f8a4b7 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-phyboard-polis-rdk.dts +++ b/arch/arm64/boot/dts/freescale/imx8mm-phyboard-polis-rdk.dts @@ -141,7 +141,7 @@ }; &gpio1 { - gpio-line-names = "nINT_ETHPHY", "LED_RED", "WDOG_INT", "X_RTC_INT", + gpio-line-names = "", "LED_RED", "WDOG_INT", "X_RTC_INT", "", "", "", "RESET_ETHPHY", "CAN_nINT", "CAN_EN", "nENABLE_FLATLINK", "", "USB_OTG_VBUS_EN", "", "LED_GREEN", "LED_BLUE"; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-phycore-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-phycore-som.dtsi index 92616bc4f71f..847f08537b48 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-phycore-som.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mm-phycore-som.dtsi @@ -111,7 +111,7 @@ }; &gpio1 { - gpio-line-names = "nINT_ETHPHY", "", "WDOG_INT", "X_RTC_INT", + gpio-line-names = "", "", "WDOG_INT", "X_RTC_INT", "", "", "", "RESET_ETHPHY", "", "", "nENABLE_FLATLINK"; }; @@ -210,7 +210,7 @@ }; }; - reg_vdd_gpu: buck3 { + reg_vdd_vpu: buck3 { regulator-always-on; regulator-boot-on; regulator-max-microvolt = <1000000>; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7903.dts b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7903.dts index 6f26914602c8..07b07dc954fd 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7903.dts +++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7903.dts @@ -567,6 +567,10 @@ status = "okay"; }; +&disp_blk_ctrl { + status = "disabled"; +}; + &pgc_mipi { status = "disabled"; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7904.dts b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7904.dts index 93088fa1c3b9..d5b716855812 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7904.dts +++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7904.dts @@ -628,6 +628,10 @@ status = "okay"; }; +&disp_blk_ctrl { + status = "disabled"; +}; + &pgc_mipi { status = "disabled"; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi index d3a67109d55b..b8946edf317b 100644 --- a/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi @@ -358,7 +358,7 @@ MX8MN_IOMUXC_ENET_RXC_ENET1_RGMII_RXC 0x91 MX8MN_IOMUXC_ENET_RX_CTL_ENET1_RGMII_RX_CTL 0x91 MX8MN_IOMUXC_ENET_TX_CTL_ENET1_RGMII_TX_CTL 0x1f - MX8MN_IOMUXC_GPIO1_IO09_GPIO1_IO9 0x19 + MX8MN_IOMUXC_GPIO1_IO09_GPIO1_IO9 0x159 >; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi index 1a2d2c04db32..01eec424f7f7 100644 --- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi @@ -772,7 +772,7 @@ <&clk IMX8MQ_SYS1_PLL_800M>, <&clk IMX8MQ_VPU_PLL>; assigned-clock-rates = <600000000>, - <600000000>, + <300000000>, <800000000>, <0>; }; diff --git a/arch/arm64/boot/dts/renesas/r9a07g044.dtsi b/arch/arm64/boot/dts/renesas/r9a07g044.dtsi index 232910e07444..66f68fc2b241 100644 --- a/arch/arm64/boot/dts/renesas/r9a07g044.dtsi +++ b/arch/arm64/boot/dts/renesas/r9a07g044.dtsi @@ -223,20 +223,20 @@ <GIC_SPI 212 IRQ_TYPE_EDGE_RISING>, <GIC_SPI 213 IRQ_TYPE_EDGE_RISING>; interrupt-names = "tgia0", "tgib0", "tgic0", "tgid0", - "tgiv0", "tgie0", "tgif0", - "tgia1", "tgib1", "tgiv1", "tgiu1", - "tgia2", "tgib2", "tgiv2", "tgiu2", + "tciv0", "tgie0", "tgif0", + "tgia1", "tgib1", "tciv1", "tciu1", + 
"tgia2", "tgib2", "tciv2", "tciu2", "tgia3", "tgib3", "tgic3", "tgid3", - "tgiv3", + "tciv3", "tgia4", "tgib4", "tgic4", "tgid4", - "tgiv4", + "tciv4", "tgiu5", "tgiv5", "tgiw5", "tgia6", "tgib6", "tgic6", "tgid6", - "tgiv6", + "tciv6", "tgia7", "tgib7", "tgic7", "tgid7", - "tgiv7", + "tciv7", "tgia8", "tgib8", "tgic8", "tgid8", - "tgiv8", "tgiu8"; + "tciv8", "tciu8"; clocks = <&cpg CPG_MOD R9A07G044_MTU_X_MCK_MTU3>; power-domains = <&cpg>; resets = <&cpg R9A07G044_MTU_X_PRESET_MTU3>; diff --git a/arch/arm64/boot/dts/renesas/r9a07g054.dtsi b/arch/arm64/boot/dts/renesas/r9a07g054.dtsi index 2eba3a8a100d..1f1d481dc783 100644 --- a/arch/arm64/boot/dts/renesas/r9a07g054.dtsi +++ b/arch/arm64/boot/dts/renesas/r9a07g054.dtsi @@ -223,20 +223,20 @@ <GIC_SPI 212 IRQ_TYPE_EDGE_RISING>, <GIC_SPI 213 IRQ_TYPE_EDGE_RISING>; interrupt-names = "tgia0", "tgib0", "tgic0", "tgid0", - "tgiv0", "tgie0", "tgif0", - "tgia1", "tgib1", "tgiv1", "tgiu1", - "tgia2", "tgib2", "tgiv2", "tgiu2", + "tciv0", "tgie0", "tgif0", + "tgia1", "tgib1", "tciv1", "tciu1", + "tgia2", "tgib2", "tciv2", "tciu2", "tgia3", "tgib3", "tgic3", "tgid3", - "tgiv3", + "tciv3", "tgia4", "tgib4", "tgic4", "tgid4", - "tgiv4", + "tciv4", "tgiu5", "tgiv5", "tgiw5", "tgia6", "tgib6", "tgic6", "tgid6", - "tgiv6", + "tciv6", "tgia7", "tgib7", "tgic7", "tgid7", - "tgiv7", + "tciv7", "tgia8", "tgib8", "tgic8", "tgid8", - "tgiv8", "tgiu8"; + "tciv8", "tciu8"; clocks = <&cpg CPG_MOD R9A07G054_MTU_X_MCK_MTU3>; power-domains = <&cpg>; resets = <&cpg R9A07G054_MTU_X_PRESET_MTU3>; diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h index 8e5ffb58f83e..b7afaa026842 100644 --- a/arch/arm64/include/asm/el2_setup.h +++ b/arch/arm64/include/asm/el2_setup.h @@ -31,6 +31,13 @@ .Lskip_hcrx_\@: .endm +/* Check if running in host at EL2 mode, i.e., (h)VHE. Jump to fail if not. */ +.macro __check_hvhe fail, tmp + mrs \tmp, hcr_el2 + and \tmp, \tmp, #HCR_E2H + cbz \tmp, \fail +.endm + /* * Allow Non-secure EL1 and EL0 to access physical timer and counter. * This is not necessary for VHE, since the host kernel runs in EL2, @@ -43,9 +50,7 @@ */ .macro __init_el2_timers mov x0, #3 // Enable EL1 physical timers - mrs x1, hcr_el2 - and x1, x1, #HCR_E2H - cbz x1, .LnVHE_\@ + __check_hvhe .LnVHE_\@, x1 lsl x0, x0, #10 .LnVHE_\@: msr cnthctl_el2, x0 @@ -139,15 +144,14 @@ /* Coprocessor traps */ .macro __init_el2_cptr - mrs x1, hcr_el2 - and x1, x1, #HCR_E2H - cbz x1, .LnVHE_\@ + __check_hvhe .LnVHE_\@, x1 mov x0, #(CPACR_EL1_FPEN_EL1EN | CPACR_EL1_FPEN_EL0EN) - b .Lset_cptr_\@ + msr cpacr_el1, x0 + b .Lskip_set_cptr_\@ .LnVHE_\@: mov x0, #0x33ff -.Lset_cptr_\@: msr cptr_el2, x0 // Disable copro. traps to EL2 +.Lskip_set_cptr_\@: .endm /* Disable any fine grained traps */ @@ -268,19 +272,19 @@ check_override id_aa64pfr0, ID_AA64PFR0_EL1_SVE_SHIFT, .Linit_sve_\@, .Lskip_sve_\@, x1, x2 .Linit_sve_\@: /* SVE register access */ - mrs x0, cptr_el2 // Disable SVE traps - mrs x1, hcr_el2 - and x1, x1, #HCR_E2H - cbz x1, .Lcptr_nvhe_\@ + __check_hvhe .Lcptr_nvhe_\@, x1 - // VHE case + // (h)VHE case + mrs x0, cpacr_el1 // Disable SVE traps orr x0, x0, #(CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN) - b .Lset_cptr_\@ + msr cpacr_el1, x0 + b .Lskip_set_cptr_\@ .Lcptr_nvhe_\@: // nVHE case + mrs x0, cptr_el2 // Disable SVE traps bic x0, x0, #CPTR_EL2_TZ -.Lset_cptr_\@: msr cptr_el2, x0 +.Lskip_set_cptr_\@: isb mov x1, #ZCR_ELx_LEN_MASK // SVE: Enable full vector msr_s SYS_ZCR_EL2, x1 // length for EL1. 
@@ -289,9 +293,19 @@ check_override id_aa64pfr1, ID_AA64PFR1_EL1_SME_SHIFT, .Linit_sme_\@, .Lskip_sme_\@, x1, x2 .Linit_sme_\@: /* SME register access and priority mapping */ + __check_hvhe .Lcptr_nvhe_sme_\@, x1 + + // (h)VHE case + mrs x0, cpacr_el1 // Disable SME traps + orr x0, x0, #(CPACR_EL1_SMEN_EL0EN | CPACR_EL1_SMEN_EL1EN) + msr cpacr_el1, x0 + b .Lskip_set_cptr_sme_\@ + +.Lcptr_nvhe_sme_\@: // nVHE case mrs x0, cptr_el2 // Disable SME traps bic x0, x0, #CPTR_EL2_TSM msr cptr_el2, x0 +.Lskip_set_cptr_sme_\@: isb mrs x1, sctlr_el2 diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 7d170aaa2db4..24e28bb2d95b 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -278,7 +278,7 @@ asmlinkage void __noreturn hyp_panic_bad_stack(void); asmlinkage void kvm_unexpected_el2_exception(void); struct kvm_cpu_context; void handle_trap(struct kvm_cpu_context *host_ctxt); -asmlinkage void __noreturn kvm_host_psci_cpu_entry(bool is_cpu_on); +asmlinkage void __noreturn __kvm_host_psci_cpu_entry(bool is_cpu_on); void __noreturn __pkvm_init_finalise(void); void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc); void kvm_patch_vector_branch(struct alt_instr *alt, diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index efc0b45d79c3..3d6725ff0bf6 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -571,6 +571,14 @@ static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature) return test_bit(feature, vcpu->arch.features); } +static __always_inline void kvm_write_cptr_el2(u64 val) +{ + if (has_vhe() || has_hvhe()) + write_sysreg(val, cpacr_el1); + else + write_sysreg(val, cptr_el2); +} + static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu) { u64 val; @@ -578,8 +586,16 @@ static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu) if (has_vhe()) { val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN | CPACR_EL1_ZEN_EL1EN); + if (cpus_have_final_cap(ARM64_SME)) + val |= CPACR_EL1_SMEN_EL1EN; } else if (has_hvhe()) { val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN); + + if (!vcpu_has_sve(vcpu) || + (vcpu->arch.fp_state != FP_STATE_GUEST_OWNED)) + val |= CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN; + if (cpus_have_final_cap(ARM64_SME)) + val |= CPACR_EL1_SMEN_EL1EN | CPACR_EL1_SMEN_EL0EN; } else { val = CPTR_NVHE_EL2_RES1; @@ -597,9 +613,6 @@ static __always_inline void kvm_reset_cptr_el2(struct kvm_vcpu *vcpu) { u64 val = kvm_get_reset_cptr_el2(vcpu); - if (has_vhe() || has_hvhe()) - write_sysreg(val, cpacr_el1); - else - write_sysreg(val, cptr_el2); + kvm_write_cptr_el2(val); } #endif /* __ARM64_KVM_EMULATE_H__ */ diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 3918f2a67970..e5bc54522e71 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -359,14 +359,6 @@ static inline void prefetchw(const void *ptr) asm volatile("prfm pstl1keep, %a0\n" : : "p" (ptr)); } -#define ARCH_HAS_SPINLOCK_PREFETCH -static inline void spin_lock_prefetch(const void *ptr) -{ - asm volatile(ARM64_LSE_ATOMIC_INSN( - "prfm pstl1strm, %a0", - "nop") : : "p" (ptr)); -} - extern unsigned long __ro_after_init signal_minsigstksz; /* sigframe size */ extern void __init minsigstksz_setup(void); diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 520b681a07bb..75c37b1c55aa 100644 --- a/arch/arm64/kernel/fpsimd.c +++ 
b/arch/arm64/kernel/fpsimd.c @@ -679,7 +679,7 @@ static void fpsimd_to_sve(struct task_struct *task) void *sst = task->thread.sve_state; struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state; - if (!system_supports_sve()) + if (!system_supports_sve() && !system_supports_sme()) return; vq = sve_vq_from_vl(thread_get_cur_vl(&task->thread)); @@ -705,7 +705,7 @@ static void sve_to_fpsimd(struct task_struct *task) unsigned int i; __uint128_t const *p; - if (!system_supports_sve()) + if (!system_supports_sve() && !system_supports_sme()) return; vl = thread_get_cur_vl(&task->thread); @@ -835,7 +835,8 @@ void sve_sync_from_fpsimd_zeropad(struct task_struct *task) void *sst = task->thread.sve_state; struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state; - if (!test_tsk_thread_flag(task, TIF_SVE)) + if (!test_tsk_thread_flag(task, TIF_SVE) && + !thread_sm_enabled(&task->thread)) return; vq = sve_vq_from_vl(thread_get_cur_vl(&task->thread)); @@ -909,7 +910,7 @@ int vec_set_vector_length(struct task_struct *task, enum vec_type type, */ task->thread.svcr &= ~(SVCR_SM_MASK | SVCR_ZA_MASK); - clear_thread_flag(TIF_SME); + clear_tsk_thread_flag(task, TIF_SME); free_sme = true; } } diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index d7f4f0d1ae12..5b9b4305248b 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -932,11 +932,13 @@ static int sve_set_common(struct task_struct *target, /* * Ensure target->thread.sve_state is up to date with target's * FPSIMD regs, so that a short copyin leaves trailing - * registers unmodified. Always enable SVE even if going into - * streaming mode. + * registers unmodified. Only enable SVE if we are + * configuring normal SVE, a system with streaming SVE may not + * have normal SVE. */ fpsimd_sync_to_sve(target); - set_tsk_thread_flag(target, TIF_SVE); + if (type == ARM64_VEC_SVE) + set_tsk_thread_flag(target, TIF_SVE); target->thread.fp_type = FP_STATE_SVE; BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header)); @@ -1180,6 +1182,8 @@ static int zt_set(struct task_struct *target, if (ret == 0) target->thread.svcr |= SVCR_ZA_MASK; + fpsimd_flush_task_state(target); + return ret; } diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 72dc53a75d1c..d1cb298a58a0 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -55,7 +55,7 @@ DECLARE_KVM_NVHE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt); static bool vgic_present, kvm_arm_initialised; -static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled); +static DEFINE_PER_CPU(unsigned char, kvm_hyp_initialized); DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use); bool is_kvm_arm_initialised(void) @@ -1864,18 +1864,24 @@ static void cpu_hyp_reinit(void) cpu_hyp_init_features(); } -static void _kvm_arch_hardware_enable(void *discard) +static void cpu_hyp_init(void *discard) { - if (!__this_cpu_read(kvm_arm_hardware_enabled)) { + if (!__this_cpu_read(kvm_hyp_initialized)) { cpu_hyp_reinit(); - __this_cpu_write(kvm_arm_hardware_enabled, 1); + __this_cpu_write(kvm_hyp_initialized, 1); } } -int kvm_arch_hardware_enable(void) +static void cpu_hyp_uninit(void *discard) { - int was_enabled; + if (__this_cpu_read(kvm_hyp_initialized)) { + cpu_hyp_reset(); + __this_cpu_write(kvm_hyp_initialized, 0); + } +} +int kvm_arch_hardware_enable(void) +{ /* * Most calls to this function are made with migration * disabled, but not with preemption disabled. 
The former is @@ -1884,36 +1890,23 @@ int kvm_arch_hardware_enable(void) */ preempt_disable(); - was_enabled = __this_cpu_read(kvm_arm_hardware_enabled); - _kvm_arch_hardware_enable(NULL); + cpu_hyp_init(NULL); - if (!was_enabled) { - kvm_vgic_cpu_up(); - kvm_timer_cpu_up(); - } + kvm_vgic_cpu_up(); + kvm_timer_cpu_up(); preempt_enable(); return 0; } -static void _kvm_arch_hardware_disable(void *discard) -{ - if (__this_cpu_read(kvm_arm_hardware_enabled)) { - cpu_hyp_reset(); - __this_cpu_write(kvm_arm_hardware_enabled, 0); - } -} - void kvm_arch_hardware_disable(void) { - if (__this_cpu_read(kvm_arm_hardware_enabled)) { - kvm_timer_cpu_down(); - kvm_vgic_cpu_down(); - } + kvm_timer_cpu_down(); + kvm_vgic_cpu_down(); if (!is_protected_kvm_enabled()) - _kvm_arch_hardware_disable(NULL); + cpu_hyp_uninit(NULL); } #ifdef CONFIG_CPU_PM @@ -1922,16 +1915,16 @@ static int hyp_init_cpu_pm_notifier(struct notifier_block *self, void *v) { /* - * kvm_arm_hardware_enabled is left with its old value over + * kvm_hyp_initialized is left with its old value over * PM_ENTER->PM_EXIT. It is used to indicate PM_EXIT should * re-enable hyp. */ switch (cmd) { case CPU_PM_ENTER: - if (__this_cpu_read(kvm_arm_hardware_enabled)) + if (__this_cpu_read(kvm_hyp_initialized)) /* - * don't update kvm_arm_hardware_enabled here - * so that the hardware will be re-enabled + * don't update kvm_hyp_initialized here + * so that the hyp will be re-enabled * when we resume. See below. */ cpu_hyp_reset(); @@ -1939,8 +1932,8 @@ static int hyp_init_cpu_pm_notifier(struct notifier_block *self, return NOTIFY_OK; case CPU_PM_ENTER_FAILED: case CPU_PM_EXIT: - if (__this_cpu_read(kvm_arm_hardware_enabled)) - /* The hardware was enabled before suspend. */ + if (__this_cpu_read(kvm_hyp_initialized)) + /* The hyp was enabled before suspend. */ cpu_hyp_reinit(); return NOTIFY_OK; @@ -2021,7 +2014,7 @@ static int __init init_subsystems(void) /* * Enable hardware so that subsystem initialisation can access EL2. */ - on_each_cpu(_kvm_arch_hardware_enable, NULL, 1); + on_each_cpu(cpu_hyp_init, NULL, 1); /* * Register CPU lower-power notifier @@ -2059,7 +2052,7 @@ out: hyp_cpu_pm_exit(); if (err || !is_protected_kvm_enabled()) - on_each_cpu(_kvm_arch_hardware_disable, NULL, 1); + on_each_cpu(cpu_hyp_uninit, NULL, 1); return err; } @@ -2097,7 +2090,7 @@ static int __init do_pkvm_init(u32 hyp_va_bits) * The stub hypercalls are now disabled, so set our local flag to * prevent a later re-init attempt in kvm_arch_hardware_enable(). */ - __this_cpu_write(kvm_arm_hardware_enabled, 1); + __this_cpu_write(kvm_hyp_initialized, 1); preempt_enable(); return ret; diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index 4bddb8541bec..34f222af6165 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -457,6 +457,7 @@ static bool handle_ampere1_tcr(struct kvm_vcpu *vcpu) */ val &= ~(TCR_HD | TCR_HA); write_sysreg_el1(val, SYS_TCR); + __kvm_skip_instr(vcpu); return true; } diff --git a/arch/arm64/kvm/hyp/nvhe/ffa.c b/arch/arm64/kvm/hyp/nvhe/ffa.c index 58dcd92bf346..ab4f5d160c58 100644 --- a/arch/arm64/kvm/hyp/nvhe/ffa.c +++ b/arch/arm64/kvm/hyp/nvhe/ffa.c @@ -705,7 +705,20 @@ int hyp_ffa_init(void *pages) if (res.a0 == FFA_RET_NOT_SUPPORTED) return 0; - if (res.a0 != FFA_VERSION_1_0) + /* + * Firmware returns the maximum supported version of the FF-A + * implementation. 
Check that the returned version is + * backwards-compatible with the hyp according to the rules in DEN0077A + * v1.1 REL0 13.2.1. + * + * Of course, things are never simple when dealing with firmware. v1.1 + * broke ABI with v1.0 on several structures, which is itself + * incompatible with the aforementioned versioning scheme. The + * expectation is that v1.x implementations that do not support the v1.0 + * ABI return NOT_SUPPORTED rather than a version number, according to + * DEN0077A v1.1 REL0 18.6.4. + */ + if (FFA_MAJOR_VERSION(res.a0) != 1) return -EOPNOTSUPP; arm_smccc_1_1_smc(FFA_ID_GET, 0, 0, 0, 0, 0, 0, 0, &res); diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index 0a6271052def..e89a23153e85 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -63,7 +63,7 @@ static void __activate_traps(struct kvm_vcpu *vcpu) __activate_traps_fpsimd32(vcpu); } - write_sysreg(val, cptr_el2); + kvm_write_cptr_el2(val); write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2); if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) { diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h index d1978e004054..47e3801b526a 100644 --- a/arch/ia64/include/asm/processor.h +++ b/arch/ia64/include/asm/processor.h @@ -634,7 +634,6 @@ ia64_imva (void *addr) #define ARCH_HAS_PREFETCH #define ARCH_HAS_PREFETCHW -#define ARCH_HAS_SPINLOCK_PREFETCH #define PREFETCH_STRIDE L1_CACHE_BYTES static inline void @@ -649,8 +648,6 @@ prefetchw (const void *x) ia64_lfetch_excl(ia64_lfhint_none, x); } -#define spin_lock_prefetch(x) prefetchw(x) - extern unsigned long boot_option_idle_override; enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_FORCE_MWAIT, diff --git a/arch/mips/cavium-octeon/Makefile b/arch/mips/cavium-octeon/Makefile index 7c02e542959a..2a5926578841 100644 --- a/arch/mips/cavium-octeon/Makefile +++ b/arch/mips/cavium-octeon/Makefile @@ -18,4 +18,3 @@ obj-y += crypto/ obj-$(CONFIG_MTD) += flash_setup.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_OCTEON_ILM) += oct_ilm.o -obj-$(CONFIG_USB) += octeon-usb.o diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c index ce05c0dd3acd..235c77ce7b18 100644 --- a/arch/mips/cavium-octeon/octeon-platform.c +++ b/arch/mips/cavium-octeon/octeon-platform.c @@ -450,7 +450,6 @@ static const struct of_device_id octeon_ids[] __initconst = { { .compatible = "cavium,octeon-3860-bootbus", }, { .compatible = "cavium,mdio-mux", }, { .compatible = "gpio-leds", }, - { .compatible = "cavium,octeon-7130-usb-uctl", }, {}, }; diff --git a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h index 9151dcd9d0d5..af9cea21c853 100644 --- a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h +++ b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h @@ -58,8 +58,6 @@ #define cpu_has_rixi (cpu_data[0].cputype != CPU_CAVIUM_OCTEON) -#define ARCH_HAS_SPINLOCK_PREFETCH 1 -#define spin_lock_prefetch(x) prefetch(x) #define PREFETCH_STRIDE 128 #ifdef __OCTEON__ diff --git a/arch/parisc/Kconfig.debug b/arch/parisc/Kconfig.debug index 1401e4c5fe5f..bf2b21b96f0b 100644 --- a/arch/parisc/Kconfig.debug +++ b/arch/parisc/Kconfig.debug @@ -2,7 +2,7 @@ # config LIGHTWEIGHT_SPINLOCK_CHECK bool "Enable lightweight spinlock checks" - depends on SMP && !DEBUG_SPINLOCK + depends on DEBUG_KERNEL && SMP && !DEBUG_SPINLOCK default y help Add checks with low 
performance impact to the spinlock functions diff --git a/arch/parisc/boot/compressed/misc.c b/arch/parisc/boot/compressed/misc.c index 7ee49f5881d1..d389359e22ac 100644 --- a/arch/parisc/boot/compressed/misc.c +++ b/arch/parisc/boot/compressed/misc.c @@ -117,7 +117,7 @@ char *strchr(const char *s, int c) return NULL; } -int puts(const char *s) +static int puts(const char *s) { const char *nuline = s; @@ -172,7 +172,7 @@ static int print_num(unsigned long num, int base) return 0; } -int printf(const char *fmt, ...) +static int printf(const char *fmt, ...) { va_list args; int i = 0; @@ -204,13 +204,13 @@ void abort(void) } #undef malloc -void *malloc(size_t size) +static void *malloc(size_t size) { return malloc_gzip(size); } #undef free -void free(void *ptr) +static void free(void *ptr) { return free_gzip(ptr); } @@ -278,7 +278,7 @@ static void parse_elf(void *output) free(phdrs); } -unsigned long decompress_kernel(unsigned int started_wide, +asmlinkage unsigned long __visible decompress_kernel(unsigned int started_wide, unsigned int command_line, const unsigned int rd_start, const unsigned int rd_end) diff --git a/arch/parisc/include/asm/dma.h b/arch/parisc/include/asm/dma.h index 9e8c101de902..582fb5d1a5d5 100644 --- a/arch/parisc/include/asm/dma.h +++ b/arch/parisc/include/asm/dma.h @@ -14,6 +14,8 @@ #define dma_outb outb #define dma_inb inb +extern unsigned long pcxl_dma_start; + /* ** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up ** (or rather not merge) DMAs into manageable chunks. diff --git a/arch/parisc/include/asm/ftrace.h b/arch/parisc/include/asm/ftrace.h index a7cf0d05ccf4..f1cc1ee3a647 100644 --- a/arch/parisc/include/asm/ftrace.h +++ b/arch/parisc/include/asm/ftrace.h @@ -12,6 +12,10 @@ extern void mcount(void); extern unsigned long sys_call_table[]; extern unsigned long return_address(unsigned int); +struct ftrace_regs; +extern void ftrace_function_trampoline(unsigned long parent, + unsigned long self_addr, unsigned long org_sp_gr3, + struct ftrace_regs *fregs); #ifdef CONFIG_DYNAMIC_FTRACE extern void ftrace_caller(void); diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h index edfcb9858bcb..0b326e52255e 100644 --- a/arch/parisc/include/asm/spinlock.h +++ b/arch/parisc/include/asm/spinlock.h @@ -7,8 +7,6 @@ #include <asm/processor.h> #include <asm/spinlock_types.h> -#define SPINLOCK_BREAK_INSN 0x0000c006 /* break 6,6 */ - static inline void arch_spin_val_check(int lock_val) { if (IS_ENABLED(CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK)) diff --git a/arch/parisc/include/asm/spinlock_types.h b/arch/parisc/include/asm/spinlock_types.h index d65934079ebd..efd06a897c6a 100644 --- a/arch/parisc/include/asm/spinlock_types.h +++ b/arch/parisc/include/asm/spinlock_types.h @@ -4,6 +4,10 @@ #define __ARCH_SPIN_LOCK_UNLOCKED_VAL 0x1a46 +#define SPINLOCK_BREAK_INSN 0x0000c006 /* break 6,6 */ + +#ifndef __ASSEMBLY__ + typedef struct { #ifdef CONFIG_PA20 volatile unsigned int slock; @@ -27,6 +31,8 @@ typedef struct { volatile unsigned int counter; } arch_rwlock_t; +#endif /* __ASSEMBLY__ */ + #define __ARCH_RW_LOCK_UNLOCKED__ 0x01000000 #define __ARCH_RW_LOCK_UNLOCKED { .lock_mutex = __ARCH_SPIN_LOCK_UNLOCKED, \ .counter = __ARCH_RW_LOCK_UNLOCKED__ } diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c index 6d1c781eb1db..8f37e75f2fb9 100644 --- a/arch/parisc/kernel/firmware.c +++ b/arch/parisc/kernel/firmware.c @@ -74,8 +74,8 @@ static DEFINE_SPINLOCK(pdc_lock); #endif -unsigned long pdc_result[NUM_PDC_RESULT] __aligned(8); 
-unsigned long pdc_result2[NUM_PDC_RESULT] __aligned(8); +static unsigned long pdc_result[NUM_PDC_RESULT] __aligned(8); +static unsigned long pdc_result2[NUM_PDC_RESULT] __aligned(8); #ifdef CONFIG_64BIT #define WIDE_FIRMWARE 0x1 @@ -334,7 +334,7 @@ int __pdc_cpu_rendezvous(void) /** * pdc_cpu_rendezvous_lock - Lock PDC while transitioning to rendezvous state */ -void pdc_cpu_rendezvous_lock(void) +void pdc_cpu_rendezvous_lock(void) __acquires(&pdc_lock) { spin_lock(&pdc_lock); } @@ -342,7 +342,7 @@ void pdc_cpu_rendezvous_lock(void) /** * pdc_cpu_rendezvous_unlock - Unlock PDC after reaching rendezvous state */ -void pdc_cpu_rendezvous_unlock(void) +void pdc_cpu_rendezvous_unlock(void) __releases(&pdc_lock) { spin_unlock(&pdc_lock); } diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c index 4d392e4ed358..d1defb9ede70 100644 --- a/arch/parisc/kernel/ftrace.c +++ b/arch/parisc/kernel/ftrace.c @@ -53,7 +53,7 @@ static void __hot prepare_ftrace_return(unsigned long *parent, static ftrace_func_t ftrace_func; -void notrace __hot ftrace_function_trampoline(unsigned long parent, +asmlinkage void notrace __hot ftrace_function_trampoline(unsigned long parent, unsigned long self_addr, unsigned long org_sp_gr3, struct ftrace_regs *fregs) diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c index 00297e8e1c88..6f0c92e8149d 100644 --- a/arch/parisc/kernel/parisc_ksyms.c +++ b/arch/parisc/kernel/parisc_ksyms.c @@ -14,6 +14,7 @@ #include <linux/module.h> #include <linux/kernel.h> #include <linux/syscalls.h> +#include <linux/libgcc.h> #include <linux/string.h> EXPORT_SYMBOL(memset); @@ -92,12 +93,6 @@ EXPORT_SYMBOL($$divI_12); EXPORT_SYMBOL($$divI_14); EXPORT_SYMBOL($$divI_15); -extern void __ashrdi3(void); -extern void __ashldi3(void); -extern void __lshrdi3(void); -extern void __muldi3(void); -extern void __ucmpdi2(void); - EXPORT_SYMBOL(__ashrdi3); EXPORT_SYMBOL(__ashldi3); EXPORT_SYMBOL(__lshrdi3); diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c index d818ece23b4a..bf9f192c826e 100644 --- a/arch/parisc/kernel/pci-dma.c +++ b/arch/parisc/kernel/pci-dma.c @@ -39,7 +39,7 @@ static struct proc_dir_entry * proc_gsc_root __read_mostly = NULL; static unsigned long pcxl_used_bytes __read_mostly; static unsigned long pcxl_used_pages __read_mostly; -extern unsigned long pcxl_dma_start; /* Start of pcxl dma mapping area */ +unsigned long pcxl_dma_start __ro_after_init; /* pcxl dma mapping area start */ static DEFINE_SPINLOCK(pcxl_res_lock); static char *pcxl_res_map; static int pcxl_res_hint; @@ -381,7 +381,7 @@ pcxl_dma_init(void) pcxl_res_map = (char *)__get_free_pages(GFP_KERNEL, get_order(pcxl_res_size)); memset(pcxl_res_map, 0, pcxl_res_size); - proc_gsc_root = proc_mkdir("gsc", NULL); + proc_gsc_root = proc_mkdir("bus/gsc", NULL); if (!proc_gsc_root) printk(KERN_WARNING "pcxl_dma_init: Unable to create gsc /proc dir entry\n"); @@ -417,14 +417,6 @@ void *arch_dma_alloc(struct device *dev, size_t size, map_uncached_pages(vaddr, size, paddr); *dma_handle = (dma_addr_t) paddr; -#if 0 -/* This probably isn't needed to support EISA cards. -** ISA cards will certainly only support 24-bit DMA addressing. -** Not clear if we can, want, or need to support ISA. 
-*/ - if (!dev || *dev->coherent_dma_mask < 0xffffffff) - gfp |= GFP_DMA; -#endif return (void *)vaddr; } diff --git a/arch/parisc/kernel/pdt.c b/arch/parisc/kernel/pdt.c index 0d24735bd918..0f9b3b5914cf 100644 --- a/arch/parisc/kernel/pdt.c +++ b/arch/parisc/kernel/pdt.c @@ -354,10 +354,8 @@ static int __init pdt_initcall(void) return -ENODEV; kpdtd_task = kthread_run(pdt_mainloop, NULL, "kpdtd"); - if (IS_ERR(kpdtd_task)) - return PTR_ERR(kpdtd_task); - return 0; + return PTR_ERR_OR_ZERO(kpdtd_task); } late_initcall(pdt_initcall); diff --git a/arch/parisc/kernel/perf.c b/arch/parisc/kernel/perf.c index 90b04d8af212..b0f0816879df 100644 --- a/arch/parisc/kernel/perf.c +++ b/arch/parisc/kernel/perf.c @@ -57,7 +57,7 @@ struct rdr_tbl_ent { static int perf_processor_interface __read_mostly = UNKNOWN_INTF; static int perf_enabled __read_mostly; static DEFINE_SPINLOCK(perf_lock); -struct parisc_device *cpu_device __read_mostly; +static struct parisc_device *cpu_device __read_mostly; /* RDRs to write for PCX-W */ static const int perf_rdrs_W[] = diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c index 00b0df97afb1..762289b9984e 100644 --- a/arch/parisc/kernel/processor.c +++ b/arch/parisc/kernel/processor.c @@ -26,6 +26,7 @@ #include <asm/processor.h> #include <asm/page.h> #include <asm/pdc.h> +#include <asm/smp.h> #include <asm/pdcpat.h> #include <asm/irq.h> /* for struct irq_region */ #include <asm/parisc-device.h> diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c index 573f8303e2b0..211a4afdd282 100644 --- a/arch/parisc/kernel/setup.c +++ b/arch/parisc/kernel/setup.c @@ -40,11 +40,6 @@ static char __initdata command_line[COMMAND_LINE_SIZE]; -/* Intended for ccio/sba/cpu statistics under /proc/bus/{runway|gsc} */ -struct proc_dir_entry * proc_runway_root __read_mostly = NULL; -struct proc_dir_entry * proc_gsc_root __read_mostly = NULL; -struct proc_dir_entry * proc_mckinley_root __read_mostly = NULL; - static void __init setup_cmdline(char **cmdline_p) { extern unsigned int boot_args[]; @@ -196,48 +191,6 @@ const struct seq_operations cpuinfo_op = { .show = show_cpuinfo }; -static void __init parisc_proc_mkdir(void) -{ - /* - ** Can't call proc_mkdir() until after proc_root_init() has been - ** called by start_kernel(). In other words, this code can't - ** live in arch/.../setup.c because start_parisc() calls - ** start_kernel(). 
- */ - switch (boot_cpu_data.cpu_type) { - case pcxl: - case pcxl2: - if (NULL == proc_gsc_root) - { - proc_gsc_root = proc_mkdir("bus/gsc", NULL); - } - break; - case pcxt_: - case pcxu: - case pcxu_: - case pcxw: - case pcxw_: - case pcxw2: - if (NULL == proc_runway_root) - { - proc_runway_root = proc_mkdir("bus/runway", NULL); - } - break; - case mako: - case mako2: - if (NULL == proc_mckinley_root) - { - proc_mckinley_root = proc_mkdir("bus/mckinley", NULL); - } - break; - default: - /* FIXME: this was added to prevent the compiler - * complaining about missing pcx, pcxs and pcxt - * I'm assuming they have neither gsc nor runway */ - break; - } -} - static struct resource central_bus = { .name = "Central Bus", .start = F_EXTEND(0xfff80000), @@ -294,7 +247,6 @@ static int __init parisc_init(void) { u32 osid = (OS_ID_LINUX << 16); - parisc_proc_mkdir(); parisc_init_resources(); do_device_inventory(); /* probe for hardware */ diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c index f886ff0c75df..e8d27def6c52 100644 --- a/arch/parisc/kernel/signal.c +++ b/arch/parisc/kernel/signal.c @@ -423,7 +423,7 @@ static void check_syscallno_in_delay_branch(struct pt_regs *regs) regs->gr[31] -= 8; /* delayed branching */ /* Get assembler opcode of code in delay branch */ - uaddr = (unsigned int *) ((regs->gr[31] & ~3) + 4); + uaddr = (u32 __user *) ((regs->gr[31] & ~3) + 4); err = get_user(opcode, uaddr); if (err) return; diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c index ca2d537e25b1..9915062d5243 100644 --- a/arch/parisc/kernel/sys_parisc.c +++ b/arch/parisc/kernel/sys_parisc.c @@ -27,17 +27,12 @@ #include <linux/elf-randomize.h> /* - * Construct an artificial page offset for the mapping based on the virtual + * Construct an artificial page offset for the mapping based on the physical * address of the kernel file mapping variable. - * If filp is zero the calculated pgoff value aliases the memory of the given - * address. This is useful for io_uring where the mapping shall alias a kernel - * address and a userspace adress where both the kernel and the userspace - * access the same memory region. */ -#define GET_FILP_PGOFF(filp, addr) \ - ((filp ? (((unsigned long) filp->f_mapping) >> 8) \ - & ((SHM_COLOUR-1) >> PAGE_SHIFT) : 0UL) \ - + (addr >> PAGE_SHIFT)) +#define GET_FILP_PGOFF(filp) \ + (filp ? (((unsigned long) filp->f_mapping) >> 8) \ + & ((SHM_COLOUR-1) >> PAGE_SHIFT) : 0UL) static unsigned long shared_align_offset(unsigned long filp_pgoff, unsigned long pgoff) @@ -117,7 +112,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, do_color_align = 0; if (filp || (flags & MAP_SHARED)) do_color_align = 1; - filp_pgoff = GET_FILP_PGOFF(filp, addr); + filp_pgoff = GET_FILP_PGOFF(filp); if (flags & MAP_FIXED) { /* Even MAP_FIXED mappings must reside within TASK_SIZE */ diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index 1373e5129868..1f51aa9c8230 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S @@ -39,6 +39,7 @@ registers). #include <asm/assembly.h> #include <asm/processor.h> #include <asm/cache.h> +#include <asm/spinlock_types.h> #include <linux/linkage.h> @@ -66,6 +67,16 @@ registers). 
stw \reg1, 0(%sr2,\reg2) .endm + /* raise exception if spinlock content is not zero or + * __ARCH_SPIN_LOCK_UNLOCKED_VAL */ + .macro spinlock_check spin_val,tmpreg +#ifdef CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK + ldi __ARCH_SPIN_LOCK_UNLOCKED_VAL, \tmpreg + andcm,= \spin_val, \tmpreg, %r0 + .word SPINLOCK_BREAK_INSN +#endif + .endm + .text .import syscall_exit,code @@ -508,7 +519,8 @@ lws_start: lws_exit_noerror: lws_pagefault_enable %r1,%r21 - stw,ma %r20, 0(%sr2,%r20) + ldi __ARCH_SPIN_LOCK_UNLOCKED_VAL, %r21 + stw,ma %r21, 0(%sr2,%r20) ssm PSW_SM_I, %r0 b lws_exit copy %r0, %r21 @@ -521,7 +533,8 @@ lws_wouldblock: lws_pagefault: lws_pagefault_enable %r1,%r21 - stw,ma %r20, 0(%sr2,%r20) + ldi __ARCH_SPIN_LOCK_UNLOCKED_VAL, %r21 + stw,ma %r21, 0(%sr2,%r20) ssm PSW_SM_I, %r0 ldo 3(%r0),%r28 b lws_exit @@ -619,6 +632,7 @@ lws_compare_and_swap: /* Try to acquire the lock */ LDCW 0(%sr2,%r20), %r28 + spinlock_check %r28, %r21 comclr,<> %r0, %r28, %r0 b,n lws_wouldblock @@ -772,6 +786,7 @@ cas2_lock_start: /* Try to acquire the lock */ LDCW 0(%sr2,%r20), %r28 + spinlock_check %r28, %r21 comclr,<> %r0, %r28, %r0 b,n lws_wouldblock @@ -1001,6 +1016,7 @@ atomic_xchg_start: /* Try to acquire the lock */ LDCW 0(%sr2,%r20), %r28 + spinlock_check %r28, %r21 comclr,<> %r0, %r28, %r0 b,n lws_wouldblock @@ -1199,6 +1215,7 @@ atomic_store_start: /* Try to acquire the lock */ LDCW 0(%sr2,%r20), %r28 + spinlock_check %r28, %r21 comclr,<> %r0, %r28, %r0 b,n lws_wouldblock @@ -1330,7 +1347,7 @@ ENTRY(lws_lock_start) /* lws locks */ .rept 256 /* Keep locks aligned at 16-bytes */ - .word 1 + .word __ARCH_SPIN_LOCK_UNLOCKED_VAL .word 0 .word 0 .word 0 diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c index 033b9e50b44a..170d0dda4213 100644 --- a/arch/parisc/kernel/unaligned.c +++ b/arch/parisc/kernel/unaligned.c @@ -11,6 +11,7 @@ #include <linux/signal.h> #include <linux/ratelimit.h> #include <linux/uaccess.h> +#include <linux/sysctl.h> #include <asm/unaligned.h> #include <asm/hardirq.h> #include <asm/traps.h> @@ -337,7 +338,7 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop) : "r19", "r20", "r21", "r22", "r1" ); #else { - unsigned long valh=(val>>32),vall=(val&0xffffffffl); + unsigned long valh = (val >> 32), vall = (val & 0xffffffffl); __asm__ __volatile__ ( " mtsp %4, %%sr1\n" " zdep %2, 29, 2, %%r19\n" @@ -473,7 +474,7 @@ void handle_unaligned(struct pt_regs *regs) case OPCODE_LDWA_I: case OPCODE_LDW_S: case OPCODE_LDWA_S: - ret = emulate_ldw(regs, R3(regs->iir),0); + ret = emulate_ldw(regs, R3(regs->iir), 0); break; case OPCODE_STH: @@ -482,7 +483,7 @@ void handle_unaligned(struct pt_regs *regs) case OPCODE_STW: case OPCODE_STWA: - ret = emulate_stw(regs, R2(regs->iir),0); + ret = emulate_stw(regs, R2(regs->iir), 0); break; #ifdef CONFIG_64BIT @@ -490,12 +491,12 @@ void handle_unaligned(struct pt_regs *regs) case OPCODE_LDDA_I: case OPCODE_LDD_S: case OPCODE_LDDA_S: - ret = emulate_ldd(regs, R3(regs->iir),0); + ret = emulate_ldd(regs, R3(regs->iir), 0); break; case OPCODE_STD: case OPCODE_STDA: - ret = emulate_std(regs, R2(regs->iir),0); + ret = emulate_std(regs, R2(regs->iir), 0); break; #endif @@ -503,24 +504,24 @@ void handle_unaligned(struct pt_regs *regs) case OPCODE_FLDWS: case OPCODE_FLDWXR: case OPCODE_FLDWSR: - ret = emulate_ldw(regs,FR3(regs->iir),1); + ret = emulate_ldw(regs, FR3(regs->iir), 1); break; case OPCODE_FLDDX: case OPCODE_FLDDS: - ret = emulate_ldd(regs,R3(regs->iir),1); + ret = emulate_ldd(regs, R3(regs->iir), 1); break; case OPCODE_FSTWX: 
case OPCODE_FSTWS: case OPCODE_FSTWXR: case OPCODE_FSTWSR: - ret = emulate_stw(regs,FR3(regs->iir),1); + ret = emulate_stw(regs, FR3(regs->iir), 1); break; case OPCODE_FSTDX: case OPCODE_FSTDS: - ret = emulate_std(regs,R3(regs->iir),1); + ret = emulate_std(regs, R3(regs->iir), 1); break; case OPCODE_LDCD_I: diff --git a/arch/parisc/lib/ucmpdi2.c b/arch/parisc/lib/ucmpdi2.c index 8e6014a142ef..9d8b4dbae273 100644 --- a/arch/parisc/lib/ucmpdi2.c +++ b/arch/parisc/lib/ucmpdi2.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/module.h> +#include <linux/libgcc.h> union ull_union { unsigned long long ull; @@ -9,7 +10,7 @@ union ull_union { } ui; }; -int __ucmpdi2(unsigned long long a, unsigned long long b) +word_type __ucmpdi2(unsigned long long a, unsigned long long b) { union ull_union au = {.ull = a}; union ull_union bu = {.ull = b}; diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c index a4c7c7630f48..2fe5b44986e0 100644 --- a/arch/parisc/mm/fault.c +++ b/arch/parisc/mm/fault.c @@ -192,31 +192,31 @@ int fixup_exception(struct pt_regs *regs) * For implementation see handle_interruption() in traps.c */ static const char * const trap_description[] = { - [1] "High-priority machine check (HPMC)", - [2] "Power failure interrupt", - [3] "Recovery counter trap", - [5] "Low-priority machine check", - [6] "Instruction TLB miss fault", - [7] "Instruction access rights / protection trap", - [8] "Illegal instruction trap", - [9] "Break instruction trap", - [10] "Privileged operation trap", - [11] "Privileged register trap", - [12] "Overflow trap", - [13] "Conditional trap", - [14] "FP Assist Exception trap", - [15] "Data TLB miss fault", - [16] "Non-access ITLB miss fault", - [17] "Non-access DTLB miss fault", - [18] "Data memory protection/unaligned access trap", - [19] "Data memory break trap", - [20] "TLB dirty bit trap", - [21] "Page reference trap", - [22] "Assist emulation trap", - [25] "Taken branch trap", - [26] "Data memory access rights trap", - [27] "Data memory protection ID trap", - [28] "Unaligned data reference trap", + [1] = "High-priority machine check (HPMC)", + [2] = "Power failure interrupt", + [3] = "Recovery counter trap", + [5] = "Low-priority machine check", + [6] = "Instruction TLB miss fault", + [7] = "Instruction access rights / protection trap", + [8] = "Illegal instruction trap", + [9] = "Break instruction trap", + [10] = "Privileged operation trap", + [11] = "Privileged register trap", + [12] = "Overflow trap", + [13] = "Conditional trap", + [14] = "FP Assist Exception trap", + [15] = "Data TLB miss fault", + [16] = "Non-access ITLB miss fault", + [17] = "Non-access DTLB miss fault", + [18] = "Data memory protection/unaligned access trap", + [19] = "Data memory break trap", + [20] = "TLB dirty bit trap", + [21] = "Page reference trap", + [22] = "Assist emulation trap", + [25] = "Taken branch trap", + [26] = "Data memory access rights trap", + [27] = "Data memory protection ID trap", + [28] = "Unaligned data reference trap", }; const char *trap_name(unsigned long code) diff --git a/arch/parisc/mm/fixmap.c b/arch/parisc/mm/fixmap.c index cc15d737fda6..ae3493dae9dc 100644 --- a/arch/parisc/mm/fixmap.c +++ b/arch/parisc/mm/fixmap.c @@ -19,9 +19,6 @@ void notrace set_fixmap(enum fixed_addresses idx, phys_addr_t phys) pmd_t *pmd = pmd_offset(pud, vaddr); pte_t *pte; - if (pmd_none(*pmd)) - pte = pte_alloc_kernel(pmd, vaddr); - pte = pte_offset_kernel(pmd, vaddr); set_pte_at(&init_mm, vaddr, pte, __mk_pte(phys, PAGE_KERNEL_RWX)); 
flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE); diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index 406c52fe23d5..a088c243edea 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -523,10 +523,6 @@ void mark_rodata_ro(void) void *parisc_vmalloc_start __ro_after_init; EXPORT_SYMBOL(parisc_vmalloc_start); -#ifdef CONFIG_PA11 -unsigned long pcxl_dma_start __ro_after_init; -#endif - void __init mem_init(void) { /* Do sanity checks on IPC (compat) structures */ @@ -669,6 +665,39 @@ static void __init gateway_init(void) PAGE_SIZE, PAGE_GATEWAY, 1); } +static void __init fixmap_init(void) +{ + unsigned long addr = FIXMAP_START; + unsigned long end = FIXMAP_START + FIXMAP_SIZE; + pgd_t *pgd = pgd_offset_k(addr); + p4d_t *p4d = p4d_offset(pgd, addr); + pud_t *pud = pud_offset(p4d, addr); + pmd_t *pmd; + + BUILD_BUG_ON(FIXMAP_SIZE > PMD_SIZE); + +#if CONFIG_PGTABLE_LEVELS == 3 + if (pud_none(*pud)) { + pmd = memblock_alloc(PAGE_SIZE << PMD_TABLE_ORDER, + PAGE_SIZE << PMD_TABLE_ORDER); + if (!pmd) + panic("fixmap: pmd allocation failed.\n"); + pud_populate(NULL, pud, pmd); + } +#endif + + pmd = pmd_offset(pud, addr); + do { + pte_t *pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE); + if (!pte) + panic("fixmap: pte allocation failed.\n"); + + pmd_populate_kernel(&init_mm, pmd, pte); + + addr += PAGE_SIZE; + } while (addr < end); +} + static void __init parisc_bootmem_free(void) { unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, }; @@ -683,6 +712,7 @@ void __init paging_init(void) setup_bootmem(); pagetable_init(); gateway_init(); + fixmap_init(); flush_cache_all_local(); /* start with known state */ flush_tlb_all_local(NULL); diff --git a/arch/parisc/mm/ioremap.c b/arch/parisc/mm/ioremap.c index 345ff0b66499..d7ee1f43d997 100644 --- a/arch/parisc/mm/ioremap.c +++ b/arch/parisc/mm/ioremap.c @@ -27,7 +27,7 @@ */ void __iomem *ioremap(unsigned long phys_addr, unsigned long size) { - void __iomem *addr; + uintptr_t addr; struct vm_struct *area; unsigned long offset, last_addr; pgprot_t pgprot; @@ -79,10 +79,9 @@ void __iomem *ioremap(unsigned long phys_addr, unsigned long size) if (!area) return NULL; - addr = (void __iomem *) area->addr; - if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size, - phys_addr, pgprot)) { - vunmap(addr); + addr = (uintptr_t) area->addr; + if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) { + vunmap(area->addr); return NULL; } diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index 8a6754ffdc7e..a6c7069bec5d 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -393,7 +393,6 @@ int validate_sp_size(unsigned long sp, struct task_struct *p, */ #define ARCH_HAS_PREFETCH #define ARCH_HAS_PREFETCHW -#define ARCH_HAS_SPINLOCK_PREFETCH static inline void prefetch(const void *x) { @@ -411,8 +410,6 @@ static inline void prefetchw(const void *x) __asm__ __volatile__ ("dcbtst 0,%0" : : "r" (x)); } -#define spin_lock_prefetch(x) prefetchw(x) - /* asm stubs */ extern unsigned long isa300_idle_stop_noloss(unsigned long psscr_val); extern unsigned long isa300_idle_stop_mayloss(unsigned long psscr_val); diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h index 46c31fb8748d..30a12d208687 100644 --- a/arch/powerpc/include/asm/word-at-a-time.h +++ b/arch/powerpc/include/asm/word-at-a-time.h @@ -34,7 +34,7 @@ static inline long find_zero(unsigned long mask) return leading_zero_bits >> 3; } -static inline bool 
has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c) +static inline unsigned long has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c) { unsigned long rhs = val | c->low_bits; *data = rhs; diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index f132d8704263..6440b1bb332a 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -375,8 +375,7 @@ _GLOBAL(generic_secondary_smp_init) beq 20f /* start the specified thread */ - LOAD_REG_ADDR(r5, fsl_secondary_thread_init) - ld r4, 0(r5) + LOAD_REG_ADDR(r5, DOTSYM(fsl_secondary_thread_init)) bl book3e_start_thread /* stop the current thread */ diff --git a/arch/powerpc/kernel/trace/ftrace_mprofile.S b/arch/powerpc/kernel/trace/ftrace_mprofile.S index ffb1db386849..1f7d86de1538 100644 --- a/arch/powerpc/kernel/trace/ftrace_mprofile.S +++ b/arch/powerpc/kernel/trace/ftrace_mprofile.S @@ -33,6 +33,9 @@ * and then arrange for the ftrace function to be called. */ .macro ftrace_regs_entry allregs + /* Create a minimal stack frame for representing B */ + PPC_STLU r1, -STACK_FRAME_MIN_SIZE(r1) + /* Create our stack frame + pt_regs */ PPC_STLU r1,-SWITCH_FRAME_SIZE(r1) @@ -42,7 +45,7 @@ #ifdef CONFIG_PPC64 /* Save the original return address in A's stack frame */ - std r0, LRSAVE+SWITCH_FRAME_SIZE(r1) + std r0, LRSAVE+SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE(r1) /* Ok to continue? */ lbz r3, PACA_FTRACE_ENABLED(r13) cmpdi r3, 0 @@ -77,6 +80,8 @@ mflr r7 /* Save it as pt_regs->nip */ PPC_STL r7, _NIP(r1) + /* Also save it in B's stackframe header for proper unwind */ + PPC_STL r7, LRSAVE+SWITCH_FRAME_SIZE(r1) /* Save the read LR in pt_regs->link */ PPC_STL r0, _LINK(r1) @@ -142,7 +147,7 @@ #endif /* Pop our stack frame */ - addi r1, r1, SWITCH_FRAME_SIZE + addi r1, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE #ifdef CONFIG_LIVEPATCH_64 /* Based on the cmpd above, if the NIP was altered handle livepatch */ diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index fe1b83020e0d..0ec5b45b1e86 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -314,8 +314,7 @@ void __ref vmemmap_free(unsigned long start, unsigned long end, start = ALIGN_DOWN(start, page_size); if (altmap) { alt_start = altmap->base_pfn; - alt_end = altmap->base_pfn + altmap->reserve + - altmap->free + altmap->alloc + altmap->align; + alt_end = altmap->base_pfn + altmap->reserve + altmap->free; } pr_debug("vmemmap_free %lx...%lx\n", start, end); diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c index 9c43cf32f4c9..40aa58206888 100644 --- a/arch/powerpc/platforms/85xx/smp.c +++ b/arch/powerpc/platforms/85xx/smp.c @@ -180,7 +180,7 @@ static void wake_hw_thread(void *info) unsigned long inia; int cpu = *(const int *)info; - inia = *(unsigned long *)fsl_secondary_thread_init; + inia = ppc_function_entry(fsl_secondary_thread_init); book3e_start_thread(cpu_thread_in_core(cpu), inia); } #endif diff --git a/arch/powerpc/platforms/powermac/time.c b/arch/powerpc/platforms/powermac/time.c index 4c5790aff1b5..8633891b7aa5 100644 --- a/arch/powerpc/platforms/powermac/time.c +++ b/arch/powerpc/platforms/powermac/time.c @@ -26,8 +26,8 @@ #include <linux/rtc.h> #include <linux/of_address.h> +#include <asm/early_ioremap.h> #include <asm/sections.h> -#include <asm/io.h> #include <asm/machdep.h> #include <asm/time.h> #include <asm/nvram.h> @@ -182,7 +182,7 @@ static int __init via_calibrate_decr(void) return 0; } of_node_put(vias); - via = 
ioremap(rsrc.start, resource_size(&rsrc)); + via = early_ioremap(rsrc.start, resource_size(&rsrc)); if (via == NULL) { printk(KERN_ERR "Failed to map VIA for timer calibration !\n"); return 0; @@ -207,7 +207,7 @@ static int __init via_calibrate_decr(void) ppc_tb_freq = (dstart - dend) * 100 / 6; - iounmap(via); + early_iounmap((void *)via, resource_size(&rsrc)); return 1; } diff --git a/arch/riscv/include/asm/acpi.h b/arch/riscv/include/asm/acpi.h index f71ce21ff684..d5604d2073bc 100644 --- a/arch/riscv/include/asm/acpi.h +++ b/arch/riscv/include/asm/acpi.h @@ -19,7 +19,7 @@ typedef u64 phys_cpuid_t; #define PHYS_CPUID_INVALID INVALID_HARTID /* ACPI table mapping after acpi_permanent_mmap is set */ -void *acpi_os_ioremap(acpi_physical_address phys, acpi_size size); +void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size); #define acpi_os_ioremap acpi_os_ioremap #define acpi_strict 1 /* No out-of-spec workarounds on RISC-V */ diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h index 8091b8bf4883..b93ffddf8a61 100644 --- a/arch/riscv/include/asm/cacheflush.h +++ b/arch/riscv/include/asm/cacheflush.h @@ -37,6 +37,10 @@ static inline void flush_dcache_page(struct page *page) #define flush_icache_user_page(vma, pg, addr, len) \ flush_icache_mm(vma->vm_mm, 0) +#ifdef CONFIG_64BIT +#define flush_cache_vmap(start, end) flush_tlb_kernel_range(start, end) +#endif + #ifndef CONFIG_SMP #define flush_icache_all() local_flush_icache_all() diff --git a/arch/riscv/include/asm/mmio.h b/arch/riscv/include/asm/mmio.h index aff6c33ab0c0..4c58ee7f95ec 100644 --- a/arch/riscv/include/asm/mmio.h +++ b/arch/riscv/include/asm/mmio.h @@ -101,9 +101,9 @@ static inline u64 __raw_readq(const volatile void __iomem *addr) * Relaxed I/O memory access primitives. These follow the Device memory * ordering rules but do not guarantee any ordering relative to Normal memory * accesses. These are defined to order the indicated access (either a read or - * write) with all other I/O memory accesses. Since the platform specification - * defines that all I/O regions are strongly ordered on channel 2, no explicit - * fences are required to enforce this ordering. + * write) with all other I/O memory accesses to the same peripheral. Since the + * platform specification defines that all I/O regions are strongly ordered on + * channel 0, no explicit fences are required to enforce this ordering. */ /* FIXME: These are now the same as asm-generic */ #define __io_rbr() do {} while (0) @@ -125,14 +125,14 @@ static inline u64 __raw_readq(const volatile void __iomem *addr) #endif /* - * I/O memory access primitives. Reads are ordered relative to any - * following Normal memory access. Writes are ordered relative to any prior - * Normal memory access. The memory barriers here are necessary as RISC-V + * I/O memory access primitives. Reads are ordered relative to any following + * Normal memory read and delay() loop. Writes are ordered relative to any + * prior Normal memory write. The memory barriers here are necessary as RISC-V * doesn't define any ordering between the memory space and the I/O space. 
*/ #define __io_br() do {} while (0) -#define __io_ar(v) __asm__ __volatile__ ("fence i,r" : : : "memory") -#define __io_bw() __asm__ __volatile__ ("fence w,o" : : : "memory") +#define __io_ar(v) ({ __asm__ __volatile__ ("fence i,ir" : : : "memory"); }) +#define __io_bw() ({ __asm__ __volatile__ ("fence w,o" : : : "memory"); }) #define __io_aw() mmiowb_set_pending() #define readb(c) ({ u8 __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; }) diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index 75970ee2bda2..b5680c940c1e 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -188,6 +188,8 @@ extern struct pt_alloc_ops pt_ops __initdata; #define PAGE_KERNEL_IO __pgprot(_PAGE_IOREMAP) extern pgd_t swapper_pg_dir[]; +extern pgd_t trampoline_pg_dir[]; +extern pgd_t early_pg_dir[]; #ifdef CONFIG_TRANSPARENT_HUGEPAGE static inline int pmd_present(pmd_t pmd) diff --git a/arch/riscv/include/asm/vmalloc.h b/arch/riscv/include/asm/vmalloc.h index 58d3e447f191..924d01b56c9a 100644 --- a/arch/riscv/include/asm/vmalloc.h +++ b/arch/riscv/include/asm/vmalloc.h @@ -3,12 +3,14 @@ #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP +extern bool pgtable_l4_enabled, pgtable_l5_enabled; + #define IOREMAP_MAX_ORDER (PUD_SHIFT) #define arch_vmap_pud_supported arch_vmap_pud_supported static inline bool arch_vmap_pud_supported(pgprot_t prot) { - return true; + return pgtable_l4_enabled || pgtable_l5_enabled; } #define arch_vmap_pmd_supported arch_vmap_pmd_supported diff --git a/arch/riscv/kernel/acpi.c b/arch/riscv/kernel/acpi.c index 5ee03ebab80e..56cb2c986c48 100644 --- a/arch/riscv/kernel/acpi.c +++ b/arch/riscv/kernel/acpi.c @@ -215,9 +215,9 @@ void __init __acpi_unmap_table(void __iomem *map, unsigned long size) early_iounmap(map, size); } -void *acpi_os_ioremap(acpi_physical_address phys, acpi_size size) +void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size) { - return memremap(phys, size, MEMREMAP_WB); + return (void __iomem *)memremap(phys, size, MEMREMAP_WB); } #ifdef CONFIG_PCI diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c index a2fc952318e9..35b854cf078e 100644 --- a/arch/riscv/kernel/cpu.c +++ b/arch/riscv/kernel/cpu.c @@ -17,6 +17,11 @@ #include <asm/smp.h> #include <asm/pgtable.h> +bool arch_match_cpu_phys_id(int cpu, u64 phys_id) +{ + return phys_id == cpuid_to_hartid_map(cpu); +} + /* * Returns the hart ID of the given device tree node, or -ENODEV if the node * isn't an enabled and valid RISC-V hart node. 
diff --git a/arch/riscv/kernel/crash_core.c b/arch/riscv/kernel/crash_core.c index b351a3c01355..55f1d7856b54 100644 --- a/arch/riscv/kernel/crash_core.c +++ b/arch/riscv/kernel/crash_core.c @@ -18,4 +18,6 @@ void arch_crash_save_vmcoreinfo(void) vmcoreinfo_append_str("NUMBER(MODULES_END)=0x%lx\n", MODULES_END); #endif vmcoreinfo_append_str("NUMBER(KERNEL_LINK_ADDR)=0x%lx\n", KERNEL_LINK_ADDR); + vmcoreinfo_append_str("NUMBER(va_kernel_pa_offset)=0x%lx\n", + kernel_map.va_kernel_pa_offset); } diff --git a/arch/riscv/kernel/elf_kexec.c b/arch/riscv/kernel/elf_kexec.c index 5372b708fae2..c08bb5c3b385 100644 --- a/arch/riscv/kernel/elf_kexec.c +++ b/arch/riscv/kernel/elf_kexec.c @@ -281,7 +281,7 @@ static void *elf_kexec_load(struct kimage *image, char *kernel_buf, kbuf.buffer = initrd; kbuf.bufsz = kbuf.memsz = initrd_len; kbuf.buf_align = PAGE_SIZE; - kbuf.top_down = false; + kbuf.top_down = true; kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; ret = kexec_add_buffer(&kbuf); if (ret) @@ -425,6 +425,7 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi, * sym, instead of searching the whole relsec. */ case R_RISCV_PCREL_HI20: + case R_RISCV_CALL_PLT: case R_RISCV_CALL: *(u64 *)loc = CLEAN_IMM(UITYPE, *(u64 *)loc) | ENCODE_UJTYPE_IMM(val - addr); diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c index 85bbce0f758c..40420afbb1a0 100644 --- a/arch/riscv/kernel/smp.c +++ b/arch/riscv/kernel/smp.c @@ -61,11 +61,6 @@ int riscv_hartid_to_cpuid(unsigned long hartid) return -ENOENT; } -bool arch_match_cpu_phys_id(int cpu, u64 phys_id) -{ - return phys_id == cpuid_to_hartid_map(cpu); -} - static void ipi_stop(void) { set_cpu_online(smp_processor_id(), false); diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index 9ce504737d18..e4c35ac2357f 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -26,12 +26,13 @@ #include <linux/kfence.h> #include <asm/fixmap.h> -#include <asm/tlbflush.h> -#include <asm/sections.h> -#include <asm/soc.h> #include <asm/io.h> -#include <asm/ptdump.h> #include <asm/numa.h> +#include <asm/pgtable.h> +#include <asm/ptdump.h> +#include <asm/sections.h> +#include <asm/soc.h> +#include <asm/tlbflush.h> #include "../kernel/head.h" @@ -214,8 +215,13 @@ static void __init setup_bootmem(void) memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start); phys_ram_end = memblock_end_of_DRAM(); + + /* + * Make sure we align the start of the memory on a PMD boundary so that + * at worst, we map the linear mapping with PMD mappings. + */ if (!IS_ENABLED(CONFIG_XIP_KERNEL)) - phys_ram_base = memblock_start_of_DRAM(); + phys_ram_base = memblock_start_of_DRAM() & PMD_MASK; /* * In 64-bit, any use of __va/__pa before this point is wrong as we diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c index 8fc0efcf905c..a01bc15dce24 100644 --- a/arch/riscv/mm/kasan_init.c +++ b/arch/riscv/mm/kasan_init.c @@ -22,7 +22,6 @@ * region is not and then we have to go down to the PUD level. 
*/ -extern pgd_t early_pg_dir[PTRS_PER_PGD]; pgd_t tmp_pg_dir[PTRS_PER_PGD] __page_aligned_bss; p4d_t tmp_p4d[PTRS_PER_P4D] __page_aligned_bss; pud_t tmp_pud[PTRS_PER_PUD] __page_aligned_bss; diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig index d03d4cb9332c..3a6a9a88bd38 100644 --- a/arch/s390/configs/debug_defconfig +++ b/arch/s390/configs/debug_defconfig @@ -116,7 +116,6 @@ CONFIG_UNIX=y CONFIG_UNIX_DIAG=m CONFIG_XFRM_USER=m CONFIG_NET_KEY=m -CONFIG_NET_TC_SKB_EXT=y CONFIG_SMC=m CONFIG_SMC_DIAG=m CONFIG_INET=y @@ -193,6 +192,7 @@ CONFIG_NFT_REJECT=m CONFIG_NFT_COMPAT=m CONFIG_NFT_HASH=m CONFIG_NFT_FIB_INET=m +CONFIG_NETFILTER_XTABLES_COMPAT=y CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_AUDIT=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m @@ -379,6 +379,7 @@ CONFIG_NET_ACT_SIMP=m CONFIG_NET_ACT_SKBEDIT=m CONFIG_NET_ACT_CSUM=m CONFIG_NET_ACT_GATE=m +CONFIG_NET_TC_SKB_EXT=y CONFIG_DNS_RESOLVER=y CONFIG_OPENVSWITCH=m CONFIG_VSOCKETS=m @@ -395,6 +396,7 @@ CONFIG_HOTPLUG_PCI=y CONFIG_HOTPLUG_PCI_S390=y CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS_SAFE=y +# CONFIG_FW_LOADER is not set CONFIG_CONNECTOR=y CONFIG_ZRAM=y CONFIG_BLK_DEV_LOOP=m @@ -502,7 +504,6 @@ CONFIG_NLMON=m # CONFIG_NET_VENDOR_GOOGLE is not set # CONFIG_NET_VENDOR_HUAWEI is not set # CONFIG_NET_VENDOR_INTEL is not set -# CONFIG_NET_VENDOR_WANGXUN is not set # CONFIG_NET_VENDOR_LITEX is not set # CONFIG_NET_VENDOR_MARVELL is not set CONFIG_MLX4_EN=m @@ -542,6 +543,7 @@ CONFIG_MLX5_CORE_EN=y # CONFIG_NET_VENDOR_TI is not set # CONFIG_NET_VENDOR_VERTEXCOM is not set # CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WANGXUN is not set # CONFIG_NET_VENDOR_WIZNET is not set # CONFIG_NET_VENDOR_XILINX is not set CONFIG_PPP=m @@ -646,7 +648,6 @@ CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y CONFIG_TMPFS_INODE64=y CONFIG_HUGETLBFS=y -CONFIG_CONFIGFS_FS=m CONFIG_ECRYPT_FS=m CONFIG_CRAMFS=m CONFIG_SQUASHFS=m @@ -690,7 +691,6 @@ CONFIG_HARDENED_USERCOPY=y CONFIG_FORTIFY_SOURCE=y CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SELINUX_BOOTPARAM=y -CONFIG_SECURITY_SELINUX_DISABLE=y CONFIG_SECURITY_LOCKDOWN_LSM=y CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y CONFIG_SECURITY_LANDLOCK=y @@ -744,7 +744,6 @@ CONFIG_CRYPTO_MD4=m CONFIG_CRYPTO_MD5=y CONFIG_CRYPTO_MICHAEL_MIC=m CONFIG_CRYPTO_RMD160=m -CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_SM3_GENERIC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_WP512=m @@ -844,6 +843,7 @@ CONFIG_PREEMPT_TRACER=y CONFIG_SCHED_TRACER=y CONFIG_FTRACE_SYSCALLS=y CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_USER_EVENTS=y CONFIG_HIST_TRIGGERS=y CONFIG_FTRACE_STARTUP_TEST=y # CONFIG_EVENT_TRACE_STARTUP_TEST is not set @@ -866,6 +866,7 @@ CONFIG_FAIL_MAKE_REQUEST=y CONFIG_FAIL_IO_TIMEOUT=y CONFIG_FAIL_FUTEX=y CONFIG_FAULT_INJECTION_DEBUG_FS=y +CONFIG_FAULT_INJECTION_CONFIGFS=y CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y CONFIG_LKDTM=m CONFIG_TEST_MIN_HEAP=y diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig index 1855759cdc6a..b13a5a0d99d2 100644 --- a/arch/s390/configs/defconfig +++ b/arch/s390/configs/defconfig @@ -107,7 +107,6 @@ CONFIG_UNIX=y CONFIG_UNIX_DIAG=m CONFIG_XFRM_USER=m CONFIG_NET_KEY=m -CONFIG_NET_TC_SKB_EXT=y CONFIG_SMC=m CONFIG_SMC_DIAG=m CONFIG_INET=y @@ -184,6 +183,7 @@ CONFIG_NFT_REJECT=m CONFIG_NFT_COMPAT=m CONFIG_NFT_HASH=m CONFIG_NFT_FIB_INET=m +CONFIG_NETFILTER_XTABLES_COMPAT=y CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_AUDIT=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m @@ -369,6 +369,7 @@ CONFIG_NET_ACT_SIMP=m CONFIG_NET_ACT_SKBEDIT=m CONFIG_NET_ACT_CSUM=m CONFIG_NET_ACT_GATE=m 
+CONFIG_NET_TC_SKB_EXT=y CONFIG_DNS_RESOLVER=y CONFIG_OPENVSWITCH=m CONFIG_VSOCKETS=m @@ -385,6 +386,7 @@ CONFIG_HOTPLUG_PCI_S390=y CONFIG_UEVENT_HELPER=y CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS_SAFE=y +# CONFIG_FW_LOADER is not set CONFIG_CONNECTOR=y CONFIG_ZRAM=y CONFIG_BLK_DEV_LOOP=m @@ -492,7 +494,6 @@ CONFIG_NLMON=m # CONFIG_NET_VENDOR_GOOGLE is not set # CONFIG_NET_VENDOR_HUAWEI is not set # CONFIG_NET_VENDOR_INTEL is not set -# CONFIG_NET_VENDOR_WANGXUN is not set # CONFIG_NET_VENDOR_LITEX is not set # CONFIG_NET_VENDOR_MARVELL is not set CONFIG_MLX4_EN=m @@ -532,6 +533,7 @@ CONFIG_MLX5_CORE_EN=y # CONFIG_NET_VENDOR_TI is not set # CONFIG_NET_VENDOR_VERTEXCOM is not set # CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WANGXUN is not set # CONFIG_NET_VENDOR_WIZNET is not set # CONFIG_NET_VENDOR_XILINX is not set CONFIG_PPP=m @@ -673,7 +675,6 @@ CONFIG_SECURITY=y CONFIG_SECURITY_NETWORK=y CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SELINUX_BOOTPARAM=y -CONFIG_SECURITY_SELINUX_DISABLE=y CONFIG_SECURITY_LOCKDOWN_LSM=y CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y CONFIG_SECURITY_LANDLOCK=y @@ -729,7 +730,6 @@ CONFIG_CRYPTO_MD4=m CONFIG_CRYPTO_MD5=y CONFIG_CRYPTO_MICHAEL_MIC=m CONFIG_CRYPTO_RMD160=m -CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_SM3_GENERIC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_WP512=m @@ -793,6 +793,7 @@ CONFIG_STACK_TRACER=y CONFIG_SCHED_TRACER=y CONFIG_FTRACE_SYSCALLS=y CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_USER_EVENTS=y CONFIG_HIST_TRIGGERS=y CONFIG_SAMPLES=y CONFIG_SAMPLE_TRACE_PRINTK=m diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig index 6f68b39817ef..e62fb2015102 100644 --- a/arch/s390/configs/zfcpdump_defconfig +++ b/arch/s390/configs/zfcpdump_defconfig @@ -53,7 +53,6 @@ CONFIG_ZFCP=y # CONFIG_HVC_IUCV is not set # CONFIG_HW_RANDOM_S390 is not set # CONFIG_HMC_DRV is not set -# CONFIG_S390_UV_UAPI is not set # CONFIG_S390_TAPE is not set # CONFIG_VMCP is not set # CONFIG_MONWRITER is not set diff --git a/arch/s390/include/uapi/asm/ptrace.h b/arch/s390/include/uapi/asm/ptrace.h index f0fe3bcc78a8..bb0826024bb9 100644 --- a/arch/s390/include/uapi/asm/ptrace.h +++ b/arch/s390/include/uapi/asm/ptrace.h @@ -8,6 +8,8 @@ #ifndef _UAPI_S390_PTRACE_H #define _UAPI_S390_PTRACE_H +#include <linux/const.h> + /* * Offsets in the user_regs_struct. They are used for the ptrace * system call and in entry.S diff --git a/arch/s390/kernel/sthyi.c b/arch/s390/kernel/sthyi.c index 4d141e2c132e..2ea7f208f0e7 100644 --- a/arch/s390/kernel/sthyi.c +++ b/arch/s390/kernel/sthyi.c @@ -459,9 +459,9 @@ static int sthyi_update_cache(u64 *rc) * * Fills the destination with system information returned by the STHYI * instruction. The data is generated by emulation or execution of STHYI, - * if available. The return value is the condition code that would be - * returned, the rc parameter is the return code which is passed in - * register R2 + 1. + * if available. The return value is either a negative error value or + * the condition code that would be returned, the rc parameter is the + * return code which is passed in register R2 + 1. 
*/ int sthyi_fill(void *dst, u64 *rc) { diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index 954d39adf85c..341abafb96e4 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c @@ -389,8 +389,8 @@ static int handle_partial_execution(struct kvm_vcpu *vcpu) */ int handle_sthyi(struct kvm_vcpu *vcpu) { - int reg1, reg2, r = 0; - u64 code, addr, cc = 0, rc = 0; + int reg1, reg2, cc = 0, r = 0; + u64 code, addr, rc = 0; struct sthyi_sctns *sctns = NULL; if (!test_kvm_facility(vcpu->kvm, 74)) @@ -421,7 +421,10 @@ int handle_sthyi(struct kvm_vcpu *vcpu) return -ENOMEM; cc = sthyi_fill(sctns, &rc); - + if (cc < 0) { + free_page((unsigned long)sctns); + return cc; + } out: if (!cc) { if (kvm_s390_pv_cpu_is_protected(vcpu)) { diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c index b26649233d12..24a66670f5c3 100644 --- a/arch/s390/mm/vmem.c +++ b/arch/s390/mm/vmem.c @@ -763,6 +763,8 @@ void __init vmem_map_init(void) if (static_key_enabled(&cpu_has_bear)) set_memory_nx(0, 1); set_memory_nx(PAGE_SIZE, 1); + if (debug_pagealloc_enabled()) + set_memory_4k(0, ident_map_size >> PAGE_SHIFT); pr_info("Write protected kernel read-only data: %luk\n", (unsigned long)(__end_rodata - _stext) >> 10); diff --git a/arch/sparc/include/asm/processor_64.h b/arch/sparc/include/asm/processor_64.h index 2667f35d5ea5..0a0d5c3d184c 100644 --- a/arch/sparc/include/asm/processor_64.h +++ b/arch/sparc/include/asm/processor_64.h @@ -213,7 +213,6 @@ unsigned long __get_wchan(struct task_struct *task); */ #define ARCH_HAS_PREFETCH #define ARCH_HAS_PREFETCHW -#define ARCH_HAS_SPINLOCK_PREFETCH static inline void prefetch(const void *x) { @@ -239,8 +238,6 @@ static inline void prefetchw(const void *x) : "r" (x)); } -#define spin_lock_prefetch(x) prefetchw(x) - #define HAVE_ARCH_PICK_MMAP_LAYOUT int do_mathemu(struct pt_regs *regs, struct fpustate *f, bool illegal_insn_trap); diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 7422db409770..e36261b4ea14 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -2593,6 +2593,13 @@ config CPU_IBRS_ENTRY This mitigates both spectre_v2 and retbleed at great cost to performance. +config CPU_SRSO + bool "Mitigate speculative RAS overflow on AMD" + depends on CPU_SUP_AMD && X86_64 && RETHUNK + default y + help + Enable the SRSO mitigation needed on AMD Zen1-4 machines. + config SLS bool "Mitigate Straight-Line-Speculation" depends on CC_HAS_SLS && X86_64 @@ -2603,6 +2610,25 @@ config SLS against straight line speculation. The kernel image might be slightly larger. +config GDS_FORCE_MITIGATION + bool "Force GDS Mitigation" + depends on CPU_SUP_INTEL + default n + help + Gather Data Sampling (GDS) is a hardware vulnerability which allows + unprivileged speculative access to data which was previously stored in + vector registers. + + This option is equivalent to setting gather_data_sampling=force on the + command line. The microcode mitigation is used if present, otherwise + AVX is disabled as a mitigation. On affected systems that are missing + the microcode any userspace code that unconditionally uses AVX will + break with this option set. + + Setting this option on systems not vulnerable to GDS has no effect. + + If in doubt, say N. 
+ endif config ARCH_HAS_ADD_PAGES diff --git a/arch/x86/boot/compressed/idt_64.c b/arch/x86/boot/compressed/idt_64.c index 6debb816e83d..3cdf94b41456 100644 --- a/arch/x86/boot/compressed/idt_64.c +++ b/arch/x86/boot/compressed/idt_64.c @@ -63,7 +63,14 @@ void load_stage2_idt(void) set_idt_entry(X86_TRAP_PF, boot_page_fault); #ifdef CONFIG_AMD_MEM_ENCRYPT - set_idt_entry(X86_TRAP_VC, boot_stage2_vc); + /* + * Clear the second stage #VC handler in case guest types + * needing #VC have not been detected. + */ + if (sev_status & BIT(1)) + set_idt_entry(X86_TRAP_VC, boot_stage2_vc); + else + set_idt_entry(X86_TRAP_VC, NULL); #endif load_boot_idt(&boot_idt_desc); diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c index 09dc8c187b3c..c3e343bd4760 100644 --- a/arch/x86/boot/compressed/sev.c +++ b/arch/x86/boot/compressed/sev.c @@ -405,12 +405,45 @@ void sev_enable(struct boot_params *bp) bp->cc_blob_address = 0; /* + * Do an initial SEV capability check before snp_init() which + * loads the CPUID page and the same checks afterwards are done + * without the hypervisor and are trustworthy. + * + * If the HV fakes SEV support, the guest will crash'n'burn + * which is good enough. + */ + + /* Check for the SME/SEV support leaf */ + eax = 0x80000000; + ecx = 0; + native_cpuid(&eax, &ebx, &ecx, &edx); + if (eax < 0x8000001f) + return; + + /* + * Check for the SME/SEV feature: + * CPUID Fn8000_001F[EAX] + * - Bit 0 - Secure Memory Encryption support + * - Bit 1 - Secure Encrypted Virtualization support + * CPUID Fn8000_001F[EBX] + * - Bits 5:0 - Pagetable bit position used to indicate encryption + */ + eax = 0x8000001f; + ecx = 0; + native_cpuid(&eax, &ebx, &ecx, &edx); + /* Check whether SEV is supported */ + if (!(eax & BIT(1))) + return; + + /* * Setup/preliminary detection of SNP. This will be sanity-checked * against CPUID/MSR values later. */ snp = snp_init(bp); - /* Check for the SME/SEV support leaf */ + /* Now repeat the checks with the SNP CPUID table. */ + + /* Recheck the SME/SEV support leaf */ eax = 0x80000000; ecx = 0; native_cpuid(&eax, &ebx, &ecx, &edx); @@ -418,7 +451,7 @@ void sev_enable(struct boot_params *bp) return; /* - * Check for the SME/SEV feature: + * Recheck for the SME/SEV feature: * CPUID Fn8000_001F[EAX] * - Bit 0 - Secure Memory Encryption support * - Bit 1 - Secure Encrypted Virtualization support diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c index 11a5c68d1218..7645730dc228 100644 --- a/arch/x86/entry/vdso/vma.c +++ b/arch/x86/entry/vdso/vma.c @@ -299,8 +299,8 @@ static unsigned long vdso_addr(unsigned long start, unsigned len) /* Round the lowest possible end address up to a PMD boundary. 
*/ end = (start + len + PMD_SIZE - 1) & PMD_MASK; - if (end >= TASK_SIZE_MAX) - end = TASK_SIZE_MAX; + if (end >= DEFAULT_MAP_WINDOW) + end = DEFAULT_MAP_WINDOW; end -= len; if (end > start) { diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c index 1fbda2f94184..b21335e6a210 100644 --- a/arch/x86/hyperv/hv_apic.c +++ b/arch/x86/hyperv/hv_apic.c @@ -107,7 +107,6 @@ static bool cpu_is_self(int cpu) static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector, bool exclude_self) { - struct hv_send_ipi_ex **arg; struct hv_send_ipi_ex *ipi_arg; unsigned long flags; int nr_bank = 0; @@ -117,9 +116,8 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector, return false; local_irq_save(flags); - arg = (struct hv_send_ipi_ex **)this_cpu_ptr(hyperv_pcpu_input_arg); + ipi_arg = *this_cpu_ptr(hyperv_pcpu_input_arg); - ipi_arg = *arg; if (unlikely(!ipi_arg)) goto ipi_mask_ex_done; diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c index 6c04b52f139b..953e280c07c3 100644 --- a/arch/x86/hyperv/hv_init.c +++ b/arch/x86/hyperv/hv_init.c @@ -14,6 +14,7 @@ #include <asm/apic.h> #include <asm/desc.h> #include <asm/sev.h> +#include <asm/ibt.h> #include <asm/hypervisor.h> #include <asm/hyperv-tlfs.h> #include <asm/mshyperv.h> @@ -472,6 +473,26 @@ void __init hyperv_init(void) } /* + * Some versions of Hyper-V that provide IBT in guest VMs have a bug + * in that there's no ENDBR64 instruction at the entry to the + * hypercall page. Because hypercalls are invoked via an indirect call + * to the hypercall page, all hypercall attempts fail when IBT is + * enabled, and Linux panics. For such buggy versions, disable IBT. + * + * Fixed versions of Hyper-V always provide ENDBR64 on the hypercall + * page, so if future Linux kernel versions enable IBT for 32-bit + * builds, additional hypercall page hackery will be required here + * to provide an ENDBR32. + */ +#ifdef CONFIG_X86_KERNEL_IBT + if (cpu_feature_enabled(X86_FEATURE_IBT) && + *(u32 *)hv_hypercall_pg != gen_endbr()) { + setup_clear_cpu_cap(X86_FEATURE_IBT); + pr_warn("Hyper-V: Disabling IBT because of Hyper-V bug\n"); + } +#endif + + /* * hyperv_init() is called before LAPIC is initialized: see * apic_intr_mode_init() -> x86_platform.apic_post_init() and * apic_bsp_setup() -> setup_local_APIC(). 
The direct-mode STIMER diff --git a/arch/x86/hyperv/hv_vtl.c b/arch/x86/hyperv/hv_vtl.c index 85d38b9f3586..db5d2ea39fc0 100644 --- a/arch/x86/hyperv/hv_vtl.c +++ b/arch/x86/hyperv/hv_vtl.c @@ -25,6 +25,10 @@ void __init hv_vtl_init_platform(void) x86_init.irqs.pre_vector_init = x86_init_noop; x86_init.timers.timer_init = x86_init_noop; + /* Avoid searching for BIOS MP tables */ + x86_init.mpparse.find_smp_config = x86_init_noop; + x86_init.mpparse.get_smp_config = x86_init_uint_noop; + x86_platform.get_wallclock = get_rtc_noop; x86_platform.set_wallclock = set_rtc_noop; x86_platform.get_nmi_reason = hv_get_nmi_reason; diff --git a/arch/x86/hyperv/ivm.c b/arch/x86/hyperv/ivm.c index 14f46ad2ca64..28be6df88063 100644 --- a/arch/x86/hyperv/ivm.c +++ b/arch/x86/hyperv/ivm.c @@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(hv_ghcb_msr_read); static int hv_mark_gpa_visibility(u16 count, const u64 pfn[], enum hv_mem_host_visibility visibility) { - struct hv_gpa_range_for_visibility **input_pcpu, *input; + struct hv_gpa_range_for_visibility *input; u16 pages_processed; u64 hv_status; unsigned long flags; @@ -263,9 +263,8 @@ static int hv_mark_gpa_visibility(u16 count, const u64 pfn[], } local_irq_save(flags); - input_pcpu = (struct hv_gpa_range_for_visibility **) - this_cpu_ptr(hyperv_pcpu_input_arg); - input = *input_pcpu; + input = *this_cpu_ptr(hyperv_pcpu_input_arg); + if (unlikely(!input)) { local_irq_restore(flags); return -EINVAL; diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c index 8460bd35e10c..1cc113200ff5 100644 --- a/arch/x86/hyperv/mmu.c +++ b/arch/x86/hyperv/mmu.c @@ -61,7 +61,6 @@ static void hyperv_flush_tlb_multi(const struct cpumask *cpus, const struct flush_tlb_info *info) { int cpu, vcpu, gva_n, max_gvas; - struct hv_tlb_flush **flush_pcpu; struct hv_tlb_flush *flush; u64 status; unsigned long flags; @@ -74,10 +73,7 @@ static void hyperv_flush_tlb_multi(const struct cpumask *cpus, local_irq_save(flags); - flush_pcpu = (struct hv_tlb_flush **) - this_cpu_ptr(hyperv_pcpu_input_arg); - - flush = *flush_pcpu; + flush = *this_cpu_ptr(hyperv_pcpu_input_arg); if (unlikely(!flush)) { local_irq_restore(flags); @@ -178,17 +174,13 @@ static u64 hyperv_flush_tlb_others_ex(const struct cpumask *cpus, const struct flush_tlb_info *info) { int nr_bank = 0, max_gvas, gva_n; - struct hv_tlb_flush_ex **flush_pcpu; struct hv_tlb_flush_ex *flush; u64 status; if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED)) return HV_STATUS_INVALID_PARAMETER; - flush_pcpu = (struct hv_tlb_flush_ex **) - this_cpu_ptr(hyperv_pcpu_input_arg); - - flush = *flush_pcpu; + flush = *this_cpu_ptr(hyperv_pcpu_input_arg); if (info->mm) { /* diff --git a/arch/x86/hyperv/nested.c b/arch/x86/hyperv/nested.c index 5d70968c8538..9dc259fa322e 100644 --- a/arch/x86/hyperv/nested.c +++ b/arch/x86/hyperv/nested.c @@ -19,7 +19,6 @@ int hyperv_flush_guest_mapping(u64 as) { - struct hv_guest_mapping_flush **flush_pcpu; struct hv_guest_mapping_flush *flush; u64 status; unsigned long flags; @@ -30,10 +29,7 @@ int hyperv_flush_guest_mapping(u64 as) local_irq_save(flags); - flush_pcpu = (struct hv_guest_mapping_flush **) - this_cpu_ptr(hyperv_pcpu_input_arg); - - flush = *flush_pcpu; + flush = *this_cpu_ptr(hyperv_pcpu_input_arg); if (unlikely(!flush)) { local_irq_restore(flags); @@ -90,7 +86,6 @@ EXPORT_SYMBOL_GPL(hyperv_fill_flush_guest_mapping_list); int hyperv_flush_guest_mapping_range(u64 as, hyperv_fill_flush_list_func fill_flush_list_func, void *data) { - struct hv_guest_mapping_flush_list **flush_pcpu; struct 
hv_guest_mapping_flush_list *flush; u64 status; unsigned long flags; @@ -102,10 +97,8 @@ int hyperv_flush_guest_mapping_range(u64 as, local_irq_save(flags); - flush_pcpu = (struct hv_guest_mapping_flush_list **) - this_cpu_ptr(hyperv_pcpu_input_arg); + flush = *this_cpu_ptr(hyperv_pcpu_input_arg); - flush = *flush_pcpu; if (unlikely(!flush)) { local_irq_restore(flags); goto fault; diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h index 8eb74cf386db..2888c0ee4df0 100644 --- a/arch/x86/include/asm/acpi.h +++ b/arch/x86/include/asm/acpi.h @@ -15,6 +15,7 @@ #include <asm/mpspec.h> #include <asm/x86_init.h> #include <asm/cpufeature.h> +#include <asm/irq_vectors.h> #ifdef CONFIG_ACPI_APEI # include <asm/pgtable_types.h> @@ -31,6 +32,7 @@ extern int acpi_skip_timer_override; extern int acpi_use_timer_override; extern int acpi_fix_pin2_polarity; extern int acpi_disable_cmcff; +extern bool acpi_int_src_ovr[NR_IRQS_LEGACY]; extern u8 acpi_sci_flags; extern u32 acpi_sci_override_gsi; diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index cb8ca46213be..b69b0d7756aa 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -14,7 +14,7 @@ * Defines x86 CPU feature bits */ #define NCAPINTS 21 /* N 32-bit words worth of info */ -#define NBUGINTS 1 /* N 32-bit bug flags */ +#define NBUGINTS 2 /* N 32-bit bug flags */ /* * Note: If the comment begins with a quoted string, that string is used @@ -309,6 +309,10 @@ #define X86_FEATURE_SMBA (11*32+21) /* "" Slow Memory Bandwidth Allocation */ #define X86_FEATURE_BMEC (11*32+22) /* "" Bandwidth Monitoring Event Configuration */ +#define X86_FEATURE_SRSO (11*32+24) /* "" AMD BTB untrain RETs */ +#define X86_FEATURE_SRSO_ALIAS (11*32+25) /* "" AMD BTB untrain RETs through aliasing */ +#define X86_FEATURE_IBPB_ON_VMEXIT (11*32+26) /* "" Issue an IBPB only on VMEXIT */ + /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */ #define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */ #define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */ @@ -442,6 +446,10 @@ #define X86_FEATURE_AUTOIBRS (20*32+ 8) /* "" Automatic IBRS */ #define X86_FEATURE_NO_SMM_CTL_MSR (20*32+ 9) /* "" SMM_CTL MSR is not present */ +#define X86_FEATURE_SBPB (20*32+27) /* "" Selective Branch Prediction Barrier */ +#define X86_FEATURE_IBPB_BRTYPE (20*32+28) /* "" MSR_PRED_CMD[IBPB] flushes all branch type predictions */ +#define X86_FEATURE_SRSO_NO (20*32+29) /* "" CPU is not affected by SRSO */ + /* * BUG word(s) */ @@ -483,5 +491,9 @@ #define X86_BUG_RETBLEED X86_BUG(27) /* CPU is affected by RETBleed */ #define X86_BUG_EIBRS_PBRSB X86_BUG(28) /* EIBRS is vulnerable to Post Barrier RSB Predictions */ #define X86_BUG_SMT_RSB X86_BUG(29) /* CPU is vulnerable to Cross-Thread Return Address Predictions */ +#define X86_BUG_GDS X86_BUG(30) /* CPU is affected by Gather Data Sampling */ +/* BUG word 2 */ +#define X86_BUG_SRSO X86_BUG(1*32 + 0) /* AMD SRSO bug */ +#define X86_BUG_DIV0 X86_BUG(1*32 + 1) /* AMD DIV0 speculation bug */ #endif /* _ASM_X86_CPUFEATURES_H */ diff --git a/arch/x86/include/asm/linkage.h b/arch/x86/include/asm/linkage.h index 0953aa32a324..97a3de7892d3 100644 --- a/arch/x86/include/asm/linkage.h +++ b/arch/x86/include/asm/linkage.h @@ -21,7 +21,7 @@ #define FUNCTION_PADDING #endif -#if (CONFIG_FUNCTION_ALIGNMENT > 8) && !defined(__DISABLE_EXPORTS) && !defined(BULID_VDSO) +#if (CONFIG_FUNCTION_ALIGNMENT > 8) && !defined(__DISABLE_EXPORTS) && 
!defined(BUILD_VDSO) # define __FUNC_ALIGN __ALIGN; FUNCTION_PADDING #else # define __FUNC_ALIGN __ALIGN diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index 88d9ef98e087..fa83d88e4c99 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h @@ -5,7 +5,7 @@ #include <linux/types.h> #include <linux/nmi.h> #include <linux/msi.h> -#include <asm/io.h> +#include <linux/io.h> #include <asm/hyperv-tlfs.h> #include <asm/nospec-branch.h> #include <asm/paravirt.h> diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index a00a53e15ab7..1d111350197f 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -57,6 +57,7 @@ #define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */ #define PRED_CMD_IBPB BIT(0) /* Indirect Branch Prediction Barrier */ +#define PRED_CMD_SBPB BIT(7) /* Selective Branch Prediction Barrier */ #define MSR_PPIN_CTL 0x0000004e #define MSR_PPIN 0x0000004f @@ -155,6 +156,15 @@ * Not susceptible to Post-Barrier * Return Stack Buffer Predictions. */ +#define ARCH_CAP_GDS_CTRL BIT(25) /* + * CPU is vulnerable to Gather + * Data Sampling (GDS) and + * has controls for mitigation. + */ +#define ARCH_CAP_GDS_NO BIT(26) /* + * CPU is not vulnerable to Gather + * Data Sampling (GDS). + */ #define ARCH_CAP_XAPIC_DISABLE BIT(21) /* * IA32_XAPIC_DISABLE_STATUS MSR @@ -178,6 +188,8 @@ #define RNGDS_MITG_DIS BIT(0) /* SRBDS support */ #define RTM_ALLOW BIT(1) /* TSX development mode */ #define FB_CLEAR_DIS BIT(3) /* CPU Fill buffer clear disable */ +#define GDS_MITG_DIS BIT(4) /* Disable GDS mitigation */ +#define GDS_MITG_LOCKED BIT(5) /* GDS mitigation locked */ #define MSR_IA32_SYSENTER_CS 0x00000174 #define MSR_IA32_SYSENTER_ESP 0x00000175 diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 1a65cf4acb2b..3faf044569a5 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -211,7 +211,8 @@ * eventually turn into it's own annotation. 
*/ .macro VALIDATE_UNRET_END -#if defined(CONFIG_NOINSTR_VALIDATION) && defined(CONFIG_CPU_UNRET_ENTRY) +#if defined(CONFIG_NOINSTR_VALIDATION) && \ + (defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO)) ANNOTATE_RETPOLINE_SAFE nop #endif @@ -289,13 +290,18 @@ */ .macro UNTRAIN_RET #if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \ - defined(CONFIG_CALL_DEPTH_TRACKING) + defined(CONFIG_CALL_DEPTH_TRACKING) || defined(CONFIG_CPU_SRSO) VALIDATE_UNRET_END ALTERNATIVE_3 "", \ CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET, \ "call entry_ibpb", X86_FEATURE_ENTRY_IBPB, \ __stringify(RESET_CALL_DEPTH), X86_FEATURE_CALL_DEPTH #endif + +#ifdef CONFIG_CPU_SRSO + ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \ + "call srso_untrain_ret_alias", X86_FEATURE_SRSO_ALIAS +#endif .endm .macro UNTRAIN_RET_FROM_CALL @@ -307,6 +313,11 @@ "call entry_ibpb", X86_FEATURE_ENTRY_IBPB, \ __stringify(RESET_CALL_DEPTH_FROM_CALL), X86_FEATURE_CALL_DEPTH #endif + +#ifdef CONFIG_CPU_SRSO + ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \ + "call srso_untrain_ret_alias", X86_FEATURE_SRSO_ALIAS +#endif .endm @@ -332,6 +343,8 @@ extern retpoline_thunk_t __x86_indirect_jump_thunk_array[]; extern void __x86_return_thunk(void); extern void zen_untrain_ret(void); +extern void srso_untrain_ret(void); +extern void srso_untrain_ret_alias(void); extern void entry_ibpb(void); #ifdef CONFIG_CALL_THUNKS @@ -479,11 +492,11 @@ void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature) : "memory"); } +extern u64 x86_pred_cmd; + static inline void indirect_branch_prediction_barrier(void) { - u64 val = PRED_CMD_IBPB; - - alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB); + alternative_msr_write(MSR_IA32_PRED_CMD, x86_pred_cmd, X86_FEATURE_USE_IBPB); } /* The Intel SPEC CTRL MSR base value cache */ diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index d46300e94f85..fd750247ca89 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -586,7 +586,6 @@ extern char ignore_fpu_irq; #define HAVE_ARCH_PICK_MMAP_LAYOUT 1 #define ARCH_HAS_PREFETCHW -#define ARCH_HAS_SPINLOCK_PREFETCH #ifdef CONFIG_X86_32 # define BASE_PREFETCH "" @@ -620,11 +619,6 @@ static __always_inline void prefetchw(const void *x) "m" (*(const char *)x)); } -static inline void spin_lock_prefetch(const void *x) -{ - prefetchw(x); -} - #define TOP_OF_INIT_STACK ((unsigned long)&init_stack + sizeof(init_stack) - \ TOP_OF_KERNEL_STACK_PADDING) @@ -682,9 +676,13 @@ extern u16 get_llc_id(unsigned int cpu); #ifdef CONFIG_CPU_SUP_AMD extern u32 amd_get_nodes_per_socket(void); extern u32 amd_get_highest_perf(void); +extern bool cpu_has_ibpb_brtype_microcode(void); +extern void amd_clear_divider(void); #else static inline u32 amd_get_nodes_per_socket(void) { return 0; } static inline u32 amd_get_highest_perf(void) { return 0; } +static inline bool cpu_has_ibpb_brtype_microcode(void) { return false; } +static inline void amd_clear_divider(void) { } #endif extern unsigned long arch_align_stack(unsigned long sp); @@ -727,4 +725,6 @@ bool arch_is_platform_page(u64 paddr); #define arch_is_platform_page arch_is_platform_page #endif +extern bool gds_ucode_mitigated(void); + #endif /* _ASM_X86_PROCESSOR_H */ diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h index 794f69625780..9d6411c65920 100644 --- a/arch/x86/include/asm/segment.h +++ b/arch/x86/include/asm/segment.h @@ -56,7 +56,7 @@ #define GDT_ENTRY_INVALID_SEG 
0 -#ifdef CONFIG_X86_32 +#if defined(CONFIG_X86_32) && !defined(BUILD_VDSO32_64) /* * The layout of the per-CPU GDT under Linux: * diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 21b542a6866c..53369c57751e 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -52,6 +52,7 @@ int acpi_lapic; int acpi_ioapic; int acpi_strict; int acpi_disable_cmcff; +bool acpi_int_src_ovr[NR_IRQS_LEGACY]; /* ACPI SCI override configuration */ u8 acpi_sci_flags __initdata; @@ -588,6 +589,9 @@ acpi_parse_int_src_ovr(union acpi_subtable_headers * header, acpi_table_print_madt_entry(&header->common); + if (intsrc->source_irq < NR_IRQS_LEGACY) + acpi_int_src_ovr[intsrc->source_irq] = true; + if (intsrc->source_irq == acpi_gbl_FADT.sci_interrupt) { acpi_sci_ioapic_setup(intsrc->source_irq, intsrc->inti_flags & ACPI_MADT_POLARITY_MASK, diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 26ad7ca423e7..70f9d56f9305 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -73,8 +73,13 @@ static const int amd_erratum_1054[] = static const int amd_zenbleed[] = AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x30, 0x0, 0x4f, 0xf), AMD_MODEL_RANGE(0x17, 0x60, 0x0, 0x7f, 0xf), + AMD_MODEL_RANGE(0x17, 0x90, 0x0, 0x91, 0xf), AMD_MODEL_RANGE(0x17, 0xa0, 0x0, 0xaf, 0xf)); +static const int amd_div0[] = + AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x00, 0x0, 0x2f, 0xf), + AMD_MODEL_RANGE(0x17, 0x50, 0x0, 0x5f, 0xf)); + static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) { int osvw_id = *erratum++; @@ -1130,6 +1135,11 @@ static void init_amd(struct cpuinfo_x86 *c) WARN_ON_ONCE(msr_set_bit(MSR_EFER, _EFER_AUTOIBRS)); zenbleed_check(c); + + if (cpu_has_amd_erratum(c, amd_div0)) { + pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n"); + setup_force_cpu_bug(X86_BUG_DIV0); + } } #ifdef CONFIG_X86_32 @@ -1290,3 +1300,32 @@ void amd_check_microcode(void) { on_each_cpu(zenbleed_check_cpu, NULL, 1); } + +bool cpu_has_ibpb_brtype_microcode(void) +{ + switch (boot_cpu_data.x86) { + /* Zen1/2 IBPB flushes branch type predictions too. */ + case 0x17: + return boot_cpu_has(X86_FEATURE_AMD_IBPB); + case 0x19: + /* Poke the MSR bit on Zen3/4 to check its presence. */ + if (!wrmsrl_safe(MSR_IA32_PRED_CMD, PRED_CMD_SBPB)) { + setup_force_cpu_cap(X86_FEATURE_SBPB); + return true; + } else { + return false; + } + default: + return false; + } +} + +/* + * Issue a DIV 0/1 insn to clear any division data from previous DIV + * operations. 
+ */ +void noinstr amd_clear_divider(void) +{ + asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0) + :: "a" (0), "d" (0), "r" (1)); +} diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 95507448e781..d02f73c5339d 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -47,6 +47,8 @@ static void __init taa_select_mitigation(void); static void __init mmio_select_mitigation(void); static void __init srbds_select_mitigation(void); static void __init l1d_flush_select_mitigation(void); +static void __init srso_select_mitigation(void); +static void __init gds_select_mitigation(void); /* The base value of the SPEC_CTRL MSR without task-specific bits set */ u64 x86_spec_ctrl_base; @@ -56,6 +58,9 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_base); DEFINE_PER_CPU(u64, x86_spec_ctrl_current); EXPORT_SYMBOL_GPL(x86_spec_ctrl_current); +u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB; +EXPORT_SYMBOL_GPL(x86_pred_cmd); + static DEFINE_MUTEX(spec_ctrl_mutex); /* Update SPEC_CTRL MSR and its cached copy unconditionally */ @@ -160,6 +165,8 @@ void __init cpu_select_mitigations(void) md_clear_select_mitigation(); srbds_select_mitigation(); l1d_flush_select_mitigation(); + srso_select_mitigation(); + gds_select_mitigation(); } /* @@ -646,6 +653,149 @@ static int __init l1d_flush_parse_cmdline(char *str) early_param("l1d_flush", l1d_flush_parse_cmdline); #undef pr_fmt +#define pr_fmt(fmt) "GDS: " fmt + +enum gds_mitigations { + GDS_MITIGATION_OFF, + GDS_MITIGATION_UCODE_NEEDED, + GDS_MITIGATION_FORCE, + GDS_MITIGATION_FULL, + GDS_MITIGATION_FULL_LOCKED, + GDS_MITIGATION_HYPERVISOR, +}; + +#if IS_ENABLED(CONFIG_GDS_FORCE_MITIGATION) +static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FORCE; +#else +static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FULL; +#endif + +static const char * const gds_strings[] = { + [GDS_MITIGATION_OFF] = "Vulnerable", + [GDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode", + [GDS_MITIGATION_FORCE] = "Mitigation: AVX disabled, no microcode", + [GDS_MITIGATION_FULL] = "Mitigation: Microcode", + [GDS_MITIGATION_FULL_LOCKED] = "Mitigation: Microcode (locked)", + [GDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status", +}; + +bool gds_ucode_mitigated(void) +{ + return (gds_mitigation == GDS_MITIGATION_FULL || + gds_mitigation == GDS_MITIGATION_FULL_LOCKED); +} +EXPORT_SYMBOL_GPL(gds_ucode_mitigated); + +void update_gds_msr(void) +{ + u64 mcu_ctrl_after; + u64 mcu_ctrl; + + switch (gds_mitigation) { + case GDS_MITIGATION_OFF: + rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); + mcu_ctrl |= GDS_MITG_DIS; + break; + case GDS_MITIGATION_FULL_LOCKED: + /* + * The LOCKED state comes from the boot CPU. APs might not have + * the same state. Make sure the mitigation is enabled on all + * CPUs. + */ + case GDS_MITIGATION_FULL: + rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); + mcu_ctrl &= ~GDS_MITG_DIS; + break; + case GDS_MITIGATION_FORCE: + case GDS_MITIGATION_UCODE_NEEDED: + case GDS_MITIGATION_HYPERVISOR: + return; + }; + + wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); + + /* + * Check to make sure that the WRMSR value was not ignored. Writes to + * GDS_MITG_DIS will be ignored if this processor is locked but the boot + * processor was not. 
+ */ + rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl_after); + WARN_ON_ONCE(mcu_ctrl != mcu_ctrl_after); +} + +static void __init gds_select_mitigation(void) +{ + u64 mcu_ctrl; + + if (!boot_cpu_has_bug(X86_BUG_GDS)) + return; + + if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { + gds_mitigation = GDS_MITIGATION_HYPERVISOR; + goto out; + } + + if (cpu_mitigations_off()) + gds_mitigation = GDS_MITIGATION_OFF; + /* Will verify below that mitigation _can_ be disabled */ + + /* No microcode */ + if (!(x86_read_arch_cap_msr() & ARCH_CAP_GDS_CTRL)) { + if (gds_mitigation == GDS_MITIGATION_FORCE) { + /* + * This only needs to be done on the boot CPU so do it + * here rather than in update_gds_msr() + */ + setup_clear_cpu_cap(X86_FEATURE_AVX); + pr_warn("Microcode update needed! Disabling AVX as mitigation.\n"); + } else { + gds_mitigation = GDS_MITIGATION_UCODE_NEEDED; + } + goto out; + } + + /* Microcode has mitigation, use it */ + if (gds_mitigation == GDS_MITIGATION_FORCE) + gds_mitigation = GDS_MITIGATION_FULL; + + rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); + if (mcu_ctrl & GDS_MITG_LOCKED) { + if (gds_mitigation == GDS_MITIGATION_OFF) + pr_warn("Mitigation locked. Disable failed.\n"); + + /* + * The mitigation is selected from the boot CPU. All other CPUs + * _should_ have the same state. If the boot CPU isn't locked + * but others are then update_gds_msr() will WARN() of the state + * mismatch. If the boot CPU is locked update_gds_msr() will + * ensure the other CPUs have the mitigation enabled. + */ + gds_mitigation = GDS_MITIGATION_FULL_LOCKED; + } + + update_gds_msr(); +out: + pr_info("%s\n", gds_strings[gds_mitigation]); +} + +static int __init gds_parse_cmdline(char *str) +{ + if (!str) + return -EINVAL; + + if (!boot_cpu_has_bug(X86_BUG_GDS)) + return 0; + + if (!strcmp(str, "off")) + gds_mitigation = GDS_MITIGATION_OFF; + else if (!strcmp(str, "force")) + gds_mitigation = GDS_MITIGATION_FORCE; + + return 0; +} +early_param("gather_data_sampling", gds_parse_cmdline); + +#undef pr_fmt #define pr_fmt(fmt) "Spectre V1 : " fmt enum spectre_v1_mitigation { @@ -2188,6 +2338,165 @@ static int __init l1tf_cmdline(char *str) early_param("l1tf", l1tf_cmdline); #undef pr_fmt +#define pr_fmt(fmt) "Speculative Return Stack Overflow: " fmt + +enum srso_mitigation { + SRSO_MITIGATION_NONE, + SRSO_MITIGATION_MICROCODE, + SRSO_MITIGATION_SAFE_RET, + SRSO_MITIGATION_IBPB, + SRSO_MITIGATION_IBPB_ON_VMEXIT, +}; + +enum srso_mitigation_cmd { + SRSO_CMD_OFF, + SRSO_CMD_MICROCODE, + SRSO_CMD_SAFE_RET, + SRSO_CMD_IBPB, + SRSO_CMD_IBPB_ON_VMEXIT, +}; + +static const char * const srso_strings[] = { + [SRSO_MITIGATION_NONE] = "Vulnerable", + [SRSO_MITIGATION_MICROCODE] = "Mitigation: microcode", + [SRSO_MITIGATION_SAFE_RET] = "Mitigation: safe RET", + [SRSO_MITIGATION_IBPB] = "Mitigation: IBPB", + [SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only" +}; + +static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_NONE; +static enum srso_mitigation_cmd srso_cmd __ro_after_init = SRSO_CMD_SAFE_RET; + +static int __init srso_parse_cmdline(char *str) +{ + if (!str) + return -EINVAL; + + if (!strcmp(str, "off")) + srso_cmd = SRSO_CMD_OFF; + else if (!strcmp(str, "microcode")) + srso_cmd = SRSO_CMD_MICROCODE; + else if (!strcmp(str, "safe-ret")) + srso_cmd = SRSO_CMD_SAFE_RET; + else if (!strcmp(str, "ibpb")) + srso_cmd = SRSO_CMD_IBPB; + else if (!strcmp(str, "ibpb-vmexit")) + srso_cmd = SRSO_CMD_IBPB_ON_VMEXIT; + else + pr_err("Ignoring unknown SRSO option (%s).", str); + + return 0; +} 
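For readers tracking the new knobs: gds_parse_cmdline() above accepts "off" and "force" for the gather_data_sampling= parameter, and srso_parse_cmdline() (registered via early_param() on the next line) accepts "off", "microcode", "safe-ret", "ibpb" and "ibpb-vmexit" for spec_rstack_overflow=. The selected state is then reported through the CPU vulnerability attributes wired up later in this patch. A minimal userspace sketch, assuming only the standard /sys/devices/system/cpu/vulnerabilities/ location and the attribute names added in the drivers/base/cpu.c hunk below:

	#include <stdio.h>

	static void show(const char *name)
	{
		char path[256], line[256];
		FILE *f;

		snprintf(path, sizeof(path),
			 "/sys/devices/system/cpu/vulnerabilities/%s", name);
		f = fopen(path, "r");
		if (!f) {
			/* Attribute not present on kernels without this patch. */
			printf("%s: not reported\n", name);
			return;
		}
		if (fgets(line, sizeof(line), f))
			printf("%s: %s", name, line);	/* e.g. "Mitigation: safe RET" */
		fclose(f);
	}

	int main(void)
	{
		show("gather_data_sampling");
		show("spec_rstack_overflow");
		return 0;
	}

On architectures that do not implement the corresponding cpu_show_*() handlers, the weak aliases added in the drivers/base/cpu.c hunk report "Not affected".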
+early_param("spec_rstack_overflow", srso_parse_cmdline); + +#define SRSO_NOTICE "WARNING: See https://kernel.org/doc/html/latest/admin-guide/hw-vuln/srso.html for mitigation options." + +static void __init srso_select_mitigation(void) +{ + bool has_microcode; + + if (!boot_cpu_has_bug(X86_BUG_SRSO) || cpu_mitigations_off()) + goto pred_cmd; + + /* + * The first check is for the kernel running as a guest in order + * for guests to verify whether IBPB is a viable mitigation. + */ + has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE) || cpu_has_ibpb_brtype_microcode(); + if (!has_microcode) { + pr_warn("IBPB-extending microcode not applied!\n"); + pr_warn(SRSO_NOTICE); + } else { + /* + * Enable the synthetic (even if in a real CPUID leaf) + * flags for guests. + */ + setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE); + + /* + * Zen1/2 with SMT off aren't vulnerable after the right + * IBPB microcode has been applied. + */ + if ((boot_cpu_data.x86 < 0x19) && + (!cpu_smt_possible() || (cpu_smt_control == CPU_SMT_DISABLED))) + setup_force_cpu_cap(X86_FEATURE_SRSO_NO); + } + + if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) { + if (has_microcode) { + pr_err("Retbleed IBPB mitigation enabled, using same for SRSO\n"); + srso_mitigation = SRSO_MITIGATION_IBPB; + goto pred_cmd; + } + } + + switch (srso_cmd) { + case SRSO_CMD_OFF: + return; + + case SRSO_CMD_MICROCODE: + if (has_microcode) { + srso_mitigation = SRSO_MITIGATION_MICROCODE; + pr_warn(SRSO_NOTICE); + } + break; + + case SRSO_CMD_SAFE_RET: + if (IS_ENABLED(CONFIG_CPU_SRSO)) { + /* + * Enable the return thunk for generated code + * like ftrace, static_call, etc. + */ + setup_force_cpu_cap(X86_FEATURE_RETHUNK); + + if (boot_cpu_data.x86 == 0x19) + setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS); + else + setup_force_cpu_cap(X86_FEATURE_SRSO); + srso_mitigation = SRSO_MITIGATION_SAFE_RET; + } else { + pr_err("WARNING: kernel not compiled with CPU_SRSO.\n"); + goto pred_cmd; + } + break; + + case SRSO_CMD_IBPB: + if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY)) { + if (has_microcode) { + setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB); + srso_mitigation = SRSO_MITIGATION_IBPB; + } + } else { + pr_err("WARNING: kernel not compiled with CPU_IBPB_ENTRY.\n"); + goto pred_cmd; + } + break; + + case SRSO_CMD_IBPB_ON_VMEXIT: + if (IS_ENABLED(CONFIG_CPU_SRSO)) { + if (!boot_cpu_has(X86_FEATURE_ENTRY_IBPB) && has_microcode) { + setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT); + srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT; + } + } else { + pr_err("WARNING: kernel not compiled with CPU_SRSO.\n"); + goto pred_cmd; + } + break; + + default: + break; + } + + pr_info("%s%s\n", srso_strings[srso_mitigation], (has_microcode ? "" : ", no microcode")); + +pred_cmd: + if ((boot_cpu_has(X86_FEATURE_SRSO_NO) || srso_cmd == SRSO_CMD_OFF) && + boot_cpu_has(X86_FEATURE_SBPB)) + x86_pred_cmd = PRED_CMD_SBPB; +} + +#undef pr_fmt #define pr_fmt(fmt) fmt #ifdef CONFIG_SYSFS @@ -2385,6 +2694,18 @@ static ssize_t retbleed_show_state(char *buf) return sysfs_emit(buf, "%s\n", retbleed_strings[retbleed_mitigation]); } +static ssize_t srso_show_state(char *buf) +{ + return sysfs_emit(buf, "%s%s\n", + srso_strings[srso_mitigation], + (cpu_has_ibpb_brtype_microcode() ? 
"" : ", no microcode")); +} + +static ssize_t gds_show_state(char *buf) +{ + return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]); +} + static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, char *buf, unsigned int bug) { @@ -2434,6 +2755,12 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr case X86_BUG_RETBLEED: return retbleed_show_state(buf); + case X86_BUG_SRSO: + return srso_show_state(buf); + + case X86_BUG_GDS: + return gds_show_state(buf); + default: break; } @@ -2498,4 +2825,14 @@ ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, cha { return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED); } + +ssize_t cpu_show_spec_rstack_overflow(struct device *dev, struct device_attribute *attr, char *buf) +{ + return cpu_show_common(dev, attr, buf, X86_BUG_SRSO); +} + +ssize_t cpu_show_gds(struct device *dev, struct device_attribute *attr, char *buf) +{ + return cpu_show_common(dev, attr, buf, X86_BUG_GDS); +} #endif diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 0ba1067f4e5f..e3a65e9fc750 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1250,6 +1250,10 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { #define RETBLEED BIT(3) /* CPU is affected by SMT (cross-thread) return predictions */ #define SMT_RSB BIT(4) +/* CPU is affected by SRSO */ +#define SRSO BIT(5) +/* CPU is affected by GDS */ +#define GDS BIT(6) static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = { VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS), @@ -1262,27 +1266,30 @@ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = { VULNBL_INTEL_STEPPINGS(BROADWELL_X, X86_STEPPING_ANY, MMIO), VULNBL_INTEL_STEPPINGS(BROADWELL, X86_STEPPING_ANY, SRBDS), VULNBL_INTEL_STEPPINGS(SKYLAKE_L, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED), - VULNBL_INTEL_STEPPINGS(SKYLAKE_X, X86_STEPPING_ANY, MMIO | RETBLEED), + VULNBL_INTEL_STEPPINGS(SKYLAKE_X, X86_STEPPING_ANY, MMIO | RETBLEED | GDS), VULNBL_INTEL_STEPPINGS(SKYLAKE, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED), - VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED), - VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED), + VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED | GDS), + VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED | GDS), VULNBL_INTEL_STEPPINGS(CANNONLAKE_L, X86_STEPPING_ANY, RETBLEED), - VULNBL_INTEL_STEPPINGS(ICELAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED), - VULNBL_INTEL_STEPPINGS(ICELAKE_D, X86_STEPPING_ANY, MMIO), - VULNBL_INTEL_STEPPINGS(ICELAKE_X, X86_STEPPING_ANY, MMIO), - VULNBL_INTEL_STEPPINGS(COMETLAKE, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED), + VULNBL_INTEL_STEPPINGS(ICELAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS), + VULNBL_INTEL_STEPPINGS(ICELAKE_D, X86_STEPPING_ANY, MMIO | GDS), + VULNBL_INTEL_STEPPINGS(ICELAKE_X, X86_STEPPING_ANY, MMIO | GDS), + VULNBL_INTEL_STEPPINGS(COMETLAKE, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS), VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPINGS(0x0, 0x0), MMIO | RETBLEED), - VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED), + VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS), + VULNBL_INTEL_STEPPINGS(TIGERLAKE_L, X86_STEPPING_ANY, GDS), + VULNBL_INTEL_STEPPINGS(TIGERLAKE, X86_STEPPING_ANY, GDS), 
VULNBL_INTEL_STEPPINGS(LAKEFIELD, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED), - VULNBL_INTEL_STEPPINGS(ROCKETLAKE, X86_STEPPING_ANY, MMIO | RETBLEED), + VULNBL_INTEL_STEPPINGS(ROCKETLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS), VULNBL_INTEL_STEPPINGS(ATOM_TREMONT, X86_STEPPING_ANY, MMIO | MMIO_SBDS), VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_D, X86_STEPPING_ANY, MMIO), VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS), VULNBL_AMD(0x15, RETBLEED), VULNBL_AMD(0x16, RETBLEED), - VULNBL_AMD(0x17, RETBLEED | SMT_RSB), + VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO), VULNBL_HYGON(0x18, RETBLEED | SMT_RSB), + VULNBL_AMD(0x19, SRSO), {} }; @@ -1406,6 +1413,21 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) if (cpu_matches(cpu_vuln_blacklist, SMT_RSB)) setup_force_cpu_bug(X86_BUG_SMT_RSB); + if (!cpu_has(c, X86_FEATURE_SRSO_NO)) { + if (cpu_matches(cpu_vuln_blacklist, SRSO)) + setup_force_cpu_bug(X86_BUG_SRSO); + } + + /* + * Check if CPU is vulnerable to GDS. If running in a virtual machine on + * an affected processor, the VMM may have disabled the use of GATHER by + * disabling AVX2. The only way to do this in HW is to clear XCR0[2], + * which means that AVX will be disabled. + */ + if (cpu_matches(cpu_vuln_blacklist, GDS) && !(ia32_cap & ARCH_CAP_GDS_NO) && + boot_cpu_has(X86_FEATURE_AVX)) + setup_force_cpu_bug(X86_BUG_GDS); + if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN)) return; @@ -1962,6 +1984,8 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c) validate_apic_and_package_id(c); x86_spec_ctrl_setup_ap(); update_srbds_msr(); + if (boot_cpu_has_bug(X86_BUG_GDS)) + update_gds_msr(); tsx_ap_init(); } diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h index 1c44630d4789..1dcd7d4e38ef 100644 --- a/arch/x86/kernel/cpu/cpu.h +++ b/arch/x86/kernel/cpu/cpu.h @@ -83,6 +83,7 @@ void cpu_select_mitigations(void); extern void x86_spec_ctrl_setup_ap(void); extern void update_srbds_msr(void); +extern void update_gds_msr(void); extern enum spectre_v2_mitigation spectre_v2_enabled; diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 4a817d20ce3b..1885326a8f65 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -206,6 +206,8 @@ DEFINE_IDTENTRY(exc_divide_error) { do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE, FPE_INTDIV, error_get_trap_addr(regs)); + + amd_clear_divider(); } DEFINE_IDTENTRY(exc_overflow) diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 03c885d3640f..ef06211bae4c 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -134,13 +134,27 @@ SECTIONS SOFTIRQENTRY_TEXT #ifdef CONFIG_RETPOLINE __indirect_thunk_start = .; - *(.text.__x86.*) + *(.text.__x86.indirect_thunk) + *(.text.__x86.return_thunk) __indirect_thunk_end = .; #endif STATIC_CALL_TEXT ALIGN_ENTRY_TEXT_BEGIN +#ifdef CONFIG_CPU_SRSO + *(.text.__x86.rethunk_untrain) +#endif + ENTRY_TEXT + +#ifdef CONFIG_CPU_SRSO + /* + * See the comment above srso_untrain_ret_alias()'s + * definition. + */ + . = srso_untrain_ret_alias | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20); + *(.text.__x86.rethunk_safe) +#endif ALIGN_ENTRY_TEXT_END *(.gnu.warning) @@ -509,7 +523,24 @@ INIT_PER_CPU(irq_stack_backing_store); #endif #ifdef CONFIG_RETHUNK -. = ASSERT((__x86_return_thunk & 0x3f) == 0, "__x86_return_thunk not cacheline-aligned"); +. = ASSERT((__ret & 0x3f) == 0, "__ret not cacheline-aligned"); +. 
= ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned"); +#endif + +#ifdef CONFIG_CPU_SRSO +/* + * GNU ld cannot do XOR until 2.41. + * https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=f6f78318fca803c4907fb8d7f6ded8295f1947b1 + * + * LLVM lld cannot do XOR until lld-17. + * https://github.com/llvm/llvm-project/commit/fae96104d4378166cbe5c875ef8ed808a356f3fb + * + * Instead do: (A | B) - (A & B) in order to compute the XOR + * of the two function addresses: + */ +. = ASSERT(((ABSOLUTE(srso_untrain_ret_alias) | srso_safe_ret_alias) - + (ABSOLUTE(srso_untrain_ret_alias) & srso_safe_ret_alias)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)), + "SRSO function pair won't alias"); #endif #endif /* CONFIG_X86_64 */ diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 7f4d13383cf2..d3432687c9e6 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -729,6 +729,9 @@ void kvm_set_cpu_caps(void) F(NULL_SEL_CLR_BASE) | F(AUTOIBRS) | 0 /* PrefetchCtlMsr */ ); + if (cpu_feature_enabled(X86_FEATURE_SRSO_NO)) + kvm_cpu_cap_set(X86_FEATURE_SRSO_NO); + kvm_cpu_cap_init_kvm_defined(CPUID_8000_0022_EAX, F(PERFMON_V2) ); diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 07756b7348ae..d3aec1f2cad2 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -2417,15 +2417,18 @@ static void sev_es_sync_from_ghcb(struct vcpu_svm *svm) */ memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs)); - vcpu->arch.regs[VCPU_REGS_RAX] = ghcb_get_rax_if_valid(ghcb); - vcpu->arch.regs[VCPU_REGS_RBX] = ghcb_get_rbx_if_valid(ghcb); - vcpu->arch.regs[VCPU_REGS_RCX] = ghcb_get_rcx_if_valid(ghcb); - vcpu->arch.regs[VCPU_REGS_RDX] = ghcb_get_rdx_if_valid(ghcb); - vcpu->arch.regs[VCPU_REGS_RSI] = ghcb_get_rsi_if_valid(ghcb); + BUILD_BUG_ON(sizeof(svm->sev_es.valid_bitmap) != sizeof(ghcb->save.valid_bitmap)); + memcpy(&svm->sev_es.valid_bitmap, &ghcb->save.valid_bitmap, sizeof(ghcb->save.valid_bitmap)); - svm->vmcb->save.cpl = ghcb_get_cpl_if_valid(ghcb); + vcpu->arch.regs[VCPU_REGS_RAX] = kvm_ghcb_get_rax_if_valid(svm, ghcb); + vcpu->arch.regs[VCPU_REGS_RBX] = kvm_ghcb_get_rbx_if_valid(svm, ghcb); + vcpu->arch.regs[VCPU_REGS_RCX] = kvm_ghcb_get_rcx_if_valid(svm, ghcb); + vcpu->arch.regs[VCPU_REGS_RDX] = kvm_ghcb_get_rdx_if_valid(svm, ghcb); + vcpu->arch.regs[VCPU_REGS_RSI] = kvm_ghcb_get_rsi_if_valid(svm, ghcb); - if (ghcb_xcr0_is_valid(ghcb)) { + svm->vmcb->save.cpl = kvm_ghcb_get_cpl_if_valid(svm, ghcb); + + if (kvm_ghcb_xcr0_is_valid(svm)) { vcpu->arch.xcr0 = ghcb_get_xcr0(ghcb); kvm_update_cpuid_runtime(vcpu); } @@ -2436,84 +2439,88 @@ static void sev_es_sync_from_ghcb(struct vcpu_svm *svm) control->exit_code_hi = upper_32_bits(exit_code); control->exit_info_1 = ghcb_get_sw_exit_info_1(ghcb); control->exit_info_2 = ghcb_get_sw_exit_info_2(ghcb); + svm->sev_es.sw_scratch = kvm_ghcb_get_sw_scratch_if_valid(svm, ghcb); /* Clear the valid entries fields */ memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap)); } +static u64 kvm_ghcb_get_sw_exit_code(struct vmcb_control_area *control) +{ + return (((u64)control->exit_code_hi) << 32) | control->exit_code; +} + static int sev_es_validate_vmgexit(struct vcpu_svm *svm) { - struct kvm_vcpu *vcpu; - struct ghcb *ghcb; + struct vmcb_control_area *control = &svm->vmcb->control; + struct kvm_vcpu *vcpu = &svm->vcpu; u64 exit_code; u64 reason; - ghcb = svm->sev_es.ghcb; - /* * Retrieve the exit code now even though it may not be marked valid * as it could help with debugging. 
*/ - exit_code = ghcb_get_sw_exit_code(ghcb); + exit_code = kvm_ghcb_get_sw_exit_code(control); /* Only GHCB Usage code 0 is supported */ - if (ghcb->ghcb_usage) { + if (svm->sev_es.ghcb->ghcb_usage) { reason = GHCB_ERR_INVALID_USAGE; goto vmgexit_err; } reason = GHCB_ERR_MISSING_INPUT; - if (!ghcb_sw_exit_code_is_valid(ghcb) || - !ghcb_sw_exit_info_1_is_valid(ghcb) || - !ghcb_sw_exit_info_2_is_valid(ghcb)) + if (!kvm_ghcb_sw_exit_code_is_valid(svm) || + !kvm_ghcb_sw_exit_info_1_is_valid(svm) || + !kvm_ghcb_sw_exit_info_2_is_valid(svm)) goto vmgexit_err; - switch (ghcb_get_sw_exit_code(ghcb)) { + switch (exit_code) { case SVM_EXIT_READ_DR7: break; case SVM_EXIT_WRITE_DR7: - if (!ghcb_rax_is_valid(ghcb)) + if (!kvm_ghcb_rax_is_valid(svm)) goto vmgexit_err; break; case SVM_EXIT_RDTSC: break; case SVM_EXIT_RDPMC: - if (!ghcb_rcx_is_valid(ghcb)) + if (!kvm_ghcb_rcx_is_valid(svm)) goto vmgexit_err; break; case SVM_EXIT_CPUID: - if (!ghcb_rax_is_valid(ghcb) || - !ghcb_rcx_is_valid(ghcb)) + if (!kvm_ghcb_rax_is_valid(svm) || + !kvm_ghcb_rcx_is_valid(svm)) goto vmgexit_err; - if (ghcb_get_rax(ghcb) == 0xd) - if (!ghcb_xcr0_is_valid(ghcb)) + if (vcpu->arch.regs[VCPU_REGS_RAX] == 0xd) + if (!kvm_ghcb_xcr0_is_valid(svm)) goto vmgexit_err; break; case SVM_EXIT_INVD: break; case SVM_EXIT_IOIO: - if (ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_STR_MASK) { - if (!ghcb_sw_scratch_is_valid(ghcb)) + if (control->exit_info_1 & SVM_IOIO_STR_MASK) { + if (!kvm_ghcb_sw_scratch_is_valid(svm)) goto vmgexit_err; } else { - if (!(ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_TYPE_MASK)) - if (!ghcb_rax_is_valid(ghcb)) + if (!(control->exit_info_1 & SVM_IOIO_TYPE_MASK)) + if (!kvm_ghcb_rax_is_valid(svm)) goto vmgexit_err; } break; case SVM_EXIT_MSR: - if (!ghcb_rcx_is_valid(ghcb)) + if (!kvm_ghcb_rcx_is_valid(svm)) goto vmgexit_err; - if (ghcb_get_sw_exit_info_1(ghcb)) { - if (!ghcb_rax_is_valid(ghcb) || - !ghcb_rdx_is_valid(ghcb)) + if (control->exit_info_1) { + if (!kvm_ghcb_rax_is_valid(svm) || + !kvm_ghcb_rdx_is_valid(svm)) goto vmgexit_err; } break; case SVM_EXIT_VMMCALL: - if (!ghcb_rax_is_valid(ghcb) || - !ghcb_cpl_is_valid(ghcb)) + if (!kvm_ghcb_rax_is_valid(svm) || + !kvm_ghcb_cpl_is_valid(svm)) goto vmgexit_err; break; case SVM_EXIT_RDTSCP: @@ -2521,19 +2528,19 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm) case SVM_EXIT_WBINVD: break; case SVM_EXIT_MONITOR: - if (!ghcb_rax_is_valid(ghcb) || - !ghcb_rcx_is_valid(ghcb) || - !ghcb_rdx_is_valid(ghcb)) + if (!kvm_ghcb_rax_is_valid(svm) || + !kvm_ghcb_rcx_is_valid(svm) || + !kvm_ghcb_rdx_is_valid(svm)) goto vmgexit_err; break; case SVM_EXIT_MWAIT: - if (!ghcb_rax_is_valid(ghcb) || - !ghcb_rcx_is_valid(ghcb)) + if (!kvm_ghcb_rax_is_valid(svm) || + !kvm_ghcb_rcx_is_valid(svm)) goto vmgexit_err; break; case SVM_VMGEXIT_MMIO_READ: case SVM_VMGEXIT_MMIO_WRITE: - if (!ghcb_sw_scratch_is_valid(ghcb)) + if (!kvm_ghcb_sw_scratch_is_valid(svm)) goto vmgexit_err; break; case SVM_VMGEXIT_NMI_COMPLETE: @@ -2549,11 +2556,9 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm) return 0; vmgexit_err: - vcpu = &svm->vcpu; - if (reason == GHCB_ERR_INVALID_USAGE) { vcpu_unimpl(vcpu, "vmgexit: ghcb usage %#x is not valid\n", - ghcb->ghcb_usage); + svm->sev_es.ghcb->ghcb_usage); } else if (reason == GHCB_ERR_INVALID_EVENT) { vcpu_unimpl(vcpu, "vmgexit: exit code %#llx is not valid\n", exit_code); @@ -2563,11 +2568,8 @@ vmgexit_err: dump_ghcb(svm); } - /* Clear the valid entries fields */ - memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap)); - - 
ghcb_set_sw_exit_info_1(ghcb, 2); - ghcb_set_sw_exit_info_2(ghcb, reason); + ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2); + ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, reason); /* Resume the guest to "return" the error code. */ return 1; @@ -2586,7 +2588,7 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm) */ if (svm->sev_es.ghcb_sa_sync) { kvm_write_guest(svm->vcpu.kvm, - ghcb_get_sw_scratch(svm->sev_es.ghcb), + svm->sev_es.sw_scratch, svm->sev_es.ghcb_sa, svm->sev_es.ghcb_sa_len); svm->sev_es.ghcb_sa_sync = false; @@ -2632,12 +2634,11 @@ void pre_sev_run(struct vcpu_svm *svm, int cpu) static int setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len) { struct vmcb_control_area *control = &svm->vmcb->control; - struct ghcb *ghcb = svm->sev_es.ghcb; u64 ghcb_scratch_beg, ghcb_scratch_end; u64 scratch_gpa_beg, scratch_gpa_end; void *scratch_va; - scratch_gpa_beg = ghcb_get_sw_scratch(ghcb); + scratch_gpa_beg = svm->sev_es.sw_scratch; if (!scratch_gpa_beg) { pr_err("vmgexit: scratch gpa not provided\n"); goto e_scratch; @@ -2708,8 +2709,8 @@ static int setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len) return 0; e_scratch: - ghcb_set_sw_exit_info_1(ghcb, 2); - ghcb_set_sw_exit_info_2(ghcb, GHCB_ERR_INVALID_SCRATCH_AREA); + ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2); + ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, GHCB_ERR_INVALID_SCRATCH_AREA); return 1; } @@ -2822,7 +2823,6 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu) struct vcpu_svm *svm = to_svm(vcpu); struct vmcb_control_area *control = &svm->vmcb->control; u64 ghcb_gpa, exit_code; - struct ghcb *ghcb; int ret; /* Validate the GHCB */ @@ -2847,20 +2847,18 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu) } svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva; - ghcb = svm->sev_es.ghcb_map.hva; - trace_kvm_vmgexit_enter(vcpu->vcpu_id, ghcb); - - exit_code = ghcb_get_sw_exit_code(ghcb); + trace_kvm_vmgexit_enter(vcpu->vcpu_id, svm->sev_es.ghcb); + sev_es_sync_from_ghcb(svm); ret = sev_es_validate_vmgexit(svm); if (ret) return ret; - sev_es_sync_from_ghcb(svm); - ghcb_set_sw_exit_info_1(ghcb, 0); - ghcb_set_sw_exit_info_2(ghcb, 0); + ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 0); + ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 0); + exit_code = kvm_ghcb_get_sw_exit_code(control); switch (exit_code) { case SVM_VMGEXIT_MMIO_READ: ret = setup_vmgexit_scratch(svm, true, control->exit_info_2); @@ -2898,13 +2896,13 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu) break; case 1: /* Get AP jump table address */ - ghcb_set_sw_exit_info_2(ghcb, sev->ap_jump_table); + ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, sev->ap_jump_table); break; default: pr_err("svm: vmgexit: unsupported AP jump table request - exit_info_1=%#llx\n", control->exit_info_1); - ghcb_set_sw_exit_info_1(ghcb, 2); - ghcb_set_sw_exit_info_2(ghcb, GHCB_ERR_INVALID_INPUT); + ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2); + ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, GHCB_ERR_INVALID_INPUT); } ret = 1; diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 956726d867aa..03e852dedcc1 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -1498,7 +1498,9 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) if (sd->current_vmcb != svm->vmcb) { sd->current_vmcb = svm->vmcb; - indirect_branch_prediction_barrier(); + + if (!cpu_feature_enabled(X86_FEATURE_IBPB_ON_VMEXIT)) + indirect_branch_prediction_barrier(); } if (kvm_vcpu_apicv_active(vcpu)) avic_vcpu_load(vcpu, cpu); diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 
18af7e712a5a..8239c8de45ac 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -190,10 +190,12 @@ struct vcpu_sev_es_state { /* SEV-ES support */ struct sev_es_save_area *vmsa; struct ghcb *ghcb; + u8 valid_bitmap[16]; struct kvm_host_map ghcb_map; bool received_first_sipi; /* SEV-ES scratch area support */ + u64 sw_scratch; void *ghcb_sa; u32 ghcb_sa_len; bool ghcb_sa_sync; @@ -744,4 +746,28 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm); void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted); void __svm_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted); +#define DEFINE_KVM_GHCB_ACCESSORS(field) \ + static __always_inline bool kvm_ghcb_##field##_is_valid(const struct vcpu_svm *svm) \ + { \ + return test_bit(GHCB_BITMAP_IDX(field), \ + (unsigned long *)&svm->sev_es.valid_bitmap); \ + } \ + \ + static __always_inline u64 kvm_ghcb_get_##field##_if_valid(struct vcpu_svm *svm, struct ghcb *ghcb) \ + { \ + return kvm_ghcb_##field##_is_valid(svm) ? ghcb->save.field : 0; \ + } \ + +DEFINE_KVM_GHCB_ACCESSORS(cpl) +DEFINE_KVM_GHCB_ACCESSORS(rax) +DEFINE_KVM_GHCB_ACCESSORS(rcx) +DEFINE_KVM_GHCB_ACCESSORS(rdx) +DEFINE_KVM_GHCB_ACCESSORS(rbx) +DEFINE_KVM_GHCB_ACCESSORS(rsi) +DEFINE_KVM_GHCB_ACCESSORS(sw_exit_code) +DEFINE_KVM_GHCB_ACCESSORS(sw_exit_info_1) +DEFINE_KVM_GHCB_ACCESSORS(sw_exit_info_2) +DEFINE_KVM_GHCB_ACCESSORS(sw_scratch) +DEFINE_KVM_GHCB_ACCESSORS(xcr0) + #endif diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S index 8e8295e774f0..265452fc9ebe 100644 --- a/arch/x86/kvm/svm/vmenter.S +++ b/arch/x86/kvm/svm/vmenter.S @@ -224,6 +224,9 @@ SYM_FUNC_START(__svm_vcpu_run) */ UNTRAIN_RET + /* SRSO */ + ALTERNATIVE "", "call entry_ibpb", X86_FEATURE_IBPB_ON_VMEXIT + /* * Clear all general purpose registers except RSP and RAX to prevent * speculative use of the guest's values, even those that are reloaded diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 278dbd37dab2..c381770bcbf1 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1616,7 +1616,7 @@ static bool kvm_is_immutable_feature_msr(u32 msr) ARCH_CAP_SKIP_VMENTRY_L1DFLUSH | ARCH_CAP_SSB_NO | ARCH_CAP_MDS_NO | \ ARCH_CAP_PSCHANGE_MC_NO | ARCH_CAP_TSX_CTRL_MSR | ARCH_CAP_TAA_NO | \ ARCH_CAP_SBDR_SSDP_NO | ARCH_CAP_FBSDP_NO | ARCH_CAP_PSDP_NO | \ - ARCH_CAP_FB_CLEAR | ARCH_CAP_RRSBA | ARCH_CAP_PBRSB_NO) + ARCH_CAP_FB_CLEAR | ARCH_CAP_RRSBA | ARCH_CAP_PBRSB_NO | ARCH_CAP_GDS_NO) static u64 kvm_get_arch_capabilities(void) { @@ -1673,6 +1673,9 @@ static u64 kvm_get_arch_capabilities(void) */ } + if (!boot_cpu_has_bug(X86_BUG_GDS) || gds_ucode_mitigated()) + data |= ARCH_CAP_GDS_NO; + return data; } diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S index 3fd066d42ec0..2cff585f22f2 100644 --- a/arch/x86/lib/retpoline.S +++ b/arch/x86/lib/retpoline.S @@ -11,6 +11,7 @@ #include <asm/unwind_hints.h> #include <asm/percpu.h> #include <asm/frame.h> +#include <asm/nops.h> .section .text.__x86.indirect_thunk @@ -131,6 +132,46 @@ SYM_CODE_END(__x86_indirect_jump_thunk_array) */ #ifdef CONFIG_RETHUNK +/* + * srso_untrain_ret_alias() and srso_safe_ret_alias() are placed at + * special addresses: + * + * - srso_untrain_ret_alias() is 2M aligned + * - srso_safe_ret_alias() is also in the same 2M page but bits 2, 8, 14 + * and 20 in its virtual address are set (while those bits in the + * srso_untrain_ret_alias() function are cleared). 
+ * + * This guarantees that those two addresses will alias in the branch + * target buffer of Zen3/4 generations, leading to any potential + * poisoned entries at that BTB slot to get evicted. + * + * As a result, srso_safe_ret_alias() becomes a safe return. + */ +#ifdef CONFIG_CPU_SRSO + .section .text.__x86.rethunk_untrain + +SYM_START(srso_untrain_ret_alias, SYM_L_GLOBAL, SYM_A_NONE) + ANNOTATE_NOENDBR + ASM_NOP2 + lfence + jmp __x86_return_thunk +SYM_FUNC_END(srso_untrain_ret_alias) +__EXPORT_THUNK(srso_untrain_ret_alias) + + .section .text.__x86.rethunk_safe +#endif + +/* Needs a definition for the __x86_return_thunk alternative below. */ +SYM_START(srso_safe_ret_alias, SYM_L_GLOBAL, SYM_A_NONE) +#ifdef CONFIG_CPU_SRSO + add $8, %_ASM_SP + UNWIND_HINT_FUNC +#endif + ANNOTATE_UNRET_SAFE + ret + int3 +SYM_FUNC_END(srso_safe_ret_alias) + .section .text.__x86.return_thunk /* @@ -143,7 +184,7 @@ SYM_CODE_END(__x86_indirect_jump_thunk_array) * from re-poisioning the BTB prediction. */ .align 64 - .skip 64 - (__x86_return_thunk - zen_untrain_ret), 0xcc + .skip 64 - (__ret - zen_untrain_ret), 0xcc SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) ANNOTATE_NOENDBR /* @@ -175,10 +216,10 @@ SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) * evicted, __x86_return_thunk will suffer Straight Line Speculation * which will be contained safely by the INT3. */ -SYM_INNER_LABEL(__x86_return_thunk, SYM_L_GLOBAL) +SYM_INNER_LABEL(__ret, SYM_L_GLOBAL) ret int3 -SYM_CODE_END(__x86_return_thunk) +SYM_CODE_END(__ret) /* * Ensure the TEST decoding / BTB invalidation is complete. @@ -189,11 +230,45 @@ SYM_CODE_END(__x86_return_thunk) * Jump back and execute the RET in the middle of the TEST instruction. * INT3 is for SLS protection. */ - jmp __x86_return_thunk + jmp __ret int3 SYM_FUNC_END(zen_untrain_ret) __EXPORT_THUNK(zen_untrain_ret) +/* + * SRSO untraining sequence for Zen1/2, similar to zen_untrain_ret() + * above. On kernel entry, srso_untrain_ret() is executed which is a + * + * movabs $0xccccccc308c48348,%rax + * + * and when the return thunk executes the inner label srso_safe_ret() + * later, it is a stack manipulation and a RET which is mispredicted and + * thus a "safe" one to use. + */ + .align 64 + .skip 64 - (srso_safe_ret - srso_untrain_ret), 0xcc +SYM_START(srso_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) + ANNOTATE_NOENDBR + .byte 0x48, 0xb8 + +SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL) + add $8, %_ASM_SP + ret + int3 + int3 + int3 + lfence + call srso_safe_ret + int3 +SYM_CODE_END(srso_safe_ret) +SYM_FUNC_END(srso_untrain_ret) +__EXPORT_THUNK(srso_untrain_ret) + +SYM_FUNC_START(__x86_return_thunk) + ALTERNATIVE_2 "jmp __ret", "call srso_safe_ret", X86_FEATURE_SRSO, \ + "call srso_safe_ret_alias", X86_FEATURE_SRSO_ALIAS + int3 +SYM_CODE_END(__x86_return_thunk) EXPORT_SYMBOL(__x86_return_thunk) #endif /* CONFIG_RETHUNK */ diff --git a/block/blk-core.c b/block/blk-core.c index 90de50082146..9866468c72a2 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -722,14 +722,9 @@ void submit_bio_noacct(struct bio *bio) struct block_device *bdev = bio->bi_bdev; struct request_queue *q = bdev_get_queue(bdev); blk_status_t status = BLK_STS_IOERR; - struct blk_plug *plug; might_sleep(); - plug = blk_mq_plug(bio); - if (plug && plug->nowait) - bio->bi_opf |= REQ_NOWAIT; - /* * For a REQ_NOWAIT based request, return -EOPNOTSUPP * if queue does not support NOWAIT. 
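With the plug-based propagation removed in the hunk above, REQ_NOWAIT is no longer inherited from a blk_plug; a submitter that must not block is expected to mark its own bios, as the block/fops.c hunk further below does for IOCB_NOWAIT. A minimal sketch of that pattern (the helper name is illustrative; the flags and submit_bio() are the interfaces actually used in this patch):

	#include <linux/bio.h>
	#include <linux/fs.h>

	static void submit_dio_bio(struct kiocb *iocb, struct bio *bio)
	{
		/*
		 * Mark the bio itself: the block layer then completes it with
		 * -EOPNOTSUPP when the queue lacks NOWAIT support, or -EAGAIN
		 * if the submission would otherwise have to block.
		 */
		if (iocb->ki_flags & IOCB_NOWAIT)
			bio->bi_opf |= REQ_NOWAIT;

		submit_bio(bio);
	}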
@@ -1059,7 +1054,6 @@ void blk_start_plug_nr_ios(struct blk_plug *plug, unsigned short nr_ios) plug->rq_count = 0; plug->multiple_queues = false; plug->has_elevator = false; - plug->nowait = false; INIT_LIST_HEAD(&plug->cb_list); /* diff --git a/block/blk-iocost.c b/block/blk-iocost.c index dd64e2066f01..089fcb9cfce3 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -3301,11 +3301,12 @@ static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input, if (qos[QOS_MIN] > qos[QOS_MAX]) goto einval; - if (enable) { + if (enable && !ioc->enabled) { blk_stat_enable_accounting(disk->queue); blk_queue_flag_set(QUEUE_FLAG_RQ_ALLOC_TIME, disk->queue); ioc->enabled = true; - } else { + } else if (!enable && ioc->enabled) { + blk_stat_disable_accounting(disk->queue); blk_queue_flag_clear(QUEUE_FLAG_RQ_ALLOC_TIME, disk->queue); ioc->enabled = false; } diff --git a/block/fops.c b/block/fops.c index a286bf3325c5..838ffada5341 100644 --- a/block/fops.c +++ b/block/fops.c @@ -358,13 +358,14 @@ static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb, task_io_account_write(bio->bi_iter.bi_size); } + if (iocb->ki_flags & IOCB_NOWAIT) + bio->bi_opf |= REQ_NOWAIT; + if (iocb->ki_flags & IOCB_HIPRI) { - bio->bi_opf |= REQ_POLLED | REQ_NOWAIT; + bio->bi_opf |= REQ_POLLED; submit_bio(bio); WRITE_ONCE(iocb->private, bio); } else { - if (iocb->ki_flags & IOCB_NOWAIT) - bio->bi_opf |= REQ_NOWAIT; submit_bio(bio); } return -EIOCBQUEUED; diff --git a/drivers/accel/ivpu/ivpu_gem.c b/drivers/accel/ivpu/ivpu_gem.c index 52b339aefadc..9967fcfa27ec 100644 --- a/drivers/accel/ivpu/ivpu_gem.c +++ b/drivers/accel/ivpu/ivpu_gem.c @@ -173,6 +173,9 @@ static void internal_free_pages_locked(struct ivpu_bo *bo) { unsigned int i, npages = bo->base.size >> PAGE_SHIFT; + if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED) + set_pages_array_wb(bo->pages, bo->base.size >> PAGE_SHIFT); + for (i = 0; i < npages; i++) put_page(bo->pages[i]); @@ -587,6 +590,11 @@ ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 fla if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED) drm_clflush_pages(bo->pages, bo->base.size >> PAGE_SHIFT); + if (bo->flags & DRM_IVPU_BO_WC) + set_pages_array_wc(bo->pages, bo->base.size >> PAGE_SHIFT); + else if (bo->flags & DRM_IVPU_BO_UNCACHED) + set_pages_array_uc(bo->pages, bo->base.size >> PAGE_SHIFT); + prot = ivpu_bo_pgprot(bo, PAGE_KERNEL); bo->kvaddr = vmap(bo->pages, bo->base.size >> PAGE_SHIFT, VM_MAP, prot); if (!bo->kvaddr) { diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index 1dd8d5aebf67..a4d9f149b48d 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -470,6 +470,45 @@ static const struct dmi_system_id asus_laptop[] = { { } }; +static const struct dmi_system_id tongfang_gm_rg[] = { + { + .ident = "TongFang GMxRGxx/XMG CORE 15 (M22)/TUXEDO Stellaris 15 Gen4 AMD", + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GMxRGxx"), + }, + }, + { } +}; + +static const struct dmi_system_id maingear_laptop[] = { + { + .ident = "MAINGEAR Vector Pro 2 15", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Micro Electronics Inc"), + DMI_MATCH(DMI_PRODUCT_NAME, "MG-VCP2-15A3070T"), + } + }, + { + .ident = "MAINGEAR Vector Pro 2 17", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Micro Electronics Inc"), + DMI_MATCH(DMI_PRODUCT_NAME, "MG-VCP2-17A3070T"), + }, + }, + { } +}; + +static const struct dmi_system_id pcspecialist_laptop[] = { + { + .ident = "PCSpecialist Elimina Pro 16 M", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "PCSpecialist"), + 
DMI_MATCH(DMI_PRODUCT_NAME, "Elimina Pro 16 M"), + }, + }, + { } +}; + static const struct dmi_system_id lg_laptop[] = { { .ident = "LG Electronics 17U70P", @@ -493,6 +532,9 @@ struct irq_override_cmp { static const struct irq_override_cmp override_table[] = { { medion_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false }, { asus_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false }, + { tongfang_gm_rg, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true }, + { maingear_laptop, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true }, + { pcspecialist_laptop, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true }, { lg_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false }, }; @@ -512,6 +554,28 @@ static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity, return entry->override; } +#ifdef CONFIG_X86 + /* + * Always use the MADT override info, except for the i8042 PS/2 ctrl + * IRQs (1 and 12). For these the DSDT IRQ settings should sometimes + * be used otherwise PS/2 keyboards / mice will not work. + */ + if (gsi != 1 && gsi != 12) + return true; + + /* If the override comes from an INT_SRC_OVR MADT entry, honor it. */ + if (acpi_int_src_ovr[gsi]) + return true; + + /* + * IRQ override isn't needed on modern AMD Zen systems and + * this override breaks active low IRQs on AMD Ryzen 6000 and + * newer systems. Skip it. + */ + if (boot_cpu_has(X86_FEATURE_ZEN)) + return false; +#endif + return true; } diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 5b145f1aaa1b..87e385542576 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -1714,6 +1714,7 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device) {"BSG1160", }, {"BSG2150", }, {"CSC3551", }, + {"CSC3556", }, {"INT33FE", }, {"INT3515", }, /* Non-conforming _HID for Cirrus Logic already released */ diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 486c8271cab7..d720f93d8b19 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -6617,6 +6617,7 @@ err_init_binder_device_failed: err_alloc_device_names_failed: debugfs_remove_recursive(binder_debugfs_dir_entry_root); + binder_alloc_shrinker_exit(); return ret; } diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index 662a2a2e2e84..e3db8297095a 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c @@ -1087,6 +1087,12 @@ int binder_alloc_shrinker_init(void) return ret; } +void binder_alloc_shrinker_exit(void) +{ + unregister_shrinker(&binder_shrinker); + list_lru_destroy(&binder_alloc_lru); +} + /** * check_buffer() - verify that buffer/offset is safe to access * @alloc: binder_alloc for this proc diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h index 138d1d5af9ce..dc1e2b01dd64 100644 --- a/drivers/android/binder_alloc.h +++ b/drivers/android/binder_alloc.h @@ -129,6 +129,7 @@ extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc, int pid); extern void binder_alloc_init(struct binder_alloc *alloc); extern int binder_alloc_shrinker_init(void); +extern void binder_alloc_shrinker_exit(void); extern void binder_alloc_vma_close(struct binder_alloc *alloc); extern struct binder_buffer * binder_alloc_prepare_to_free(struct binder_alloc *alloc, diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 370d18aca71e..c6ece32de8e3 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -1100,7 +1100,14 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device 
*dev) } } else { sdev->sector_size = ata_id_logical_sector_size(dev->id); + /* + * Stop the drive on suspend but do not issue START STOP UNIT + * on resume as this is not necessary and may fail: the device + * will be woken up by ata_port_pm_resume() with a port reset + * and device revalidation. + */ sdev->manage_start_stop = 1; + sdev->no_start_on_resume = 1; } /* diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index c1815b9dae68..fe6690ecf563 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -509,73 +509,30 @@ static void __init cpu_dev_register_generic(void) } #ifdef CONFIG_GENERIC_CPU_VULNERABILITIES - -ssize_t __weak cpu_show_meltdown(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return sysfs_emit(buf, "Not affected\n"); -} - -ssize_t __weak cpu_show_spectre_v1(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return sysfs_emit(buf, "Not affected\n"); -} - -ssize_t __weak cpu_show_spectre_v2(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return sysfs_emit(buf, "Not affected\n"); -} - -ssize_t __weak cpu_show_spec_store_bypass(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return sysfs_emit(buf, "Not affected\n"); -} - -ssize_t __weak cpu_show_l1tf(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return sysfs_emit(buf, "Not affected\n"); -} - -ssize_t __weak cpu_show_mds(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return sysfs_emit(buf, "Not affected\n"); -} - -ssize_t __weak cpu_show_tsx_async_abort(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - return sysfs_emit(buf, "Not affected\n"); -} - -ssize_t __weak cpu_show_itlb_multihit(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return sysfs_emit(buf, "Not affected\n"); -} - -ssize_t __weak cpu_show_srbds(struct device *dev, +static ssize_t cpu_show_not_affected(struct device *dev, struct device_attribute *attr, char *buf) { return sysfs_emit(buf, "Not affected\n"); } -ssize_t __weak cpu_show_mmio_stale_data(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return sysfs_emit(buf, "Not affected\n"); -} - -ssize_t __weak cpu_show_retbleed(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return sysfs_emit(buf, "Not affected\n"); -} +#define CPU_SHOW_VULN_FALLBACK(func) \ + ssize_t cpu_show_##func(struct device *, \ + struct device_attribute *, char *) \ + __attribute__((weak, alias("cpu_show_not_affected"))) + +CPU_SHOW_VULN_FALLBACK(meltdown); +CPU_SHOW_VULN_FALLBACK(spectre_v1); +CPU_SHOW_VULN_FALLBACK(spectre_v2); +CPU_SHOW_VULN_FALLBACK(spec_store_bypass); +CPU_SHOW_VULN_FALLBACK(l1tf); +CPU_SHOW_VULN_FALLBACK(mds); +CPU_SHOW_VULN_FALLBACK(tsx_async_abort); +CPU_SHOW_VULN_FALLBACK(itlb_multihit); +CPU_SHOW_VULN_FALLBACK(srbds); +CPU_SHOW_VULN_FALLBACK(mmio_stale_data); +CPU_SHOW_VULN_FALLBACK(retbleed); +CPU_SHOW_VULN_FALLBACK(spec_rstack_overflow); +CPU_SHOW_VULN_FALLBACK(gds); static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); @@ -588,6 +545,8 @@ static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL); static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL); static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL); static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL); +static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NULL); +static DEVICE_ATTR(gather_data_sampling, 
0444, cpu_show_gds, NULL); static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_meltdown.attr, @@ -601,6 +560,8 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_srbds.attr, &dev_attr_mmio_stale_data.attr, &dev_attr_retbleed.attr, + &dev_attr_spec_rstack_overflow.attr, + &dev_attr_gather_data_sampling.attr, NULL }; diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 24afcc93ac01..2328cc05be36 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -3675,7 +3675,7 @@ static int rbd_lock(struct rbd_device *rbd_dev) ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc, RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie, RBD_LOCK_TAG, "", 0); - if (ret) + if (ret && ret != -EEXIST) return ret; __rbd_lock(rbd_dev, cookie); @@ -3878,7 +3878,7 @@ static struct ceph_locker *get_lock_owner_info(struct rbd_device *rbd_dev) &rbd_dev->header_oloc, RBD_LOCK_NAME, &lock_type, &lock_tag, &lockers, &num_lockers); if (ret) { - rbd_warn(rbd_dev, "failed to retrieve lockers: %d", ret); + rbd_warn(rbd_dev, "failed to get header lockers: %d", ret); return ERR_PTR(ret); } @@ -3940,8 +3940,10 @@ static int find_watcher(struct rbd_device *rbd_dev, ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc, &watchers, &num_watchers); - if (ret) + if (ret) { + rbd_warn(rbd_dev, "failed to get watchers: %d", ret); return ret; + } sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie); for (i = 0; i < num_watchers; i++) { @@ -3985,8 +3987,12 @@ static int rbd_try_lock(struct rbd_device *rbd_dev) locker = refreshed_locker = NULL; ret = rbd_lock(rbd_dev); - if (ret != -EBUSY) + if (!ret) + goto out; + if (ret != -EBUSY) { + rbd_warn(rbd_dev, "failed to lock header: %d", ret); goto out; + } /* determine if the current lock holder is still alive */ locker = get_lock_owner_info(rbd_dev); @@ -4089,11 +4095,8 @@ static int rbd_try_acquire_lock(struct rbd_device *rbd_dev) ret = rbd_try_lock(rbd_dev); if (ret < 0) { - rbd_warn(rbd_dev, "failed to lock header: %d", ret); - if (ret == -EBLOCKLISTED) - goto out; - - ret = 1; /* request lock anyway */ + rbd_warn(rbd_dev, "failed to acquire lock: %d", ret); + goto out; } if (ret > 0) { up_write(&rbd_dev->lock_rwsem); @@ -6627,12 +6630,11 @@ static int rbd_add_acquire_lock(struct rbd_device *rbd_dev) cancel_delayed_work_sync(&rbd_dev->lock_dwork); if (!ret) ret = -ETIMEDOUT; - } - if (ret) { - rbd_warn(rbd_dev, "failed to acquire exclusive lock: %ld", ret); - return ret; + rbd_warn(rbd_dev, "failed to acquire lock: %ld", ret); } + if (ret) + return ret; /* * The lock may have been released by now, unless automatic lock diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 5676e6dd5b16..06673c6ca255 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -1870,15 +1870,16 @@ static void zram_bio_discard(struct zram *zram, struct bio *bio) static void zram_bio_read(struct zram *zram, struct bio *bio) { - struct bvec_iter iter; - struct bio_vec bv; - unsigned long start_time; + unsigned long start_time = bio_start_io_acct(bio); + struct bvec_iter iter = bio->bi_iter; - start_time = bio_start_io_acct(bio); - bio_for_each_segment(bv, bio, iter) { + do { u32 index = iter.bi_sector >> SECTORS_PER_PAGE_SHIFT; u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT; + struct bio_vec bv = bio_iter_iovec(bio, iter); + + bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset); if (zram_bvec_read(zram, &bv, index, offset, bio) 
< 0) { atomic64_inc(&zram->stats.failed_reads); @@ -1890,22 +1891,26 @@ static void zram_bio_read(struct zram *zram, struct bio *bio) zram_slot_lock(zram, index); zram_accessed(zram, index); zram_slot_unlock(zram, index); - } + + bio_advance_iter_single(bio, &iter, bv.bv_len); + } while (iter.bi_size); + bio_end_io_acct(bio, start_time); bio_endio(bio); } static void zram_bio_write(struct zram *zram, struct bio *bio) { - struct bvec_iter iter; - struct bio_vec bv; - unsigned long start_time; + unsigned long start_time = bio_start_io_acct(bio); + struct bvec_iter iter = bio->bi_iter; - start_time = bio_start_io_acct(bio); - bio_for_each_segment(bv, bio, iter) { + do { u32 index = iter.bi_sector >> SECTORS_PER_PAGE_SHIFT; u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT; + struct bio_vec bv = bio_iter_iovec(bio, iter); + + bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset); if (zram_bvec_write(zram, &bv, index, offset, bio) < 0) { atomic64_inc(&zram->stats.failed_writes); @@ -1916,7 +1921,10 @@ static void zram_bio_write(struct zram *zram, struct bio *bio) zram_slot_lock(zram, index); zram_accessed(zram, index); zram_slot_unlock(zram, index); - } + + bio_advance_iter_single(bio, &iter, bv.bv_len); + } while (iter.bi_size); + bio_end_io_acct(bio, start_time); bio_endio(bio); } diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c index cf5499e51999..ea6b4013bc38 100644 --- a/drivers/char/tpm/tpm-chip.c +++ b/drivers/char/tpm/tpm-chip.c @@ -510,70 +510,6 @@ static int tpm_add_legacy_sysfs(struct tpm_chip *chip) return 0; } -/* - * Some AMD fTPM versions may cause stutter - * https://www.amd.com/en/support/kb/faq/pa-410 - * - * Fixes are available in two series of fTPM firmware: - * 6.x.y.z series: 6.0.18.6 + - * 3.x.y.z series: 3.57.y.5 + - */ -#ifdef CONFIG_X86 -static bool tpm_amd_is_rng_defective(struct tpm_chip *chip) -{ - u32 val1, val2; - u64 version; - int ret; - - if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) - return false; - - ret = tpm_request_locality(chip); - if (ret) - return false; - - ret = tpm2_get_tpm_pt(chip, TPM2_PT_MANUFACTURER, &val1, NULL); - if (ret) - goto release; - if (val1 != 0x414D4400U /* AMD */) { - ret = -ENODEV; - goto release; - } - ret = tpm2_get_tpm_pt(chip, TPM2_PT_FIRMWARE_VERSION_1, &val1, NULL); - if (ret) - goto release; - ret = tpm2_get_tpm_pt(chip, TPM2_PT_FIRMWARE_VERSION_2, &val2, NULL); - -release: - tpm_relinquish_locality(chip); - - if (ret) - return false; - - version = ((u64)val1 << 32) | val2; - if ((version >> 48) == 6) { - if (version >= 0x0006000000180006ULL) - return false; - } else if ((version >> 48) == 3) { - if (version >= 0x0003005700000005ULL) - return false; - } else { - return false; - } - - dev_warn(&chip->dev, - "AMD fTPM version 0x%llx causes system stutter; hwrng disabled\n", - version); - - return true; -} -#else -static inline bool tpm_amd_is_rng_defective(struct tpm_chip *chip) -{ - return false; -} -#endif /* CONFIG_X86 */ - static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait) { struct tpm_chip *chip = container_of(rng, struct tpm_chip, hwrng); @@ -585,10 +521,20 @@ static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait) return tpm_get_random(chip, data, max); } +static bool tpm_is_hwrng_enabled(struct tpm_chip *chip) +{ + if (!IS_ENABLED(CONFIG_HW_RANDOM_TPM)) + return false; + if (tpm_is_firmware_upgrade(chip)) + return false; + if (chip->flags & TPM_CHIP_FLAG_HWRNG_DISABLED) + return false; + return true; +} + static int 
tpm_add_hwrng(struct tpm_chip *chip) { - if (!IS_ENABLED(CONFIG_HW_RANDOM_TPM) || tpm_is_firmware_upgrade(chip) || - tpm_amd_is_rng_defective(chip)) + if (!tpm_is_hwrng_enabled(chip)) return 0; snprintf(chip->hwrng_name, sizeof(chip->hwrng_name), @@ -693,7 +639,7 @@ int tpm_chip_register(struct tpm_chip *chip) return 0; out_hwrng: - if (IS_ENABLED(CONFIG_HW_RANDOM_TPM) && !tpm_is_firmware_upgrade(chip)) + if (tpm_is_hwrng_enabled(chip)) hwrng_unregister(&chip->hwrng); out_ppi: tpm_bios_log_teardown(chip); @@ -718,8 +664,7 @@ EXPORT_SYMBOL_GPL(tpm_chip_register); void tpm_chip_unregister(struct tpm_chip *chip) { tpm_del_legacy_sysfs(chip); - if (IS_ENABLED(CONFIG_HW_RANDOM_TPM) && !tpm_is_firmware_upgrade(chip) && - !tpm_amd_is_rng_defective(chip)) + if (tpm_is_hwrng_enabled(chip)) hwrng_unregister(&chip->hwrng); tpm_bios_log_teardown(chip); if (chip->flags & TPM_CHIP_FLAG_TPM2 && !tpm_is_firmware_upgrade(chip)) diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c index 1a5d09b18513..9eb1a1859012 100644 --- a/drivers/char/tpm/tpm_crb.c +++ b/drivers/char/tpm/tpm_crb.c @@ -463,6 +463,28 @@ static bool crb_req_canceled(struct tpm_chip *chip, u8 status) return (cancel & CRB_CANCEL_INVOKE) == CRB_CANCEL_INVOKE; } +static int crb_check_flags(struct tpm_chip *chip) +{ + u32 val; + int ret; + + ret = crb_request_locality(chip, 0); + if (ret) + return ret; + + ret = tpm2_get_tpm_pt(chip, TPM2_PT_MANUFACTURER, &val, NULL); + if (ret) + goto release; + + if (val == 0x414D4400U /* AMD */) + chip->flags |= TPM_CHIP_FLAG_HWRNG_DISABLED; + +release: + crb_relinquish_locality(chip, 0); + + return ret; +} + static const struct tpm_class_ops tpm_crb = { .flags = TPM_OPS_AUTO_STARTUP, .status = crb_status, @@ -800,6 +822,14 @@ static int crb_acpi_add(struct acpi_device *device) chip->acpi_dev_handle = device->handle; chip->flags = TPM_CHIP_FLAG_TPM2; + rc = tpm_chip_bootstrap(chip); + if (rc) + goto out; + + rc = crb_check_flags(chip); + if (rc) + goto out; + rc = tpm_chip_register(chip); out: diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c index cc42cf3de960..7fa3d91042b2 100644 --- a/drivers/char/tpm/tpm_tis.c +++ b/drivers/char/tpm/tpm_tis.c @@ -89,7 +89,7 @@ static inline void tpm_tis_iowrite32(u32 b, void __iomem *iobase, u32 addr) tpm_tis_flush(iobase); } -static int interrupts = -1; +static int interrupts; module_param(interrupts, int, 0444); MODULE_PARM_DESC(interrupts, "Enable interrupts"); @@ -164,10 +164,26 @@ static const struct dmi_system_id tpm_tis_dmi_table[] = { }, { .callback = tpm_tis_disable_irq, + .ident = "ThinkStation P620", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkStation P620"), + }, + }, + { + .callback = tpm_tis_disable_irq, + .ident = "TUXEDO InfinityBook S 15/17 Gen7", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_MATCH(DMI_PRODUCT_NAME, "TUXEDO InfinityBook S 15/17 Gen7"), + }, + }, + { + .callback = tpm_tis_disable_irq, .ident = "UPX-TGL", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "AAEON"), - DMI_MATCH(DMI_PRODUCT_VERSION, "UPX-TGL"), + DMI_MATCH(DMI_PRODUCT_NAME, "UPX-TGL01"), }, }, {} diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index 93f38a8178ba..6b3b424addab 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig @@ -444,6 +444,7 @@ config COMMON_CLK_BD718XX config COMMON_CLK_FIXED_MMIO bool "Clock driver for Memory Mapped Fixed values" depends on COMMON_CLK && OF + depends on HAS_IOMEM help Support for Memory Mapped IO Fixed clocks diff --git 
a/drivers/clk/imx/clk-imx93.c b/drivers/clk/imx/clk-imx93.c index b6c7c2725906..44f435103c65 100644 --- a/drivers/clk/imx/clk-imx93.c +++ b/drivers/clk/imx/clk-imx93.c @@ -291,7 +291,7 @@ static int imx93_clocks_probe(struct platform_device *pdev) anatop_base = devm_of_iomap(dev, np, 0, NULL); of_node_put(np); if (WARN_ON(IS_ERR(anatop_base))) { - ret = PTR_ERR(base); + ret = PTR_ERR(anatop_base); goto unregister_hws; } diff --git a/drivers/clk/mediatek/clk-mt8183.c b/drivers/clk/mediatek/clk-mt8183.c index 1ba421b38ec5..e31f94387d87 100644 --- a/drivers/clk/mediatek/clk-mt8183.c +++ b/drivers/clk/mediatek/clk-mt8183.c @@ -328,6 +328,14 @@ static const char * const atb_parents[] = { "syspll_d5" }; +static const char * const sspm_parents[] = { + "clk26m", + "univpll_d2_d4", + "syspll_d2_d2", + "univpll_d2_d2", + "syspll_d3" +}; + static const char * const dpi0_parents[] = { "clk26m", "tvdpll_d2", @@ -507,6 +515,9 @@ static const struct mtk_mux top_muxes[] = { /* CLK_CFG_6 */ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_ATB, "atb_sel", atb_parents, 0xa0, 0xa4, 0xa8, 0, 2, 7, 0x004, 24), + MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_MUX_SSPM, "sspm_sel", + sspm_parents, 0xa0, 0xa4, 0xa8, 8, 3, 15, 0x004, 25, + CLK_IS_CRITICAL | CLK_SET_RATE_PARENT), MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_DPI0, "dpi0_sel", dpi0_parents, 0xa0, 0xa4, 0xa8, 16, 4, 23, 0x004, 26), MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_SCAM, "scam_sel", @@ -673,10 +684,18 @@ static const struct mtk_gate_regs infra3_cg_regs = { GATE_MTK(_id, _name, _parent, &infra2_cg_regs, _shift, \ &mtk_clk_gate_ops_setclr) +#define GATE_INFRA2_FLAGS(_id, _name, _parent, _shift, _flag) \ + GATE_MTK_FLAGS(_id, _name, _parent, &infra2_cg_regs, \ + _shift, &mtk_clk_gate_ops_setclr, _flag) + #define GATE_INFRA3(_id, _name, _parent, _shift) \ GATE_MTK(_id, _name, _parent, &infra3_cg_regs, _shift, \ &mtk_clk_gate_ops_setclr) +#define GATE_INFRA3_FLAGS(_id, _name, _parent, _shift, _flag) \ + GATE_MTK_FLAGS(_id, _name, _parent, &infra3_cg_regs, \ + _shift, &mtk_clk_gate_ops_setclr, _flag) + static const struct mtk_gate infra_clks[] = { /* INFRA0 */ GATE_INFRA0(CLK_INFRA_PMIC_TMR, "infra_pmic_tmr", "axi_sel", 0), @@ -748,7 +767,11 @@ static const struct mtk_gate infra_clks[] = { GATE_INFRA2(CLK_INFRA_UNIPRO_TICK, "infra_unipro_tick", "fufs_sel", 12), GATE_INFRA2(CLK_INFRA_UFS_MP_SAP_BCLK, "infra_ufs_mp_sap_bck", "fufs_sel", 13), GATE_INFRA2(CLK_INFRA_MD32_BCLK, "infra_md32_bclk", "axi_sel", 14), + /* infra_sspm is main clock in co-processor, should not be closed in Linux. */ + GATE_INFRA2_FLAGS(CLK_INFRA_SSPM, "infra_sspm", "sspm_sel", 15, CLK_IS_CRITICAL), GATE_INFRA2(CLK_INFRA_UNIPRO_MBIST, "infra_unipro_mbist", "axi_sel", 16), + /* infra_sspm_bus_hclk is main clock in co-processor, should not be closed in Linux. */ + GATE_INFRA2_FLAGS(CLK_INFRA_SSPM_BUS_HCLK, "infra_sspm_bus_hclk", "axi_sel", 17, CLK_IS_CRITICAL), GATE_INFRA2(CLK_INFRA_I2C5, "infra_i2c5", "i2c_sel", 18), GATE_INFRA2(CLK_INFRA_I2C5_ARBITER, "infra_i2c5_arbiter", "i2c_sel", 19), GATE_INFRA2(CLK_INFRA_I2C5_IMM, "infra_i2c5_imm", "i2c_sel", 20), @@ -766,6 +789,10 @@ static const struct mtk_gate infra_clks[] = { GATE_INFRA3(CLK_INFRA_MSDC0_SELF, "infra_msdc0_self", "msdc50_0_sel", 0), GATE_INFRA3(CLK_INFRA_MSDC1_SELF, "infra_msdc1_self", "msdc50_0_sel", 1), GATE_INFRA3(CLK_INFRA_MSDC2_SELF, "infra_msdc2_self", "msdc50_0_sel", 2), + /* infra_sspm_26m_self is main clock in co-processor, should not be closed in Linux. 
*/ + GATE_INFRA3_FLAGS(CLK_INFRA_SSPM_26M_SELF, "infra_sspm_26m_self", "f_f26m_ck", 3, CLK_IS_CRITICAL), + /* infra_sspm_32k_self is main clock in co-processor, should not be closed in Linux. */ + GATE_INFRA3_FLAGS(CLK_INFRA_SSPM_32K_SELF, "infra_sspm_32k_self", "f_f26m_ck", 4, CLK_IS_CRITICAL), GATE_INFRA3(CLK_INFRA_UFS_AXI, "infra_ufs_axi", "axi_sel", 5), GATE_INFRA3(CLK_INFRA_I2C6, "infra_i2c6", "i2c_sel", 6), GATE_INFRA3(CLK_INFRA_AP_MSDC0, "infra_ap_msdc0", "msdc50_hclk_sel", 7), diff --git a/drivers/clk/meson/clk-pll.c b/drivers/clk/meson/clk-pll.c index 8fef90bf962f..6fa7639a3050 100644 --- a/drivers/clk/meson/clk-pll.c +++ b/drivers/clk/meson/clk-pll.c @@ -367,9 +367,9 @@ static int meson_clk_pll_enable(struct clk_hw *hw) * 3. enable the lock detect module */ if (MESON_PARM_APPLICABLE(&pll->current_en)) { - usleep_range(10, 20); + udelay(10); meson_parm_write(clk->map, &pll->current_en, 1); - usleep_range(40, 50); + udelay(40); } if (MESON_PARM_APPLICABLE(&pll->l_detect)) { diff --git a/drivers/counter/Kconfig b/drivers/counter/Kconfig index bca21df51168..62962ae84b77 100644 --- a/drivers/counter/Kconfig +++ b/drivers/counter/Kconfig @@ -3,13 +3,6 @@ # Counter devices # -menuconfig COUNTER - tristate "Counter support" - help - This enables counter device support through the Generic Counter - interface. You only need to enable this, if you also want to enable - one or more of the counter device drivers below. - config I8254 tristate select COUNTER @@ -25,6 +18,13 @@ config I8254 If built as a module its name will be i8254. +menuconfig COUNTER + tristate "Counter support" + help + This enables counter device support through the Generic Counter + interface. You only need to enable this, if you also want to enable + one or more of the counter device drivers below. 
+ if COUNTER config 104_QUAD_8 diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c index 81fba0dcbee9..9a1e194d5cf8 100644 --- a/drivers/cpufreq/amd-pstate.c +++ b/drivers/cpufreq/amd-pstate.c @@ -1012,8 +1012,8 @@ static int amd_pstate_update_status(const char *buf, size_t size) return 0; } -static ssize_t show_status(struct kobject *kobj, - struct kobj_attribute *attr, char *buf) +static ssize_t status_show(struct device *dev, + struct device_attribute *attr, char *buf) { ssize_t ret; @@ -1024,7 +1024,7 @@ static ssize_t show_status(struct kobject *kobj, return ret; } -static ssize_t store_status(struct kobject *a, struct kobj_attribute *b, +static ssize_t status_store(struct device *a, struct device_attribute *b, const char *buf, size_t count) { char *p = memchr(buf, '\n', count); @@ -1043,7 +1043,7 @@ cpufreq_freq_attr_ro(amd_pstate_lowest_nonlinear_freq); cpufreq_freq_attr_ro(amd_pstate_highest_perf); cpufreq_freq_attr_rw(energy_performance_preference); cpufreq_freq_attr_ro(energy_performance_available_preferences); -define_one_global_rw(status); +static DEVICE_ATTR_RW(status); static struct freq_attr *amd_pstate_attr[] = { &amd_pstate_max_freq, @@ -1062,7 +1062,7 @@ static struct freq_attr *amd_pstate_epp_attr[] = { }; static struct attribute *pstate_global_attributes[] = { - &status.attr, + &dev_attr_status.attr, NULL }; diff --git a/drivers/cpuidle/cpuidle-psci-domain.c b/drivers/cpuidle/cpuidle-psci-domain.c index c2d6d9c3c930..b88af1262f1a 100644 --- a/drivers/cpuidle/cpuidle-psci-domain.c +++ b/drivers/cpuidle/cpuidle-psci-domain.c @@ -120,20 +120,6 @@ static void psci_pd_remove(void) } } -static bool psci_pd_try_set_osi_mode(void) -{ - int ret; - - if (!psci_has_osi_support()) - return false; - - ret = psci_set_osi_mode(true); - if (ret) - return false; - - return true; -} - static void psci_cpuidle_domain_sync_state(struct device *dev) { /* @@ -152,15 +138,12 @@ static int psci_cpuidle_domain_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct device_node *node; - bool use_osi; + bool use_osi = psci_has_osi_support(); int ret = 0, pd_count = 0; if (!np) return -ENODEV; - /* If OSI mode is supported, let's try to enable it. */ - use_osi = psci_pd_try_set_osi_mode(); - /* * Parse child nodes for the "#power-domain-cells" property and * initialize a genpd/genpd-of-provider pair when it's found. @@ -170,33 +153,37 @@ static int psci_cpuidle_domain_probe(struct platform_device *pdev) continue; ret = psci_pd_init(node, use_osi); - if (ret) - goto put_node; + if (ret) { + of_node_put(node); + goto exit; + } pd_count++; } /* Bail out if not using the hierarchical CPU topology. */ if (!pd_count) - goto no_pd; + return 0; /* Link genpd masters/subdomains to model the CPU topology. */ ret = dt_idle_pd_init_topology(np); if (ret) goto remove_pd; + /* let's try to enable OSI. */ + ret = psci_set_osi_mode(use_osi); + if (ret) + goto remove_pd; + pr_info("Initialized CPU PM domain topology using %s mode\n", use_osi ? 
"OSI" : "PC"); return 0; -put_node: - of_node_put(node); remove_pd: + dt_idle_pd_remove_topology(np); psci_pd_remove(); +exit: pr_err("failed to create CPU PM domains ret=%d\n", ret); -no_pd: - if (use_osi) - psci_set_osi_mode(false); return ret; } diff --git a/drivers/cpuidle/dt_idle_genpd.c b/drivers/cpuidle/dt_idle_genpd.c index b37165514d4e..1af63c189039 100644 --- a/drivers/cpuidle/dt_idle_genpd.c +++ b/drivers/cpuidle/dt_idle_genpd.c @@ -152,6 +152,30 @@ int dt_idle_pd_init_topology(struct device_node *np) return 0; } +int dt_idle_pd_remove_topology(struct device_node *np) +{ + struct device_node *node; + struct of_phandle_args child, parent; + int ret; + + for_each_child_of_node(np, node) { + if (of_parse_phandle_with_args(node, "power-domains", + "#power-domain-cells", 0, &parent)) + continue; + + child.np = node; + child.args_count = 0; + ret = of_genpd_remove_subdomain(&parent, &child); + of_node_put(parent.np); + if (ret) { + of_node_put(node); + return ret; + } + } + + return 0; +} + struct device *dt_idle_attach_cpu(int cpu, const char *name) { struct device *dev; diff --git a/drivers/cpuidle/dt_idle_genpd.h b/drivers/cpuidle/dt_idle_genpd.h index a95483d08a02..3be1f70f55b5 100644 --- a/drivers/cpuidle/dt_idle_genpd.h +++ b/drivers/cpuidle/dt_idle_genpd.h @@ -14,6 +14,8 @@ struct generic_pm_domain *dt_idle_pd_alloc(struct device_node *np, int dt_idle_pd_init_topology(struct device_node *np); +int dt_idle_pd_remove_topology(struct device_node *np); + struct device *dt_idle_attach_cpu(int cpu, const char *name); void dt_idle_detach_cpu(struct device *dev); @@ -36,6 +38,11 @@ static inline int dt_idle_pd_init_topology(struct device_node *np) return 0; } +static inline int dt_idle_pd_remove_topology(struct device_node *np) +{ + return 0; +} + static inline struct device *dt_idle_attach_cpu(int cpu, const char *name) { return NULL; diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c index d6d067fbee97..ca60bb8114f2 100644 --- a/drivers/cxl/core/mbox.c +++ b/drivers/cxl/core/mbox.c @@ -121,6 +121,45 @@ static bool cxl_is_security_command(u16 opcode) return false; } +static void cxl_set_security_cmd_enabled(struct cxl_security_state *security, + u16 opcode) +{ + switch (opcode) { + case CXL_MBOX_OP_SANITIZE: + set_bit(CXL_SEC_ENABLED_SANITIZE, security->enabled_cmds); + break; + case CXL_MBOX_OP_SECURE_ERASE: + set_bit(CXL_SEC_ENABLED_SECURE_ERASE, + security->enabled_cmds); + break; + case CXL_MBOX_OP_GET_SECURITY_STATE: + set_bit(CXL_SEC_ENABLED_GET_SECURITY_STATE, + security->enabled_cmds); + break; + case CXL_MBOX_OP_SET_PASSPHRASE: + set_bit(CXL_SEC_ENABLED_SET_PASSPHRASE, + security->enabled_cmds); + break; + case CXL_MBOX_OP_DISABLE_PASSPHRASE: + set_bit(CXL_SEC_ENABLED_DISABLE_PASSPHRASE, + security->enabled_cmds); + break; + case CXL_MBOX_OP_UNLOCK: + set_bit(CXL_SEC_ENABLED_UNLOCK, security->enabled_cmds); + break; + case CXL_MBOX_OP_FREEZE_SECURITY: + set_bit(CXL_SEC_ENABLED_FREEZE_SECURITY, + security->enabled_cmds); + break; + case CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE: + set_bit(CXL_SEC_ENABLED_PASSPHRASE_SECURE_ERASE, + security->enabled_cmds); + break; + default: + break; + } +} + static bool cxl_is_poison_command(u16 opcode) { #define CXL_MBOX_OP_POISON_CMDS 0x43 @@ -677,7 +716,8 @@ static void cxl_walk_cel(struct cxl_memdev_state *mds, size_t size, u8 *cel) u16 opcode = le16_to_cpu(cel_entry[i].opcode); struct cxl_mem_command *cmd = cxl_mem_find_command(opcode); - if (!cmd && !cxl_is_poison_command(opcode)) { + if (!cmd && (!cxl_is_poison_command(opcode) || 
+ !cxl_is_security_command(opcode))) { dev_dbg(dev, "Opcode 0x%04x unsupported by driver\n", opcode); continue; @@ -689,6 +729,9 @@ static void cxl_walk_cel(struct cxl_memdev_state *mds, size_t size, u8 *cel) if (cxl_is_poison_command(opcode)) cxl_set_poison_cmd_enabled(&mds->poison, opcode); + if (cxl_is_security_command(opcode)) + cxl_set_security_cmd_enabled(&mds->security, opcode); + dev_dbg(dev, "Opcode 0x%04x enabled\n", opcode); } } diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c index f99e7ec3cc40..14b547c07f54 100644 --- a/drivers/cxl/core/memdev.c +++ b/drivers/cxl/core/memdev.c @@ -477,9 +477,28 @@ static struct attribute_group cxl_memdev_pmem_attribute_group = { .attrs = cxl_memdev_pmem_attributes, }; +static umode_t cxl_memdev_security_visible(struct kobject *kobj, + struct attribute *a, int n) +{ + struct device *dev = kobj_to_dev(kobj); + struct cxl_memdev *cxlmd = to_cxl_memdev(dev); + struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); + + if (a == &dev_attr_security_sanitize.attr && + !test_bit(CXL_SEC_ENABLED_SANITIZE, mds->security.enabled_cmds)) + return 0; + + if (a == &dev_attr_security_erase.attr && + !test_bit(CXL_SEC_ENABLED_SECURE_ERASE, mds->security.enabled_cmds)) + return 0; + + return a->mode; +} + static struct attribute_group cxl_memdev_security_attribute_group = { .name = "security", .attrs = cxl_memdev_security_attributes, + .is_visible = cxl_memdev_security_visible, }; static const struct attribute_group *cxl_memdev_attribute_groups[] = { diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h index 499113328586..706f8a6d1ef4 100644 --- a/drivers/cxl/cxlmem.h +++ b/drivers/cxl/cxlmem.h @@ -244,6 +244,19 @@ enum poison_cmd_enabled_bits { CXL_POISON_ENABLED_MAX }; +/* Device enabled security commands */ +enum security_cmd_enabled_bits { + CXL_SEC_ENABLED_SANITIZE, + CXL_SEC_ENABLED_SECURE_ERASE, + CXL_SEC_ENABLED_GET_SECURITY_STATE, + CXL_SEC_ENABLED_SET_PASSPHRASE, + CXL_SEC_ENABLED_DISABLE_PASSPHRASE, + CXL_SEC_ENABLED_UNLOCK, + CXL_SEC_ENABLED_FREEZE_SECURITY, + CXL_SEC_ENABLED_PASSPHRASE_SECURE_ERASE, + CXL_SEC_ENABLED_MAX +}; + /** * struct cxl_poison_state - Driver poison state info * @@ -346,6 +359,7 @@ struct cxl_fw_state { * struct cxl_security_state - Device security state * * @state: state of last security operation + * @enabled_cmds: All security commands enabled in the CEL * @poll: polling for sanitization is enabled, device has no mbox irq support * @poll_tmo_secs: polling timeout * @poll_dwork: polling work item @@ -353,6 +367,7 @@ struct cxl_fw_state { */ struct cxl_security_state { unsigned long state; + DECLARE_BITMAP(enabled_cmds, CXL_SEC_ENABLED_MAX); bool poll; int poll_tmo_secs; struct delayed_work poll_dwork; @@ -434,6 +449,7 @@ struct cxl_dev_state { * @next_persistent_bytes: persistent capacity change pending device reset * @event: event log driver state * @poison: poison driver state info + * @security: security driver state info * @fw: firmware upload / activation state * @mbox_send: @dev specific transport for transmitting mailbox commands * diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 644c188d6a11..08fdd0e2ed1b 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -211,6 +211,7 @@ config FSL_DMA config FSL_EDMA tristate "Freescale eDMA engine support" depends on OF + depends on HAS_IOMEM select DMA_ENGINE select DMA_VIRTUAL_CHANNELS help @@ -280,6 +281,7 @@ config IMX_SDMA config INTEL_IDMA64 tristate "Intel integrated DMA 64-bit support" + depends on HAS_IOMEM select 
DMA_ENGINE select DMA_VIRTUAL_CHANNELS help diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c index 5abbcc61c528..9a15f0d12c79 100644 --- a/drivers/dma/idxd/device.c +++ b/drivers/dma/idxd/device.c @@ -384,9 +384,7 @@ static void idxd_wq_disable_cleanup(struct idxd_wq *wq) wq->threshold = 0; wq->priority = 0; wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES; - clear_bit(WQ_FLAG_DEDICATED, &wq->flags); - clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags); - clear_bit(WQ_FLAG_ATS_DISABLE, &wq->flags); + wq->flags = 0; memset(wq->name, 0, WQ_NAME_SIZE); wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER; idxd_wq_set_max_batch_size(idxd->data->type, wq, WQ_DEFAULT_MAX_BATCH); diff --git a/drivers/dma/mcf-edma.c b/drivers/dma/mcf-edma.c index ebd8733f72ad..9413fad08a60 100644 --- a/drivers/dma/mcf-edma.c +++ b/drivers/dma/mcf-edma.c @@ -190,7 +190,13 @@ static int mcf_edma_probe(struct platform_device *pdev) return -EINVAL; } - chans = pdata->dma_channels; + if (!pdata->dma_channels) { + dev_info(&pdev->dev, "setting default channel number to 64"); + chans = 64; + } else { + chans = pdata->dma_channels; + } + len = sizeof(*mcf_edma) + sizeof(*mcf_chan) * chans; mcf_edma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL); if (!mcf_edma) @@ -202,11 +208,6 @@ static int mcf_edma_probe(struct platform_device *pdev) mcf_edma->drvdata = &mcf_data; mcf_edma->big_endian = 1; - if (!mcf_edma->n_chans) { - dev_info(&pdev->dev, "setting default channel number to 64"); - mcf_edma->n_chans = 64; - } - mutex_init(&mcf_edma->fsl_edma_mutex); mcf_edma->membase = devm_platform_ioremap_resource(pdev, 0); diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c index 95a462a1f511..b6e0ac8314e5 100644 --- a/drivers/dma/owl-dma.c +++ b/drivers/dma/owl-dma.c @@ -192,7 +192,7 @@ struct owl_dma_pchan { }; /** - * struct owl_dma_pchan - Wrapper for DMA ENGINE channel + * struct owl_dma_vchan - Wrapper for DMA ENGINE channel * @vc: wrapped virtual channel * @pchan: the physical channel utilized by this channel * @txd: active transaction on this channel diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index b4731fe6bbc1..3cf0b38387ae 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -404,6 +404,12 @@ enum desc_status { */ BUSY, /* + * Pause was called while descriptor was BUSY. Due to hardware + * limitations, only termination is possible for descriptors + * that have been paused. 
+ */ + PAUSED, + /* * Sitting on the channel work_list but xfer done * by PL330 core */ @@ -2041,7 +2047,7 @@ static inline void fill_queue(struct dma_pl330_chan *pch) list_for_each_entry(desc, &pch->work_list, node) { /* If already submitted */ - if (desc->status == BUSY) + if (desc->status == BUSY || desc->status == PAUSED) continue; ret = pl330_submit_req(pch->thread, desc); @@ -2326,6 +2332,7 @@ static int pl330_pause(struct dma_chan *chan) { struct dma_pl330_chan *pch = to_pchan(chan); struct pl330_dmac *pl330 = pch->dmac; + struct dma_pl330_desc *desc; unsigned long flags; pm_runtime_get_sync(pl330->ddma.dev); @@ -2335,6 +2342,10 @@ static int pl330_pause(struct dma_chan *chan) _stop(pch->thread); spin_unlock(&pl330->lock); + list_for_each_entry(desc, &pch->work_list, node) { + if (desc->status == BUSY) + desc->status = PAUSED; + } spin_unlock_irqrestore(&pch->lock, flags); pm_runtime_mark_last_busy(pl330->ddma.dev); pm_runtime_put_autosuspend(pl330->ddma.dev); @@ -2425,7 +2436,7 @@ pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie, else if (running && desc == running) transferred = pl330_get_current_xferred_count(pch, desc); - else if (desc->status == BUSY) + else if (desc->status == BUSY || desc->status == PAUSED) /* * Busy but not running means either just enqueued, * or finished and not yet marked done @@ -2442,6 +2453,9 @@ pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie, case DONE: ret = DMA_COMPLETE; break; + case PAUSED: + ret = DMA_PAUSED; + break; case PREP: case BUSY: ret = DMA_IN_PROGRESS; diff --git a/drivers/dma/xilinx/xdma.c b/drivers/dma/xilinx/xdma.c index 93ee298d52b8..e0bfd129d563 100644 --- a/drivers/dma/xilinx/xdma.c +++ b/drivers/dma/xilinx/xdma.c @@ -668,6 +668,8 @@ static int xdma_set_vector_reg(struct xdma_device *xdev, u32 vec_tbl_start, val |= irq_start << shift; irq_start++; irq_num--; + if (!irq_num) + break; } /* write IRQ register */ @@ -715,7 +717,7 @@ static int xdma_irq_init(struct xdma_device *xdev) ret = request_irq(irq, xdma_channel_isr, 0, "xdma-c2h-channel", &xdev->c2h_chans[j]); if (ret) { - xdma_err(xdev, "H2C channel%d request irq%d failed: %d", + xdma_err(xdev, "C2H channel%d request irq%d failed: %d", j, irq, ret); goto failed_init_c2h; } @@ -892,7 +894,7 @@ static int xdma_probe(struct platform_device *pdev) } reg_base = devm_ioremap_resource(&pdev->dev, res); - if (!reg_base) { + if (IS_ERR(reg_base)) { xdma_err(xdev, "ioremap failed"); goto failed; } diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c index 1efa5e9392c4..19246ed1f01f 100644 --- a/drivers/firmware/arm_scmi/mailbox.c +++ b/drivers/firmware/arm_scmi/mailbox.c @@ -166,8 +166,10 @@ static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, return -ENOMEM; shmem = of_parse_phandle(cdev->of_node, "shmem", idx); - if (!of_device_is_compatible(shmem, "arm,scmi-shmem")) + if (!of_device_is_compatible(shmem, "arm,scmi-shmem")) { + of_node_put(shmem); return -ENXIO; + } ret = of_address_to_resource(shmem, 0, &res); of_node_put(shmem); diff --git a/drivers/firmware/arm_scmi/raw_mode.c b/drivers/firmware/arm_scmi/raw_mode.c index 6971dcf72fb9..0493aa3c12bf 100644 --- a/drivers/firmware/arm_scmi/raw_mode.c +++ b/drivers/firmware/arm_scmi/raw_mode.c @@ -818,10 +818,13 @@ static ssize_t scmi_dbg_raw_mode_common_write(struct file *filp, * before sending it with a single RAW xfer. 
*/ if (rd->tx_size < rd->tx_req_size) { - size_t cnt; + ssize_t cnt; cnt = simple_write_to_buffer(rd->tx.buf, rd->tx.len, ppos, buf, count); + if (cnt < 0) + return cnt; + rd->tx_size += cnt; if (cnt < count) return cnt; diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c index 621c37efe3ec..c193516a254d 100644 --- a/drivers/firmware/arm_scmi/smc.c +++ b/drivers/firmware/arm_scmi/smc.c @@ -40,6 +40,7 @@ /** * struct scmi_smc - Structure representing a SCMI smc transport * + * @irq: An optional IRQ for completion * @cinfo: SCMI channel info * @shmem: Transmit/Receive shared memory area * @shmem_lock: Lock to protect access to Tx/Rx shared memory area. @@ -52,6 +53,7 @@ */ struct scmi_smc { + int irq; struct scmi_chan_info *cinfo; struct scmi_shared_mem __iomem *shmem; /* Protect access to shmem area */ @@ -127,7 +129,7 @@ static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, struct resource res; struct device_node *np; u32 func_id; - int ret, irq; + int ret; if (!tx) return -ENODEV; @@ -137,8 +139,10 @@ static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, return -ENOMEM; np = of_parse_phandle(cdev->of_node, "shmem", 0); - if (!of_device_is_compatible(np, "arm,scmi-shmem")) + if (!of_device_is_compatible(np, "arm,scmi-shmem")) { + of_node_put(np); return -ENXIO; + } ret = of_address_to_resource(np, 0, &res); of_node_put(np); @@ -167,11 +171,10 @@ static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, * completion of a message is signaled by an interrupt rather than by * the return of the SMC call. */ - irq = of_irq_get_byname(cdev->of_node, "a2p"); - if (irq > 0) { - ret = devm_request_irq(dev, irq, smc_msg_done_isr, - IRQF_NO_SUSPEND, - dev_name(dev), scmi_info); + scmi_info->irq = of_irq_get_byname(cdev->of_node, "a2p"); + if (scmi_info->irq > 0) { + ret = request_irq(scmi_info->irq, smc_msg_done_isr, + IRQF_NO_SUSPEND, dev_name(dev), scmi_info); if (ret) { dev_err(dev, "failed to setup SCMI smc irq\n"); return ret; @@ -193,6 +196,10 @@ static int smc_chan_free(int id, void *p, void *data) struct scmi_chan_info *cinfo = p; struct scmi_smc *scmi_info = cinfo->transport_info; + /* Ignore any possible further reception on the IRQ path */ + if (scmi_info->irq > 0) + free_irq(scmi_info->irq, scmi_info); + cinfo->transport_info = NULL; scmi_info->cinfo = NULL; diff --git a/drivers/firmware/smccc/soc_id.c b/drivers/firmware/smccc/soc_id.c index 890eb454599a..1990263fbba0 100644 --- a/drivers/firmware/smccc/soc_id.c +++ b/drivers/firmware/smccc/soc_id.c @@ -34,7 +34,6 @@ static struct soc_device_attribute *soc_dev_attr; static int __init smccc_soc_init(void) { - struct arm_smccc_res res; int soc_id_rev, soc_id_version; static char soc_id_str[20], soc_id_rev_str[12]; static char soc_id_jep106_id_str[12]; @@ -49,13 +48,13 @@ static int __init smccc_soc_init(void) } if (soc_id_version < 0) { - pr_err("ARCH_SOC_ID(0) returned error: %lx\n", res.a0); + pr_err("Invalid SoC Version: %x\n", soc_id_version); return -EINVAL; } soc_id_rev = arm_smccc_get_soc_id_revision(); if (soc_id_rev < 0) { - pr_err("ARCH_SOC_ID(1) returned error: %lx\n", res.a0); + pr_err("Invalid SoC Revision: %x\n", soc_id_rev); return -EINVAL; } diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c index 8b49b0abacd5..f1f6f1c32987 100644 --- a/drivers/gpio/gpio-sim.c +++ b/drivers/gpio/gpio-sim.c @@ -429,6 +429,7 @@ static int gpio_sim_add_bank(struct fwnode_handle *swnode, struct device *dev) gc->set_config = gpio_sim_set_config; 
gc->to_irq = gpio_sim_to_irq; gc->free = gpio_sim_free; + gc->can_sleep = true; ret = devm_gpiochip_add_data(dev, gc, chip); if (ret) diff --git a/drivers/gpio/gpio-ws16c48.c b/drivers/gpio/gpio-ws16c48.c index e73885a4dc32..afb42a8e916f 100644 --- a/drivers/gpio/gpio-ws16c48.c +++ b/drivers/gpio/gpio-ws16c48.c @@ -18,7 +18,7 @@ #include <linux/spinlock.h> #include <linux/types.h> -#define WS16C48_EXTENT 10 +#define WS16C48_EXTENT 11 #define MAX_NUM_WS16C48 max_num_isa_dev(WS16C48_EXTENT) static unsigned int base[MAX_NUM_WS16C48]; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index a3b86b86dc47..6dc950c1b689 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1296,6 +1296,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, void amdgpu_device_pci_config_reset(struct amdgpu_device *adev); int amdgpu_device_pci_reset(struct amdgpu_device *adev); bool amdgpu_device_need_post(struct amdgpu_device *adev); +bool amdgpu_sg_display_supported(struct amdgpu_device *adev); bool amdgpu_device_pcie_dynamic_switching_supported(void); bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev); bool amdgpu_device_aspm_support_quirk(void); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 040f4cb6ab2d..fb78a8f47587 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -295,7 +295,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p, if (!p->gang_size) { ret = -EINVAL; - goto free_partial_kdata; + goto free_all_kdata; } for (i = 0; i < p->gang_size; ++i) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index a2cdde0ca0a7..45e9d737e5b8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1459,6 +1459,32 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev) } /* + * On APUs with >= 64GB white flickering has been observed w/ SG enabled. + * Disable S/G on such systems until we have a proper fix. + * https://gitlab.freedesktop.org/drm/amd/-/issues/2354 + * https://gitlab.freedesktop.org/drm/amd/-/issues/2735 + */ +bool amdgpu_sg_display_supported(struct amdgpu_device *adev) +{ + switch (amdgpu_sg_display) { + case -1: + break; + case 0: + return false; + case 1: + return true; + default: + return false; + } + if ((totalram_pages() << (PAGE_SHIFT - 10)) + + (adev->gmc.real_vram_size / 1024) >= 64000000) { + DRM_WARN("Disabling S/G due to >=64GB RAM\n"); + return false; + } + return true; +} + +/* * Intel hosts such as Raptor Lake and Sapphire Rapids don't support dynamic * speed switching. Until we have confirmation from Intel that a specific host * supports it, it's safer that we keep it disabled for all. 
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 3a7af59e83ca..0451533ddde4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -471,8 +471,12 @@ static void gfx_v11_0_check_fw_cp_gfx_shadow(struct amdgpu_device *adev) case IP_VERSION(11, 0, 3): if ((adev->gfx.me_fw_version >= 1505) && (adev->gfx.pfp_fw_version >= 1600) && - (adev->gfx.mec_fw_version >= 512)) - adev->gfx.cp_gfx_shadow = true; + (adev->gfx.mec_fw_version >= 512)) { + if (amdgpu_sriov_vf(adev)) + adev->gfx.cp_gfx_shadow = true; + else + adev->gfx.cp_gfx_shadow = false; + } break; default: adev->gfx.cp_gfx_shadow = false; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c index e1a392bcea70..af5685f4cb34 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c @@ -137,14 +137,15 @@ static int psp_v13_0_wait_for_bootloader(struct psp_context *psp) int ret; int retry_loop; + /* Wait for bootloader to signify that it is ready having bit 31 of + * C2PMSG_35 set to 1. All other bits are expected to be cleared. + * If there is an error in processing command, bits[7:0] will be set. + * This is applicable for PSP v13.0.6 and newer. + */ for (retry_loop = 0; retry_loop < 10; retry_loop++) { - /* Wait for bootloader to signify that is - ready having bit 31 of C2PMSG_35 set to 1 */ - ret = psp_wait_for(psp, - SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35), - 0x80000000, - 0x80000000, - false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35), + 0x80000000, 0xffffffff, false); if (ret == 0) return 0; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index 49f40d9f16e8..f5a6f562e2a8 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -1543,11 +1543,7 @@ static bool kfd_ignore_crat(void) if (ignore_crat) return true; -#ifndef KFD_SUPPORT_IOMMU_V2 ret = true; -#else - ret = false; -#endif return ret; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 0b3dc754e06b..a53e0757fe64 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -194,11 +194,6 @@ static void kfd_device_info_init(struct kfd_dev *kfd, kfd_device_info_set_event_interrupt_class(kfd); - /* Raven */ - if (gc_version == IP_VERSION(9, 1, 0) || - gc_version == IP_VERSION(9, 2, 2)) - kfd->device_info.needs_iommu_device = true; - if (gc_version < IP_VERSION(11, 0, 0)) { /* Navi2x+, Navi1x+ */ if (gc_version == IP_VERSION(10, 3, 6)) @@ -233,10 +228,6 @@ static void kfd_device_info_init(struct kfd_dev *kfd, asic_type != CHIP_TONGA) kfd->device_info.supports_cwsr = true; - if (asic_type == CHIP_KAVERI || - asic_type == CHIP_CARRIZO) - kfd->device_info.needs_iommu_device = true; - if (asic_type != CHIP_HAWAII && !vf) kfd->device_info.needs_pci_atomics = true; } @@ -249,7 +240,6 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf) uint32_t gfx_target_version = 0; switch (adev->asic_type) { -#ifdef KFD_SUPPORT_IOMMU_V2 #ifdef CONFIG_DRM_AMDGPU_CIK case CHIP_KAVERI: gfx_target_version = 70000; @@ -262,7 +252,6 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf) if (!vf) f2g = &gfx_v8_kfd2kgd; break; -#endif #ifdef CONFIG_DRM_AMDGPU_CIK case CHIP_HAWAII: gfx_target_version = 70001; @@ -298,7 +287,6 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf) 
gfx_target_version = 90000; f2g = &gfx_v9_kfd2kgd; break; -#ifdef KFD_SUPPORT_IOMMU_V2 /* Raven */ case IP_VERSION(9, 1, 0): case IP_VERSION(9, 2, 2): @@ -306,7 +294,6 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf) if (!vf) f2g = &gfx_v9_kfd2kgd; break; -#endif /* Vega12 */ case IP_VERSION(9, 2, 1): gfx_target_version = 90004; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 2df153828ff4..01192f5abe46 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -2538,18 +2538,12 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev) } switch (dev->adev->asic_type) { - case CHIP_CARRIZO: - device_queue_manager_init_vi(&dqm->asic_ops); - break; - case CHIP_KAVERI: - device_queue_manager_init_cik(&dqm->asic_ops); - break; - case CHIP_HAWAII: device_queue_manager_init_cik_hawaii(&dqm->asic_ops); break; + case CHIP_CARRIZO: case CHIP_TONGA: case CHIP_FIJI: case CHIP_POLARIS10: diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 0fa739fd6a9c..e5554a36e8c8 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1638,9 +1638,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) } break; } - if (init_data.flags.gpu_vm_support && - (amdgpu_sg_display == 0)) - init_data.flags.gpu_vm_support = false; + if (init_data.flags.gpu_vm_support) + init_data.flags.gpu_vm_support = amdgpu_sg_display_supported(adev); if (init_data.flags.gpu_vm_support) adev->mode_info.gpu_vm_support = true; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 9bc86deac9e8..b885c39bd16b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -1320,7 +1320,7 @@ int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, if (computed_streams[i]) continue; - if (!res_pool->funcs->remove_stream_from_ctx || + if (res_pool->funcs->remove_stream_from_ctx && res_pool->funcs->remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK) return -EINVAL; diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 20d4d08a6a2f..6966420dfbac 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -777,7 +777,8 @@ void dce110_edp_wait_for_hpd_ready( dal_gpio_destroy_irq(&hpd); /* ensure that the panel is detected */ - ASSERT(edp_hpd_high); + if (!edp_hpd_high) + DC_LOG_DC("%s: wait timed out!\n", __func__); } void dce110_edp_power_control( diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c index e5b7ef7422b8..50dc83404644 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c @@ -357,8 +357,11 @@ void dpp3_set_cursor_attributes( int cur_rom_en = 0; if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA || - color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) - cur_rom_en = 1; + color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) { + if (cursor_attributes->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA) { + cur_rom_en = 1; + } 
+ } REG_UPDATE_3(CURSOR0_CONTROL, CUR0_MODE, color_format, diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index ce41a8309582..222af2fae745 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -1581,9 +1581,9 @@ static int smu_disable_dpms(struct smu_context *smu) /* * For SMU 13.0.4/11, PMFW will handle the features disablement properly - * for gpu reset case. Driver involvement is unnecessary. + * for gpu reset and S0i3 cases. Driver involvement is unnecessary. */ - if (amdgpu_in_reset(adev)) { + if (amdgpu_in_reset(adev) || adev->in_s0ix) { switch (adev->ip_versions[MP1_HWIP][0]) { case IP_VERSION(13, 0, 4): case IP_VERSION(13, 0, 11): diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index 3d188616ba24..fddcd834bcec 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -331,11 +331,13 @@ static int smu_v13_0_0_check_powerplay_table(struct smu_context *smu) struct smu_13_0_0_powerplay_table *powerplay_table = table_context->power_play_table; struct smu_baco_context *smu_baco = &smu->smu_baco; +#if 0 PPTable_t *pptable = smu->smu_table.driver_pptable; const OverDriveLimits_t * const overdrive_upperlimits = &pptable->SkuTable.OverDriveLimitsBasicMax; const OverDriveLimits_t * const overdrive_lowerlimits = &pptable->SkuTable.OverDriveLimitsMin; +#endif if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_HARDWAREDC) smu->dc_controlled_by_gpio = true; @@ -347,18 +349,27 @@ static int smu_v13_0_0_check_powerplay_table(struct smu_context *smu) if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO) smu_baco->maco_support = true; + /* + * We are in the transition to a new OD mechanism. + * Disable the OD feature support for SMU13 temporarily. + * TODO: get this reverted when new OD mechanism online + */ +#if 0 if (!overdrive_lowerlimits->FeatureCtrlMask || !overdrive_upperlimits->FeatureCtrlMask) smu->od_enabled = false; - table_context->thermal_controller_type = - powerplay_table->thermal_controller_type; - /* * Instead of having its own buffer space and get overdrive_table copied, * smu->od_settings just points to the actual overdrive_table */ smu->od_settings = &powerplay_table->overdrive_table; +#else + smu->od_enabled = false; +#endif + + table_context->thermal_controller_type = + powerplay_table->thermal_controller_type; return 0; } @@ -1140,7 +1151,6 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu, (OverDriveTableExternal_t *)smu->smu_table.overdrive_table; struct smu_13_0_dpm_table *single_dpm_table; struct smu_13_0_pcie_table *pcie_table; - const int link_width[] = {0, 1, 2, 4, 8, 12, 16}; uint32_t gen_speed, lane_width; int i, curr_freq, size = 0; int32_t min_value, max_value; @@ -1256,7 +1266,7 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu, (pcie_table->pcie_lane[i] == 6) ? "x16" : "", pcie_table->clk_freq[i], (gen_speed == DECODE_GEN_SPEED(pcie_table->pcie_gen[i])) && - (lane_width == DECODE_LANE_WIDTH(link_width[pcie_table->pcie_lane[i]])) ? + (lane_width == DECODE_LANE_WIDTH(pcie_table->pcie_lane[i])) ? 
"*" : ""); break; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index 1ac552142763..fe4ee2daa5d8 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -1993,9 +1993,8 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table gpu_metrics->average_socket_power = SMUQ10_TO_UINT(metrics->SocketPower); - /* Energy is reported in 15.625mJ units */ - gpu_metrics->energy_accumulator = - SMUQ10_TO_UINT(metrics->SocketEnergyAcc); + /* Energy counter reported in 15.259uJ (2^-16) units */ + gpu_metrics->energy_accumulator = metrics->SocketEnergyAcc; gpu_metrics->current_gfxclk = SMUQ10_TO_UINT(metrics->GfxclkFrequency[xcc0]); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index b1f0937ccade..62f2886ab4df 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -323,10 +323,12 @@ static int smu_v13_0_7_check_powerplay_table(struct smu_context *smu) struct smu_baco_context *smu_baco = &smu->smu_baco; PPTable_t *smc_pptable = table_context->driver_pptable; BoardTable_t *BoardTable = &smc_pptable->BoardTable; +#if 0 const OverDriveLimits_t * const overdrive_upperlimits = &smc_pptable->SkuTable.OverDriveLimitsBasicMax; const OverDriveLimits_t * const overdrive_lowerlimits = &smc_pptable->SkuTable.OverDriveLimitsMin; +#endif if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_HARDWAREDC) smu->dc_controlled_by_gpio = true; @@ -338,18 +340,22 @@ static int smu_v13_0_7_check_powerplay_table(struct smu_context *smu) if (smu_baco->platform_support && (BoardTable->HsrEnabled || BoardTable->VddqOffEnabled)) smu_baco->maco_support = true; +#if 0 if (!overdrive_lowerlimits->FeatureCtrlMask || !overdrive_upperlimits->FeatureCtrlMask) smu->od_enabled = false; - table_context->thermal_controller_type = - powerplay_table->thermal_controller_type; - /* * Instead of having its own buffer space and get overdrive_table copied, * smu->od_settings just points to the actual overdrive_table */ smu->od_settings = &powerplay_table->overdrive_table; +#else + smu->od_enabled = false; +#endif + + table_context->thermal_controller_type = + powerplay_table->thermal_controller_type; return 0; } diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c index 504d51c42f79..aadb396508c5 100644 --- a/drivers/gpu/drm/bridge/ite-it6505.c +++ b/drivers/gpu/drm/bridge/ite-it6505.c @@ -2517,9 +2517,11 @@ static irqreturn_t it6505_int_threaded_handler(int unused, void *data) }; int int_status[3], i; - if (it6505->enable_drv_hold || pm_runtime_get_if_in_use(dev) <= 0) + if (it6505->enable_drv_hold || !it6505->powered) return IRQ_HANDLED; + pm_runtime_get_sync(dev); + int_status[0] = it6505_read(it6505, INT_STATUS_01); int_status[1] = it6505_read(it6505, INT_STATUS_02); int_status[2] = it6505_read(it6505, INT_STATUS_03); diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c index 5163e5224aad..9663601ce098 100644 --- a/drivers/gpu/drm/bridge/lontium-lt9611.c +++ b/drivers/gpu/drm/bridge/lontium-lt9611.c @@ -774,9 +774,7 @@ static struct mipi_dsi_device *lt9611_attach_dsi(struct lt9611 *lt9611, dsi->lanes = 4; dsi->format = MIPI_DSI_FMT_RGB888; dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE | - MIPI_DSI_MODE_VIDEO_HSE | 
MIPI_DSI_MODE_VIDEO_NO_HSA | - MIPI_DSI_MODE_VIDEO_NO_HFP | MIPI_DSI_MODE_VIDEO_NO_HBP | - MIPI_DSI_MODE_NO_EOT_PACKET; + MIPI_DSI_MODE_VIDEO_HSE; ret = devm_mipi_dsi_attach(dev, dsi); if (ret < 0) { diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index 4ea6507a77e5..baaf0e0feb06 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -623,7 +623,13 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct int ret; if (obj->import_attach) { + /* Reset both vm_ops and vm_private_data, so we don't end up with + * vm_ops pointing to our implementation if the dma-buf backend + * doesn't set those fields. + */ vma->vm_private_data = NULL; + vma->vm_ops = NULL; + ret = dma_buf_mmap(obj->dma_buf, vma, 0); /* Drop the reference drm_gem_mmap_obj() acquired.*/ diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c index 23857cc08eca..2702ad4c26c8 100644 --- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c @@ -165,14 +165,60 @@ static u32 preparser_disable(bool state) return MI_ARB_CHECK | 1 << 8 | state; } -u32 *gen12_emit_aux_table_inv(struct intel_gt *gt, u32 *cs, const i915_reg_t inv_reg) +static i915_reg_t gen12_get_aux_inv_reg(struct intel_engine_cs *engine) { - u32 gsi_offset = gt->uncore->gsi_offset; + switch (engine->id) { + case RCS0: + return GEN12_CCS_AUX_INV; + case BCS0: + return GEN12_BCS0_AUX_INV; + case VCS0: + return GEN12_VD0_AUX_INV; + case VCS2: + return GEN12_VD2_AUX_INV; + case VECS0: + return GEN12_VE0_AUX_INV; + case CCS0: + return GEN12_CCS0_AUX_INV; + default: + return INVALID_MMIO_REG; + } +} + +static bool gen12_needs_ccs_aux_inv(struct intel_engine_cs *engine) +{ + i915_reg_t reg = gen12_get_aux_inv_reg(engine); + + if (IS_PONTEVECCHIO(engine->i915)) + return false; + + /* + * So far platforms supported by i915 having flat ccs do not require + * AUX invalidation. Check also whether the engine requires it. 
+ */ + return i915_mmio_reg_valid(reg) && !HAS_FLAT_CCS(engine->i915); +} + +u32 *gen12_emit_aux_table_inv(struct intel_engine_cs *engine, u32 *cs) +{ + i915_reg_t inv_reg = gen12_get_aux_inv_reg(engine); + u32 gsi_offset = engine->gt->uncore->gsi_offset; + + if (!gen12_needs_ccs_aux_inv(engine)) + return cs; *cs++ = MI_LOAD_REGISTER_IMM(1) | MI_LRI_MMIO_REMAP_EN; *cs++ = i915_mmio_reg_offset(inv_reg) + gsi_offset; *cs++ = AUX_INV; - *cs++ = MI_NOOP; + + *cs++ = MI_SEMAPHORE_WAIT_TOKEN | + MI_SEMAPHORE_REGISTER_POLL | + MI_SEMAPHORE_POLL | + MI_SEMAPHORE_SAD_EQ_SDD; + *cs++ = 0; + *cs++ = i915_mmio_reg_offset(inv_reg) + gsi_offset; + *cs++ = 0; + *cs++ = 0; return cs; } @@ -202,8 +248,13 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode) { struct intel_engine_cs *engine = rq->engine; - if (mode & EMIT_FLUSH) { - u32 flags = 0; + /* + * On Aux CCS platforms the invalidation of the Aux + * table requires quiescing memory traffic beforehand + */ + if (mode & EMIT_FLUSH || gen12_needs_ccs_aux_inv(engine)) { + u32 bit_group_0 = 0; + u32 bit_group_1 = 0; int err; u32 *cs; @@ -211,32 +262,40 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode) if (err) return err; - flags |= PIPE_CONTROL_TILE_CACHE_FLUSH; - flags |= PIPE_CONTROL_FLUSH_L3; - flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; - flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; + bit_group_0 |= PIPE_CONTROL0_HDC_PIPELINE_FLUSH; + + /* + * When required, in MTL and beyond platforms we + * need to set the CCS_FLUSH bit in the pipe control + */ + if (GRAPHICS_VER_FULL(rq->i915) >= IP_VER(12, 70)) + bit_group_0 |= PIPE_CONTROL_CCS_FLUSH; + + bit_group_1 |= PIPE_CONTROL_TILE_CACHE_FLUSH; + bit_group_1 |= PIPE_CONTROL_FLUSH_L3; + bit_group_1 |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; + bit_group_1 |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; /* Wa_1409600907:tgl,adl-p */ - flags |= PIPE_CONTROL_DEPTH_STALL; - flags |= PIPE_CONTROL_DC_FLUSH_ENABLE; - flags |= PIPE_CONTROL_FLUSH_ENABLE; + bit_group_1 |= PIPE_CONTROL_DEPTH_STALL; + bit_group_1 |= PIPE_CONTROL_DC_FLUSH_ENABLE; + bit_group_1 |= PIPE_CONTROL_FLUSH_ENABLE; - flags |= PIPE_CONTROL_STORE_DATA_INDEX; - flags |= PIPE_CONTROL_QW_WRITE; + bit_group_1 |= PIPE_CONTROL_STORE_DATA_INDEX; + bit_group_1 |= PIPE_CONTROL_QW_WRITE; - flags |= PIPE_CONTROL_CS_STALL; + bit_group_1 |= PIPE_CONTROL_CS_STALL; if (!HAS_3D_PIPELINE(engine->i915)) - flags &= ~PIPE_CONTROL_3D_ARCH_FLAGS; + bit_group_1 &= ~PIPE_CONTROL_3D_ARCH_FLAGS; else if (engine->class == COMPUTE_CLASS) - flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS; + bit_group_1 &= ~PIPE_CONTROL_3D_ENGINE_FLAGS; cs = intel_ring_begin(rq, 6); if (IS_ERR(cs)) return PTR_ERR(cs); - cs = gen12_emit_pipe_control(cs, - PIPE_CONTROL0_HDC_PIPELINE_FLUSH, - flags, LRC_PPHWSP_SCRATCH_ADDR); + cs = gen12_emit_pipe_control(cs, bit_group_0, bit_group_1, + LRC_PPHWSP_SCRATCH_ADDR); intel_ring_advance(rq, cs); } @@ -267,10 +326,9 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode) else if (engine->class == COMPUTE_CLASS) flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS; - if (!HAS_FLAT_CCS(rq->engine->i915)) - count = 8 + 4; - else - count = 8; + count = 8; + if (gen12_needs_ccs_aux_inv(rq->engine)) + count += 8; cs = intel_ring_begin(rq, count); if (IS_ERR(cs)) @@ -285,11 +343,7 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode) cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR); - if (!HAS_FLAT_CCS(rq->engine->i915)) { - /* hsdes: 1809175790 */ - cs = gen12_emit_aux_table_inv(rq->engine->gt, - cs, GEN12_GFX_CCS_AUX_NV); - } + cs = 
gen12_emit_aux_table_inv(engine, cs); *cs++ = preparser_disable(false); intel_ring_advance(rq, cs); @@ -300,21 +354,14 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode) int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode) { - intel_engine_mask_t aux_inv = 0; - u32 cmd, *cs; + u32 cmd = 4; + u32 *cs; - cmd = 4; if (mode & EMIT_INVALIDATE) { cmd += 2; - if (!HAS_FLAT_CCS(rq->engine->i915) && - (rq->engine->class == VIDEO_DECODE_CLASS || - rq->engine->class == VIDEO_ENHANCEMENT_CLASS)) { - aux_inv = rq->engine->mask & - ~GENMASK(_BCS(I915_MAX_BCS - 1), BCS0); - if (aux_inv) - cmd += 4; - } + if (gen12_needs_ccs_aux_inv(rq->engine)) + cmd += 8; } cs = intel_ring_begin(rq, cmd); @@ -338,6 +385,10 @@ int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode) cmd |= MI_INVALIDATE_TLB; if (rq->engine->class == VIDEO_DECODE_CLASS) cmd |= MI_INVALIDATE_BSD; + + if (gen12_needs_ccs_aux_inv(rq->engine) && + rq->engine->class == COPY_ENGINE_CLASS) + cmd |= MI_FLUSH_DW_CCS; } *cs++ = cmd; @@ -345,14 +396,7 @@ int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode) *cs++ = 0; /* upper addr */ *cs++ = 0; /* value */ - if (aux_inv) { /* hsdes: 1809175790 */ - if (rq->engine->class == VIDEO_DECODE_CLASS) - cs = gen12_emit_aux_table_inv(rq->engine->gt, - cs, GEN12_VD0_AUX_NV); - else - cs = gen12_emit_aux_table_inv(rq->engine->gt, - cs, GEN12_VE0_AUX_NV); - } + cs = gen12_emit_aux_table_inv(rq->engine, cs); if (mode & EMIT_INVALIDATE) *cs++ = preparser_disable(false); diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.h b/drivers/gpu/drm/i915/gt/gen8_engine_cs.h index 655e5c00ddc2..867ba697aceb 100644 --- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.h +++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.h @@ -13,6 +13,7 @@ #include "intel_gt_regs.h" #include "intel_gpu_commands.h" +struct intel_engine_cs; struct intel_gt; struct i915_request; @@ -46,28 +47,32 @@ u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs); u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs); u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs); -u32 *gen12_emit_aux_table_inv(struct intel_gt *gt, u32 *cs, const i915_reg_t inv_reg); +u32 *gen12_emit_aux_table_inv(struct intel_engine_cs *engine, u32 *cs); static inline u32 * -__gen8_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset) +__gen8_emit_pipe_control(u32 *batch, u32 bit_group_0, + u32 bit_group_1, u32 offset) { memset(batch, 0, 6 * sizeof(u32)); - batch[0] = GFX_OP_PIPE_CONTROL(6) | flags0; - batch[1] = flags1; + batch[0] = GFX_OP_PIPE_CONTROL(6) | bit_group_0; + batch[1] = bit_group_1; batch[2] = offset; return batch + 6; } -static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset) +static inline u32 *gen8_emit_pipe_control(u32 *batch, + u32 bit_group_1, u32 offset) { - return __gen8_emit_pipe_control(batch, 0, flags, offset); + return __gen8_emit_pipe_control(batch, 0, bit_group_1, offset); } -static inline u32 *gen12_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset) +static inline u32 *gen12_emit_pipe_control(u32 *batch, u32 bit_group_0, + u32 bit_group_1, u32 offset) { - return __gen8_emit_pipe_control(batch, flags0, flags1, offset); + return __gen8_emit_pipe_control(batch, bit_group_0, + bit_group_1, offset); } static inline u32 * diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h index 5d143e2a8db0..2bd8d98d2110 100644 --- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h +++ 
b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h @@ -121,6 +121,7 @@ #define MI_SEMAPHORE_TARGET(engine) ((engine)<<15) #define MI_SEMAPHORE_WAIT MI_INSTR(0x1c, 2) /* GEN8+ */ #define MI_SEMAPHORE_WAIT_TOKEN MI_INSTR(0x1c, 3) /* GEN12+ */ +#define MI_SEMAPHORE_REGISTER_POLL (1 << 16) #define MI_SEMAPHORE_POLL (1 << 15) #define MI_SEMAPHORE_SAD_GT_SDD (0 << 12) #define MI_SEMAPHORE_SAD_GTE_SDD (1 << 12) @@ -299,6 +300,7 @@ #define PIPE_CONTROL_QW_WRITE (1<<14) #define PIPE_CONTROL_POST_SYNC_OP_MASK (3<<14) #define PIPE_CONTROL_DEPTH_STALL (1<<13) +#define PIPE_CONTROL_CCS_FLUSH (1<<13) /* MTL+ */ #define PIPE_CONTROL_WRITE_FLUSH (1<<12) #define PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH (1<<12) /* gen6+ */ #define PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE (1<<11) /* MBZ on ILK */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h index 718cb2c80f79..2cdfb2f713d0 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h @@ -332,9 +332,11 @@ #define GEN8_PRIVATE_PAT_HI _MMIO(0x40e0 + 4) #define GEN10_PAT_INDEX(index) _MMIO(0x40e0 + (index) * 4) #define BSD_HWS_PGA_GEN7 _MMIO(0x4180) -#define GEN12_GFX_CCS_AUX_NV _MMIO(0x4208) -#define GEN12_VD0_AUX_NV _MMIO(0x4218) -#define GEN12_VD1_AUX_NV _MMIO(0x4228) + +#define GEN12_CCS_AUX_INV _MMIO(0x4208) +#define GEN12_VD0_AUX_INV _MMIO(0x4218) +#define GEN12_VE0_AUX_INV _MMIO(0x4238) +#define GEN12_BCS0_AUX_INV _MMIO(0x4248) #define GEN8_RTCR _MMIO(0x4260) #define GEN8_M1TCR _MMIO(0x4264) @@ -342,14 +344,12 @@ #define GEN8_BTCR _MMIO(0x426c) #define GEN8_VTCR _MMIO(0x4270) -#define GEN12_VD2_AUX_NV _MMIO(0x4298) -#define GEN12_VD3_AUX_NV _MMIO(0x42a8) -#define GEN12_VE0_AUX_NV _MMIO(0x4238) - #define BLT_HWS_PGA_GEN7 _MMIO(0x4280) -#define GEN12_VE1_AUX_NV _MMIO(0x42b8) +#define GEN12_VD2_AUX_INV _MMIO(0x4298) +#define GEN12_CCS0_AUX_INV _MMIO(0x42c8) #define AUX_INV REG_BIT(0) + #define VEBOX_HWS_PGA_GEN7 _MMIO(0x4380) #define GEN12_AUX_ERR_DBG _MMIO(0x43f4) diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index a4ec20aaafe2..9477c2422321 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -1364,10 +1364,7 @@ gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs) IS_DG2_G11(ce->engine->i915)) cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE, 0); - /* hsdes: 1809175790 */ - if (!HAS_FLAT_CCS(ce->engine->i915)) - cs = gen12_emit_aux_table_inv(ce->engine->gt, - cs, GEN12_GFX_CCS_AUX_NV); + cs = gen12_emit_aux_table_inv(ce->engine, cs); /* Wa_16014892111 */ if (IS_MTL_GRAPHICS_STEP(ce->engine->i915, M, STEP_A0, STEP_B0) || @@ -1392,17 +1389,7 @@ gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs) PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE, 0); - /* hsdes: 1809175790 */ - if (!HAS_FLAT_CCS(ce->engine->i915)) { - if (ce->engine->class == VIDEO_DECODE_CLASS) - cs = gen12_emit_aux_table_inv(ce->engine->gt, - cs, GEN12_VD0_AUX_NV); - else if (ce->engine->class == VIDEO_ENHANCEMENT_CLASS) - cs = gen12_emit_aux_table_inv(ce->engine->gt, - cs, GEN12_VE0_AUX_NV); - } - - return cs; + return gen12_emit_aux_table_inv(ce->engine, cs); } static void diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c index 2a0438f12a14..af9afdb53c7f 100644 --- a/drivers/gpu/drm/i915/gvt/edid.c +++ b/drivers/gpu/drm/i915/gvt/edid.c @@ -491,7 +491,7 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu, return; } - msg_length = 
REG_FIELD_GET(DP_AUX_CH_CTL_MESSAGE_SIZE_MASK, reg); + msg_length = REG_FIELD_GET(DP_AUX_CH_CTL_MESSAGE_SIZE_MASK, value); // check the msg in DATA register. msg = vgpu_vreg(vgpu, offset + 4); diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c index 8ef93889061a..5ec293011d99 100644 --- a/drivers/gpu/drm/i915/i915_active.c +++ b/drivers/gpu/drm/i915/i915_active.c @@ -449,8 +449,11 @@ int i915_active_add_request(struct i915_active *ref, struct i915_request *rq) } } while (unlikely(is_barrier(active))); - if (!__i915_active_fence_set(active, fence)) + fence = __i915_active_fence_set(active, fence); + if (!fence) __i915_active_acquire(ref); + else + dma_fence_put(fence); out: i915_active_release(ref); @@ -469,13 +472,9 @@ __i915_active_set_fence(struct i915_active *ref, return NULL; } - rcu_read_lock(); prev = __i915_active_fence_set(active, fence); - if (prev) - prev = dma_fence_get_rcu(prev); - else + if (!prev) __i915_active_acquire(ref); - rcu_read_unlock(); return prev; } @@ -1019,10 +1018,11 @@ void i915_request_add_active_barriers(struct i915_request *rq) * * Records the new @fence as the last active fence along its timeline in * this active tracker, moving the tracking callbacks from the previous - * fence onto this one. Returns the previous fence (if not already completed), - * which the caller must ensure is executed before the new fence. To ensure - * that the order of fences within the timeline of the i915_active_fence is - * understood, it should be locked by the caller. + * fence onto this one. Gets and returns a reference to the previous fence + * (if not already completed), which the caller must put after making sure + * that it is executed before the new fence. To ensure that the order of + * fences within the timeline of the i915_active_fence is understood, it + * should be locked by the caller. */ struct dma_fence * __i915_active_fence_set(struct i915_active_fence *active, @@ -1031,7 +1031,23 @@ __i915_active_fence_set(struct i915_active_fence *active, struct dma_fence *prev; unsigned long flags; - if (fence == rcu_access_pointer(active->fence)) + /* + * In case of fences embedded in i915_requests, their memory is + * SLAB_FAILSAFE_BY_RCU, then it can be reused right after release + * by new requests. Then, there is a risk of passing back a pointer + * to a new, completely unrelated fence that reuses the same memory + * while tracked under a different active tracker. Combined with i915 + * perf open/close operations that build await dependencies between + * engine kernel context requests and user requests from different + * timelines, this can lead to dependency loops and infinite waits. + * + * As a countermeasure, we try to get a reference to the active->fence + * first, so if we succeed and pass it back to our user then it is not + * released and potentially reused by an unrelated request before the + * user has a chance to set up an await dependency on it. + */ + prev = i915_active_fence_get(active); + if (fence == prev) return fence; GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)); @@ -1040,27 +1056,56 @@ __i915_active_fence_set(struct i915_active_fence *active, * Consider that we have two threads arriving (A and B), with * C already resident as the active->fence. * - * A does the xchg first, and so it sees C or NULL depending - * on the timing of the interrupt handler. If it is NULL, the - * previous fence must have been signaled and we know that - * we are first on the timeline. 
If it is still present, - * we acquire the lock on that fence and serialise with the interrupt - * handler, in the process removing it from any future interrupt - * callback. A will then wait on C before executing (if present). - * - * As B is second, it sees A as the previous fence and so waits for - * it to complete its transition and takes over the occupancy for - * itself -- remembering that it needs to wait on A before executing. + * Both A and B have got a reference to C or NULL, depending on the + * timing of the interrupt handler. Let's assume that if A has got C + * then it has locked C first (before B). * * Note the strong ordering of the timeline also provides consistent * nesting rules for the fence->lock; the inner lock is always the * older lock. */ spin_lock_irqsave(fence->lock, flags); - prev = xchg(__active_fence_slot(active), fence); - if (prev) { - GEM_BUG_ON(prev == fence); + if (prev) spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING); + + /* + * A does the cmpxchg first, and so it sees C or NULL, as before, or + * something else, depending on the timing of other threads and/or + * interrupt handler. If not the same as before then A unlocks C if + * applicable and retries, starting from an attempt to get a new + * active->fence. Meanwhile, B follows the same path as A. + * Once A succeeds with cmpxch, B fails again, retires, gets A from + * active->fence, locks it as soon as A completes, and possibly + * succeeds with cmpxchg. + */ + while (cmpxchg(__active_fence_slot(active), prev, fence) != prev) { + if (prev) { + spin_unlock(prev->lock); + dma_fence_put(prev); + } + spin_unlock_irqrestore(fence->lock, flags); + + prev = i915_active_fence_get(active); + GEM_BUG_ON(prev == fence); + + spin_lock_irqsave(fence->lock, flags); + if (prev) + spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING); + } + + /* + * If prev is NULL then the previous fence must have been signaled + * and we know that we are first on the timeline. If it is still + * present then, having the lock on that fence already acquired, we + * serialise with the interrupt handler, in the process of removing it + * from any future interrupt callback. A will then wait on C before + * executing (if present). + * + * As B is second, it sees A as the previous fence and so waits for + * it to complete its transition and takes over the occupancy for + * itself -- remembering that it needs to wait on A before executing. + */ + if (prev) { __list_del_entry(&active->cb.node); spin_unlock(prev->lock); /* serialise with prev->cb_list */ } @@ -1077,11 +1122,7 @@ int i915_active_fence_set(struct i915_active_fence *active, int err = 0; /* Must maintain timeline ordering wrt previous active requests */ - rcu_read_lock(); fence = __i915_active_fence_set(active, &rq->fence); - if (fence) /* but the previous fence may not belong to that timeline! 
*/ - fence = dma_fence_get_rcu(fence); - rcu_read_unlock(); if (fence) { err = i915_request_await_dma_fence(rq, fence); dma_fence_put(fence); diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 894068bb37b6..833b73edefdb 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -1661,6 +1661,11 @@ __i915_request_ensure_parallel_ordering(struct i915_request *rq, request_to_parent(rq)->parallel.last_rq = i915_request_get(rq); + /* + * Users have to put a reference potentially got by + * __i915_active_fence_set() to the returned request + * when no longer needed + */ return to_request(__i915_active_fence_set(&timeline->last_request, &rq->fence)); } @@ -1707,6 +1712,10 @@ __i915_request_ensure_ordering(struct i915_request *rq, 0); } + /* + * Users have to put the reference to prev potentially got + * by __i915_active_fence_set() when no longer needed + */ return prev; } @@ -1760,6 +1769,8 @@ __i915_request_add_to_timeline(struct i915_request *rq) prev = __i915_request_ensure_ordering(rq, timeline); else prev = __i915_request_ensure_parallel_ordering(rq, timeline); + if (prev) + i915_request_put(prev); /* * Make sure that no request gazumped us - if it was allocated after diff --git a/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c index 5f26090b0c98..89585b31b985 100644 --- a/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c @@ -310,7 +310,7 @@ static void ipu_crtc_mode_set_nofb(struct drm_crtc *crtc) dev_warn(ipu_crtc->dev, "8-pixel align hactive %d -> %d\n", sig_cfg.mode.hactive, new_hactive); - sig_cfg.mode.hfront_porch = new_hactive - sig_cfg.mode.hactive; + sig_cfg.mode.hfront_porch -= new_hactive - sig_cfg.mode.hactive; sig_cfg.mode.hactive = new_hactive; } diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index f75c6f09dd2a..a2e0033e8a26 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -967,7 +967,7 @@ nouveau_connector_get_modes(struct drm_connector *connector) /* Determine display colour depth for everything except LVDS now, * DP requires this before mode_valid() is called. */ - if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS && nv_connector->native_mode) + if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS) nouveau_connector_detect_depth(connector); /* Find the native mode if this is a digital panel, if we didn't diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c index 40c8ea43c42f..b8ac66b4a2c4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c @@ -26,6 +26,8 @@ #include "head.h" #include "ior.h" +#include <drm/display/drm_dp.h> + #include <subdev/bios.h> #include <subdev/bios/init.h> #include <subdev/gpio.h> @@ -634,6 +636,50 @@ nvkm_dp_enable_supported_link_rates(struct nvkm_outp *outp) return outp->dp.rates != 0; } +/* XXX: This is a big fat hack, and this is just drm_dp_read_dpcd_caps() + * converted to work inside nvkm. 
This is a temporary holdover until we start + * passing the drm_dp_aux device through NVKM + */ +static int +nvkm_dp_read_dpcd_caps(struct nvkm_outp *outp) +{ + struct nvkm_i2c_aux *aux = outp->dp.aux; + u8 dpcd_ext[DP_RECEIVER_CAP_SIZE]; + int ret; + + ret = nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, outp->dp.dpcd, DP_RECEIVER_CAP_SIZE); + if (ret < 0) + return ret; + + /* + * Prior to DP1.3 the bit represented by + * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved. + * If it is set DP_DPCD_REV at 0000h could be at a value less than + * the true capability of the panel. The only way to check is to + * then compare 0000h and 2200h. + */ + if (!(outp->dp.dpcd[DP_TRAINING_AUX_RD_INTERVAL] & + DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT)) + return 0; + + ret = nvkm_rdaux(aux, DP_DP13_DPCD_REV, dpcd_ext, sizeof(dpcd_ext)); + if (ret < 0) + return ret; + + if (outp->dp.dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) { + OUTP_DBG(outp, "Extended DPCD rev less than base DPCD rev (%d > %d)\n", + outp->dp.dpcd[DP_DPCD_REV], dpcd_ext[DP_DPCD_REV]); + return 0; + } + + if (!memcmp(outp->dp.dpcd, dpcd_ext, sizeof(dpcd_ext))) + return 0; + + memcpy(outp->dp.dpcd, dpcd_ext, sizeof(dpcd_ext)); + + return 0; +} + void nvkm_dp_enable(struct nvkm_outp *outp, bool auxpwr) { @@ -689,7 +735,7 @@ nvkm_dp_enable(struct nvkm_outp *outp, bool auxpwr) memset(outp->dp.lttpr, 0x00, sizeof(outp->dp.lttpr)); } - if (!nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, outp->dp.dpcd, sizeof(outp->dp.dpcd))) { + if (!nvkm_dp_read_dpcd_caps(outp)) { const u8 rates[] = { 0x1e, 0x14, 0x0a, 0x06, 0 }; const u8 *rate; int rate_max; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h index 00dbeda7e346..de161e7a04aa 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h @@ -117,6 +117,7 @@ void gk104_grctx_generate_r418800(struct gf100_gr *); extern const struct gf100_grctx_func gk110_grctx; void gk110_grctx_generate_r419eb0(struct gf100_gr *); +void gk110_grctx_generate_r419f78(struct gf100_gr *); extern const struct gf100_grctx_func gk110b_grctx; extern const struct gf100_grctx_func gk208_grctx; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c index 94233d0119df..52a234b1ef01 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c @@ -906,7 +906,9 @@ static void gk104_grctx_generate_r419f78(struct gf100_gr *gr) { struct nvkm_device *device = gr->base.engine.subdev.device; - nvkm_mask(device, 0x419f78, 0x00000001, 0x00000000); + + /* bit 3 set disables loads in fp helper invocations, we need it enabled */ + nvkm_mask(device, 0x419f78, 0x00000009, 0x00000000); } void diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c index 4391458e1fb2..3acdd9eeb74a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c @@ -820,6 +820,15 @@ gk110_grctx_generate_r419eb0(struct gf100_gr *gr) nvkm_mask(device, 0x419eb0, 0x00001000, 0x00001000); } +void +gk110_grctx_generate_r419f78(struct gf100_gr *gr) +{ + struct nvkm_device *device = gr->base.engine.subdev.device; + + /* bit 3 set disables loads in fp helper invocations, we need it enabled */ + nvkm_mask(device, 0x419f78, 0x00000008, 0x00000000); +} + const struct gf100_grctx_func gk110_grctx = { .main = gf100_grctx_generate_main, @@ -854,4 
+863,5 @@ gk110_grctx = { .gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr, .r418800 = gk104_grctx_generate_r418800, .r419eb0 = gk110_grctx_generate_r419eb0, + .r419f78 = gk110_grctx_generate_r419f78, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c index 7b9a34f9ec3c..5597e87624ac 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c @@ -103,4 +103,5 @@ gk110b_grctx = { .gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr, .r418800 = gk104_grctx_generate_r418800, .r419eb0 = gk110_grctx_generate_r419eb0, + .r419f78 = gk110_grctx_generate_r419f78, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c index c78d07a8bb7d..612656496541 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c @@ -568,4 +568,5 @@ gk208_grctx = { .dist_skip_table = gf117_grctx_generate_dist_skip_table, .gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr, .r418800 = gk104_grctx_generate_r418800, + .r419f78 = gk110_grctx_generate_r419f78, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c index beac66eb2a80..9906974ac3f0 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c @@ -988,4 +988,5 @@ gm107_grctx = { .r406500 = gm107_grctx_generate_r406500, .gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr, .r419e00 = gm107_grctx_generate_r419e00, + .r419f78 = gk110_grctx_generate_r419f78, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c index 3b6c8100a242..a7775aa18541 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c @@ -206,19 +206,6 @@ tu102_gr_av_to_init_veid(struct nvkm_blob *blob, struct gf100_gr_pack **ppack) return gk20a_gr_av_to_init_(blob, 64, 0x00100000, ppack); } -int -tu102_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif) -{ - int ret; - - ret = gm200_gr_load(gr, ver, fwif); - if (ret) - return ret; - - return gk20a_gr_load_net(gr, "gr/", "sw_veid_bundle_init", ver, tu102_gr_av_to_init_veid, - &gr->bundle_veid); -} - static const struct gf100_gr_fwif tu102_gr_fwif[] = { { 0, gm200_gr_load, &tu102_gr, &gp108_gr_fecs_acr, &gp108_gr_gpccs_acr }, diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c index 102e1fc7ee38..be4ec5bb5223 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c @@ -569,6 +569,7 @@ static const struct of_device_id s6d7aa0_of_match[] = { }, { /* sentinel */ } }; +MODULE_DEVICE_TABLE(of, s6d7aa0_of_match); static struct mipi_dsi_driver s6d7aa0_driver = { .probe = s6d7aa0_probe, diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index a530ecc4d207..bf34498c1b6d 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -833,12 +833,12 @@ static int vop_plane_atomic_check(struct drm_plane *plane, * need align with 2 pixel. 
*/ if (fb->format->is_yuv && ((new_plane_state->src.x1 >> 16) % 2)) { - DRM_ERROR("Invalid Source: Yuv format not support odd xpos\n"); + DRM_DEBUG_KMS("Invalid Source: Yuv format not support odd xpos\n"); return -EINVAL; } if (fb->format->is_yuv && new_plane_state->rotation & DRM_MODE_REFLECT_Y) { - DRM_ERROR("Invalid Source: Yuv format does not support this rotation\n"); + DRM_DEBUG_KMS("Invalid Source: Yuv format does not support this rotation\n"); return -EINVAL; } @@ -846,7 +846,7 @@ static int vop_plane_atomic_check(struct drm_plane *plane, struct vop *vop = to_vop(crtc); if (!vop->data->afbc) { - DRM_ERROR("vop does not support AFBC\n"); + DRM_DEBUG_KMS("vop does not support AFBC\n"); return -EINVAL; } @@ -855,15 +855,16 @@ static int vop_plane_atomic_check(struct drm_plane *plane, return ret; if (new_plane_state->src.x1 || new_plane_state->src.y1) { - DRM_ERROR("AFBC does not support offset display, xpos=%d, ypos=%d, offset=%d\n", - new_plane_state->src.x1, - new_plane_state->src.y1, fb->offsets[0]); + DRM_DEBUG_KMS("AFBC does not support offset display, " \ + "xpos=%d, ypos=%d, offset=%d\n", + new_plane_state->src.x1, new_plane_state->src.y1, + fb->offsets[0]); return -EINVAL; } if (new_plane_state->rotation && new_plane_state->rotation != DRM_MODE_ROTATE_0) { - DRM_ERROR("No rotation support in AFBC, rotation=%d\n", - new_plane_state->rotation); + DRM_DEBUG_KMS("No rotation support in AFBC, rotation=%d\n", + new_plane_state->rotation); return -EINVAL; } } diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 7139a522b2f3..54e3083076b7 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -519,7 +519,8 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo, if (bo->pin_count) { *locked = false; - *busy = false; + if (busy) + *busy = false; return false; } diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c index 5978e9dbc286..ebf15f31d97e 100644 --- a/drivers/hv/connection.c +++ b/drivers/hv/connection.c @@ -209,8 +209,7 @@ int vmbus_connect(void) * Setup the vmbus event connection for channel interrupt * abstraction stuff */ - vmbus_connection.int_page = - (void *)hv_alloc_hyperv_zeroed_page(); + vmbus_connection.int_page = hv_alloc_hyperv_zeroed_page(); if (vmbus_connection.int_page == NULL) { ret = -ENOMEM; goto cleanup; @@ -225,8 +224,8 @@ int vmbus_connect(void) * Setup the monitor notification facility. 
The 1st page for * parent->child and the 2nd page for child->parent */ - vmbus_connection.monitor_pages[0] = (void *)hv_alloc_hyperv_page(); - vmbus_connection.monitor_pages[1] = (void *)hv_alloc_hyperv_page(); + vmbus_connection.monitor_pages[0] = hv_alloc_hyperv_page(); + vmbus_connection.monitor_pages[1] = hv_alloc_hyperv_page(); if ((vmbus_connection.monitor_pages[0] == NULL) || (vmbus_connection.monitor_pages[1] == NULL)) { ret = -ENOMEM; @@ -333,15 +332,15 @@ void vmbus_disconnect(void) destroy_workqueue(vmbus_connection.work_queue); if (vmbus_connection.int_page) { - hv_free_hyperv_page((unsigned long)vmbus_connection.int_page); + hv_free_hyperv_page(vmbus_connection.int_page); vmbus_connection.int_page = NULL; } set_memory_encrypted((unsigned long)vmbus_connection.monitor_pages[0], 1); set_memory_encrypted((unsigned long)vmbus_connection.monitor_pages[1], 1); - hv_free_hyperv_page((unsigned long)vmbus_connection.monitor_pages[0]); - hv_free_hyperv_page((unsigned long)vmbus_connection.monitor_pages[1]); + hv_free_hyperv_page(vmbus_connection.monitor_pages[0]); + hv_free_hyperv_page(vmbus_connection.monitor_pages[1]); vmbus_connection.monitor_pages[0] = NULL; vmbus_connection.monitor_pages[1] = NULL; } diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c index dffcc894f117..0d7a3ba66396 100644 --- a/drivers/hv/hv_balloon.c +++ b/drivers/hv/hv_balloon.c @@ -1628,7 +1628,7 @@ static int hv_free_page_report(struct page_reporting_dev_info *pr_dev_info, WARN_ON_ONCE(nents > HV_MEMORY_HINT_MAX_GPA_PAGE_RANGES); WARN_ON_ONCE(sgl->length < (HV_HYP_PAGE_SIZE << page_reporting_order)); local_irq_save(flags); - hint = *(struct hv_memory_hint **)this_cpu_ptr(hyperv_pcpu_input_arg); + hint = *this_cpu_ptr(hyperv_pcpu_input_arg); if (!hint) { local_irq_restore(flags); return -ENOSPC; diff --git a/drivers/hv/hv_common.c b/drivers/hv/hv_common.c index 542a1d53b303..6a2258fef1fe 100644 --- a/drivers/hv/hv_common.c +++ b/drivers/hv/hv_common.c @@ -115,12 +115,12 @@ void *hv_alloc_hyperv_zeroed_page(void) } EXPORT_SYMBOL_GPL(hv_alloc_hyperv_zeroed_page); -void hv_free_hyperv_page(unsigned long addr) +void hv_free_hyperv_page(void *addr) { if (PAGE_SIZE == HV_HYP_PAGE_SIZE) - free_page(addr); + free_page((unsigned long)addr); else - kfree((void *)addr); + kfree(addr); } EXPORT_SYMBOL_GPL(hv_free_hyperv_page); @@ -253,7 +253,7 @@ static void hv_kmsg_dump_unregister(void) atomic_notifier_chain_unregister(&panic_notifier_list, &hyperv_panic_report_block); - hv_free_hyperv_page((unsigned long)hv_panic_page); + hv_free_hyperv_page(hv_panic_page); hv_panic_page = NULL; } @@ -270,7 +270,7 @@ static void hv_kmsg_dump_register(void) ret = kmsg_dump_register(&hv_kmsg_dumper); if (ret) { pr_err("Hyper-V: kmsg dump register error 0x%x\n", ret); - hv_free_hyperv_page((unsigned long)hv_panic_page); + hv_free_hyperv_page(hv_panic_page); hv_panic_page = NULL; } } diff --git a/drivers/hwmon/aquacomputer_d5next.c b/drivers/hwmon/aquacomputer_d5next.c index a997dbcb563f..023807859be7 100644 --- a/drivers/hwmon/aquacomputer_d5next.c +++ b/drivers/hwmon/aquacomputer_d5next.c @@ -13,9 +13,11 @@ #include <linux/crc16.h> #include <linux/debugfs.h> +#include <linux/delay.h> #include <linux/hid.h> #include <linux/hwmon.h> #include <linux/jiffies.h> +#include <linux/ktime.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/seq_file.h> @@ -63,6 +65,8 @@ static const char *const aqc_device_names[] = { #define CTRL_REPORT_ID 0x03 #define AQUAERO_CTRL_REPORT_ID 0x0b +#define CTRL_REPORT_DELAY 200 /* ms */ 
+ /* The HID report that the official software always sends * after writing values, currently same for all devices */ @@ -527,6 +531,9 @@ struct aqc_data { int secondary_ctrl_report_size; u8 *secondary_ctrl_report; + ktime_t last_ctrl_report_op; + int ctrl_report_delay; /* Delay between two ctrl report operations, in ms */ + int buffer_size; u8 *buffer; int checksum_start; @@ -611,17 +618,35 @@ static int aqc_aquastreamxt_convert_fan_rpm(u16 val) return 0; } +static void aqc_delay_ctrl_report(struct aqc_data *priv) +{ + /* + * If previous read or write is too close to this one, delay the current operation + * to give the device enough time to process the previous one. + */ + if (priv->ctrl_report_delay) { + s64 delta = ktime_ms_delta(ktime_get(), priv->last_ctrl_report_op); + + if (delta < priv->ctrl_report_delay) + msleep(priv->ctrl_report_delay - delta); + } +} + /* Expects the mutex to be locked */ static int aqc_get_ctrl_data(struct aqc_data *priv) { int ret; + aqc_delay_ctrl_report(priv); + memset(priv->buffer, 0x00, priv->buffer_size); ret = hid_hw_raw_request(priv->hdev, priv->ctrl_report_id, priv->buffer, priv->buffer_size, HID_FEATURE_REPORT, HID_REQ_GET_REPORT); if (ret < 0) ret = -ENODATA; + priv->last_ctrl_report_op = ktime_get(); + return ret; } @@ -631,6 +656,8 @@ static int aqc_send_ctrl_data(struct aqc_data *priv) int ret; u16 checksum; + aqc_delay_ctrl_report(priv); + /* Checksum is not needed for Aquaero */ if (priv->kind != aquaero) { /* Init and xorout value for CRC-16/USB is 0xffff */ @@ -646,12 +673,16 @@ static int aqc_send_ctrl_data(struct aqc_data *priv) ret = hid_hw_raw_request(priv->hdev, priv->ctrl_report_id, priv->buffer, priv->buffer_size, HID_FEATURE_REPORT, HID_REQ_SET_REPORT); if (ret < 0) - return ret; + goto record_access_and_ret; /* The official software sends this report after every change, so do it here as well */ ret = hid_hw_raw_request(priv->hdev, priv->secondary_ctrl_report_id, priv->secondary_ctrl_report, priv->secondary_ctrl_report_size, HID_FEATURE_REPORT, HID_REQ_SET_REPORT); + +record_access_and_ret: + priv->last_ctrl_report_op = ktime_get(); + return ret; } @@ -1524,6 +1555,7 @@ static int aqc_probe(struct hid_device *hdev, const struct hid_device_id *id) priv->buffer_size = AQUAERO_CTRL_REPORT_SIZE; priv->temp_ctrl_offset = AQUAERO_TEMP_CTRL_OFFSET; + priv->ctrl_report_delay = CTRL_REPORT_DELAY; priv->temp_label = label_temp_sensors; priv->virtual_temp_label = label_virtual_temp_sensors; @@ -1547,6 +1579,7 @@ static int aqc_probe(struct hid_device *hdev, const struct hid_device_id *id) priv->temp_ctrl_offset = D5NEXT_TEMP_CTRL_OFFSET; priv->buffer_size = D5NEXT_CTRL_REPORT_SIZE; + priv->ctrl_report_delay = CTRL_REPORT_DELAY; priv->power_cycle_count_offset = D5NEXT_POWER_CYCLES; @@ -1597,6 +1630,7 @@ static int aqc_probe(struct hid_device *hdev, const struct hid_device_id *id) priv->temp_ctrl_offset = OCTO_TEMP_CTRL_OFFSET; priv->buffer_size = OCTO_CTRL_REPORT_SIZE; + priv->ctrl_report_delay = CTRL_REPORT_DELAY; priv->power_cycle_count_offset = OCTO_POWER_CYCLES; @@ -1624,6 +1658,7 @@ static int aqc_probe(struct hid_device *hdev, const struct hid_device_id *id) priv->temp_ctrl_offset = QUADRO_TEMP_CTRL_OFFSET; priv->buffer_size = QUADRO_CTRL_REPORT_SIZE; + priv->ctrl_report_delay = CTRL_REPORT_DELAY; priv->flow_pulses_ctrl_offset = QUADRO_FLOW_PULSES_CTRL_OFFSET; priv->power_cycle_count_offset = QUADRO_POWER_CYCLES; diff --git a/drivers/hwmon/pmbus/bel-pfe.c b/drivers/hwmon/pmbus/bel-pfe.c index fa5070ae26bc..7c5f4b10a7c1 100644 --- 
a/drivers/hwmon/pmbus/bel-pfe.c +++ b/drivers/hwmon/pmbus/bel-pfe.c @@ -17,12 +17,13 @@ enum chips {pfe1100, pfe3000}; /* - * Disable status check for pfe3000 devices, because some devices report - * communication error (invalid command) for VOUT_MODE command (0x20) - * although correct VOUT_MODE (0x16) is returned: it leads to incorrect - * exponent in linear mode. + * Disable status check because some devices report communication error + * (invalid command) for VOUT_MODE command (0x20) although the correct + * VOUT_MODE (0x16) is returned: it leads to incorrect exponent in linear + * mode. + * This affects both pfe3000 and pfe1100. */ -static struct pmbus_platform_data pfe3000_plat_data = { +static struct pmbus_platform_data pfe_plat_data = { .flags = PMBUS_SKIP_STATUS_CHECK, }; @@ -94,16 +95,15 @@ static int pfe_pmbus_probe(struct i2c_client *client) int model; model = (int)i2c_match_id(pfe_device_id, client)->driver_data; + client->dev.platform_data = &pfe_plat_data; /* * PFE3000-12-069RA devices may not stay in page 0 during device * probe which leads to probe failure (read status word failed). * So let's set the device to page 0 at the beginning. */ - if (model == pfe3000) { - client->dev.platform_data = &pfe3000_plat_data; + if (model == pfe3000) i2c_smbus_write_byte_data(client, PMBUS_PAGE, 0); - } return pmbus_do_probe(client, &pfe_driver_info[model]); } diff --git a/drivers/iio/adc/ad7192.c b/drivers/iio/adc/ad7192.c index 8685e0b58a83..7bc3ebfe8081 100644 --- a/drivers/iio/adc/ad7192.c +++ b/drivers/iio/adc/ad7192.c @@ -62,7 +62,6 @@ #define AD7192_MODE_STA_MASK BIT(20) /* Status Register transmission Mask */ #define AD7192_MODE_CLKSRC(x) (((x) & 0x3) << 18) /* Clock Source Select */ #define AD7192_MODE_SINC3 BIT(15) /* SINC3 Filter Select */ -#define AD7192_MODE_ACX BIT(14) /* AC excitation enable(AD7195 only)*/ #define AD7192_MODE_ENPAR BIT(13) /* Parity Enable */ #define AD7192_MODE_CLKDIV BIT(12) /* Clock divide by 2 (AD7190/2 only)*/ #define AD7192_MODE_SCYCLE BIT(11) /* Single cycle conversion */ @@ -91,6 +90,7 @@ /* Configuration Register Bit Designations (AD7192_REG_CONF) */ #define AD7192_CONF_CHOP BIT(23) /* CHOP enable */ +#define AD7192_CONF_ACX BIT(22) /* AC excitation enable(AD7195 only) */ #define AD7192_CONF_REFSEL BIT(20) /* REFIN1/REFIN2 Reference Select */ #define AD7192_CONF_CHAN(x) ((x) << 8) /* Channel select */ #define AD7192_CONF_CHAN_MASK (0x7FF << 8) /* Channel select mask */ @@ -472,7 +472,7 @@ static ssize_t ad7192_show_ac_excitation(struct device *dev, struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct ad7192_state *st = iio_priv(indio_dev); - return sysfs_emit(buf, "%d\n", !!(st->mode & AD7192_MODE_ACX)); + return sysfs_emit(buf, "%d\n", !!(st->conf & AD7192_CONF_ACX)); } static ssize_t ad7192_show_bridge_switch(struct device *dev, @@ -513,13 +513,13 @@ static ssize_t ad7192_set(struct device *dev, ad_sd_write_reg(&st->sd, AD7192_REG_GPOCON, 1, st->gpocon); break; - case AD7192_REG_MODE: + case AD7192_REG_CONF: if (val) - st->mode |= AD7192_MODE_ACX; + st->conf |= AD7192_CONF_ACX; else - st->mode &= ~AD7192_MODE_ACX; + st->conf &= ~AD7192_CONF_ACX; - ad_sd_write_reg(&st->sd, AD7192_REG_MODE, 3, st->mode); + ad_sd_write_reg(&st->sd, AD7192_REG_CONF, 3, st->conf); break; default: ret = -EINVAL; @@ -579,12 +579,11 @@ static IIO_DEVICE_ATTR(bridge_switch_en, 0644, static IIO_DEVICE_ATTR(ac_excitation_en, 0644, ad7192_show_ac_excitation, ad7192_set, - AD7192_REG_MODE); + AD7192_REG_CONF); static struct attribute *ad7192_attributes[] = { 
&iio_dev_attr_filter_low_pass_3db_frequency_available.dev_attr.attr, &iio_dev_attr_bridge_switch_en.dev_attr.attr, - &iio_dev_attr_ac_excitation_en.dev_attr.attr, NULL }; @@ -595,6 +594,7 @@ static const struct attribute_group ad7192_attribute_group = { static struct attribute *ad7195_attributes[] = { &iio_dev_attr_filter_low_pass_3db_frequency_available.dev_attr.attr, &iio_dev_attr_bridge_switch_en.dev_attr.attr, + &iio_dev_attr_ac_excitation_en.dev_attr.attr, NULL }; diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c index 213526c1592f..aea83f369437 100644 --- a/drivers/iio/adc/ina2xx-adc.c +++ b/drivers/iio/adc/ina2xx-adc.c @@ -124,6 +124,7 @@ static const struct regmap_config ina2xx_regmap_config = { enum ina2xx_ids { ina219, ina226 }; struct ina2xx_config { + const char *name; u16 config_default; int calibration_value; int shunt_voltage_lsb; /* nV */ @@ -155,6 +156,7 @@ struct ina2xx_chip_info { static const struct ina2xx_config ina2xx_config[] = { [ina219] = { + .name = "ina219", .config_default = INA219_CONFIG_DEFAULT, .calibration_value = 4096, .shunt_voltage_lsb = 10000, @@ -164,6 +166,7 @@ static const struct ina2xx_config ina2xx_config[] = { .chip_id = ina219, }, [ina226] = { + .name = "ina226", .config_default = INA226_CONFIG_DEFAULT, .calibration_value = 2048, .shunt_voltage_lsb = 2500, @@ -996,7 +999,7 @@ static int ina2xx_probe(struct i2c_client *client) /* Patch the current config register with default. */ val = chip->config->config_default; - if (id->driver_data == ina226) { + if (type == ina226) { ina226_set_average(chip, INA226_DEFAULT_AVG, &val); ina226_set_int_time_vbus(chip, INA226_DEFAULT_IT, &val); ina226_set_int_time_vshunt(chip, INA226_DEFAULT_IT, &val); @@ -1015,7 +1018,7 @@ static int ina2xx_probe(struct i2c_client *client) } indio_dev->modes = INDIO_DIRECT_MODE; - if (id->driver_data == ina226) { + if (type == ina226) { indio_dev->channels = ina226_channels; indio_dev->num_channels = ARRAY_SIZE(ina226_channels); indio_dev->info = &ina226_info; @@ -1024,7 +1027,7 @@ static int ina2xx_probe(struct i2c_client *client) indio_dev->num_channels = ARRAY_SIZE(ina219_channels); indio_dev->info = &ina219_info; } - indio_dev->name = id->name; + indio_dev->name = id ? 
id->name : chip->config->name; ret = devm_iio_kfifo_buffer_setup(&client->dev, indio_dev, &ina2xx_setup_ops); diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c index af6bfcc19075..eb78a6f17fd0 100644 --- a/drivers/iio/adc/meson_saradc.c +++ b/drivers/iio/adc/meson_saradc.c @@ -916,12 +916,6 @@ static int meson_sar_adc_hw_enable(struct iio_dev *indio_dev) goto err_vref; } - ret = clk_prepare_enable(priv->core_clk); - if (ret) { - dev_err(dev, "failed to enable core clk\n"); - goto err_core_clk; - } - regval = FIELD_PREP(MESON_SAR_ADC_REG0_FIFO_CNT_IRQ_MASK, 1); regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG0, MESON_SAR_ADC_REG0_FIFO_CNT_IRQ_MASK, regval); @@ -948,8 +942,6 @@ err_adc_clk: regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3, MESON_SAR_ADC_REG3_ADC_EN, 0); meson_sar_adc_set_bandgap(indio_dev, false); - clk_disable_unprepare(priv->core_clk); -err_core_clk: regulator_disable(priv->vref); err_vref: meson_sar_adc_unlock(indio_dev); @@ -977,8 +969,6 @@ static void meson_sar_adc_hw_disable(struct iio_dev *indio_dev) meson_sar_adc_set_bandgap(indio_dev, false); - clk_disable_unprepare(priv->core_clk); - regulator_disable(priv->vref); if (!ret) @@ -1211,7 +1201,7 @@ static int meson_sar_adc_probe(struct platform_device *pdev) if (IS_ERR(priv->clkin)) return dev_err_probe(dev, PTR_ERR(priv->clkin), "failed to get clkin\n"); - priv->core_clk = devm_clk_get(dev, "core"); + priv->core_clk = devm_clk_get_enabled(dev, "core"); if (IS_ERR(priv->core_clk)) return dev_err_probe(dev, PTR_ERR(priv->core_clk), "failed to get core clk\n"); @@ -1294,15 +1284,26 @@ static int meson_sar_adc_remove(struct platform_device *pdev) static int meson_sar_adc_suspend(struct device *dev) { struct iio_dev *indio_dev = dev_get_drvdata(dev); + struct meson_sar_adc_priv *priv = iio_priv(indio_dev); meson_sar_adc_hw_disable(indio_dev); + clk_disable_unprepare(priv->core_clk); + return 0; } static int meson_sar_adc_resume(struct device *dev) { struct iio_dev *indio_dev = dev_get_drvdata(dev); + struct meson_sar_adc_priv *priv = iio_priv(indio_dev); + int ret; + + ret = clk_prepare_enable(priv->core_clk); + if (ret) { + dev_err(dev, "failed to enable core clk\n"); + return ret; + } return meson_sar_adc_hw_enable(indio_dev); } diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c index 943e9e14d1e9..b72d39fc2434 100644 --- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c +++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c @@ -253,7 +253,7 @@ int cros_ec_sensors_core_init(struct platform_device *pdev, platform_set_drvdata(pdev, indio_dev); state->ec = ec->ec_dev; - state->msg = devm_kzalloc(&pdev->dev, + state->msg = devm_kzalloc(&pdev->dev, sizeof(*state->msg) + max((u16)sizeof(struct ec_params_motion_sense), state->ec->max_response), GFP_KERNEL); if (!state->msg) diff --git a/drivers/iio/frequency/admv1013.c b/drivers/iio/frequency/admv1013.c index 9bf8337806fc..8c8e0bbfc99f 100644 --- a/drivers/iio/frequency/admv1013.c +++ b/drivers/iio/frequency/admv1013.c @@ -344,9 +344,12 @@ static int admv1013_update_quad_filters(struct admv1013_state *st) static int admv1013_update_mixer_vgate(struct admv1013_state *st) { - unsigned int vcm, mixer_vgate; + unsigned int mixer_vgate; + int vcm; vcm = regulator_get_voltage(st->reg); + if (vcm < 0) + return vcm; if (vcm < 1800000) mixer_vgate = (2389 * vcm / 1000000 + 8100) / 100; diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c 
b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c index 6a18b363cf73..b6e6b1df8a61 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c @@ -2687,7 +2687,7 @@ unknown_format: static int lsm6dsx_get_acpi_mount_matrix(struct device *dev, struct iio_mount_matrix *orientation) { - return false; + return -EOPNOTSUPP; } #endif diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index c117f50d0cf3..adcba832e6fa 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c @@ -1888,7 +1888,7 @@ static const struct iio_buffer_setup_ops noop_ring_setup_ops; int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod) { struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); - struct fwnode_handle *fwnode; + struct fwnode_handle *fwnode = NULL; int ret; if (!indio_dev->info) @@ -1899,7 +1899,8 @@ int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod) /* If the calling driver did not initialize firmware node, do it here */ if (dev_fwnode(&indio_dev->dev)) fwnode = dev_fwnode(&indio_dev->dev); - else + /* The default dummy IIO device has no parent */ + else if (indio_dev->dev.parent) fwnode = dev_fwnode(indio_dev->dev.parent); device_set_node(&indio_dev->dev, fwnode); diff --git a/drivers/iio/light/rohm-bu27008.c b/drivers/iio/light/rohm-bu27008.c index 489902bed7f0..b50bf8973d9a 100644 --- a/drivers/iio/light/rohm-bu27008.c +++ b/drivers/iio/light/rohm-bu27008.c @@ -190,7 +190,7 @@ static const struct iio_itime_sel_mul bu27008_itimes[] = { .address = BU27008_REG_##data##_LO, \ .scan_index = BU27008_##color, \ .scan_type = { \ - .sign = 's', \ + .sign = 'u', \ .realbits = 16, \ .storagebits = 16, \ .endianness = IIO_LE, \ @@ -633,7 +633,7 @@ static int bu27008_try_find_new_time_gain(struct bu27008_data *data, int val, for (i = 0; i < data->gts.num_itime; i++) { new_time_sel = data->gts.itime_table[i].sel; ret = iio_gts_find_gain_sel_for_scale_using_time(&data->gts, - new_time_sel, val, val2 * 1000, gain_sel); + new_time_sel, val, val2, gain_sel); if (!ret) break; } @@ -662,7 +662,7 @@ static int bu27008_set_scale(struct bu27008_data *data, goto unlock_out; ret = iio_gts_find_gain_sel_for_scale_using_time(&data->gts, time_sel, - val, val2 * 1000, &gain_sel); + val, val2, &gain_sel); if (ret) { ret = bu27008_try_find_new_time_gain(data, val, val2, &gain_sel); if (ret) @@ -677,6 +677,21 @@ unlock_out: return ret; } +static int bu27008_write_raw_get_fmt(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + long mask) +{ + + switch (mask) { + case IIO_CHAN_INFO_SCALE: + return IIO_VAL_INT_PLUS_NANO; + case IIO_CHAN_INFO_INT_TIME: + return IIO_VAL_INT_PLUS_MICRO; + default: + return -EINVAL; + } +} + static int bu27008_write_raw(struct iio_dev *idev, struct iio_chan_spec const *chan, int val, int val2, long mask) @@ -756,6 +771,7 @@ static int bu27008_update_scan_mode(struct iio_dev *idev, static const struct iio_info bu27008_info = { .read_raw = &bu27008_read_raw, .write_raw = &bu27008_write_raw, + .write_raw_get_fmt = &bu27008_write_raw_get_fmt, .read_avail = &bu27008_read_avail, .update_scan_mode = bu27008_update_scan_mode, .validate_trigger = iio_validate_own_trigger, diff --git a/drivers/iio/light/rohm-bu27034.c b/drivers/iio/light/rohm-bu27034.c index e63ef5789cde..bf3de853a811 100644 --- a/drivers/iio/light/rohm-bu27034.c +++ b/drivers/iio/light/rohm-bu27034.c @@ -575,7 +575,7 @@ static int bu27034_set_scale(struct bu27034_data *data, int 
chan, return -EINVAL; if (chan == BU27034_CHAN_ALS) { - if (val == 0 && val2 == 1000) + if (val == 0 && val2 == 1000000) return 0; return -EINVAL; @@ -587,7 +587,7 @@ static int bu27034_set_scale(struct bu27034_data *data, int chan, goto unlock_out; ret = iio_gts_find_gain_sel_for_scale_using_time(&data->gts, time_sel, - val, val2 * 1000, &gain_sel); + val, val2, &gain_sel); if (ret) { /* * Could not support scale with given time. Need to change time. @@ -624,7 +624,7 @@ static int bu27034_set_scale(struct bu27034_data *data, int chan, /* Can we provide requested scale with this time? */ ret = iio_gts_find_gain_sel_for_scale_using_time( - &data->gts, new_time_sel, val, val2 * 1000, + &data->gts, new_time_sel, val, val2, &gain_sel); if (ret) continue; @@ -1217,6 +1217,21 @@ static int bu27034_read_raw(struct iio_dev *idev, } } +static int bu27034_write_raw_get_fmt(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + long mask) +{ + + switch (mask) { + case IIO_CHAN_INFO_SCALE: + return IIO_VAL_INT_PLUS_NANO; + case IIO_CHAN_INFO_INT_TIME: + return IIO_VAL_INT_PLUS_MICRO; + default: + return -EINVAL; + } +} + static int bu27034_write_raw(struct iio_dev *idev, struct iio_chan_spec const *chan, int val, int val2, long mask) @@ -1267,6 +1282,7 @@ static int bu27034_read_avail(struct iio_dev *idev, static const struct iio_info bu27034_info = { .read_raw = &bu27034_read_raw, .write_raw = &bu27034_write_raw, + .write_raw_get_fmt = &bu27034_write_raw_get_fmt, .read_avail = &bu27034_read_avail, }; diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index 755a9c57db6f..f9ab671c8eda 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c @@ -85,6 +85,8 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem, dma_addr_t mask; int i; + umem->iova = va = virt; + if (umem->is_odp) { unsigned int page_size = BIT(to_ib_umem_odp(umem)->page_shift); @@ -100,7 +102,6 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem, */ pgsz_bitmap &= GENMASK(BITS_PER_LONG - 1, PAGE_SHIFT); - umem->iova = va = virt; /* The best result is the smallest page size that results in the minimum * number of required pages. Compute the largest page size that could * work based on VA address bits that don't change. 
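
The ib_umem_find_best_pgsz() hunk above moves the umem->iova assignment ahead of the ODP early return, so the iova is also set on the ODP path. The page-size search described in the quoted comment keeps only sizes whose low-order bits agree between the virtual address and the DMA address. The stand-alone C sketch below models just that single-address part of the check; best_pgsz(), the bitmap and the example addresses are made up for illustration and are not the kernel helper.

/* Sketch only: pick the largest page size whose low-order bits agree
 * between the virtual address and the IOVA, in the spirit of the
 * ib_umem_find_best_pgsz() comment quoted above.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t best_pgsz(uint64_t pgsz_bitmap, uint64_t va, uint64_t iova)
{
	/* Bits where VA and IOVA differ can never be page-offset bits, so
	 * the lowest differing bit caps the usable page size.
	 */
	uint64_t differ = va ^ iova;
	uint64_t cap = differ ? (differ & -differ) : 0;

	if (cap)
		pgsz_bitmap &= (cap << 1) - 1;	/* keep sizes <= cap */
	if (!pgsz_bitmap)
		return 0;
	/* largest remaining size (GCC/Clang builtin) */
	return 1ULL << (63 - __builtin_clzll(pgsz_bitmap));
}

int main(void)
{
	/* VA and IOVA agree in the low 24 bits, so the 2 MiB entry is
	 * usable while the hypothetical 1 GiB entry is not.
	 */
	uint64_t bitmap = (1ULL << 12) | (1ULL << 21) | (1ULL << 30);

	printf("best page size: 0x%llx\n",
	       (unsigned long long)best_pgsz(bitmap, 0x75201000ULL, 0x201000ULL));
	return 0;
}

Compiled with gcc, the example prints 0x200000 (2 MiB), the largest size in the bitmap that still fits below the first differing address bit.
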
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index b42166fe7454..63e98e2d3596 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -1253,6 +1253,8 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode) rc = bnxt_re_setup_chip_ctx(rdev, wqe_mode); if (rc) { + bnxt_unregister_dev(rdev->en_dev); + clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags); ibdev_err(&rdev->ibdev, "Failed to get chip context\n"); return -EINVAL; } @@ -1526,8 +1528,8 @@ static void bnxt_re_remove(struct auxiliary_device *adev) } bnxt_re_setup_cc(rdev, false); ib_unregister_device(&rdev->ibdev); - ib_dealloc_device(&rdev->ibdev); bnxt_re_dev_uninit(rdev); + ib_dealloc_device(&rdev->ibdev); skip_remove: mutex_unlock(&bnxt_re_mutex); } diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c index 5fd8f7c90bb0..739d942761d1 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c @@ -819,6 +819,7 @@ static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res, } memset((u8 *)dpit->tbl, 0xFF, bytes); + mutex_init(&res->dpi_tbl_lock); dpit->priv_db = dpit->ucreg.bar_reg + dpit->ucreg.offset; return 0; diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index 9dbb89e9f4af..baaa4406d5e6 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c @@ -12307,6 +12307,7 @@ static void free_cntrs(struct hfi1_devdata *dd) if (dd->synth_stats_timer.function) del_timer_sync(&dd->synth_stats_timer); + cancel_work_sync(&dd->update_cntr_work); ppd = (struct hfi1_pportdata *)(dd + 1); for (i = 0; i < dd->num_pports; i++, ppd++) { kfree(ppd->cntrs); diff --git a/drivers/interconnect/qcom/bcm-voter.c b/drivers/interconnect/qcom/bcm-voter.c index 8f385f9c2dd3..d5f2a6b5376b 100644 --- a/drivers/interconnect/qcom/bcm-voter.c +++ b/drivers/interconnect/qcom/bcm-voter.c @@ -83,6 +83,11 @@ static void bcm_aggregate(struct qcom_icc_bcm *bcm) temp = agg_peak[bucket] * bcm->vote_scale; bcm->vote_y[bucket] = bcm_div(temp, bcm->aux_data.unit); + + if (bcm->enable_mask && (bcm->vote_x[bucket] || bcm->vote_y[bucket])) { + bcm->vote_x[bucket] = 0; + bcm->vote_y[bucket] = bcm->enable_mask; + } } if (bcm->keepalive && bcm->vote_x[QCOM_ICC_BUCKET_AMC] == 0 && diff --git a/drivers/interconnect/qcom/icc-rpmh.h b/drivers/interconnect/qcom/icc-rpmh.h index 04391c1ba465..7843d8864d6b 100644 --- a/drivers/interconnect/qcom/icc-rpmh.h +++ b/drivers/interconnect/qcom/icc-rpmh.h @@ -81,6 +81,7 @@ struct qcom_icc_node { * @vote_x: aggregated threshold values, represents sum_bw when @type is bw bcm * @vote_y: aggregated threshold values, represents peak_bw when @type is bw bcm * @vote_scale: scaling factor for vote_x and vote_y + * @enable_mask: optional mask to send as vote instead of vote_x/vote_y * @dirty: flag used to indicate whether the bcm needs to be committed * @keepalive: flag used to indicate whether a keepalive is required * @aux_data: auxiliary data used when calculating threshold values and @@ -97,6 +98,7 @@ struct qcom_icc_bcm { u64 vote_x[QCOM_ICC_NUM_BUCKETS]; u64 vote_y[QCOM_ICC_NUM_BUCKETS]; u64 vote_scale; + u32 enable_mask; bool dirty; bool keepalive; struct bcm_db aux_data; diff --git a/drivers/interconnect/qcom/sa8775p.c b/drivers/interconnect/qcom/sa8775p.c index da21cc31a580..f56538669de0 100644 --- a/drivers/interconnect/qcom/sa8775p.c +++ b/drivers/interconnect/qcom/sa8775p.c @@ -1873,6 
+1873,7 @@ static struct qcom_icc_node srvc_snoc = { static struct qcom_icc_bcm bcm_acv = { .name = "ACV", + .enable_mask = 0x8, .num_nodes = 1, .nodes = { &ebi }, }; diff --git a/drivers/interconnect/qcom/sm8450.c b/drivers/interconnect/qcom/sm8450.c index 2d7a8e7b85ec..e64c214b4020 100644 --- a/drivers/interconnect/qcom/sm8450.c +++ b/drivers/interconnect/qcom/sm8450.c @@ -1337,6 +1337,7 @@ static struct qcom_icc_node qns_mem_noc_sf_disp = { static struct qcom_icc_bcm bcm_acv = { .name = "ACV", + .enable_mask = 0x8, .num_nodes = 1, .nodes = { &ebi }, }; @@ -1349,6 +1350,7 @@ static struct qcom_icc_bcm bcm_ce0 = { static struct qcom_icc_bcm bcm_cn0 = { .name = "CN0", + .enable_mask = 0x1, .keepalive = true, .num_nodes = 55, .nodes = { &qnm_gemnoc_cnoc, &qnm_gemnoc_pcie, @@ -1383,6 +1385,7 @@ static struct qcom_icc_bcm bcm_cn0 = { static struct qcom_icc_bcm bcm_co0 = { .name = "CO0", + .enable_mask = 0x1, .num_nodes = 2, .nodes = { &qxm_nsp, &qns_nsp_gemnoc }, }; @@ -1403,6 +1406,7 @@ static struct qcom_icc_bcm bcm_mm0 = { static struct qcom_icc_bcm bcm_mm1 = { .name = "MM1", + .enable_mask = 0x1, .num_nodes = 12, .nodes = { &qnm_camnoc_hf, &qnm_camnoc_icp, &qnm_camnoc_sf, &qnm_mdp, @@ -1445,6 +1449,7 @@ static struct qcom_icc_bcm bcm_sh0 = { static struct qcom_icc_bcm bcm_sh1 = { .name = "SH1", + .enable_mask = 0x1, .num_nodes = 7, .nodes = { &alm_gpu_tcu, &alm_sys_tcu, &qnm_nsp_gemnoc, &qnm_pcie, @@ -1461,6 +1466,7 @@ static struct qcom_icc_bcm bcm_sn0 = { static struct qcom_icc_bcm bcm_sn1 = { .name = "SN1", + .enable_mask = 0x1, .num_nodes = 4, .nodes = { &qhm_gic, &qxm_pimem, &xm_gic, &qns_gemnoc_gc }, @@ -1492,6 +1498,7 @@ static struct qcom_icc_bcm bcm_sn7 = { static struct qcom_icc_bcm bcm_acv_disp = { .name = "ACV", + .enable_mask = 0x1, .num_nodes = 1, .nodes = { &ebi_disp }, }; @@ -1510,6 +1517,7 @@ static struct qcom_icc_bcm bcm_mm0_disp = { static struct qcom_icc_bcm bcm_mm1_disp = { .name = "MM1", + .enable_mask = 0x1, .num_nodes = 3, .nodes = { &qnm_mdp_disp, &qnm_rot_disp, &qns_mem_noc_sf_disp }, @@ -1523,6 +1531,7 @@ static struct qcom_icc_bcm bcm_sh0_disp = { static struct qcom_icc_bcm bcm_sh1_disp = { .name = "SH1", + .enable_mask = 0x1, .num_nodes = 1, .nodes = { &qnm_pcie_disp }, }; diff --git a/drivers/interconnect/qcom/sm8550.c b/drivers/interconnect/qcom/sm8550.c index d823ba988ef6..0864ed285375 100644 --- a/drivers/interconnect/qcom/sm8550.c +++ b/drivers/interconnect/qcom/sm8550.c @@ -1473,6 +1473,7 @@ static struct qcom_icc_node qns_mem_noc_sf_cam_ife_2 = { static struct qcom_icc_bcm bcm_acv = { .name = "ACV", + .enable_mask = 0x8, .num_nodes = 1, .nodes = { &ebi }, }; @@ -1485,6 +1486,7 @@ static struct qcom_icc_bcm bcm_ce0 = { static struct qcom_icc_bcm bcm_cn0 = { .name = "CN0", + .enable_mask = 0x1, .keepalive = true, .num_nodes = 54, .nodes = { &qsm_cfg, &qhs_ahb2phy0, @@ -1524,6 +1526,7 @@ static struct qcom_icc_bcm bcm_cn1 = { static struct qcom_icc_bcm bcm_co0 = { .name = "CO0", + .enable_mask = 0x1, .num_nodes = 2, .nodes = { &qxm_nsp, &qns_nsp_gemnoc }, }; @@ -1549,6 +1552,7 @@ static struct qcom_icc_bcm bcm_mm0 = { static struct qcom_icc_bcm bcm_mm1 = { .name = "MM1", + .enable_mask = 0x1, .num_nodes = 8, .nodes = { &qnm_camnoc_hf, &qnm_camnoc_icp, &qnm_camnoc_sf, &qnm_vapss_hcp, @@ -1589,6 +1593,7 @@ static struct qcom_icc_bcm bcm_sh0 = { static struct qcom_icc_bcm bcm_sh1 = { .name = "SH1", + .enable_mask = 0x1, .num_nodes = 13, .nodes = { &alm_gpu_tcu, &alm_sys_tcu, &chm_apps, &qnm_gpu, @@ -1608,6 +1613,7 @@ static struct qcom_icc_bcm bcm_sn0 = { 
static struct qcom_icc_bcm bcm_sn1 = { .name = "SN1", + .enable_mask = 0x1, .num_nodes = 3, .nodes = { &qhm_gic, &xm_gic, &qns_gemnoc_gc }, @@ -1633,6 +1639,7 @@ static struct qcom_icc_bcm bcm_sn7 = { static struct qcom_icc_bcm bcm_acv_disp = { .name = "ACV", + .enable_mask = 0x1, .num_nodes = 1, .nodes = { &ebi_disp }, }; @@ -1657,12 +1664,14 @@ static struct qcom_icc_bcm bcm_sh0_disp = { static struct qcom_icc_bcm bcm_sh1_disp = { .name = "SH1", + .enable_mask = 0x1, .num_nodes = 2, .nodes = { &qnm_mnoc_hf_disp, &qnm_pcie_disp }, }; static struct qcom_icc_bcm bcm_acv_cam_ife_0 = { .name = "ACV", + .enable_mask = 0x0, .num_nodes = 1, .nodes = { &ebi_cam_ife_0 }, }; @@ -1681,6 +1690,7 @@ static struct qcom_icc_bcm bcm_mm0_cam_ife_0 = { static struct qcom_icc_bcm bcm_mm1_cam_ife_0 = { .name = "MM1", + .enable_mask = 0x1, .num_nodes = 4, .nodes = { &qnm_camnoc_hf_cam_ife_0, &qnm_camnoc_icp_cam_ife_0, &qnm_camnoc_sf_cam_ife_0, &qns_mem_noc_sf_cam_ife_0 }, @@ -1694,6 +1704,7 @@ static struct qcom_icc_bcm bcm_sh0_cam_ife_0 = { static struct qcom_icc_bcm bcm_sh1_cam_ife_0 = { .name = "SH1", + .enable_mask = 0x1, .num_nodes = 3, .nodes = { &qnm_mnoc_hf_cam_ife_0, &qnm_mnoc_sf_cam_ife_0, &qnm_pcie_cam_ife_0 }, @@ -1701,6 +1712,7 @@ static struct qcom_icc_bcm bcm_sh1_cam_ife_0 = { static struct qcom_icc_bcm bcm_acv_cam_ife_1 = { .name = "ACV", + .enable_mask = 0x0, .num_nodes = 1, .nodes = { &ebi_cam_ife_1 }, }; @@ -1719,6 +1731,7 @@ static struct qcom_icc_bcm bcm_mm0_cam_ife_1 = { static struct qcom_icc_bcm bcm_mm1_cam_ife_1 = { .name = "MM1", + .enable_mask = 0x1, .num_nodes = 4, .nodes = { &qnm_camnoc_hf_cam_ife_1, &qnm_camnoc_icp_cam_ife_1, &qnm_camnoc_sf_cam_ife_1, &qns_mem_noc_sf_cam_ife_1 }, @@ -1732,6 +1745,7 @@ static struct qcom_icc_bcm bcm_sh0_cam_ife_1 = { static struct qcom_icc_bcm bcm_sh1_cam_ife_1 = { .name = "SH1", + .enable_mask = 0x1, .num_nodes = 3, .nodes = { &qnm_mnoc_hf_cam_ife_1, &qnm_mnoc_sf_cam_ife_1, &qnm_pcie_cam_ife_1 }, @@ -1739,6 +1753,7 @@ static struct qcom_icc_bcm bcm_sh1_cam_ife_1 = { static struct qcom_icc_bcm bcm_acv_cam_ife_2 = { .name = "ACV", + .enable_mask = 0x0, .num_nodes = 1, .nodes = { &ebi_cam_ife_2 }, }; @@ -1757,6 +1772,7 @@ static struct qcom_icc_bcm bcm_mm0_cam_ife_2 = { static struct qcom_icc_bcm bcm_mm1_cam_ife_2 = { .name = "MM1", + .enable_mask = 0x1, .num_nodes = 4, .nodes = { &qnm_camnoc_hf_cam_ife_2, &qnm_camnoc_icp_cam_ife_2, &qnm_camnoc_sf_cam_ife_2, &qns_mem_noc_sf_cam_ife_2 }, @@ -1770,6 +1786,7 @@ static struct qcom_icc_bcm bcm_sh0_cam_ife_2 = { static struct qcom_icc_bcm bcm_sh1_cam_ife_2 = { .name = "SH1", + .enable_mask = 0x1, .num_nodes = 3, .nodes = { &qnm_mnoc_hf_cam_ife_2, &qnm_mnoc_sf_cam_ife_2, &qnm_pcie_cam_ife_2 }, diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c index c0331b268010..fe391de1aba3 100644 --- a/drivers/isdn/hardware/mISDN/hfcpci.c +++ b/drivers/isdn/hardware/mISDN/hfcpci.c @@ -839,7 +839,7 @@ hfcpci_fill_fifo(struct bchannel *bch) *z1t = cpu_to_le16(new_z1); /* now send data */ if (bch->tx_idx < bch->tx_skb->len) return; - dev_kfree_skb(bch->tx_skb); + dev_kfree_skb_any(bch->tx_skb); if (get_next_bframe(bch)) goto next_t_frame; return; @@ -895,7 +895,7 @@ hfcpci_fill_fifo(struct bchannel *bch) } bz->za[new_f1].z1 = cpu_to_le16(new_z1); /* for next buffer */ bz->f1 = new_f1; /* next frame */ - dev_kfree_skb(bch->tx_skb); + dev_kfree_skb_any(bch->tx_skb); get_next_bframe(bch); } @@ -1119,7 +1119,7 @@ tx_birq(struct bchannel *bch) if (bch->tx_skb && bch->tx_idx < 
bch->tx_skb->len) hfcpci_fill_fifo(bch); else { - dev_kfree_skb(bch->tx_skb); + dev_kfree_skb_any(bch->tx_skb); if (get_next_bframe(bch)) hfcpci_fill_fifo(bch); } @@ -2277,7 +2277,7 @@ _hfcpci_softirq(struct device *dev, void *unused) return 0; if (hc->hw.int_m2 & HFCPCI_IRQ_ENABLE) { - spin_lock(&hc->lock); + spin_lock_irq(&hc->lock); bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1); if (bch && bch->state == ISDN_P_B_RAW) { /* B1 rx&tx */ main_rec_hfcpci(bch); @@ -2288,7 +2288,7 @@ _hfcpci_softirq(struct device *dev, void *unused) main_rec_hfcpci(bch); tx_birq(bch); } - spin_unlock(&hc->lock); + spin_unlock_irq(&hc->lock); } return 0; } diff --git a/drivers/isdn/mISDN/dsp.h b/drivers/isdn/mISDN/dsp.h index fa09d511a8ed..baf31258f5c9 100644 --- a/drivers/isdn/mISDN/dsp.h +++ b/drivers/isdn/mISDN/dsp.h @@ -247,7 +247,7 @@ extern void dsp_cmx_hardware(struct dsp_conf *conf, struct dsp *dsp); extern int dsp_cmx_conf(struct dsp *dsp, u32 conf_id); extern void dsp_cmx_receive(struct dsp *dsp, struct sk_buff *skb); extern void dsp_cmx_hdlc(struct dsp *dsp, struct sk_buff *skb); -extern void dsp_cmx_send(void *arg); +extern void dsp_cmx_send(struct timer_list *arg); extern void dsp_cmx_transmit(struct dsp *dsp, struct sk_buff *skb); extern int dsp_cmx_del_conf_member(struct dsp *dsp); extern int dsp_cmx_del_conf(struct dsp_conf *conf); diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c index 357b87592eb4..61cb45c5d0d8 100644 --- a/drivers/isdn/mISDN/dsp_cmx.c +++ b/drivers/isdn/mISDN/dsp_cmx.c @@ -1614,7 +1614,7 @@ static u16 dsp_count; /* last sample count */ static int dsp_count_valid; /* if we have last sample count */ void -dsp_cmx_send(void *arg) +dsp_cmx_send(struct timer_list *arg) { struct dsp_conf *conf; struct dsp_conf_member *member; diff --git a/drivers/isdn/mISDN/dsp_core.c b/drivers/isdn/mISDN/dsp_core.c index 386084530c2f..fae95f166688 100644 --- a/drivers/isdn/mISDN/dsp_core.c +++ b/drivers/isdn/mISDN/dsp_core.c @@ -1195,7 +1195,7 @@ static int __init dsp_init(void) } /* set sample timer */ - timer_setup(&dsp_spl_tl, (void *)dsp_cmx_send, 0); + timer_setup(&dsp_spl_tl, dsp_cmx_send, 0); dsp_spl_tl.expires = jiffies + dsp_tics; dsp_spl_jiffies = dsp_spl_tl.expires; add_timer(&dsp_spl_tl); diff --git a/drivers/media/platform/qcom/venus/hfi_cmds.c b/drivers/media/platform/qcom/venus/hfi_cmds.c index 7f0802a5518c..3418d2dd9371 100644 --- a/drivers/media/platform/qcom/venus/hfi_cmds.c +++ b/drivers/media/platform/qcom/venus/hfi_cmds.c @@ -251,8 +251,8 @@ int pkt_session_unset_buffers(struct hfi_session_release_buffer_pkt *pkt, pkt->extradata_size = 0; pkt->shdr.hdr.size = - struct_size((struct hfi_session_set_buffers_pkt *)0, - buffer_info, bd->num_buffers); + struct_size_t(struct hfi_session_set_buffers_pkt, + buffer_info, bd->num_buffers); } pkt->response_req = bd->response_required; diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c index 4a750da1c12a..deb6e65b59af 100644 --- a/drivers/memory/tegra/mc.c +++ b/drivers/memory/tegra/mc.c @@ -755,6 +755,43 @@ const char *const tegra_mc_error_names[8] = { [6] = "SMMU translation error", }; +struct icc_node *tegra_mc_icc_xlate(struct of_phandle_args *spec, void *data) +{ + struct tegra_mc *mc = icc_provider_to_tegra_mc(data); + struct icc_node *node; + + list_for_each_entry(node, &mc->provider.nodes, node_list) { + if (node->id == spec->args[0]) + return node; + } + + /* + * If a client driver calls devm_of_icc_get() before the MC driver + * is probed, then return EPROBE_DEFER to the client driver. 
+ */ + return ERR_PTR(-EPROBE_DEFER); +} + +static int tegra_mc_icc_get(struct icc_node *node, u32 *average, u32 *peak) +{ + *average = 0; + *peak = 0; + + return 0; +} + +static int tegra_mc_icc_set(struct icc_node *src, struct icc_node *dst) +{ + return 0; +} + +const struct tegra_mc_icc_ops tegra_mc_icc_ops = { + .xlate = tegra_mc_icc_xlate, + .aggregate = icc_std_aggregate, + .get_bw = tegra_mc_icc_get, + .set = tegra_mc_icc_set, +}; + /* * Memory Controller (MC) has few Memory Clients that are issuing memory * bandwidth allocation requests to the MC interconnect provider. The MC diff --git a/drivers/memory/tegra/tegra194.c b/drivers/memory/tegra/tegra194.c index b2416ee3ac26..26035ac3a1eb 100644 --- a/drivers/memory/tegra/tegra194.c +++ b/drivers/memory/tegra/tegra194.c @@ -1355,6 +1355,7 @@ const struct tegra_mc_soc tegra194_mc_soc = { MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM, .has_addr_hi_reg = true, .ops = &tegra186_mc_ops, + .icc_ops = &tegra_mc_icc_ops, .ch_intmask = 0x00000f00, .global_intstatus_channel_shift = 8, }; diff --git a/drivers/memory/tegra/tegra234.c b/drivers/memory/tegra/tegra234.c index 8e873a7bc34f..8fb83b39f5f5 100644 --- a/drivers/memory/tegra/tegra234.c +++ b/drivers/memory/tegra/tegra234.c @@ -827,7 +827,7 @@ static int tegra234_mc_icc_set(struct icc_node *src, struct icc_node *dst) return 0; if (!mc->bwmgr_mrq_supported) - return -EINVAL; + return 0; if (!mc->bpmp) { dev_err(mc->dev, "BPMP reference NULL\n"); @@ -874,7 +874,7 @@ static int tegra234_mc_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw, struct tegra_mc *mc = icc_provider_to_tegra_mc(p); if (!mc->bwmgr_mrq_supported) - return -EINVAL; + return 0; if (node->id == TEGRA_ICC_MC_CPU_CLUSTER0 || node->id == TEGRA_ICC_MC_CPU_CLUSTER1 || @@ -889,27 +889,6 @@ static int tegra234_mc_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw, return 0; } -static struct icc_node* -tegra234_mc_of_icc_xlate(struct of_phandle_args *spec, void *data) -{ - struct tegra_mc *mc = icc_provider_to_tegra_mc(data); - unsigned int cl_id = spec->args[0]; - struct icc_node *node; - - list_for_each_entry(node, &mc->provider.nodes, node_list) { - if (node->id != cl_id) - continue; - - return node; - } - - /* - * If a client driver calls devm_of_icc_get() before the MC driver - * is probed, then return EPROBE_DEFER to the client driver. 
- */ - return ERR_PTR(-EPROBE_DEFER); -} - static int tegra234_mc_icc_get_init_bw(struct icc_node *node, u32 *avg, u32 *peak) { *avg = 0; @@ -919,7 +898,7 @@ static int tegra234_mc_icc_get_init_bw(struct icc_node *node, u32 *avg, u32 *pea } static const struct tegra_mc_icc_ops tegra234_mc_icc_ops = { - .xlate = tegra234_mc_of_icc_xlate, + .xlate = tegra_mc_icc_xlate, .aggregate = tegra234_mc_icc_aggregate, .get_bw = tegra234_mc_icc_get_init_bw, .set = tegra234_mc_icc_set, diff --git a/drivers/misc/cardreader/rts5227.c b/drivers/misc/cardreader/rts5227.c index d676cf63a966..3dae5e3a1697 100644 --- a/drivers/misc/cardreader/rts5227.c +++ b/drivers/misc/cardreader/rts5227.c @@ -195,7 +195,7 @@ static int rts5227_extra_init_hw(struct rtsx_pcr *pcr) } } - if (option->force_clkreq_0) + if (option->force_clkreq_0 && pcr->aspm_mode == ASPM_MODE_CFG) rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW); else diff --git a/drivers/misc/cardreader/rts5228.c b/drivers/misc/cardreader/rts5228.c index cfebad51d1d8..f4ab09439da7 100644 --- a/drivers/misc/cardreader/rts5228.c +++ b/drivers/misc/cardreader/rts5228.c @@ -435,17 +435,10 @@ static void rts5228_init_from_cfg(struct rtsx_pcr *pcr) option->ltr_enabled = false; } } - - if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN - | PM_L1_1_EN | PM_L1_2_EN)) - option->force_clkreq_0 = false; - else - option->force_clkreq_0 = true; } static int rts5228_extra_init_hw(struct rtsx_pcr *pcr) { - struct rtsx_cr_option *option = &pcr->option; rtsx_pci_write_register(pcr, RTS5228_AUTOLOAD_CFG1, CD_RESUME_EN_MASK, CD_RESUME_EN_MASK); @@ -476,17 +469,6 @@ static int rts5228_extra_init_hw(struct rtsx_pcr *pcr) else rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x00); - /* - * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced - * to drive low, and we forcibly request clock. - */ - if (option->force_clkreq_0) - rtsx_pci_write_register(pcr, PETXCFG, - FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW); - else - rtsx_pci_write_register(pcr, PETXCFG, - FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH); - rtsx_pci_write_register(pcr, PWD_SUSPEND_EN, 0xFF, 0xFB); if (pcr->rtd3_en) { diff --git a/drivers/misc/cardreader/rts5249.c b/drivers/misc/cardreader/rts5249.c index 91d240dd68fa..47ab72a43256 100644 --- a/drivers/misc/cardreader/rts5249.c +++ b/drivers/misc/cardreader/rts5249.c @@ -327,12 +327,11 @@ static int rts5249_extra_init_hw(struct rtsx_pcr *pcr) } } - /* * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced * to drive low, and we forcibly request clock. 
*/ - if (option->force_clkreq_0) + if (option->force_clkreq_0 && pcr->aspm_mode == ASPM_MODE_CFG) rtsx_pci_write_register(pcr, PETXCFG, FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW); else diff --git a/drivers/misc/cardreader/rts5260.c b/drivers/misc/cardreader/rts5260.c index 9b42b20a3e5a..79b18f6f73a8 100644 --- a/drivers/misc/cardreader/rts5260.c +++ b/drivers/misc/cardreader/rts5260.c @@ -517,17 +517,10 @@ static void rts5260_init_from_cfg(struct rtsx_pcr *pcr) option->ltr_enabled = false; } } - - if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN - | PM_L1_1_EN | PM_L1_2_EN)) - option->force_clkreq_0 = false; - else - option->force_clkreq_0 = true; } static int rts5260_extra_init_hw(struct rtsx_pcr *pcr) { - struct rtsx_cr_option *option = &pcr->option; /* Set mcu_cnt to 7 to ensure data can be sampled properly */ rtsx_pci_write_register(pcr, 0xFC03, 0x7F, 0x07); @@ -546,17 +539,6 @@ static int rts5260_extra_init_hw(struct rtsx_pcr *pcr) rts5260_init_hw(pcr); - /* - * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced - * to drive low, and we forcibly request clock. - */ - if (option->force_clkreq_0) - rtsx_pci_write_register(pcr, PETXCFG, - FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW); - else - rtsx_pci_write_register(pcr, PETXCFG, - FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH); - rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x10, 0x00); return 0; diff --git a/drivers/misc/cardreader/rts5261.c b/drivers/misc/cardreader/rts5261.c index b1e76030cafd..94af6bf8a25a 100644 --- a/drivers/misc/cardreader/rts5261.c +++ b/drivers/misc/cardreader/rts5261.c @@ -498,17 +498,10 @@ static void rts5261_init_from_cfg(struct rtsx_pcr *pcr) option->ltr_enabled = false; } } - - if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN - | PM_L1_1_EN | PM_L1_2_EN)) - option->force_clkreq_0 = false; - else - option->force_clkreq_0 = true; } static int rts5261_extra_init_hw(struct rtsx_pcr *pcr) { - struct rtsx_cr_option *option = &pcr->option; u32 val; rtsx_pci_write_register(pcr, RTS5261_AUTOLOAD_CFG1, @@ -554,17 +547,6 @@ static int rts5261_extra_init_hw(struct rtsx_pcr *pcr) else rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x00); - /* - * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced - * to drive low, and we forcibly request clock. - */ - if (option->force_clkreq_0) - rtsx_pci_write_register(pcr, PETXCFG, - FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW); - else - rtsx_pci_write_register(pcr, PETXCFG, - FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH); - rtsx_pci_write_register(pcr, PWD_SUSPEND_EN, 0xFF, 0xFB); if (pcr->rtd3_en) { diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c index 32b7783e9d4f..a3f4b52bb159 100644 --- a/drivers/misc/cardreader/rtsx_pcr.c +++ b/drivers/misc/cardreader/rtsx_pcr.c @@ -1326,8 +1326,11 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr) return err; } - if (pcr->aspm_mode == ASPM_MODE_REG) + if (pcr->aspm_mode == ASPM_MODE_REG) { rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0x30, 0x30); + rtsx_pci_write_register(pcr, PETXCFG, + FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH); + } /* No CD interrupt if probing driver with card inserted. * So we need to initialize pcr->card_exist here. 
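
The card-reader hunks above drop the per-chip CLKREQ# forcing from rts5228/rts5260/rts5261, gate it on ASPM_MODE_CFG for rts5227/rts5249, and let rtsx_pci_init_hw() drive CLKREQ# high whenever the ASPM mode is register-controlled. Each of those writes goes through rtsx_pci_write_register(pcr, reg, mask, val), a masked read-modify-write. The stand-alone C sketch below shows only that update pattern; the PETXCFG offset and the bit values are placeholders, not taken from the driver.

/* Sketch only: the mask/value update pattern behind
 * rtsx_pci_write_register(pcr, PETXCFG, mask, val) used in the hunks above.
 */
#include <stdint.h>
#include <stdio.h>

#define PETXCFG				0x80	/* placeholder offset */
#define FORCE_CLKREQ_DELINK_MASK	0x80	/* assumed: bit 7 selects CLKREQ# policy */
#define FORCE_CLKREQ_LOW		0x80
#define FORCE_CLKREQ_HIGH		0x00

static uint8_t regs[0x100];			/* pretend register file */

static void write_register(uint16_t addr, uint8_t mask, uint8_t val)
{
	/* Read-modify-write: only the bits selected by 'mask' change. */
	regs[addr] = (regs[addr] & ~mask) | (val & mask);
}

int main(void)
{
	int aspm_mode_reg = 1;		/* pretend pcr->aspm_mode == ASPM_MODE_REG */

	regs[PETXCFG] = 0xb5;		/* unrelated bits that must be preserved */

	if (aspm_mode_reg)		/* mirrors the rtsx_pci_init_hw() hunk */
		write_register(PETXCFG, FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);

	printf("PETXCFG = 0x%02x\n", regs[PETXCFG]);
	return 0;
}

Built with gcc, it prints PETXCFG = 0x35: bit 7 is cleared while the unrelated bits survive the masked write.
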
diff --git a/drivers/misc/tps6594-esm.c b/drivers/misc/tps6594-esm.c index b488f704f104..05e2c151e632 100644 --- a/drivers/misc/tps6594-esm.c +++ b/drivers/misc/tps6594-esm.c @@ -13,6 +13,8 @@ #include <linux/mfd/tps6594.h> +#define TPS6594_DEV_REV_1 0x08 + static irqreturn_t tps6594_esm_isr(int irq, void *dev_id) { struct platform_device *pdev = dev_id; @@ -32,11 +34,26 @@ static int tps6594_esm_probe(struct platform_device *pdev) { struct tps6594 *tps = dev_get_drvdata(pdev->dev.parent); struct device *dev = &pdev->dev; + unsigned int rev; int irq; int ret; int i; - for (i = 0 ; i < pdev->num_resources ; i++) { + /* + * Due to a bug in revision 1 of the PMIC, the GPIO3 used for the + * SoC ESM function is used to power the load switch instead. + * As a consequence, ESM can not be used on those PMIC. + * Check the version and return an error in case of revision 1. + */ + ret = regmap_read(tps->regmap, TPS6594_REG_DEV_REV, &rev); + if (ret) + return dev_err_probe(dev, ret, + "Failed to read PMIC revision\n"); + if (rev == TPS6594_DEV_REV_1) + return dev_err_probe(dev, -ENODEV, + "ESM not supported for revision 1 PMIC\n"); + + for (i = 0; i < pdev->num_resources; i++) { irq = platform_get_irq_byname(pdev, pdev->resource[i].name); if (irq < 0) return dev_err_probe(dev, irq, "Failed to get %s irq\n", diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c index 2d002c81dcf3..d0d6ffcf78d4 100644 --- a/drivers/mmc/host/moxart-mmc.c +++ b/drivers/mmc/host/moxart-mmc.c @@ -338,13 +338,7 @@ static void moxart_transfer_pio(struct moxart_host *host) return; } for (len = 0; len < remain && len < host->fifo_width;) { - /* SCR data must be read in big endian. */ - if (data->mrq->cmd->opcode == SD_APP_SEND_SCR) - *sgp = ioread32be(host->base + - REG_DATA_WINDOW); - else - *sgp = ioread32(host->base + - REG_DATA_WINDOW); + *sgp = ioread32(host->base + REG_DATA_WINDOW); sgp++; len += 4; } diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c index a202a69a4b08..b01ffb4d0973 100644 --- a/drivers/mmc/host/sdhci_f_sdh30.c +++ b/drivers/mmc/host/sdhci_f_sdh30.c @@ -29,9 +29,16 @@ struct f_sdhost_priv { bool enable_cmd_dat_delay; }; +static void *sdhci_f_sdhost_priv(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + + return sdhci_pltfm_priv(pltfm_host); +} + static void sdhci_f_sdh30_soft_voltage_switch(struct sdhci_host *host) { - struct f_sdhost_priv *priv = sdhci_priv(host); + struct f_sdhost_priv *priv = sdhci_f_sdhost_priv(host); u32 ctrl = 0; usleep_range(2500, 3000); @@ -64,7 +71,7 @@ static unsigned int sdhci_f_sdh30_get_min_clock(struct sdhci_host *host) static void sdhci_f_sdh30_reset(struct sdhci_host *host, u8 mask) { - struct f_sdhost_priv *priv = sdhci_priv(host); + struct f_sdhost_priv *priv = sdhci_f_sdhost_priv(host); u32 ctl; if (sdhci_readw(host, SDHCI_CLOCK_CONTROL) == 0) @@ -95,30 +102,32 @@ static const struct sdhci_ops sdhci_f_sdh30_ops = { .set_uhs_signaling = sdhci_set_uhs_signaling, }; +static const struct sdhci_pltfm_data sdhci_f_sdh30_pltfm_data = { + .ops = &sdhci_f_sdh30_ops, + .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC + | SDHCI_QUIRK_INVERTED_WRITE_PROTECT, + .quirks2 = SDHCI_QUIRK2_SUPPORT_SINGLE + | SDHCI_QUIRK2_TUNING_WORK_AROUND, +}; + static int sdhci_f_sdh30_probe(struct platform_device *pdev) { struct sdhci_host *host; struct device *dev = &pdev->dev; - int irq, ctrl = 0, ret = 0; + int ctrl = 0, ret = 0; struct f_sdhost_priv *priv; + struct sdhci_pltfm_host *pltfm_host; u32 reg = 0; - 
irq = platform_get_irq(pdev, 0); - if (irq < 0) - return irq; - - host = sdhci_alloc_host(dev, sizeof(struct f_sdhost_priv)); + host = sdhci_pltfm_init(pdev, &sdhci_f_sdh30_pltfm_data, + sizeof(struct f_sdhost_priv)); if (IS_ERR(host)) return PTR_ERR(host); - priv = sdhci_priv(host); + pltfm_host = sdhci_priv(host); + priv = sdhci_pltfm_priv(pltfm_host); priv->dev = dev; - host->quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC | - SDHCI_QUIRK_INVERTED_WRITE_PROTECT; - host->quirks2 = SDHCI_QUIRK2_SUPPORT_SINGLE | - SDHCI_QUIRK2_TUNING_WORK_AROUND; - priv->enable_cmd_dat_delay = device_property_read_bool(dev, "fujitsu,cmd-dat-delay-select"); @@ -126,18 +135,6 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev) if (ret) goto err; - platform_set_drvdata(pdev, host); - - host->hw_name = "f_sdh30"; - host->ops = &sdhci_f_sdh30_ops; - host->irq = irq; - - host->ioaddr = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(host->ioaddr)) { - ret = PTR_ERR(host->ioaddr); - goto err; - } - if (dev_of_node(dev)) { sdhci_get_of_property(pdev); @@ -204,24 +201,21 @@ err_rst: err_clk: clk_disable_unprepare(priv->clk_iface); err: - sdhci_free_host(host); + sdhci_pltfm_free(pdev); + return ret; } static int sdhci_f_sdh30_remove(struct platform_device *pdev) { struct sdhci_host *host = platform_get_drvdata(pdev); - struct f_sdhost_priv *priv = sdhci_priv(host); - - sdhci_remove_host(host, readl(host->ioaddr + SDHCI_INT_STATUS) == - 0xffffffff); + struct f_sdhost_priv *priv = sdhci_f_sdhost_priv(host); reset_control_assert(priv->rst); clk_disable_unprepare(priv->clk); clk_disable_unprepare(priv->clk_iface); - sdhci_free_host(host); - platform_set_drvdata(pdev, NULL); + sdhci_pltfm_unregister(pdev); return 0; } diff --git a/drivers/mtd/nand/raw/fsl_upm.c b/drivers/mtd/nand/raw/fsl_upm.c index 086426139173..7366e85c09fd 100644 --- a/drivers/mtd/nand/raw/fsl_upm.c +++ b/drivers/mtd/nand/raw/fsl_upm.c @@ -135,7 +135,7 @@ static int fun_exec_op(struct nand_chip *chip, const struct nand_operation *op, unsigned int i; int ret; - if (op->cs > NAND_MAX_CHIPS) + if (op->cs >= NAND_MAX_CHIPS) return -EINVAL; if (check_only) diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c index d3faf8086631..b10011dec1e6 100644 --- a/drivers/mtd/nand/raw/meson_nand.c +++ b/drivers/mtd/nand/raw/meson_nand.c @@ -1278,7 +1278,6 @@ static int meson_nand_attach_chip(struct nand_chip *nand) struct meson_nfc *nfc = nand_get_controller_data(nand); struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand); struct mtd_info *mtd = nand_to_mtd(nand); - int nsectors = mtd->writesize / 1024; int raw_writesize; int ret; @@ -1304,7 +1303,7 @@ static int meson_nand_attach_chip(struct nand_chip *nand) nand->options |= NAND_NO_SUBPAGE_WRITE; ret = nand_ecc_choose_conf(nand, nfc->data->ecc_caps, - mtd->oobsize - 2 * nsectors); + mtd->oobsize - 2); if (ret) { dev_err(nfc->dev, "failed to ECC init\n"); return -EINVAL; diff --git a/drivers/mtd/nand/raw/omap_elm.c b/drivers/mtd/nand/raw/omap_elm.c index 6e1eac6644a6..4a97d4a76454 100644 --- a/drivers/mtd/nand/raw/omap_elm.c +++ b/drivers/mtd/nand/raw/omap_elm.c @@ -177,17 +177,17 @@ static void elm_load_syndrome(struct elm_info *info, switch (info->bch_type) { case BCH8_ECC: /* syndrome fragment 0 = ecc[9-12B] */ - val = cpu_to_be32(*(u32 *) &ecc[9]); + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[9]); elm_write_reg(info, offset, val); /* syndrome fragment 1 = ecc[5-8B] */ offset += 4; - val = cpu_to_be32(*(u32 *) &ecc[5]); + val = (__force u32)cpu_to_be32(*(u32 
*)&ecc[5]); elm_write_reg(info, offset, val); /* syndrome fragment 2 = ecc[1-4B] */ offset += 4; - val = cpu_to_be32(*(u32 *) &ecc[1]); + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[1]); elm_write_reg(info, offset, val); /* syndrome fragment 3 = ecc[0B] */ @@ -197,35 +197,35 @@ static void elm_load_syndrome(struct elm_info *info, break; case BCH4_ECC: /* syndrome fragment 0 = ecc[20-52b] bits */ - val = (cpu_to_be32(*(u32 *) &ecc[3]) >> 4) | + val = ((__force u32)cpu_to_be32(*(u32 *)&ecc[3]) >> 4) | ((ecc[2] & 0xf) << 28); elm_write_reg(info, offset, val); /* syndrome fragment 1 = ecc[0-20b] bits */ offset += 4; - val = cpu_to_be32(*(u32 *) &ecc[0]) >> 12; + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[0]) >> 12; elm_write_reg(info, offset, val); break; case BCH16_ECC: - val = cpu_to_be32(*(u32 *) &ecc[22]); + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[22]); elm_write_reg(info, offset, val); offset += 4; - val = cpu_to_be32(*(u32 *) &ecc[18]); + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[18]); elm_write_reg(info, offset, val); offset += 4; - val = cpu_to_be32(*(u32 *) &ecc[14]); + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[14]); elm_write_reg(info, offset, val); offset += 4; - val = cpu_to_be32(*(u32 *) &ecc[10]); + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[10]); elm_write_reg(info, offset, val); offset += 4; - val = cpu_to_be32(*(u32 *) &ecc[6]); + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[6]); elm_write_reg(info, offset, val); offset += 4; - val = cpu_to_be32(*(u32 *) &ecc[2]); + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[2]); elm_write_reg(info, offset, val); offset += 4; - val = cpu_to_be32(*(u32 *) &ecc[0]) >> 16; + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[0]) >> 16; elm_write_reg(info, offset, val); break; default: diff --git a/drivers/mtd/nand/raw/rockchip-nand-controller.c b/drivers/mtd/nand/raw/rockchip-nand-controller.c index 2312e27362cb..5a04680342c3 100644 --- a/drivers/mtd/nand/raw/rockchip-nand-controller.c +++ b/drivers/mtd/nand/raw/rockchip-nand-controller.c @@ -562,9 +562,10 @@ static int rk_nfc_write_page_raw(struct nand_chip *chip, const u8 *buf, * BBM OOB1 OOB2 OOB3 |......| PA0 PA1 PA2 PA3 * * The rk_nfc_ooblayout_free() function already has reserved - * these 4 bytes with: + * these 4 bytes together with 2 bytes for BBM + * by reducing it's length: * - * oob_region->offset = NFC_SYS_DATA_SIZE + 2; + * oob_region->length = rknand->metadata_size - NFC_SYS_DATA_SIZE - 2; */ if (!i) memcpy(rk_nfc_oob_ptr(chip, i), @@ -597,7 +598,7 @@ static int rk_nfc_write_page_hwecc(struct nand_chip *chip, const u8 *buf, int pages_per_blk = mtd->erasesize / mtd->writesize; int ret = 0, i, boot_rom_mode = 0; dma_addr_t dma_data, dma_oob; - u32 reg; + u32 tmp; u8 *oob; nand_prog_page_begin_op(chip, page, 0, NULL, 0); @@ -624,6 +625,13 @@ static int rk_nfc_write_page_hwecc(struct nand_chip *chip, const u8 *buf, * * 0xFF 0xFF 0xFF 0xFF | BBM OOB1 OOB2 OOB3 | ... * + * The code here just swaps the first 4 bytes with the last + * 4 bytes without losing any data. + * + * The chip->oob_poi data layout: + * + * BBM OOB1 OOB2 OOB3 |......| PA0 PA1 PA2 PA3 + * * Configure the ECC algorithm supported by the boot ROM. 
*/ if ((page < (pages_per_blk * rknand->boot_blks)) && @@ -634,21 +642,17 @@ static int rk_nfc_write_page_hwecc(struct nand_chip *chip, const u8 *buf, } for (i = 0; i < ecc->steps; i++) { - if (!i) { - reg = 0xFFFFFFFF; - } else { + if (!i) + oob = chip->oob_poi + (ecc->steps - 1) * NFC_SYS_DATA_SIZE; + else oob = chip->oob_poi + (i - 1) * NFC_SYS_DATA_SIZE; - reg = oob[0] | oob[1] << 8 | oob[2] << 16 | - oob[3] << 24; - } - if (!i && boot_rom_mode) - reg = (page & (pages_per_blk - 1)) * 4; + tmp = oob[0] | oob[1] << 8 | oob[2] << 16 | oob[3] << 24; if (nfc->cfg->type == NFC_V9) - nfc->oob_buf[i] = reg; + nfc->oob_buf[i] = tmp; else - nfc->oob_buf[i * (oob_step / 4)] = reg; + nfc->oob_buf[i * (oob_step / 4)] = tmp; } dma_data = dma_map_single(nfc->dev, (void *)nfc->page_buf, @@ -811,12 +815,17 @@ static int rk_nfc_read_page_hwecc(struct nand_chip *chip, u8 *buf, int oob_on, goto timeout_err; } - for (i = 1; i < ecc->steps; i++) { - oob = chip->oob_poi + (i - 1) * NFC_SYS_DATA_SIZE; + for (i = 0; i < ecc->steps; i++) { + if (!i) + oob = chip->oob_poi + (ecc->steps - 1) * NFC_SYS_DATA_SIZE; + else + oob = chip->oob_poi + (i - 1) * NFC_SYS_DATA_SIZE; + if (nfc->cfg->type == NFC_V9) tmp = nfc->oob_buf[i]; else tmp = nfc->oob_buf[i * (oob_step / 4)]; + *oob++ = (u8)tmp; *oob++ = (u8)(tmp >> 8); *oob++ = (u8)(tmp >> 16); @@ -933,12 +942,8 @@ static int rk_nfc_ooblayout_free(struct mtd_info *mtd, int section, if (section) return -ERANGE; - /* - * The beginning of the OOB area stores the reserved data for the NFC, - * the size of the reserved data is NFC_SYS_DATA_SIZE bytes. - */ oob_region->length = rknand->metadata_size - NFC_SYS_DATA_SIZE - 2; - oob_region->offset = NFC_SYS_DATA_SIZE + 2; + oob_region->offset = 2; return 0; } diff --git a/drivers/mtd/nand/spi/toshiba.c b/drivers/mtd/nand/spi/toshiba.c index 7380b1ebaccd..a80427c13121 100644 --- a/drivers/mtd/nand/spi/toshiba.c +++ b/drivers/mtd/nand/spi/toshiba.c @@ -73,7 +73,7 @@ static int tx58cxgxsxraix_ecc_get_status(struct spinand_device *spinand, { struct nand_device *nand = spinand_to_nand(spinand); u8 mbf = 0; - struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, &mbf); + struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, spinand->scratchbuf); switch (status & STATUS_ECC_MASK) { case STATUS_ECC_NO_BITFLIPS: @@ -92,7 +92,7 @@ static int tx58cxgxsxraix_ecc_get_status(struct spinand_device *spinand, if (spi_mem_exec_op(spinand->spimem, &op)) return nanddev_get_ecc_conf(nand)->strength; - mbf >>= 4; + mbf = *(spinand->scratchbuf) >> 4; if (WARN_ON(mbf > nanddev_get_ecc_conf(nand)->strength || !mbf)) return nanddev_get_ecc_conf(nand)->strength; diff --git a/drivers/mtd/nand/spi/winbond.c b/drivers/mtd/nand/spi/winbond.c index 3ad58cd284d8..f507e3759301 100644 --- a/drivers/mtd/nand/spi/winbond.c +++ b/drivers/mtd/nand/spi/winbond.c @@ -108,7 +108,7 @@ static int w25n02kv_ecc_get_status(struct spinand_device *spinand, { struct nand_device *nand = spinand_to_nand(spinand); u8 mbf = 0; - struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, &mbf); + struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, spinand->scratchbuf); switch (status & STATUS_ECC_MASK) { case STATUS_ECC_NO_BITFLIPS: @@ -126,7 +126,7 @@ static int w25n02kv_ecc_get_status(struct spinand_device *spinand, if (spi_mem_exec_op(spinand->spimem, &op)) return nanddev_get_ecc_conf(nand)->strength; - mbf >>= 4; + mbf = *(spinand->scratchbuf) >> 4; if (WARN_ON(mbf > nanddev_get_ecc_conf(nand)->strength || !mbf)) return nanddev_get_ecc_conf(nand)->strength; diff --git 
a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c index 36876aa849ed..15f9a80c10b9 100644 --- a/drivers/mtd/spi-nor/spansion.c +++ b/drivers/mtd/spi-nor/spansion.c @@ -361,7 +361,7 @@ static int cypress_nor_determine_addr_mode_by_sr1(struct spi_nor *nor, */ static int cypress_nor_set_addr_mode_nbytes(struct spi_nor *nor) { - struct spi_mem_op op = {}; + struct spi_mem_op op; u8 addr_mode; int ret; @@ -492,7 +492,7 @@ s25fs256t_post_bfpt_fixup(struct spi_nor *nor, const struct sfdp_parameter_header *bfpt_header, const struct sfdp_bfpt *bfpt) { - struct spi_mem_op op = {}; + struct spi_mem_op op; int ret; ret = cypress_nor_set_addr_mode_nbytes(nor); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 484c9e3e5e82..447b06ea4fc9 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -5901,7 +5901,9 @@ void bond_setup(struct net_device *bond_dev) bond_dev->hw_features = BOND_VLAN_FEATURES | NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER; + NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_STAG_RX | + NETIF_F_HW_VLAN_STAG_FILTER; bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL; bond_dev->features |= bond_dev->hw_features; diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index cde253d27bd0..72374b066f64 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -1436,7 +1436,9 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) if (IS_ERR(priv->clk)) return PTR_ERR(priv->clk); - clk_prepare_enable(priv->clk); + ret = clk_prepare_enable(priv->clk); + if (ret) + return ret; priv->clk_mdiv = devm_clk_get_optional(&pdev->dev, "sw_switch_mdiv"); if (IS_ERR(priv->clk_mdiv)) { @@ -1444,7 +1446,9 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) goto out_clk; } - clk_prepare_enable(priv->clk_mdiv); + ret = clk_prepare_enable(priv->clk_mdiv); + if (ret) + goto out_clk; ret = bcm_sf2_sw_rst(priv); if (ret) { diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index b18cd170ec06..6c0623f88654 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -635,10 +635,9 @@ static const struct regmap_range ksz9477_valid_regs[] = { regmap_reg_range(0x1030, 0x1030), regmap_reg_range(0x1100, 0x1115), regmap_reg_range(0x111a, 0x111f), - regmap_reg_range(0x1122, 0x1127), - regmap_reg_range(0x112a, 0x112b), - regmap_reg_range(0x1136, 0x1139), - regmap_reg_range(0x113e, 0x113f), + regmap_reg_range(0x1120, 0x112b), + regmap_reg_range(0x1134, 0x113b), + regmap_reg_range(0x113c, 0x113f), regmap_reg_range(0x1400, 0x1401), regmap_reg_range(0x1403, 0x1403), regmap_reg_range(0x1410, 0x1417), @@ -669,10 +668,9 @@ static const struct regmap_range ksz9477_valid_regs[] = { regmap_reg_range(0x2030, 0x2030), regmap_reg_range(0x2100, 0x2115), regmap_reg_range(0x211a, 0x211f), - regmap_reg_range(0x2122, 0x2127), - regmap_reg_range(0x212a, 0x212b), - regmap_reg_range(0x2136, 0x2139), - regmap_reg_range(0x213e, 0x213f), + regmap_reg_range(0x2120, 0x212b), + regmap_reg_range(0x2134, 0x213b), + regmap_reg_range(0x213c, 0x213f), regmap_reg_range(0x2400, 0x2401), regmap_reg_range(0x2403, 0x2403), regmap_reg_range(0x2410, 0x2417), @@ -703,10 +701,9 @@ static const struct regmap_range ksz9477_valid_regs[] = { regmap_reg_range(0x3030, 0x3030), regmap_reg_range(0x3100, 0x3115), regmap_reg_range(0x311a, 0x311f), - regmap_reg_range(0x3122, 0x3127), - regmap_reg_range(0x312a, 0x312b), - regmap_reg_range(0x3136, 0x3139), - 
regmap_reg_range(0x313e, 0x313f), + regmap_reg_range(0x3120, 0x312b), + regmap_reg_range(0x3134, 0x313b), + regmap_reg_range(0x313c, 0x313f), regmap_reg_range(0x3400, 0x3401), regmap_reg_range(0x3403, 0x3403), regmap_reg_range(0x3410, 0x3417), @@ -737,10 +734,9 @@ static const struct regmap_range ksz9477_valid_regs[] = { regmap_reg_range(0x4030, 0x4030), regmap_reg_range(0x4100, 0x4115), regmap_reg_range(0x411a, 0x411f), - regmap_reg_range(0x4122, 0x4127), - regmap_reg_range(0x412a, 0x412b), - regmap_reg_range(0x4136, 0x4139), - regmap_reg_range(0x413e, 0x413f), + regmap_reg_range(0x4120, 0x412b), + regmap_reg_range(0x4134, 0x413b), + regmap_reg_range(0x413c, 0x413f), regmap_reg_range(0x4400, 0x4401), regmap_reg_range(0x4403, 0x4403), regmap_reg_range(0x4410, 0x4417), @@ -771,10 +767,9 @@ static const struct regmap_range ksz9477_valid_regs[] = { regmap_reg_range(0x5030, 0x5030), regmap_reg_range(0x5100, 0x5115), regmap_reg_range(0x511a, 0x511f), - regmap_reg_range(0x5122, 0x5127), - regmap_reg_range(0x512a, 0x512b), - regmap_reg_range(0x5136, 0x5139), - regmap_reg_range(0x513e, 0x513f), + regmap_reg_range(0x5120, 0x512b), + regmap_reg_range(0x5134, 0x513b), + regmap_reg_range(0x513c, 0x513f), regmap_reg_range(0x5400, 0x5401), regmap_reg_range(0x5403, 0x5403), regmap_reg_range(0x5410, 0x5417), diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index 8da46d284e35..bef879c6d500 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -1625,8 +1625,10 @@ static void felix_teardown(struct dsa_switch *ds) struct felix *felix = ocelot_to_felix(ocelot); struct dsa_port *dp; + rtnl_lock(); if (felix->tag_proto_ops) felix->tag_proto_ops->teardown(ds); + rtnl_unlock(); dsa_switch_for_each_available_port(dp, ds) ocelot_deinit_port(ocelot, dp->index); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index e5b54e6025be..1eb490c48c52 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -633,12 +633,13 @@ tx_kick_pending: return NETDEV_TX_OK; } -static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) +static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) { struct bnxt_tx_ring_info *txr = bnapi->tx_ring; struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index); u16 cons = txr->tx_cons; struct pci_dev *pdev = bp->pdev; + int nr_pkts = bnapi->tx_pkts; int i; unsigned int tx_bytes = 0; @@ -688,6 +689,7 @@ next_tx_int: dev_kfree_skb_any(skb); } + bnapi->tx_pkts = 0; WRITE_ONCE(txr->tx_cons, cons); __netif_txq_completed_wake(txq, nr_pkts, tx_bytes, @@ -697,17 +699,24 @@ next_tx_int: static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping, struct bnxt_rx_ring_info *rxr, + unsigned int *offset, gfp_t gfp) { struct device *dev = &bp->pdev->dev; struct page *page; - page = page_pool_dev_alloc_pages(rxr->page_pool); + if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) { + page = page_pool_dev_alloc_frag(rxr->page_pool, offset, + BNXT_RX_PAGE_SIZE); + } else { + page = page_pool_dev_alloc_pages(rxr->page_pool); + *offset = 0; + } if (!page) return NULL; - *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir, - DMA_ATTR_WEAK_ORDERING); + *mapping = dma_map_page_attrs(dev, page, *offset, BNXT_RX_PAGE_SIZE, + bp->rx_dir, DMA_ATTR_WEAK_ORDERING); if (dma_mapping_error(dev, *mapping)) { page_pool_recycle_direct(rxr->page_pool, page); return NULL; @@ -747,15 +756,16 @@ int 
bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, dma_addr_t mapping; if (BNXT_RX_PAGE_MODE(bp)) { + unsigned int offset; struct page *page = - __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp); + __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp); if (!page) return -ENOMEM; mapping += bp->rx_dma_offset; rx_buf->data = page; - rx_buf->data_ptr = page_address(page) + bp->rx_offset; + rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset; } else { u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, gfp); @@ -815,7 +825,7 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp, unsigned int offset = 0; if (BNXT_RX_PAGE_MODE(bp)) { - page = __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp); + page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp); if (!page) return -ENOMEM; @@ -962,15 +972,15 @@ static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp, return NULL; } dma_addr -= bp->rx_dma_offset; - dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir, - DMA_ATTR_WEAK_ORDERING); - skb = build_skb(page_address(page), PAGE_SIZE); + dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE, + bp->rx_dir, DMA_ATTR_WEAK_ORDERING); + skb = build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE); if (!skb) { page_pool_recycle_direct(rxr->page_pool, page); return NULL; } skb_mark_for_recycle(skb); - skb_reserve(skb, bp->rx_dma_offset); + skb_reserve(skb, bp->rx_offset); __skb_put(skb, len); return skb; @@ -996,8 +1006,8 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp, return NULL; } dma_addr -= bp->rx_dma_offset; - dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir, - DMA_ATTR_WEAK_ORDERING); + dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE, + bp->rx_dir, DMA_ATTR_WEAK_ORDERING); if (unlikely(!payload)) payload = eth_get_headlen(bp->dev, data_ptr, len); @@ -1010,7 +1020,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp, skb_mark_for_recycle(skb); off = (void *)data_ptr - page_address(page); - skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE); + skb_add_rx_frag(skb, 0, page, off, len, BNXT_RX_PAGE_SIZE); memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN, payload + NET_IP_ALIGN); @@ -1141,7 +1151,7 @@ static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp, skb->data_len += total_frag_len; skb->len += total_frag_len; - skb->truesize += PAGE_SIZE * agg_bufs; + skb->truesize += BNXT_RX_PAGE_SIZE * agg_bufs; return skb; } @@ -2569,12 +2579,11 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, return rx_pkts; } -static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi) +static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi, + int budget) { - if (bnapi->tx_pkts) { - bnapi->tx_int(bp, bnapi, bnapi->tx_pkts); - bnapi->tx_pkts = 0; - } + if (bnapi->tx_pkts) + bnapi->tx_int(bp, bnapi, budget); if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) { struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; @@ -2603,7 +2612,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, */ bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons); - __bnxt_poll_work_done(bp, bnapi); + __bnxt_poll_work_done(bp, bnapi, budget); return rx_pkts; } @@ -2734,7 +2743,7 @@ static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) } static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi, - u64 dbr_type) + u64 dbr_type, int budget) { struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; int i; @@ -2750,7 
+2759,7 @@ static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi, cpr2->had_work_done = 0; } } - __bnxt_poll_work_done(bp, bnapi); + __bnxt_poll_work_done(bp, bnapi, budget); } static int bnxt_poll_p5(struct napi_struct *napi, int budget) @@ -2780,7 +2789,8 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget) if (cpr->has_more_work) break; - __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL); + __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, + budget); cpr->cp_raw_cons = raw_cons; if (napi_complete_done(napi, work_done)) BNXT_DB_NQ_ARM_P5(&cpr->cp_db, @@ -2810,7 +2820,7 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget) } raw_cons = NEXT_RAW_CMP(raw_cons); } - __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ); + __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, budget); if (raw_cons != cpr->cp_raw_cons) { cpr->cp_raw_cons = raw_cons; BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons); @@ -2943,8 +2953,8 @@ skip_rx_tpa_free: rx_buf->data = NULL; if (BNXT_RX_PAGE_MODE(bp)) { mapping -= bp->rx_dma_offset; - dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE, - bp->rx_dir, + dma_unmap_page_attrs(&pdev->dev, mapping, + BNXT_RX_PAGE_SIZE, bp->rx_dir, DMA_ATTR_WEAK_ORDERING); page_pool_recycle_direct(rxr->page_pool, data); } else { @@ -3213,6 +3223,8 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp, pp.napi = &rxr->bnapi->napi; pp.dev = &bp->pdev->dev; pp.dma_dir = DMA_BIDIRECTIONAL; + if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) + pp.flags |= PP_FLAG_PAGE_FRAG; rxr->page_pool = page_pool_create(&pp); if (IS_ERR(rxr->page_pool)) { @@ -3989,26 +4001,29 @@ void bnxt_set_ring_params(struct bnxt *bp) */ int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode) { + struct net_device *dev = bp->dev; + if (page_mode) { bp->flags &= ~BNXT_FLAG_AGG_RINGS; bp->flags |= BNXT_FLAG_RX_PAGE_MODE; - if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) { + if (bp->xdp_prog->aux->xdp_has_frags) + dev->max_mtu = min_t(u16, bp->max_mtu, BNXT_MAX_MTU); + else + dev->max_mtu = + min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU); + if (dev->mtu > BNXT_MAX_PAGE_MODE_MTU) { bp->flags |= BNXT_FLAG_JUMBO; bp->rx_skb_func = bnxt_rx_multi_page_skb; - bp->dev->max_mtu = - min_t(u16, bp->max_mtu, BNXT_MAX_MTU); } else { bp->flags |= BNXT_FLAG_NO_AGG_RINGS; bp->rx_skb_func = bnxt_rx_page_skb; - bp->dev->max_mtu = - min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU); } bp->rx_dir = DMA_BIDIRECTIONAL; /* Disable LRO or GRO_HW */ - netdev_update_features(bp->dev); + netdev_update_features(dev); } else { - bp->dev->max_mtu = bp->max_mtu; + dev->max_mtu = bp->max_mtu; bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE; bp->rx_dir = DMA_FROM_DEVICE; bp->rx_skb_func = bnxt_rx_skb; @@ -9429,6 +9444,8 @@ static void bnxt_enable_napi(struct bnxt *bp) cpr->sw_stats.rx.rx_resets++; bnapi->in_reset = false; + bnapi->tx_pkts = 0; + if (bnapi->rx_ring) { INIT_WORK(&cpr->dim.work, bnxt_dim_work); cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 080e73496066..bb95c3dc5270 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1005,7 +1005,7 @@ struct bnxt_napi { struct bnxt_tx_ring_info *tx_ring; void (*tx_int)(struct bnxt *, struct bnxt_napi *, - int); + int budget); int tx_pkts; u8 events; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index 4efa5fe6972b..fb43232310b2 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ 
b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -125,16 +125,20 @@ static void __bnxt_xmit_xdp_redirect(struct bnxt *bp, dma_unmap_len_set(tx_buf, len, 0); } -void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) +void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) { struct bnxt_tx_ring_info *txr = bnapi->tx_ring; struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; bool rx_doorbell_needed = false; + int nr_pkts = bnapi->tx_pkts; struct bnxt_sw_tx_bd *tx_buf; u16 tx_cons = txr->tx_cons; u16 last_tx_cons = tx_cons; int i, j, frags; + if (!budget) + return; + for (i = 0; i < nr_pkts; i++) { tx_buf = &txr->tx_buf_ring[tx_cons]; @@ -161,6 +165,8 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) } tx_cons = NEXT_TX(tx_cons); } + + bnapi->tx_pkts = 0; WRITE_ONCE(txr->tx_cons, tx_cons); if (rx_doorbell_needed) { tx_buf = &txr->tx_buf_ring[last_tx_cons]; @@ -180,8 +186,8 @@ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, u8 *data_ptr, unsigned int len, struct xdp_buff *xdp) { + u32 buflen = BNXT_RX_PAGE_SIZE; struct bnxt_sw_rx_bd *rx_buf; - u32 buflen = PAGE_SIZE; struct pci_dev *pdev; dma_addr_t mapping; u32 offset; @@ -297,7 +303,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, rx_buf = &rxr->rx_buf_ring[cons]; mapping = rx_buf->mapping - bp->rx_dma_offset; dma_unmap_page_attrs(&pdev->dev, mapping, - PAGE_SIZE, bp->rx_dir, + BNXT_RX_PAGE_SIZE, bp->rx_dir, DMA_ATTR_WEAK_ORDERING); /* if we are unable to allocate a new buffer, abort and reuse */ @@ -480,7 +486,7 @@ bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags, } xdp_update_skb_shared_info(skb, num_frags, sinfo->xdp_frags_size, - PAGE_SIZE * sinfo->nr_frags, + BNXT_RX_PAGE_SIZE * sinfo->nr_frags, xdp_buff_is_frag_pfmemalloc(xdp)); return skb; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h index ea430d6961df..5e412c5655ba 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h @@ -16,7 +16,7 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp, struct bnxt_tx_ring_info *txr, dma_addr_t mapping, u32 len, struct xdp_buff *xdp); -void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts); +void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget); bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, struct xdp_buff xdp, struct page *page, u8 **data_ptr, unsigned int *len, u8 *event); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c index 1416262d4296..e0a4cb7e3f50 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c @@ -1186,14 +1186,9 @@ static int enetc_init_port_rss_memory(struct enetc_si *si) static int enetc_pf_register_with_ierb(struct pci_dev *pdev) { - struct device_node *node = pdev->dev.of_node; struct platform_device *ierb_pdev; struct device_node *ierb_node; - /* Don't register with the IERB if the PF itself is disabled */ - if (!node || !of_device_is_available(node)) - return 0; - ierb_node = of_find_compatible_node(NULL, NULL, "fsl,ls1028a-enetc-ierb"); if (!ierb_node || !of_device_is_available(ierb_node)) @@ -1208,56 +1203,81 @@ static int enetc_pf_register_with_ierb(struct pci_dev *pdev) return enetc_ierb_register_pf(ierb_pdev, pdev); } -static int enetc_pf_probe(struct pci_dev *pdev, 
- const struct pci_device_id *ent) +static struct enetc_si *enetc_psi_create(struct pci_dev *pdev) { - struct device_node *node = pdev->dev.of_node; - struct enetc_ndev_priv *priv; - struct net_device *ndev; struct enetc_si *si; - struct enetc_pf *pf; int err; - err = enetc_pf_register_with_ierb(pdev); - if (err == -EPROBE_DEFER) - return err; - if (err) - dev_warn(&pdev->dev, - "Could not register with IERB driver: %pe, please update the device tree\n", - ERR_PTR(err)); - - err = enetc_pci_probe(pdev, KBUILD_MODNAME, sizeof(*pf)); - if (err) - return dev_err_probe(&pdev->dev, err, "PCI probing failed\n"); + err = enetc_pci_probe(pdev, KBUILD_MODNAME, sizeof(struct enetc_pf)); + if (err) { + dev_err_probe(&pdev->dev, err, "PCI probing failed\n"); + goto out; + } si = pci_get_drvdata(pdev); if (!si->hw.port || !si->hw.global) { err = -ENODEV; dev_err(&pdev->dev, "could not map PF space, probing a VF?\n"); - goto err_map_pf_space; + goto out_pci_remove; } err = enetc_setup_cbdr(&pdev->dev, &si->hw, ENETC_CBDR_DEFAULT_SIZE, &si->cbd_ring); if (err) - goto err_setup_cbdr; + goto out_pci_remove; err = enetc_init_port_rfs_memory(si); if (err) { dev_err(&pdev->dev, "Failed to initialize RFS memory\n"); - goto err_init_port_rfs; + goto out_teardown_cbdr; } err = enetc_init_port_rss_memory(si); if (err) { dev_err(&pdev->dev, "Failed to initialize RSS memory\n"); - goto err_init_port_rss; + goto out_teardown_cbdr; } - if (node && !of_device_is_available(node)) { - dev_info(&pdev->dev, "device is disabled, skipping\n"); - err = -ENODEV; - goto err_device_disabled; + return si; + +out_teardown_cbdr: + enetc_teardown_cbdr(&si->cbd_ring); +out_pci_remove: + enetc_pci_remove(pdev); +out: + return ERR_PTR(err); +} + +static void enetc_psi_destroy(struct pci_dev *pdev) +{ + struct enetc_si *si = pci_get_drvdata(pdev); + + enetc_teardown_cbdr(&si->cbd_ring); + enetc_pci_remove(pdev); +} + +static int enetc_pf_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct device_node *node = pdev->dev.of_node; + struct enetc_ndev_priv *priv; + struct net_device *ndev; + struct enetc_si *si; + struct enetc_pf *pf; + int err; + + err = enetc_pf_register_with_ierb(pdev); + if (err == -EPROBE_DEFER) + return err; + if (err) + dev_warn(&pdev->dev, + "Could not register with IERB driver: %pe, please update the device tree\n", + ERR_PTR(err)); + + si = enetc_psi_create(pdev); + if (IS_ERR(si)) { + err = PTR_ERR(si); + goto err_psi_create; } pf = enetc_si_priv(si); @@ -1339,15 +1359,9 @@ err_alloc_si_res: si->ndev = NULL; free_netdev(ndev); err_alloc_netdev: -err_init_port_rss: -err_init_port_rfs: -err_device_disabled: err_setup_mac_addresses: - enetc_teardown_cbdr(&si->cbd_ring); -err_setup_cbdr: -err_map_pf_space: - enetc_pci_remove(pdev); - + enetc_psi_destroy(pdev); +err_psi_create: return err; } @@ -1370,12 +1384,29 @@ static void enetc_pf_remove(struct pci_dev *pdev) enetc_free_msix(priv); enetc_free_si_resources(priv); - enetc_teardown_cbdr(&si->cbd_ring); free_netdev(si->ndev); - enetc_pci_remove(pdev); + enetc_psi_destroy(pdev); +} + +static void enetc_fixup_clear_rss_rfs(struct pci_dev *pdev) +{ + struct device_node *node = pdev->dev.of_node; + struct enetc_si *si; + + /* Only apply quirk for disabled functions. For the ones + * that are enabled, enetc_pf_probe() will apply it. 
+ */ + if (node && of_device_is_available(node)) + return; + + si = enetc_psi_create(pdev); + if (si) + enetc_psi_destroy(pdev); } +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_PF, + enetc_fixup_clear_rss_rfs); static const struct pci_device_id enetc_pf_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_PF) }, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c index 52546f625c8b..f276b5ecb431 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c @@ -464,9 +464,9 @@ static void hns3_dbg_fill_content(char *content, u16 len, if (result) { if (item_len < strlen(result[i])) break; - strscpy(pos, result[i], strlen(result[i])); + memcpy(pos, result[i], strlen(result[i])); } else { - strscpy(pos, items[i].name, strlen(items[i].name)); + memcpy(pos, items[i].name, strlen(items[i].name)); } pos += item_len; len -= item_len; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 9f6890059666..b7b51e56b030 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -5854,6 +5854,9 @@ void hns3_external_lb_prepare(struct net_device *ndev, bool if_running) if (!if_running) return; + if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state)) + return; + netif_carrier_off(ndev); netif_tx_disable(ndev); @@ -5882,7 +5885,16 @@ void hns3_external_lb_restore(struct net_device *ndev, bool if_running) if (!if_running) return; - hns3_nic_reset_all_ring(priv->ae_handle); + if (hns3_nic_resetting(ndev)) + return; + + if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) + return; + + if (hns3_nic_reset_all_ring(priv->ae_handle)) + return; + + clear_bit(HNS3_NIC_STATE_DOWN, &priv->state); for (i = 0; i < priv->vector_num; i++) hns3_vector_enable(&priv->tqp_vector[i]); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c index 409db2e70965..0fb2eaee3e8a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c @@ -111,9 +111,9 @@ static void hclge_dbg_fill_content(char *content, u16 len, if (result) { if (item_len < strlen(result[i])) break; - strscpy(pos, result[i], strlen(result[i])); + memcpy(pos, result[i], strlen(result[i])); } else { - strscpy(pos, items[i].name, strlen(items[i].name)); + memcpy(pos, items[i].name, strlen(items[i].name)); } pos += item_len; len -= item_len; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index bf675c15fbb9..a940e35aef29 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -72,6 +72,8 @@ static void hclge_restore_hw_table(struct hclge_dev *hdev); static void hclge_sync_promisc_mode(struct hclge_dev *hdev); static void hclge_sync_fd_table(struct hclge_dev *hdev); static void hclge_update_fec_stats(struct hclge_dev *hdev); +static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret, + int wait_cnt); static struct hnae3_ae_algo ae_algo; @@ -7558,6 +7560,8 @@ static void hclge_enable_fd(struct hnae3_handle *handle, bool enable) static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) { +#define HCLGE_LINK_STATUS_WAIT_CNT 3 + struct hclge_desc desc; struct 
hclge_config_mac_mode_cmd *req = (struct hclge_config_mac_mode_cmd *)desc.data; @@ -7582,9 +7586,15 @@ static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) + if (ret) { dev_err(&hdev->pdev->dev, "mac enable fail, ret =%d.\n", ret); + return; + } + + if (!enable) + hclge_mac_link_status_wait(hdev, HCLGE_LINK_STATUS_DOWN, + HCLGE_LINK_STATUS_WAIT_CNT); } static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid, @@ -7647,10 +7657,9 @@ static void hclge_phy_link_status_wait(struct hclge_dev *hdev, } while (++i < HCLGE_PHY_LINK_STATUS_NUM); } -static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret) +static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret, + int wait_cnt) { -#define HCLGE_MAC_LINK_STATUS_NUM 100 - int link_status; int i = 0; int ret; @@ -7663,13 +7672,15 @@ static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret) return 0; msleep(HCLGE_LINK_STATUS_MS); - } while (++i < HCLGE_MAC_LINK_STATUS_NUM); + } while (++i < wait_cnt); return -EBUSY; } static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en, bool is_phy) { +#define HCLGE_MAC_LINK_STATUS_NUM 100 + int link_ret; link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN; @@ -7677,7 +7688,8 @@ static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en, if (is_phy) hclge_phy_link_status_wait(hdev, link_ret); - return hclge_mac_link_status_wait(hdev, link_ret); + return hclge_mac_link_status_wait(hdev, link_ret, + HCLGE_MAC_LINK_STATUS_NUM); } static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en) @@ -10915,9 +10927,12 @@ int hclge_cfg_flowctrl(struct hclge_dev *hdev) u32 rx_pause, tx_pause; u8 flowctl; - if (!phydev->link || !phydev->autoneg) + if (!phydev->link) return 0; + if (!phydev->autoneg) + return hclge_mac_pause_setup_hw(hdev); + local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising); if (phydev->pause) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index de509e5751a7..c58c31221762 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -1553,7 +1553,7 @@ static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc) return 0; } -static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev) +int hclge_mac_pause_setup_hw(struct hclge_dev *hdev) { bool tx_en, rx_en; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h index 45dcfef3f90c..53eec6df5194 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h @@ -245,6 +245,7 @@ int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap, u8 pfc_bitmap); int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx); int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr); +int hclge_mac_pause_setup_hw(struct hclge_dev *hdev); void hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats); void hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats); int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate); diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 763d613adbcc..df76cdaddcfb 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ 
b/drivers/net/ethernet/ibm/ibmvnic.c @@ -97,6 +97,8 @@ static int pending_scrq(struct ibmvnic_adapter *, static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *, struct ibmvnic_sub_crq_queue *); static int ibmvnic_poll(struct napi_struct *napi, int data); +static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter); +static inline void reinit_init_done(struct ibmvnic_adapter *adapter); static void send_query_map(struct ibmvnic_adapter *adapter); static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, u32, u8); static int send_request_unmap(struct ibmvnic_adapter *, u8); @@ -114,6 +116,7 @@ static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter, static void free_long_term_buff(struct ibmvnic_adapter *adapter, struct ibmvnic_long_term_buff *ltb); static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter); +static void flush_reset_queue(struct ibmvnic_adapter *adapter); struct ibmvnic_stat { char name[ETH_GSTRING_LEN]; @@ -1505,8 +1508,8 @@ static const char *adapter_state_to_string(enum vnic_state state) static int ibmvnic_login(struct net_device *netdev) { + unsigned long flags, timeout = msecs_to_jiffies(20000); struct ibmvnic_adapter *adapter = netdev_priv(netdev); - unsigned long timeout = msecs_to_jiffies(20000); int retry_count = 0; int retries = 10; bool retry; @@ -1527,11 +1530,9 @@ static int ibmvnic_login(struct net_device *netdev) if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { - netdev_warn(netdev, "Login timed out, retrying...\n"); - retry = true; - adapter->init_done_rc = 0; - retry_count++; - continue; + netdev_warn(netdev, "Login timed out\n"); + adapter->login_pending = false; + goto partial_reset; } if (adapter->init_done_rc == ABORTED) { @@ -1573,10 +1574,69 @@ static int ibmvnic_login(struct net_device *netdev) "SCRQ irq initialization failed\n"); return rc; } + /* Default/timeout error handling, reset and start fresh */ } else if (adapter->init_done_rc) { netdev_warn(netdev, "Adapter login failed, init_done_rc = %d\n", adapter->init_done_rc); - return -EIO; + +partial_reset: + /* adapter login failed, so free any CRQs or sub-CRQs + * and register again before attempting to login again. + * If we don't do this then the VIOS may think that + * we are already logged in and reject any subsequent + * attempts + */ + netdev_warn(netdev, + "Freeing and re-registering CRQs before attempting to login again\n"); + retry = true; + adapter->init_done_rc = 0; + release_sub_crqs(adapter, true); + /* Much of this is similar logic as ibmvnic_probe(), + * we are essentially re-initializing communication + * with the server. We really should not run any + * resets/failovers here because this is already a form + * of reset and we do not want parallel resets occurring + */ + do { + reinit_init_done(adapter); + /* Clear any failovers we got in the previous + * pass since we are re-initializing the CRQ + */ + adapter->failover_pending = false; + release_crq_queue(adapter); + /* If we don't sleep here then we risk an + * unnecessary failover event from the VIOS. + * This is a known VIOS issue caused by a vnic + * device freeing and registering a CRQ too + * quickly. + */ + msleep(1500); + /* Avoid any resets, since we are currently + * resetting. 
+ */ + spin_lock_irqsave(&adapter->rwi_lock, flags); + flush_reset_queue(adapter); + spin_unlock_irqrestore(&adapter->rwi_lock, + flags); + + rc = init_crq_queue(adapter); + if (rc) { + netdev_err(netdev, "login recovery: init CRQ failed %d\n", + rc); + return -EIO; + } + + rc = ibmvnic_reset_init(adapter, false); + if (rc) + netdev_err(netdev, "login recovery: Reset init failed %d\n", + rc); + /* IBMVNIC_CRQ_INIT will return EAGAIN if it + * fails, since ibmvnic_reset_init will free + * irq's in failure, we won't be able to receive + * new CRQs so we need to keep trying. probe() + * handles this similarly. + */ + } while (rc == -EAGAIN && retry_count++ < retries); } } while (retry); @@ -1588,12 +1648,22 @@ static int ibmvnic_login(struct net_device *netdev) static void release_login_buffer(struct ibmvnic_adapter *adapter) { + if (!adapter->login_buf) + return; + + dma_unmap_single(&adapter->vdev->dev, adapter->login_buf_token, + adapter->login_buf_sz, DMA_TO_DEVICE); kfree(adapter->login_buf); adapter->login_buf = NULL; } static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter) { + if (!adapter->login_rsp_buf) + return; + + dma_unmap_single(&adapter->vdev->dev, adapter->login_rsp_buf_token, + adapter->login_rsp_buf_sz, DMA_FROM_DEVICE); kfree(adapter->login_rsp_buf); adapter->login_rsp_buf = NULL; } @@ -4830,11 +4900,14 @@ static int send_login(struct ibmvnic_adapter *adapter) if (rc) { adapter->login_pending = false; netdev_err(adapter->netdev, "Failed to send login, rc=%d\n", rc); - goto buf_rsp_map_failed; + goto buf_send_failed; } return 0; +buf_send_failed: + dma_unmap_single(dev, rsp_buffer_token, rsp_buffer_size, + DMA_FROM_DEVICE); buf_rsp_map_failed: kfree(login_rsp_buffer); adapter->login_rsp_buf = NULL; @@ -5396,6 +5469,7 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq, int num_tx_pools; int num_rx_pools; u64 *size_array; + u32 rsp_len; int i; /* CHECK: Test/set of login_pending does not need to be atomic @@ -5407,11 +5481,6 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq, } adapter->login_pending = false; - dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz, - DMA_TO_DEVICE); - dma_unmap_single(dev, adapter->login_rsp_buf_token, - adapter->login_rsp_buf_sz, DMA_FROM_DEVICE); - /* If the number of queues requested can't be allocated by the * server, the login response will return with code 1. We will need * to resend the login buffer with fewer queues requested. @@ -5447,6 +5516,23 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq, ibmvnic_reset(adapter, VNIC_RESET_FATAL); return -EIO; } + + rsp_len = be32_to_cpu(login_rsp->len); + if (be32_to_cpu(login->login_rsp_len) < rsp_len || + rsp_len <= be32_to_cpu(login_rsp->off_txsubm_subcrqs) || + rsp_len <= be32_to_cpu(login_rsp->off_rxadd_subcrqs) || + rsp_len <= be32_to_cpu(login_rsp->off_rxadd_buff_size) || + rsp_len <= be32_to_cpu(login_rsp->off_supp_tx_desc)) { + /* This can happen if a login request times out and there are + * 2 outstanding login requests sent, the LOGIN_RSP crq + * could have been for the older login request. 
So we are + * parsing the newer response buffer which may be incomplete + */ + dev_err(dev, "FATAL: Login rsp offsets/lengths invalid\n"); + ibmvnic_reset(adapter, VNIC_RESET_FATAL); + return -EIO; + } + size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size)); /* variable buffer sizes are not supported, so just read the diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c index 2f47cfa7f06e..460ca561819a 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c @@ -1401,14 +1401,15 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx if (fsp->flow_type & FLOW_MAC_EXT) return -EINVAL; + spin_lock_bh(&adapter->fdir_fltr_lock); if (adapter->fdir_active_fltr >= IAVF_MAX_FDIR_FILTERS) { + spin_unlock_bh(&adapter->fdir_fltr_lock); dev_err(&adapter->pdev->dev, "Unable to add Flow Director filter because VF reached the limit of max allowed filters (%u)\n", IAVF_MAX_FDIR_FILTERS); return -ENOSPC; } - spin_lock_bh(&adapter->fdir_fltr_lock); if (iavf_find_fdir_fltr_by_loc(adapter, fsp->location)) { dev_err(&adapter->pdev->dev, "Failed to add Flow Director filter, it already exists\n"); spin_unlock_bh(&adapter->fdir_fltr_lock); @@ -1781,7 +1782,9 @@ static int iavf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, case ETHTOOL_GRXCLSRLCNT: if (!FDIR_FLTR_SUPPORT(adapter)) break; + spin_lock_bh(&adapter->fdir_fltr_lock); cmd->rule_cnt = adapter->fdir_active_fltr; + spin_unlock_bh(&adapter->fdir_fltr_lock); cmd->data = IAVF_MAX_FDIR_FILTERS; ret = 0; break; diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.c b/drivers/net/ethernet/intel/iavf/iavf_fdir.c index 6146203efd84..505e82ebafe4 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_fdir.c +++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.c @@ -722,7 +722,9 @@ void iavf_print_fdir_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *f bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr) { struct iavf_fdir_fltr *tmp; + bool ret = false; + spin_lock_bh(&adapter->fdir_fltr_lock); list_for_each_entry(tmp, &adapter->fdir_list_head, list) { if (tmp->flow_type != fltr->flow_type) continue; @@ -732,11 +734,14 @@ bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr * !memcmp(&tmp->ip_data, &fltr->ip_data, sizeof(fltr->ip_data)) && !memcmp(&tmp->ext_data, &fltr->ext_data, - sizeof(fltr->ext_data))) - return true; + sizeof(fltr->ext_data))) { + ret = true; + break; + } } + spin_unlock_bh(&adapter->fdir_fltr_lock); - return false; + return ret; } /** diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index f02d44455772..cf92c39467c8 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -8813,6 +8813,7 @@ ice_setup_tc(struct net_device *netdev, enum tc_setup_type type, { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_pf *pf = np->vsi->back; + bool locked = false; int err; switch (type) { @@ -8822,10 +8823,27 @@ ice_setup_tc(struct net_device *netdev, enum tc_setup_type type, ice_setup_tc_block_cb, np, np, true); case TC_SETUP_QDISC_MQPRIO: + if (pf->adev) { + mutex_lock(&pf->adev_mutex); + device_lock(&pf->adev->dev); + locked = true; + if (pf->adev->dev.driver) { + netdev_err(netdev, "Cannot change qdisc when RDMA is active\n"); + err = -EBUSY; + goto adev_unlock; + } + } + /* 
setup traffic classifier for receive side */ mutex_lock(&pf->tc_mutex); err = ice_setup_tc_mqprio_qdisc(netdev, type_data); mutex_unlock(&pf->tc_mutex); + +adev_unlock: + if (locked) { + device_unlock(&pf->adev->dev); + mutex_unlock(&pf->adev_mutex); + } return err; default: return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index 9db384f66a8e..38901d2a4680 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -195,6 +195,10 @@ struct igc_adapter { u32 qbv_config_change_errors; bool qbv_transition; unsigned int qbv_count; + /* Access to oper_gate_closed, admin_gate_closed and qbv_transition + * are protected by the qbv_tx_lock. + */ + spinlock_t qbv_tx_lock; /* OS defined structs */ struct pci_dev *pdev; diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index bdeb36790d77..6f557e843e49 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -4801,6 +4801,7 @@ static int igc_sw_init(struct igc_adapter *adapter) adapter->nfc_rule_count = 0; spin_lock_init(&adapter->stats64_lock); + spin_lock_init(&adapter->qbv_tx_lock); /* Assume MSI-X interrupts, will be checked during IRQ allocation */ adapter->flags |= IGC_FLAG_HAS_MSIX; @@ -6119,15 +6120,15 @@ static int igc_tsn_enable_launchtime(struct igc_adapter *adapter, return igc_tsn_offload_apply(adapter); } -static int igc_tsn_clear_schedule(struct igc_adapter *adapter) +static int igc_qbv_clear_schedule(struct igc_adapter *adapter) { + unsigned long flags; int i; adapter->base_time = 0; adapter->cycle_time = NSEC_PER_SEC; adapter->taprio_offload_enable = false; adapter->qbv_config_change_errors = 0; - adapter->qbv_transition = false; adapter->qbv_count = 0; for (i = 0; i < adapter->num_tx_queues; i++) { @@ -6136,10 +6137,28 @@ static int igc_tsn_clear_schedule(struct igc_adapter *adapter) ring->start_time = 0; ring->end_time = NSEC_PER_SEC; ring->max_sdu = 0; + } + + spin_lock_irqsave(&adapter->qbv_tx_lock, flags); + + adapter->qbv_transition = false; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + ring->oper_gate_closed = false; ring->admin_gate_closed = false; } + spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags); + + return 0; +} + +static int igc_tsn_clear_schedule(struct igc_adapter *adapter) +{ + igc_qbv_clear_schedule(adapter); + return 0; } @@ -6150,6 +6169,7 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter, struct igc_hw *hw = &adapter->hw; u32 start_time = 0, end_time = 0; struct timespec64 now; + unsigned long flags; size_t n; int i; @@ -6217,6 +6237,8 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter, start_time += e->interval; } + spin_lock_irqsave(&adapter->qbv_tx_lock, flags); + /* Check whether a queue gets configured. * If not, set the start and end time to be end time. 
*/ @@ -6241,6 +6263,8 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter, } } + spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags); + for (i = 0; i < adapter->num_tx_queues; i++) { struct igc_ring *ring = adapter->tx_ring[i]; struct net_device *dev = adapter->netdev; @@ -6619,8 +6643,11 @@ static enum hrtimer_restart igc_qbv_scheduling_timer(struct hrtimer *timer) { struct igc_adapter *adapter = container_of(timer, struct igc_adapter, hrtimer); + unsigned long flags; unsigned int i; + spin_lock_irqsave(&adapter->qbv_tx_lock, flags); + adapter->qbv_transition = true; for (i = 0; i < adapter->num_tx_queues; i++) { struct igc_ring *tx_ring = adapter->tx_ring[i]; @@ -6633,6 +6660,9 @@ static enum hrtimer_restart igc_qbv_scheduling_timer(struct hrtimer *timer) } } adapter->qbv_transition = false; + + spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags); + return HRTIMER_NORESTART; } diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c index 2b9335cb4bb3..8537578e1cf1 100644 --- a/drivers/net/ethernet/korina.c +++ b/drivers/net/ethernet/korina.c @@ -1302,11 +1302,10 @@ static int korina_probe(struct platform_device *pdev) else if (of_get_ethdev_address(pdev->dev.of_node, dev) < 0) eth_hw_addr_random(dev); - clk = devm_clk_get_optional(&pdev->dev, "mdioclk"); + clk = devm_clk_get_optional_enabled(&pdev->dev, "mdioclk"); if (IS_ERR(clk)) return PTR_ERR(clk); if (clk) { - clk_prepare_enable(clk); lp->mii_clock_freq = clk_get_rate(clk); } else { lp->mii_clock_freq = 200000000; /* max possible input clk */ diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c index 035ead7935c7..dab61cc1acb5 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c @@ -98,6 +98,9 @@ int octep_ctrl_mbox_init(struct octep_ctrl_mbox *mbox) writeq(OCTEP_CTRL_MBOX_STATUS_INIT, OCTEP_CTRL_MBOX_INFO_HOST_STATUS(mbox->barmem)); + mutex_init(&mbox->h2fq_lock); + mutex_init(&mbox->f2hq_lock); + mbox->h2fq.sz = readl(OCTEP_CTRL_MBOX_H2FQ_SZ(mbox->barmem)); mbox->h2fq.hw_prod = OCTEP_CTRL_MBOX_H2FQ_PROD(mbox->barmem); mbox->h2fq.hw_cons = OCTEP_CTRL_MBOX_H2FQ_CONS(mbox->barmem); diff --git a/drivers/net/ethernet/marvell/prestera/prestera_pci.c b/drivers/net/ethernet/marvell/prestera/prestera_pci.c index f328d957b2db..35857dc19542 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_pci.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_pci.c @@ -727,7 +727,8 @@ pick_fw_ver: err = request_firmware_direct(&fw->bin, fw_path, fw->dev.dev); if (err) { - if (ver_maj == PRESTERA_SUPP_FW_MAJ_VER) { + if (ver_maj != PRESTERA_PREV_FW_MAJ_VER || + ver_min != PRESTERA_PREV_FW_MIN_VER) { ver_maj = PRESTERA_PREV_FW_MAJ_VER; ver_min = PRESTERA_PREV_FW_MIN_VER; diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router.c b/drivers/net/ethernet/marvell/prestera/prestera_router.c index a9a1028cb17b..de317179a7dc 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_router.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_router.c @@ -166,11 +166,11 @@ prestera_util_neigh2nc_key(struct prestera_switch *sw, struct neighbour *n, static bool __prestera_fi_is_direct(struct fib_info *fi) { - struct fib_nh *fib_nh; + struct fib_nh_common *fib_nhc; if (fib_info_num_path(fi) == 1) { - fib_nh = fib_info_nh(fi, 0); - if (fib_nh->fib_nh_gw_family == AF_UNSPEC) + fib_nhc = fib_info_nhc(fi, 0); + if (fib_nhc->nhc_gw_family == AF_UNSPEC) return 
true; } @@ -261,7 +261,7 @@ static bool __prestera_util_kern_n_is_reachable_v4(u32 tb_id, __be32 *addr, struct net_device *dev) { - struct fib_nh *fib_nh; + struct fib_nh_common *fib_nhc; struct fib_result res; bool reachable; @@ -269,8 +269,8 @@ __prestera_util_kern_n_is_reachable_v4(u32 tb_id, __be32 *addr, if (!prestera_util_kern_get_route(&res, tb_id, addr)) if (prestera_fi_is_direct(res.fi)) { - fib_nh = fib_info_nh(res.fi, 0); - if (dev == fib_nh->fib_nh_dev) + fib_nhc = fib_info_nhc(res.fi, 0); + if (dev == fib_nhc->nhc_dev) reachable = true; } @@ -324,7 +324,7 @@ prestera_kern_fib_info_nhc(struct fib_notifier_info *info, int n) if (info->family == AF_INET) { fen4_info = container_of(info, struct fib_entry_notifier_info, info); - return &fib_info_nh(fen4_info->fi, n)->nh_common; + return fib_info_nhc(fen4_info->fi, n); } else if (info->family == AF_INET6) { fen6_info = container_of(info, struct fib6_entry_notifier_info, info); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c index b0128336ff01..e869c65d8e90 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c @@ -2,6 +2,7 @@ /* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. */ #include "reporter_vnic.h" +#include "en_stats.h" #include "devlink.h" #define VNIC_ENV_GET64(vnic_env_stats, c) \ @@ -36,55 +37,72 @@ int mlx5_reporter_vnic_diagnose_counters(struct mlx5_core_dev *dev, if (err) return err; - err = devlink_fmsg_u64_pair_put(fmsg, "total_error_queues", - VNIC_ENV_GET64(&vnic, total_error_queues)); - if (err) - return err; - - err = devlink_fmsg_u64_pair_put(fmsg, "send_queue_priority_update_flow", - VNIC_ENV_GET64(&vnic, send_queue_priority_update_flow)); - if (err) - return err; - - err = devlink_fmsg_u64_pair_put(fmsg, "comp_eq_overrun", - VNIC_ENV_GET64(&vnic, comp_eq_overrun)); - if (err) - return err; - - err = devlink_fmsg_u64_pair_put(fmsg, "async_eq_overrun", - VNIC_ENV_GET64(&vnic, async_eq_overrun)); - if (err) - return err; - - err = devlink_fmsg_u64_pair_put(fmsg, "cq_overrun", - VNIC_ENV_GET64(&vnic, cq_overrun)); - if (err) - return err; - - err = devlink_fmsg_u64_pair_put(fmsg, "invalid_command", - VNIC_ENV_GET64(&vnic, invalid_command)); - if (err) - return err; - - err = devlink_fmsg_u64_pair_put(fmsg, "quota_exceeded_command", - VNIC_ENV_GET64(&vnic, quota_exceeded_command)); - if (err) - return err; - - err = devlink_fmsg_u64_pair_put(fmsg, "nic_receive_steering_discard", - VNIC_ENV_GET64(&vnic, nic_receive_steering_discard)); - if (err) - return err; - - err = devlink_fmsg_u64_pair_put(fmsg, "generated_pkt_steering_fail", - VNIC_ENV_GET64(&vnic, generated_pkt_steering_fail)); - if (err) - return err; - - err = devlink_fmsg_u64_pair_put(fmsg, "handled_pkt_steering_fail", - VNIC_ENV_GET64(&vnic, handled_pkt_steering_fail)); - if (err) - return err; + if (MLX5_CAP_GEN(dev, vnic_env_queue_counters)) { + err = devlink_fmsg_u32_pair_put(fmsg, "total_error_queues", + VNIC_ENV_GET(&vnic, total_error_queues)); + if (err) + return err; + + err = devlink_fmsg_u32_pair_put(fmsg, "send_queue_priority_update_flow", + VNIC_ENV_GET(&vnic, + send_queue_priority_update_flow)); + if (err) + return err; + } + + if (MLX5_CAP_GEN(dev, eq_overrun_count)) { + err = devlink_fmsg_u32_pair_put(fmsg, "comp_eq_overrun", + VNIC_ENV_GET(&vnic, comp_eq_overrun)); + if (err) + return err; + + err = devlink_fmsg_u32_pair_put(fmsg, "async_eq_overrun", + 
VNIC_ENV_GET(&vnic, async_eq_overrun)); + if (err) + return err; + } + + if (MLX5_CAP_GEN(dev, vnic_env_cq_overrun)) { + err = devlink_fmsg_u32_pair_put(fmsg, "cq_overrun", + VNIC_ENV_GET(&vnic, cq_overrun)); + if (err) + return err; + } + + if (MLX5_CAP_GEN(dev, invalid_command_count)) { + err = devlink_fmsg_u32_pair_put(fmsg, "invalid_command", + VNIC_ENV_GET(&vnic, invalid_command)); + if (err) + return err; + } + + if (MLX5_CAP_GEN(dev, quota_exceeded_count)) { + err = devlink_fmsg_u32_pair_put(fmsg, "quota_exceeded_command", + VNIC_ENV_GET(&vnic, quota_exceeded_command)); + if (err) + return err; + } + + if (MLX5_CAP_GEN(dev, nic_receive_steering_discard)) { + err = devlink_fmsg_u64_pair_put(fmsg, "nic_receive_steering_discard", + VNIC_ENV_GET64(&vnic, + nic_receive_steering_discard)); + if (err) + return err; + } + + if (MLX5_CAP_GEN(dev, vnic_env_cnt_steering_fail)) { + err = devlink_fmsg_u64_pair_put(fmsg, "generated_pkt_steering_fail", + VNIC_ENV_GET64(&vnic, + generated_pkt_steering_fail)); + if (err) + return err; + + err = devlink_fmsg_u64_pair_put(fmsg, "handled_pkt_steering_fail", + VNIC_ENV_GET64(&vnic, handled_pkt_steering_fail)); + if (err) + return err; + } err = devlink_fmsg_obj_nest_end(fmsg); if (err) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c index f0c3464f037f..1730f6a716ee 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c @@ -1030,9 +1030,6 @@ int mlx5e_tc_tun_encap_dests_set(struct mlx5e_priv *priv, int out_index; int err = 0; - if (!mlx5e_is_eswitch_flow(flow)) - return 0; - parse_attr = attr->parse_attr; esw_attr = attr->esw_attr; *vf_tun = false; @@ -1464,10 +1461,12 @@ static void mlx5e_invalidate_encap(struct mlx5e_priv *priv, attr = mlx5e_tc_get_encap_attr(flow); esw_attr = attr->esw_attr; - if (flow_flag_test(flow, SLOW)) + if (flow_flag_test(flow, SLOW)) { mlx5e_tc_unoffload_from_slow_path(esw, flow); - else + } else { mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->attr); + mlx5e_tc_unoffload_flow_post_acts(flow); + } mlx5e_tc_detach_mod_hdr(priv, flow, attr); attr->modify_hdr = NULL; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c index d97e6df66f45..b8dd74453655 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c @@ -323,8 +323,11 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq, net_prefetch(mxbuf->xdp.data); prog = rcu_dereference(rq->xdp_prog); - if (likely(prog && mlx5e_xdp_handle(rq, prog, mxbuf))) + if (likely(prog && mlx5e_xdp_handle(rq, prog, mxbuf))) { + if (likely(__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))) + wi->flags |= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE); return NULL; /* page/packet was consumed by XDP */ + } /* XDP_PASS: copy the data from the UMEM to a new SKB. The frame reuse * will be handled by mlx5e_free_rx_wqe. 
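A minimal sketch of the error-handling pattern the ipsec_rxtx.c hunk below adopts: pskb_trim() may have to reallocate (for cloned or non-linear skbs) and can therefore fail, so its return value has to be propagated before any header fields are rewritten. The helper name here is hypothetical and only illustrates the idea:

#include <linux/skbuff.h>

/* Hypothetical helper, for illustration only - not part of the patch. */
static int example_strip_trailer(struct sk_buff *skb, unsigned int trailer_len)
{
	int ret;

	/* pskb_trim() may need to reallocate and can therefore fail. */
	ret = pskb_trim(skb, skb->len - trailer_len);
	if (unlikely(ret))
		return ret;	/* propagate the error instead of ignoring it */

	/* Only after a successful trim is it safe to fix up header lengths. */
	return 0;
}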
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c index dbe87bf89c0d..832d36be4a17 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c @@ -808,9 +808,9 @@ static void setup_fte_upper_proto_match(struct mlx5_flow_spec *spec, struct upsp } if (upspec->sport) { - MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria, udp_dport, + MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria, udp_sport, upspec->sport_mask); - MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, udp_dport, upspec->sport); + MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, udp_sport, upspec->sport); } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c index eab5bc718771..8d995e304869 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c @@ -58,7 +58,9 @@ static int mlx5e_ipsec_remove_trailer(struct sk_buff *skb, struct xfrm_state *x) trailer_len = alen + plen + 2; - pskb_trim(skb, skb->len - trailer_len); + ret = pskb_trim(skb, skb->len - trailer_len); + if (unlikely(ret)) + return ret; if (skb->protocol == htons(ETH_P_IP)) { ipv4hdr->tot_len = htons(ntohs(ipv4hdr->tot_len) - trailer_len); ip_send_check(ipv4hdr); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c index cf704f106b7c..984fa04bd331 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c @@ -188,7 +188,6 @@ static void mlx5e_tls_debugfs_init(struct mlx5e_tls *tls, int mlx5e_ktls_init(struct mlx5e_priv *priv) { - struct mlx5_crypto_dek_pool *dek_pool; struct mlx5e_tls *tls; if (!mlx5e_is_ktls_device(priv->mdev)) @@ -199,12 +198,6 @@ int mlx5e_ktls_init(struct mlx5e_priv *priv) return -ENOMEM; tls->mdev = priv->mdev; - dek_pool = mlx5_crypto_dek_pool_create(priv->mdev, MLX5_ACCEL_OBJ_TLS_KEY); - if (IS_ERR(dek_pool)) { - kfree(tls); - return PTR_ERR(dek_pool); - } - tls->dek_pool = dek_pool; priv->tls = tls; mlx5e_tls_debugfs_init(tls, priv->dfs_root); @@ -222,7 +215,6 @@ void mlx5e_ktls_cleanup(struct mlx5e_priv *priv) debugfs_remove_recursive(tls->debugfs.dfs); tls->debugfs.dfs = NULL; - mlx5_crypto_dek_pool_destroy(tls->dek_pool); kfree(priv->tls); priv->tls = NULL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c index efb2cf74ad6a..d61be26a4df1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c @@ -908,28 +908,51 @@ static void mlx5e_tls_tx_debugfs_init(struct mlx5e_tls *tls, int mlx5e_ktls_init_tx(struct mlx5e_priv *priv) { + struct mlx5_crypto_dek_pool *dek_pool; struct mlx5e_tls *tls = priv->tls; + int err; + + if (!mlx5e_is_ktls_device(priv->mdev)) + return 0; + + /* DEK pool could be used by either or both of TX and RX. But we have to + * put the creation here to avoid syndrome when doing devlink reload. 
+ */ + dek_pool = mlx5_crypto_dek_pool_create(priv->mdev, MLX5_ACCEL_OBJ_TLS_KEY); + if (IS_ERR(dek_pool)) + return PTR_ERR(dek_pool); + tls->dek_pool = dek_pool; if (!mlx5e_is_ktls_tx(priv->mdev)) return 0; priv->tls->tx_pool = mlx5e_tls_tx_pool_init(priv->mdev, &priv->tls->sw_stats); - if (!priv->tls->tx_pool) - return -ENOMEM; + if (!priv->tls->tx_pool) { + err = -ENOMEM; + goto err_tx_pool_init; + } mlx5e_tls_tx_debugfs_init(tls, tls->debugfs.dfs); return 0; + +err_tx_pool_init: + mlx5_crypto_dek_pool_destroy(dek_pool); + return err; } void mlx5e_ktls_cleanup_tx(struct mlx5e_priv *priv) { if (!mlx5e_is_ktls_tx(priv->mdev)) - return; + goto dek_pool_destroy; debugfs_remove_recursive(priv->tls->debugfs.dfs_tx); priv->tls->debugfs.dfs_tx = NULL; mlx5e_tls_tx_pool_cleanup(priv->tls->tx_pool); priv->tls->tx_pool = NULL; + +dek_pool_destroy: + if (mlx5e_is_ktls_device(priv->mdev)) + mlx5_crypto_dek_pool_destroy(priv->tls->dek_pool); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c index 7fc901a6ec5f..414e28584881 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c @@ -161,6 +161,7 @@ static int macsec_fs_tx_create_crypto_table_groups(struct mlx5e_flow_table *ft) if (!in) { kfree(ft->g); + ft->g = NULL; return -ENOMEM; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c index 933a7772a7a3..5aa51d74f8b4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c @@ -135,6 +135,16 @@ static void arfs_del_rules(struct mlx5e_flow_steering *fs); int mlx5e_arfs_disable(struct mlx5e_flow_steering *fs) { + /* Moving to switchdev mode, fs->arfs is freed by mlx5e_nic_profile + * cleanup_rx callback and it is not recreated when + * mlx5e_uplink_rep_profile is loaded as mlx5e_create_flow_steering() + * is not called by the uplink_rep profile init_rx callback. Thus, if + * ntuple is set, moving to switchdev flow will enter this function + * with fs->arfs nullified. 
+ */ + if (!mlx5e_fs_get_arfs(fs)) + return 0; + arfs_del_rules(fs); return arfs_disable(fs); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index defb1efccb78..c27df14df145 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1036,7 +1036,23 @@ static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_s return err; } -static int mlx5e_rq_to_ready(struct mlx5e_rq *rq, int curr_state) +static void mlx5e_flush_rq_cq(struct mlx5e_rq *rq) +{ + struct mlx5_cqwq *cqwq = &rq->cq.wq; + struct mlx5_cqe64 *cqe; + + if (test_bit(MLX5E_RQ_STATE_MINI_CQE_ENHANCED, &rq->state)) { + while ((cqe = mlx5_cqwq_get_cqe_enahnced_comp(cqwq))) + mlx5_cqwq_pop(cqwq); + } else { + while ((cqe = mlx5_cqwq_get_cqe(cqwq))) + mlx5_cqwq_pop(cqwq); + } + + mlx5_cqwq_update_db_record(cqwq); +} + +int mlx5e_flush_rq(struct mlx5e_rq *rq, int curr_state) { struct net_device *dev = rq->netdev; int err; @@ -1046,6 +1062,10 @@ static int mlx5e_rq_to_ready(struct mlx5e_rq *rq, int curr_state) netdev_err(dev, "Failed to move rq 0x%x to reset\n", rq->rqn); return err; } + + mlx5e_free_rx_descs(rq); + mlx5e_flush_rq_cq(rq); + err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY); if (err) { netdev_err(dev, "Failed to move rq 0x%x to ready\n", rq->rqn); @@ -1055,13 +1075,6 @@ static int mlx5e_rq_to_ready(struct mlx5e_rq *rq, int curr_state) return 0; } -int mlx5e_flush_rq(struct mlx5e_rq *rq, int curr_state) -{ - mlx5e_free_rx_descs(rq); - - return mlx5e_rq_to_ready(rq, curr_state); -} - static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd) { struct mlx5_core_dev *mdev = rq->mdev; @@ -5253,6 +5266,7 @@ void mlx5e_destroy_q_counters(struct mlx5e_priv *priv) static int mlx5e_nic_init(struct mlx5_core_dev *mdev, struct net_device *netdev) { + const bool take_rtnl = netdev->reg_state == NETREG_REGISTERED; struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5e_flow_steering *fs; int err; @@ -5281,9 +5295,19 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev, mlx5_core_err(mdev, "TLS initialization failed, %d\n", err); mlx5e_health_create_reporters(priv); + + /* If netdev is already registered (e.g. move from uplink to nic profile), + * RTNL lock must be held before triggering netdev notifiers. 
+ */ + if (take_rtnl) + rtnl_lock(); + /* update XDP supported features */ mlx5e_set_xdp_feature(netdev); + if (take_rtnl) + rtnl_unlock(); + return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 152b62138450..99b3843396f3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -1012,7 +1012,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv) err = mlx5e_open_drop_rq(priv, &priv->drop_rq); if (err) { mlx5_core_err(mdev, "open drop rq failed, %d\n", err); - return err; + goto err_rx_res_free; } err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, 0, @@ -1046,6 +1046,7 @@ err_destroy_rx_res: mlx5e_rx_res_destroy(priv->rx_res); err_close_drop_rq: mlx5e_close_drop_rq(&priv->drop_rq); +err_rx_res_free: mlx5e_rx_res_free(priv->rx_res); priv->rx_res = NULL; err_free_fs: @@ -1159,6 +1160,10 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv) return err; } + err = mlx5e_rep_neigh_init(rpriv); + if (err) + goto err_neigh_init; + if (rpriv->rep->vport == MLX5_VPORT_UPLINK) { err = mlx5e_init_uplink_rep_tx(rpriv); if (err) @@ -1175,6 +1180,8 @@ err_ht_init: if (rpriv->rep->vport == MLX5_VPORT_UPLINK) mlx5e_cleanup_uplink_rep_tx(rpriv); err_init_tx: + mlx5e_rep_neigh_cleanup(rpriv); +err_neigh_init: mlx5e_destroy_tises(priv); return err; } @@ -1188,22 +1195,17 @@ static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv) if (rpriv->rep->vport == MLX5_VPORT_UPLINK) mlx5e_cleanup_uplink_rep_tx(rpriv); + mlx5e_rep_neigh_cleanup(rpriv); mlx5e_destroy_tises(priv); } static void mlx5e_rep_enable(struct mlx5e_priv *priv) { - struct mlx5e_rep_priv *rpriv = priv->ppriv; - mlx5e_set_netdev_mtu_boundaries(priv); - mlx5e_rep_neigh_init(rpriv); } static void mlx5e_rep_disable(struct mlx5e_priv *priv) { - struct mlx5e_rep_priv *rpriv = priv->ppriv; - - mlx5e_rep_neigh_cleanup(rpriv); } static int mlx5e_update_rep_rx(struct mlx5e_priv *priv) @@ -1253,7 +1255,6 @@ static int uplink_rep_async_event(struct notifier_block *nb, unsigned long event static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv) { - struct mlx5e_rep_priv *rpriv = priv->ppriv; struct net_device *netdev = priv->netdev; struct mlx5_core_dev *mdev = priv->mdev; u16 max_mtu; @@ -1275,7 +1276,6 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv) mlx5_notifier_register(mdev, &priv->events_nb); mlx5e_dcbnl_initialize(priv); mlx5e_dcbnl_init_app(priv); - mlx5e_rep_neigh_init(rpriv); mlx5e_rep_bridge_init(priv); netdev->wanted_features |= NETIF_F_HW_TC; @@ -1290,7 +1290,6 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv) static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv) { - struct mlx5e_rep_priv *rpriv = priv->ppriv; struct mlx5_core_dev *mdev = priv->mdev; rtnl_lock(); @@ -1300,7 +1299,6 @@ static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv) rtnl_unlock(); mlx5e_rep_bridge_cleanup(priv); - mlx5e_rep_neigh_cleanup(rpriv); mlx5e_dcbnl_delete_app(priv); mlx5_notifier_unregister(mdev, &priv->events_nb); mlx5e_rep_tc_disable(priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 8d0a3f69693e..31708d5aa608 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1725,6 +1725,19 @@ verify_attr_actions(u32 actions, struct netlink_ext_ack *extack) return 0; } +static bool +has_encap_dests(struct mlx5_flow_attr *attr) +{ + struct 
mlx5_esw_flow_attr *esw_attr = attr->esw_attr; + int out_index; + + for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) + if (esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) + return true; + + return false; +} + static int post_process_attr(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr, @@ -1737,9 +1750,11 @@ post_process_attr(struct mlx5e_tc_flow *flow, if (err) goto err_out; - err = mlx5e_tc_tun_encap_dests_set(flow->priv, flow, attr, extack, &vf_tun); - if (err) - goto err_out; + if (mlx5e_is_eswitch_flow(flow) && has_encap_dests(attr)) { + err = mlx5e_tc_tun_encap_dests_set(flow->priv, flow, attr, extack, &vf_tun); + if (err) + goto err_out; + } if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) { err = mlx5e_tc_attach_mod_hdr(flow->priv, flow, attr); @@ -1928,9 +1943,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv, { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5_flow_attr *attr = flow->attr; - struct mlx5_esw_flow_attr *esw_attr; - esw_attr = attr->esw_attr; mlx5e_put_flow_tunnel_id(flow); remove_unready_flow(flow); @@ -1951,12 +1964,6 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv, mlx5_tc_ct_match_del(get_ct_priv(priv), &flow->attr->ct_attr); - if (esw_attr->int_port) - mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(priv), esw_attr->int_port); - - if (esw_attr->dest_int_port) - mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(priv), esw_attr->dest_int_port); - if (flow_flag_test(flow, L3_TO_L2_DECAP)) mlx5e_detach_decap(priv, flow); @@ -4253,6 +4260,7 @@ static void mlx5_free_flow_attr_actions(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr) { struct mlx5_core_dev *counter_dev = get_flow_counter_dev(flow); + struct mlx5_esw_flow_attr *esw_attr; if (!attr) return; @@ -4270,6 +4278,18 @@ mlx5_free_flow_attr_actions(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *a mlx5e_tc_detach_mod_hdr(flow->priv, flow, attr); } + if (mlx5e_is_eswitch_flow(flow)) { + esw_attr = attr->esw_attr; + + if (esw_attr->int_port) + mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(flow->priv), + esw_attr->int_port); + + if (esw_attr->dest_int_port) + mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(flow->priv), + esw_attr->dest_int_port); + } + mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), attr); free_branch_attr(flow, attr->branch_true); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_debugfs.c index b6a45eff28f5..dbd7cbe6cbf3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_debugfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_debugfs.c @@ -64,7 +64,7 @@ void mlx5_esw_bridge_debugfs_init(struct net_device *br_netdev, struct mlx5_esw_ bridge->debugfs_dir = debugfs_create_dir(br_netdev->name, bridge->br_offloads->debugfs_root); - debugfs_create_file("fdb", 0444, bridge->debugfs_dir, bridge, + debugfs_create_file("fdb", 0400, bridge->debugfs_dir, bridge, &mlx5_esw_bridge_debugfs_fops); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c index af779c700278..fdf2be548e85 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c @@ -60,7 +60,7 @@ static struct devlink_port *mlx5_esw_dl_port_alloc(struct mlx5_eswitch *esw, u16 } else if (mlx5_core_is_ec_vf_vport(esw->dev, vport_num)) { memcpy(dl_port->attrs.switch_id.id, ppid.id, ppid.id_len); 
dl_port->attrs.switch_id.id_len = ppid.id_len; - devlink_port_attrs_pci_vf_set(dl_port, controller_num, pfnum, + devlink_port_attrs_pci_vf_set(dl_port, 0, pfnum, vport_num - 1, false); } return dl_port; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index bdfe609cc9ec..e59380ee1ead 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -1436,7 +1436,6 @@ esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb) esw_init_chains_offload_flags(esw, &attr.flags); attr.ns = MLX5_FLOW_NAMESPACE_FDB; - attr.fs_base_prio = FDB_TC_OFFLOAD; attr.max_grp_num = esw->params.large_group_num; attr.default_ft = miss_fdb; attr.mapping = esw->offloads.reg_c0_obj_pool; @@ -2779,9 +2778,9 @@ static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw, struct mlx5_eswitch *peer_esw, bool pair) { - u8 peer_idx = mlx5_get_dev_index(peer_esw->dev); + u16 peer_vhca_id = MLX5_CAP_GEN(peer_esw->dev, vhca_id); + u16 vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id); struct mlx5_flow_root_namespace *peer_ns; - u8 idx = mlx5_get_dev_index(esw->dev); struct mlx5_flow_root_namespace *ns; int err; @@ -2789,18 +2788,18 @@ static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw, ns = esw->dev->priv.steering->fdb_root_ns; if (pair) { - err = mlx5_flow_namespace_set_peer(ns, peer_ns, peer_idx); + err = mlx5_flow_namespace_set_peer(ns, peer_ns, peer_vhca_id); if (err) return err; - err = mlx5_flow_namespace_set_peer(peer_ns, ns, idx); + err = mlx5_flow_namespace_set_peer(peer_ns, ns, vhca_id); if (err) { - mlx5_flow_namespace_set_peer(ns, NULL, peer_idx); + mlx5_flow_namespace_set_peer(ns, NULL, peer_vhca_id); return err; } } else { - mlx5_flow_namespace_set_peer(ns, NULL, peer_idx); - mlx5_flow_namespace_set_peer(peer_ns, NULL, idx); + mlx5_flow_namespace_set_peer(ns, NULL, peer_vhca_id); + mlx5_flow_namespace_set_peer(peer_ns, NULL, vhca_id); } return 0; @@ -4196,7 +4195,7 @@ int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable, } hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability); - MLX5_SET(cmd_hca_cap_2, hca_caps, migratable, 1); + MLX5_SET(cmd_hca_cap_2, hca_caps, migratable, enable); err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport->vport, MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE2); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index 91dcb0dcad10..aab7059bf6e9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -140,7 +140,7 @@ static void mlx5_cmd_stub_modify_header_dealloc(struct mlx5_flow_root_namespace static int mlx5_cmd_stub_set_peer(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_root_namespace *peer_ns, - u8 peer_idx) + u16 peer_vhca_id) { return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h index b6b9a5a20591..7790ae5531e1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h @@ -94,7 +94,7 @@ struct mlx5_flow_cmds { int (*set_peer)(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_root_namespace *peer_ns, - u8 peer_idx); + u16 peer_vhca_id); int (*create_ns)(struct mlx5_flow_root_namespace *ns); int (*destroy_ns)(struct mlx5_flow_root_namespace *ns); diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 4ef04aa28771..6b069fa411c5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -889,7 +889,7 @@ static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root, struct fs_node *iter = list_entry(start, struct fs_node, list); struct mlx5_flow_table *ft = NULL; - if (!root || root->type == FS_TYPE_PRIO_CHAINS) + if (!root) return NULL; list_for_each_advance_continue(iter, &root->children, reverse) { @@ -905,20 +905,42 @@ static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root, return ft; } -/* If reverse is false then return the first flow table in next priority of - * prio in the tree, else return the last flow table in the previous priority - * of prio in the tree. +static struct fs_node *find_prio_chains_parent(struct fs_node *parent, + struct fs_node **child) +{ + struct fs_node *node = NULL; + + while (parent && parent->type != FS_TYPE_PRIO_CHAINS) { + node = parent; + parent = parent->parent; + } + + if (child) + *child = node; + + return parent; +} + +/* If reverse is false then return the first flow table next to the passed node + * in the tree, else return the last flow table before the node in the tree. + * If skip is true, skip the flow tables in the same prio_chains prio. */ -static struct mlx5_flow_table *find_closest_ft(struct fs_prio *prio, bool reverse) +static struct mlx5_flow_table *find_closest_ft(struct fs_node *node, bool reverse, + bool skip) { + struct fs_node *prio_chains_parent = NULL; struct mlx5_flow_table *ft = NULL; struct fs_node *curr_node; struct fs_node *parent; - parent = prio->node.parent; - curr_node = &prio->node; + if (skip) + prio_chains_parent = find_prio_chains_parent(node, NULL); + parent = node->parent; + curr_node = node; while (!ft && parent) { - ft = find_closest_ft_recursive(parent, &curr_node->list, reverse); + if (parent != prio_chains_parent) + ft = find_closest_ft_recursive(parent, &curr_node->list, + reverse); curr_node = parent; parent = curr_node->parent; } @@ -926,15 +948,15 @@ static struct mlx5_flow_table *find_closest_ft(struct fs_prio *prio, bool revers } /* Assuming all the tree is locked by mutex chain lock */ -static struct mlx5_flow_table *find_next_chained_ft(struct fs_prio *prio) +static struct mlx5_flow_table *find_next_chained_ft(struct fs_node *node) { - return find_closest_ft(prio, false); + return find_closest_ft(node, false, true); } /* Assuming all the tree is locked by mutex chain lock */ -static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio) +static struct mlx5_flow_table *find_prev_chained_ft(struct fs_node *node) { - return find_closest_ft(prio, true); + return find_closest_ft(node, true, true); } static struct mlx5_flow_table *find_next_fwd_ft(struct mlx5_flow_table *ft, @@ -946,7 +968,7 @@ static struct mlx5_flow_table *find_next_fwd_ft(struct mlx5_flow_table *ft, next_ns = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS; fs_get_obj(prio, next_ns ? 
ft->ns->node.parent : ft->node.parent); - return find_next_chained_ft(prio); + return find_next_chained_ft(&prio->node); } static int connect_fts_in_prio(struct mlx5_core_dev *dev, @@ -970,21 +992,55 @@ static int connect_fts_in_prio(struct mlx5_core_dev *dev, return 0; } +static struct mlx5_flow_table *find_closet_ft_prio_chains(struct fs_node *node, + struct fs_node *parent, + struct fs_node **child, + bool reverse) +{ + struct mlx5_flow_table *ft; + + ft = find_closest_ft(node, reverse, false); + + if (ft && parent == find_prio_chains_parent(&ft->node, child)) + return ft; + + return NULL; +} + /* Connect flow tables from previous priority of prio to ft */ static int connect_prev_fts(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft, struct fs_prio *prio) { + struct fs_node *prio_parent, *parent = NULL, *child, *node; struct mlx5_flow_table *prev_ft; + int err = 0; + + prio_parent = find_prio_chains_parent(&prio->node, &child); + + /* return directly if not under the first sub ns of prio_chains prio */ + if (prio_parent && !list_is_first(&child->list, &prio_parent->children)) + return 0; - prev_ft = find_prev_chained_ft(prio); - if (prev_ft) { + prev_ft = find_prev_chained_ft(&prio->node); + while (prev_ft) { struct fs_prio *prev_prio; fs_get_obj(prev_prio, prev_ft->node.parent); - return connect_fts_in_prio(dev, prev_prio, ft); + err = connect_fts_in_prio(dev, prev_prio, ft); + if (err) + break; + + if (!parent) { + parent = find_prio_chains_parent(&prev_prio->node, &child); + if (!parent) + break; + } + + node = child; + prev_ft = find_closet_ft_prio_chains(node, parent, &child, true); } - return 0; + return err; } static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio @@ -1123,7 +1179,7 @@ static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table if (err) return err; - next_ft = first_ft ? first_ft : find_next_chained_ft(prio); + next_ft = first_ft ? first_ft : find_next_chained_ft(&prio->node); err = connect_fwd_rules(dev, ft, next_ft); if (err) return err; @@ -1198,7 +1254,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table); next_ft = unmanaged ? 
ft_attr->next_ft : - find_next_chained_ft(fs_prio); + find_next_chained_ft(&fs_prio->node); ft->def_miss_action = ns->def_miss_action; ft->ns = ns; err = root->cmds->create_flow_table(root, ft, ft_attr, next_ft); @@ -2195,13 +2251,20 @@ EXPORT_SYMBOL(mlx5_del_flow_rules); /* Assuming prio->node.children(flow tables) is sorted by level */ static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft) { + struct fs_node *prio_parent, *child; struct fs_prio *prio; fs_get_obj(prio, ft->node.parent); if (!list_is_last(&ft->node.list, &prio->node.children)) return list_next_entry(ft, node.list); - return find_next_chained_ft(prio); + + prio_parent = find_prio_chains_parent(&prio->node, &child); + + if (prio_parent && list_is_first(&child->list, &prio_parent->children)) + return find_closest_ft(&prio->node, false, false); + + return find_next_chained_ft(&prio->node); } static int update_root_ft_destroy(struct mlx5_flow_table *ft) @@ -3621,7 +3684,7 @@ void mlx5_destroy_match_definer(struct mlx5_core_dev *dev, int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_root_namespace *peer_ns, - u8 peer_idx) + u16 peer_vhca_id) { if (peer_ns && ns->mode != peer_ns->mode) { mlx5_core_err(ns->dev, @@ -3629,7 +3692,7 @@ int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns, return -EINVAL; } - return ns->cmds->set_peer(ns, peer_ns, peer_idx); + return ns->cmds->set_peer(ns, peer_ns, peer_vhca_id); } /* This function should be called only at init stage of the namespace. diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index 03e64c4c245d..4aed1768b85f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -303,7 +303,7 @@ const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void); int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_root_namespace *peer_ns, - u8 peer_idx); + u16 peer_vhca_id); int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns, enum mlx5_flow_steering_mode mode); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c index d3a3fe4ce670..7d9bbb494d95 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c @@ -574,7 +574,7 @@ static int __mlx5_lag_modify_definers_destinations(struct mlx5_lag *ldev, for (i = 0; i < ldev->ports; i++) { for (j = 0; j < ldev->buckets; j++) { idx = i * ldev->buckets + j; - if (ldev->v2p_map[i] == ports[i]) + if (ldev->v2p_map[idx] == ports[idx]) continue; dest.vport.vhca_id = MLX5_CAP_GEN(ldev->pf[ports[idx] - 1].dev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c index 973babfaff25..377372f0578a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c @@ -227,10 +227,15 @@ static void mlx5_timestamp_overflow(struct work_struct *work) clock = container_of(timer, struct mlx5_clock, timer); mdev = container_of(clock, struct mlx5_core_dev, clock); + if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) + goto out; + write_seqlock_irqsave(&clock->lock, flags); timecounter_read(&timer->tc); mlx5_update_clock_info_page(mdev); write_sequnlock_irqrestore(&clock->lock, flags); + +out: schedule_delayed_work(&timer->overflow_work, timer->overflow_period); } diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c index db9df9798ffa..a80ecb672f33 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c @@ -178,7 +178,7 @@ mlx5_chains_create_table(struct mlx5_fs_chains *chains, if (!mlx5_chains_ignore_flow_level_supported(chains) || (chain == 0 && prio == 1 && level == 0)) { ft_attr.level = chains->fs_base_level; - ft_attr.prio = chains->fs_base_prio; + ft_attr.prio = chains->fs_base_prio + prio - 1; ns = (chains->ns == MLX5_FLOW_NAMESPACE_FDB) ? mlx5_get_fdb_sub_ns(chains->dev, chain) : mlx5_get_flow_namespace(chains->dev, chains->ns); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 88dbea6631d5..72ae560a1c68 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1506,6 +1506,7 @@ void mlx5_uninit_one(struct mlx5_core_dev *dev) if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) { mlx5_core_warn(dev, "%s: interface is down, NOP\n", __func__); + mlx5_devlink_params_unregister(priv_to_devlink(dev)); mlx5_cleanup_once(dev); goto out; } @@ -1988,7 +1989,7 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev, mlx5_enter_error_state(dev, false); mlx5_error_sw_reset(dev); - mlx5_unload_one(dev, true); + mlx5_unload_one(dev, false); mlx5_drain_health_wq(dev); mlx5_pci_disable_device(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index c4be257c043d..682d3dc00dd1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -361,7 +361,7 @@ static inline bool mlx5_core_is_ec_vf_vport(const struct mlx5_core_dev *dev, u16 static inline int mlx5_vport_to_func_id(const struct mlx5_core_dev *dev, u16 vport, bool ec_vf_func) { - return ec_vf_func ? vport - mlx5_core_ec_vf_vport_base(dev) + return ec_vf_func ? vport - mlx5_core_ec_vf_vport_base(dev) + 1 : vport; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c index 4e42a3b9b8ee..a2fc937d5461 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c @@ -285,8 +285,7 @@ static u16 mlx5_get_max_vfs(struct mlx5_core_dev *dev) host_total_vfs = MLX5_GET(query_esw_functions_out, out, host_params_context.host_total_vfs); kvfree(out); - if (host_total_vfs) - return host_total_vfs; + return host_total_vfs; } done: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c index e739ec6cdf90..54bb0866ed72 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c @@ -2079,7 +2079,7 @@ mlx5dr_action_create_dest_vport(struct mlx5dr_domain *dmn, peer_vport = vhca_id_valid && mlx5_core_is_pf(dmn->mdev) && (vhca_id != dmn->info.caps.gvmi); - vport_dmn = peer_vport ? dmn->peer_dmn[vhca_id] : dmn; + vport_dmn = peer_vport ? 
xa_load(&dmn->peer_dmn_xa, vhca_id) : dmn; if (!vport_dmn) { mlx5dr_dbg(dmn, "No peer vport domain for given vhca_id\n"); return NULL; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c index 7491911ebcb5..8c2a34a0d6be 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c @@ -564,11 +564,12 @@ int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev, err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out)); if (err) - return err; + goto err_free_in; *reformat_id = MLX5_GET(alloc_packet_reformat_context_out, out, packet_reformat_id); - kvfree(in); +err_free_in: + kvfree(in); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c index 75dc85dc24ef..3d74109f8230 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c @@ -475,6 +475,7 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type) mutex_init(&dmn->info.rx.mutex); mutex_init(&dmn->info.tx.mutex); xa_init(&dmn->definers_xa); + xa_init(&dmn->peer_dmn_xa); if (dr_domain_caps_init(mdev, dmn)) { mlx5dr_err(dmn, "Failed init domain, no caps\n"); @@ -507,6 +508,7 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type) uninit_caps: dr_domain_caps_uninit(dmn); def_xa_destroy: + xa_destroy(&dmn->peer_dmn_xa); xa_destroy(&dmn->definers_xa); kfree(dmn); return NULL; @@ -547,6 +549,7 @@ int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn) dr_domain_uninit_csum_recalc_fts(dmn); dr_domain_uninit_resources(dmn); dr_domain_caps_uninit(dmn); + xa_destroy(&dmn->peer_dmn_xa); xa_destroy(&dmn->definers_xa); mutex_destroy(&dmn->info.tx.mutex); mutex_destroy(&dmn->info.rx.mutex); @@ -556,17 +559,21 @@ int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn) void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn, struct mlx5dr_domain *peer_dmn, - u8 peer_idx) + u16 peer_vhca_id) { + struct mlx5dr_domain *peer; + mlx5dr_domain_lock(dmn); - if (dmn->peer_dmn[peer_idx]) - refcount_dec(&dmn->peer_dmn[peer_idx]->refcount); + peer = xa_load(&dmn->peer_dmn_xa, peer_vhca_id); + if (peer) + refcount_dec(&peer->refcount); - dmn->peer_dmn[peer_idx] = peer_dmn; + WARN_ON(xa_err(xa_store(&dmn->peer_dmn_xa, peer_vhca_id, peer_dmn, GFP_KERNEL))); - if (dmn->peer_dmn[peer_idx]) - refcount_inc(&dmn->peer_dmn[peer_idx]->refcount); + peer = xa_load(&dmn->peer_dmn_xa, peer_vhca_id); + if (peer) + refcount_inc(&peer->refcount); mlx5dr_domain_unlock(dmn); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c index d6947fe13d56..8ca534ef5d03 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c @@ -82,7 +82,7 @@ dr_ptrn_alloc_pattern(struct mlx5dr_ptrn_mgr *mgr, u32 chunk_size; u32 index; - chunk_size = ilog2(num_of_actions); + chunk_size = ilog2(roundup_pow_of_two(num_of_actions)); /* HW modify action index granularity is at least 64B */ chunk_size = max_t(u32, chunk_size, DR_CHUNK_SIZE_8); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c index 69d7a8f3c402..f708b029425a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c @@ -1652,17 +1652,18 @@ dr_ste_v0_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value, struct mlx5dr_domain *dmn = sb->dmn; struct mlx5dr_domain *vport_dmn; u8 *bit_mask = sb->bit_mask; + struct mlx5dr_domain *peer; bool source_gvmi_set; DR_STE_SET_TAG(src_gvmi_qp, tag, source_qp, misc, source_sqn); if (sb->vhca_id_valid) { + peer = xa_load(&dmn->peer_dmn_xa, id); /* Find port GVMI based on the eswitch_owner_vhca_id */ if (id == dmn->info.caps.gvmi) vport_dmn = dmn; - else if (id < MLX5_MAX_PORTS && dmn->peer_dmn[id] && - (id == dmn->peer_dmn[id]->info.caps.gvmi)) - vport_dmn = dmn->peer_dmn[id]; + else if (peer && (id == peer->info.caps.gvmi)) + vport_dmn = peer; else return -EINVAL; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c index f4ef0b22b991..dd856cde188d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c @@ -1984,16 +1984,17 @@ static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value, struct mlx5dr_domain *dmn = sb->dmn; struct mlx5dr_domain *vport_dmn; u8 *bit_mask = sb->bit_mask; + struct mlx5dr_domain *peer; DR_STE_SET_TAG(src_gvmi_qp_v1, tag, source_qp, misc, source_sqn); if (sb->vhca_id_valid) { + peer = xa_load(&dmn->peer_dmn_xa, id); /* Find port GVMI based on the eswitch_owner_vhca_id */ if (id == dmn->info.caps.gvmi) vport_dmn = dmn; - else if (id < MLX5_MAX_PORTS && dmn->peer_dmn[id] && - (id == dmn->peer_dmn[id]->info.caps.gvmi)) - vport_dmn = dmn->peer_dmn[id]; + else if (peer && (id == peer->info.caps.gvmi)) + vport_dmn = peer; else return -EINVAL; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h index 1622dbbe6b97..6c59de3e28f6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h @@ -935,7 +935,6 @@ struct mlx5dr_domain_info { }; struct mlx5dr_domain { - struct mlx5dr_domain *peer_dmn[MLX5_MAX_PORTS]; struct mlx5_core_dev *mdev; u32 pdn; struct mlx5_uars_page *uar; @@ -956,6 +955,7 @@ struct mlx5dr_domain { struct list_head dbg_tbl_list; struct mlx5dr_dbg_dump_info dump_info; struct xarray definers_xa; + struct xarray peer_dmn_xa; /* memory management statistics */ u32 num_buddies[DR_ICM_TYPE_MAX]; }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c index 6aac5f006bf8..feb307fb3440 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c @@ -781,14 +781,14 @@ restore_fte: static int mlx5_cmd_dr_set_peer(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_root_namespace *peer_ns, - u8 peer_idx) + u16 peer_vhca_id) { struct mlx5dr_domain *peer_domain = NULL; if (peer_ns) peer_domain = peer_ns->fs_dr_domain.dr_domain; mlx5dr_domain_set_peer(ns->fs_dr_domain.dr_domain, - peer_domain, peer_idx); + peer_domain, peer_vhca_id); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h index 24cbb33ecd6c..89fced86936f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h @@ -49,7 +49,7 @@ int mlx5dr_domain_sync(struct 
mlx5dr_domain *domain, u32 flags); void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn, struct mlx5dr_domain *peer_dmn, - u8 peer_idx); + u16 peer_vhca_id); struct mlx5dr_table * mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level, u32 flags, diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index a499e460594b..c2ad0921e893 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -8,6 +8,7 @@ #include <linux/ethtool.h> #include <linux/filter.h> #include <linux/mm.h> +#include <linux/pci.h> #include <net/checksum.h> #include <net/ip6_checksum.h> @@ -2345,9 +2346,12 @@ int mana_attach(struct net_device *ndev) static int mana_dealloc_queues(struct net_device *ndev) { struct mana_port_context *apc = netdev_priv(ndev); + unsigned long timeout = jiffies + 120 * HZ; struct gdma_dev *gd = apc->ac->gdma_dev; struct mana_txq *txq; + struct sk_buff *skb; int i, err; + u32 tsleep; if (apc->port_is_up) return -EINVAL; @@ -2363,15 +2367,40 @@ static int mana_dealloc_queues(struct net_device *ndev) * to false, but it doesn't matter since mana_start_xmit() drops any * new packets due to apc->port_is_up being false. * - * Drain all the in-flight TX packets + * Drain all the in-flight TX packets. + * A timeout of 120 seconds for all the queues is used. + * This will break the while loop when h/w is not responding. + * This value of 120 has been decided here considering max + * number of queues. */ + for (i = 0; i < apc->num_queues; i++) { txq = &apc->tx_qp[i].txq; - - while (atomic_read(&txq->pending_sends) > 0) - usleep_range(1000, 2000); + tsleep = 1000; + while (atomic_read(&txq->pending_sends) > 0 && + time_before(jiffies, timeout)) { + usleep_range(tsleep, tsleep + 1000); + tsleep <<= 1; + } + if (atomic_read(&txq->pending_sends)) { + err = pcie_flr(to_pci_dev(gd->gdma_context->dev)); + if (err) { + netdev_err(ndev, "flr failed %d with %d pkts pending in txq %u\n", + err, atomic_read(&txq->pending_sends), + txq->gdma_txq_id); + } + break; + } } + for (i = 0; i < apc->num_queues; i++) { + txq = &apc->tx_qp[i].txq; + while ((skb = skb_dequeue(&txq->pending_skbs))) { + mana_unmap_skb(skb, apc); + dev_kfree_skb_any(skb); + } + atomic_set(&txq->pending_sends, 0); + } /* We're 100% sure the queues can no longer be woken up, because * we're sure now mana_poll_tx_cq() can't be running. 
*/ diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index 612b0015dc43..432fb93aa801 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -1817,6 +1817,7 @@ static int ionic_change_mtu(struct net_device *netdev, int new_mtu) static void ionic_tx_timeout_work(struct work_struct *ws) { struct ionic_lif *lif = container_of(ws, struct ionic_lif, tx_timeout_work); + int err; if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) return; @@ -1829,8 +1830,11 @@ static void ionic_tx_timeout_work(struct work_struct *ws) mutex_lock(&lif->queue_lock); ionic_stop_queues_reconfig(lif); - ionic_start_queues_reconfig(lif); + err = ionic_start_queues_reconfig(lif); mutex_unlock(&lif->queue_lock); + + if (err) + dev_err(lif->ionic->dev, "%s: Restarting queues failed\n", __func__); } static void ionic_tx_timeout(struct net_device *netdev, unsigned int txqueue) @@ -2800,17 +2804,22 @@ static int ionic_cmb_reconfig(struct ionic_lif *lif, if (err) { dev_err(lif->ionic->dev, "CMB restore failed: %d\n", err); - goto errout; + goto err_out; } } - ionic_start_queues_reconfig(lif); - } else { - /* This was detached in ionic_stop_queues_reconfig() */ - netif_device_attach(lif->netdev); + err = ionic_start_queues_reconfig(lif); + if (err) { + dev_err(lif->ionic->dev, + "CMB reconfig failed: %d\n", err); + goto err_out; + } } -errout: +err_out: + /* This was detached in ionic_stop_queues_reconfig() */ + netif_device_attach(lif->netdev); + return err; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h index f8682356d0cf..94d4f9413ab7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h @@ -194,6 +194,22 @@ void qed_hw_remove(struct qed_dev *cdev); struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn); /** + * qed_ptt_acquire_context(): Allocate a PTT window honoring the context + * atomicity. + * + * @p_hwfn: HW device data. + * @is_atomic: Hint from the caller - if the func can sleep or not. + * + * Context: The function should not sleep in case is_atomic == true. + * Return: struct qed_ptt. + * + * Should be called at the entry point to the driver + * (at the beginning of an exported function). + */ +struct qed_ptt *qed_ptt_acquire_context(struct qed_hwfn *p_hwfn, + bool is_atomic); + +/** + * qed_ptt_release(): Release PTT Window. * * @p_hwfn: HW device data.
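The qed_ptt_acquire_context() kernel-doc above (and its implementation in the qed_hw.c hunk further down) follows a common pattern: callers that may run in atomic context poll with udelay(), while everyone else is allowed to sleep with usleep_range(). A rough sketch of that idea, with a made-up helper name and iteration counts loosely mirroring the QED_BAR_ACQUIRE_TIMEOUT_* constants in qed_hw.c:

#include <linux/delay.h>
#include <linux/types.h>

/* Illustrative only - not part of the patch. */
static bool example_poll_until_free(bool (*is_free)(void *ctx), void *ctx,
				    bool is_atomic)
{
	unsigned int i, count = is_atomic ? 100000 : 1000;

	for (i = 0; i < count; i++) {
		if (is_free(ctx))
			return true;

		if (is_atomic)
			udelay(10);			/* must not sleep */
		else
			usleep_range(1000, 2000);	/* sleeping is allowed */
	}

	return false;
}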
diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c index 3764190b948e..04602ac94708 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c +++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c @@ -693,13 +693,14 @@ static void _qed_fcoe_get_pstats(struct qed_hwfn *p_hwfn, } static int qed_fcoe_get_stats(struct qed_hwfn *p_hwfn, - struct qed_fcoe_stats *p_stats) + struct qed_fcoe_stats *p_stats, + bool is_atomic) { struct qed_ptt *p_ptt; memset(p_stats, 0, sizeof(*p_stats)); - p_ptt = qed_ptt_acquire(p_hwfn); + p_ptt = qed_ptt_acquire_context(p_hwfn, is_atomic); if (!p_ptt) { DP_ERR(p_hwfn, "Failed to acquire ptt\n"); @@ -973,19 +974,27 @@ static int qed_fcoe_destroy_conn(struct qed_dev *cdev, QED_SPQ_MODE_EBLOCK, NULL); } +static int qed_fcoe_stats_context(struct qed_dev *cdev, + struct qed_fcoe_stats *stats, + bool is_atomic) +{ + return qed_fcoe_get_stats(QED_AFFIN_HWFN(cdev), stats, is_atomic); +} + static int qed_fcoe_stats(struct qed_dev *cdev, struct qed_fcoe_stats *stats) { - return qed_fcoe_get_stats(QED_AFFIN_HWFN(cdev), stats); + return qed_fcoe_stats_context(cdev, stats, false); } void qed_get_protocol_stats_fcoe(struct qed_dev *cdev, - struct qed_mcp_fcoe_stats *stats) + struct qed_mcp_fcoe_stats *stats, + bool is_atomic) { struct qed_fcoe_stats proto_stats; /* Retrieve FW statistics */ memset(&proto_stats, 0, sizeof(proto_stats)); - if (qed_fcoe_stats(cdev, &proto_stats)) { + if (qed_fcoe_stats_context(cdev, &proto_stats, is_atomic)) { DP_VERBOSE(cdev, QED_MSG_STORAGE, "Failed to collect FCoE statistics\n"); return; diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.h b/drivers/net/ethernet/qlogic/qed/qed_fcoe.h index 19c85adf4ceb..214e8299ecb4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.h +++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.h @@ -28,8 +28,20 @@ int qed_fcoe_alloc(struct qed_hwfn *p_hwfn); void qed_fcoe_setup(struct qed_hwfn *p_hwfn); void qed_fcoe_free(struct qed_hwfn *p_hwfn); +/** + * qed_get_protocol_stats_fcoe(): Fills provided statistics + * struct with statistics. + * + * @cdev: Qed dev pointer. + * @stats: Points to struct that will be filled with statistics. + * @is_atomic: Hint from the caller - if the func can sleep or not. + * + * Context: The function should not sleep in case is_atomic == true. + * Return: Void. 
+ */ void qed_get_protocol_stats_fcoe(struct qed_dev *cdev, - struct qed_mcp_fcoe_stats *stats); + struct qed_mcp_fcoe_stats *stats, + bool is_atomic); #else /* CONFIG_QED_FCOE */ static inline int qed_fcoe_alloc(struct qed_hwfn *p_hwfn) { @@ -40,7 +52,8 @@ static inline void qed_fcoe_setup(struct qed_hwfn *p_hwfn) {} static inline void qed_fcoe_free(struct qed_hwfn *p_hwfn) {} static inline void qed_get_protocol_stats_fcoe(struct qed_dev *cdev, - struct qed_mcp_fcoe_stats *stats) + struct qed_mcp_fcoe_stats *stats, + bool is_atomic) { } #endif /* CONFIG_QED_FCOE */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c index 554f30b0cfd5..6263f847b6b9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.c +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c @@ -23,7 +23,10 @@ #include "qed_reg_addr.h" #include "qed_sriov.h" -#define QED_BAR_ACQUIRE_TIMEOUT 1000 +#define QED_BAR_ACQUIRE_TIMEOUT_USLEEP_CNT 1000 +#define QED_BAR_ACQUIRE_TIMEOUT_USLEEP 1000 +#define QED_BAR_ACQUIRE_TIMEOUT_UDELAY_CNT 100000 +#define QED_BAR_ACQUIRE_TIMEOUT_UDELAY 10 /* Invalid values */ #define QED_BAR_INVALID_OFFSET (cpu_to_le32(-1)) @@ -85,11 +88,21 @@ void qed_ptt_pool_free(struct qed_hwfn *p_hwfn) struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn) { + return qed_ptt_acquire_context(p_hwfn, false); +} + +struct qed_ptt *qed_ptt_acquire_context(struct qed_hwfn *p_hwfn, bool is_atomic) +{ struct qed_ptt *p_ptt; - unsigned int i; + unsigned int i, count; + + if (is_atomic) + count = QED_BAR_ACQUIRE_TIMEOUT_UDELAY_CNT; + else + count = QED_BAR_ACQUIRE_TIMEOUT_USLEEP_CNT; /* Take the free PTT from the list */ - for (i = 0; i < QED_BAR_ACQUIRE_TIMEOUT; i++) { + for (i = 0; i < count; i++) { spin_lock_bh(&p_hwfn->p_ptt_pool->lock); if (!list_empty(&p_hwfn->p_ptt_pool->free_list)) { @@ -105,7 +118,12 @@ struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn) } spin_unlock_bh(&p_hwfn->p_ptt_pool->lock); - usleep_range(1000, 2000); + + if (is_atomic) + udelay(QED_BAR_ACQUIRE_TIMEOUT_UDELAY); + else + usleep_range(QED_BAR_ACQUIRE_TIMEOUT_USLEEP, + QED_BAR_ACQUIRE_TIMEOUT_USLEEP * 2); } DP_NOTICE(p_hwfn, "PTT acquire timeout - failed to allocate PTT\n"); diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c index 511ab214eb9c..980e7289b481 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c @@ -999,13 +999,14 @@ static void _qed_iscsi_get_pstats(struct qed_hwfn *p_hwfn, } static int qed_iscsi_get_stats(struct qed_hwfn *p_hwfn, - struct qed_iscsi_stats *stats) + struct qed_iscsi_stats *stats, + bool is_atomic) { struct qed_ptt *p_ptt; memset(stats, 0, sizeof(*stats)); - p_ptt = qed_ptt_acquire(p_hwfn); + p_ptt = qed_ptt_acquire_context(p_hwfn, is_atomic); if (!p_ptt) { DP_ERR(p_hwfn, "Failed to acquire ptt\n"); return -EAGAIN; @@ -1336,9 +1337,16 @@ static int qed_iscsi_destroy_conn(struct qed_dev *cdev, QED_SPQ_MODE_EBLOCK, NULL); } +static int qed_iscsi_stats_context(struct qed_dev *cdev, + struct qed_iscsi_stats *stats, + bool is_atomic) +{ + return qed_iscsi_get_stats(QED_AFFIN_HWFN(cdev), stats, is_atomic); +} + static int qed_iscsi_stats(struct qed_dev *cdev, struct qed_iscsi_stats *stats) { - return qed_iscsi_get_stats(QED_AFFIN_HWFN(cdev), stats); + return qed_iscsi_stats_context(cdev, stats, false); } static int qed_iscsi_change_mac(struct qed_dev *cdev, @@ -1358,13 +1366,14 @@ static int qed_iscsi_change_mac(struct qed_dev *cdev, } void 
qed_get_protocol_stats_iscsi(struct qed_dev *cdev, - struct qed_mcp_iscsi_stats *stats) + struct qed_mcp_iscsi_stats *stats, + bool is_atomic) { struct qed_iscsi_stats proto_stats; /* Retrieve FW statistics */ memset(&proto_stats, 0, sizeof(proto_stats)); - if (qed_iscsi_stats(cdev, &proto_stats)) { + if (qed_iscsi_stats_context(cdev, &proto_stats, is_atomic)) { DP_VERBOSE(cdev, QED_MSG_STORAGE, "Failed to collect ISCSI statistics\n"); return; diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h index dec2b00259d4..974cb8d26608 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h @@ -39,11 +39,14 @@ void qed_iscsi_free(struct qed_hwfn *p_hwfn); * * @cdev: Qed dev pointer. * @stats: Points to struct that will be filled with statistics. + * @is_atomic: Hint from the caller - if the func can sleep or not. * + * Context: The function should not sleep in case is_atomic == true. * Return: Void. */ void qed_get_protocol_stats_iscsi(struct qed_dev *cdev, - struct qed_mcp_iscsi_stats *stats); + struct qed_mcp_iscsi_stats *stats, + bool is_atomic); #else /* IS_ENABLED(CONFIG_QED_ISCSI) */ static inline int qed_iscsi_alloc(struct qed_hwfn *p_hwfn) { @@ -56,7 +59,8 @@ static inline void qed_iscsi_free(struct qed_hwfn *p_hwfn) {} static inline void qed_get_protocol_stats_iscsi(struct qed_dev *cdev, - struct qed_mcp_iscsi_stats *stats) {} + struct qed_mcp_iscsi_stats *stats, + bool is_atomic) {} #endif /* IS_ENABLED(CONFIG_QED_ISCSI) */ #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 7776d3bdd459..970b9aabbc3d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -1863,7 +1863,8 @@ static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn, } static void _qed_get_vport_stats(struct qed_dev *cdev, - struct qed_eth_stats *stats) + struct qed_eth_stats *stats, + bool is_atomic) { u8 fw_vport = 0; int i; @@ -1872,10 +1873,11 @@ static void _qed_get_vport_stats(struct qed_dev *cdev, for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; - struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn) - : NULL; + struct qed_ptt *p_ptt; bool b_get_port_stats; + p_ptt = IS_PF(cdev) ? 
qed_ptt_acquire_context(p_hwfn, is_atomic) + : NULL; if (IS_PF(cdev)) { /* The main vport index is relative first */ if (qed_fw_vport(p_hwfn, 0, &fw_vport)) { @@ -1901,6 +1903,13 @@ out: void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats) { + qed_get_vport_stats_context(cdev, stats, false); +} + +void qed_get_vport_stats_context(struct qed_dev *cdev, + struct qed_eth_stats *stats, + bool is_atomic) +{ u32 i; if (!cdev || cdev->recov_in_prog) { @@ -1908,7 +1917,7 @@ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats) return; } - _qed_get_vport_stats(cdev, stats); + _qed_get_vport_stats(cdev, stats, is_atomic); if (!cdev->reset_stats) return; @@ -1960,7 +1969,7 @@ void qed_reset_vport_stats(struct qed_dev *cdev) if (!cdev->reset_stats) { DP_INFO(cdev, "Reset stats not allocated\n"); } else { - _qed_get_vport_stats(cdev, cdev->reset_stats); + _qed_get_vport_stats(cdev, cdev->reset_stats, false); cdev->reset_stats->common.link_change_count = 0; } } diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h index a538cf478c14..2d2f82c785ad 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h @@ -249,8 +249,32 @@ qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn, enum spq_mode comp_mode, struct qed_spq_comp_cb *p_comp_data); +/** + * qed_get_vport_stats(): Fills provided statistics + * struct with statistics. + * + * @cdev: Qed dev pointer. + * @stats: Points to struct that will be filled with statistics. + * + * Return: Void. + */ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats); +/** + * qed_get_vport_stats_context(): Fills provided statistics + * struct with statistics. + * + * @cdev: Qed dev pointer. + * @stats: Points to struct that will be filled with statistics. + * @is_atomic: Hint from the caller - if the func can sleep or not. + * + * Context: The function should not sleep in case is_atomic == true. + * Return: Void. 
+ */ +void qed_get_vport_stats_context(struct qed_dev *cdev, + struct qed_eth_stats *stats, + bool is_atomic); + void qed_reset_vport_stats(struct qed_dev *cdev); /** diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index f5af83342856..c278f8893042 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -3092,7 +3092,7 @@ void qed_get_protocol_stats(struct qed_dev *cdev, switch (type) { case QED_MCP_LAN_STATS: - qed_get_vport_stats(cdev, ð_stats); + qed_get_vport_stats_context(cdev, ð_stats, true); stats->lan_stats.ucast_rx_pkts = eth_stats.common.rx_ucast_pkts; stats->lan_stats.ucast_tx_pkts = @@ -3100,10 +3100,10 @@ void qed_get_protocol_stats(struct qed_dev *cdev, stats->lan_stats.fcs_err = -1; break; case QED_MCP_FCOE_STATS: - qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats); + qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats, true); break; case QED_MCP_ISCSI_STATS: - qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats); + qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats, true); break; default: DP_VERBOSE(cdev, QED_MSG_SP, diff --git a/drivers/net/ethernet/sfc/falcon/selftest.c b/drivers/net/ethernet/sfc/falcon/selftest.c index 9e5ce2a13787..cf1d67b6d86d 100644 --- a/drivers/net/ethernet/sfc/falcon/selftest.c +++ b/drivers/net/ethernet/sfc/falcon/selftest.c @@ -40,15 +40,16 @@ */ struct ef4_loopback_payload { char pad[2]; /* Ensures ip is 4-byte aligned */ - struct ethhdr header; - struct iphdr ip; - struct udphdr udp; - __be16 iteration; - char msg[64]; + struct_group_attr(packet, __packed, + struct ethhdr header; + struct iphdr ip; + struct udphdr udp; + __be16 iteration; + char msg[64]; + ); } __packed __aligned(4); -#define EF4_LOOPBACK_PAYLOAD_LEN (sizeof(struct ef4_loopback_payload) - \ - offsetof(struct ef4_loopback_payload, \ - header)) +#define EF4_LOOPBACK_PAYLOAD_LEN \ + sizeof_field(struct ef4_loopback_payload, packet) /* Loopback test source MAC address */ static const u8 payload_source[ETH_ALEN] __aligned(2) = { @@ -299,7 +300,7 @@ void ef4_loopback_rx_packet(struct ef4_nic *efx, payload = &state->payload; - memcpy(&received.header, buf_ptr, + memcpy(&received.packet, buf_ptr, min_t(int, pkt_len, EF4_LOOPBACK_PAYLOAD_LEN)); received.ip.saddr = payload->ip.saddr; if (state->offload_csum) @@ -370,7 +371,7 @@ void ef4_loopback_rx_packet(struct ef4_nic *efx, buf_ptr, pkt_len, 0); netif_err(efx, drv, efx->net_dev, "expected packet:\n"); print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1, - &state->payload.header, EF4_LOOPBACK_PAYLOAD_LEN, + &state->payload.packet, EF4_LOOPBACK_PAYLOAD_LEN, 0); } #endif @@ -440,6 +441,8 @@ static int ef4_begin_loopback(struct ef4_tx_queue *tx_queue) payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2)); /* Strip off the leading padding */ skb_pull(skb, offsetof(struct ef4_loopback_payload, header)); + /* Strip off the trailing padding */ + skb_trim(skb, EF4_LOOPBACK_PAYLOAD_LEN); /* Ensure everything we've written is visible to the * interrupt handler. 
*/ diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c index 96d856b9043c..19a0b8584afb 100644 --- a/drivers/net/ethernet/sfc/selftest.c +++ b/drivers/net/ethernet/sfc/selftest.c @@ -43,15 +43,16 @@ */ struct efx_loopback_payload { char pad[2]; /* Ensures ip is 4-byte aligned */ - struct ethhdr header; - struct iphdr ip; - struct udphdr udp; - __be16 iteration; - char msg[64]; + struct_group_attr(packet, __packed, + struct ethhdr header; + struct iphdr ip; + struct udphdr udp; + __be16 iteration; + char msg[64]; + ); } __packed __aligned(4); -#define EFX_LOOPBACK_PAYLOAD_LEN (sizeof(struct efx_loopback_payload) - \ - offsetof(struct efx_loopback_payload, \ - header)) +#define EFX_LOOPBACK_PAYLOAD_LEN \ + sizeof_field(struct efx_loopback_payload, packet) /* Loopback test source MAC address */ static const u8 payload_source[ETH_ALEN] __aligned(2) = { @@ -297,7 +298,7 @@ void efx_loopback_rx_packet(struct efx_nic *efx, payload = &state->payload; - memcpy(&received.header, buf_ptr, + memcpy(&received.packet, buf_ptr, min_t(int, pkt_len, EFX_LOOPBACK_PAYLOAD_LEN)); received.ip.saddr = payload->ip.saddr; if (state->offload_csum) @@ -368,7 +369,7 @@ void efx_loopback_rx_packet(struct efx_nic *efx, buf_ptr, pkt_len, 0); netif_err(efx, drv, efx->net_dev, "expected packet:\n"); print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1, - &state->payload.header, EFX_LOOPBACK_PAYLOAD_LEN, + &state->payload.packet, EFX_LOOPBACK_PAYLOAD_LEN, 0); } #endif @@ -438,6 +439,8 @@ static int efx_begin_loopback(struct efx_tx_queue *tx_queue) payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2)); /* Strip off the leading padding */ skb_pull(skb, offsetof(struct efx_loopback_payload, header)); + /* Strip off the trailing padding */ + skb_trim(skb, EFX_LOOPBACK_PAYLOAD_LEN); /* Ensure everything we've written is visible to the * interrupt handler. 
*/ diff --git a/drivers/net/ethernet/sfc/siena/selftest.c b/drivers/net/ethernet/sfc/siena/selftest.c index 111ac17194a5..b55fd3346972 100644 --- a/drivers/net/ethernet/sfc/siena/selftest.c +++ b/drivers/net/ethernet/sfc/siena/selftest.c @@ -43,15 +43,16 @@ */ struct efx_loopback_payload { char pad[2]; /* Ensures ip is 4-byte aligned */ - struct ethhdr header; - struct iphdr ip; - struct udphdr udp; - __be16 iteration; - char msg[64]; + struct_group_attr(packet, __packed, + struct ethhdr header; + struct iphdr ip; + struct udphdr udp; + __be16 iteration; + char msg[64]; + ); } __packed __aligned(4); -#define EFX_LOOPBACK_PAYLOAD_LEN (sizeof(struct efx_loopback_payload) - \ - offsetof(struct efx_loopback_payload, \ - header)) +#define EFX_LOOPBACK_PAYLOAD_LEN \ + sizeof_field(struct efx_loopback_payload, packet) /* Loopback test source MAC address */ static const u8 payload_source[ETH_ALEN] __aligned(2) = { @@ -297,7 +298,7 @@ void efx_siena_loopback_rx_packet(struct efx_nic *efx, payload = &state->payload; - memcpy(&received.header, buf_ptr, + memcpy(&received.packet, buf_ptr, min_t(int, pkt_len, EFX_LOOPBACK_PAYLOAD_LEN)); received.ip.saddr = payload->ip.saddr; if (state->offload_csum) @@ -368,7 +369,7 @@ void efx_siena_loopback_rx_packet(struct efx_nic *efx, buf_ptr, pkt_len, 0); netif_err(efx, drv, efx->net_dev, "expected packet:\n"); print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1, - &state->payload.header, EFX_LOOPBACK_PAYLOAD_LEN, + &state->payload.packet, EFX_LOOPBACK_PAYLOAD_LEN, 0); } #endif @@ -438,6 +439,8 @@ static int efx_begin_loopback(struct efx_tx_queue *tx_queue) payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2)); /* Strip off the leading padding */ skb_pull(skb, offsetof(struct efx_loopback_payload, header)); + /* Strip off the trailing padding */ + skb_trim(skb, EFX_LOOPBACK_PAYLOAD_LEN); /* Ensure everything we've written is visible to the * interrupt handler. */ diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index 2d7347b71c41..0dcd6a568b06 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c @@ -1851,6 +1851,17 @@ static int netsec_of_probe(struct platform_device *pdev, return err; } + /* + * SynQuacer is physically configured with TX and RX delays + * but the standard firmware claimed otherwise for a long + * time, ignore it. 
+ */ + if (of_machine_is_compatible("socionext,developer-box") && + priv->phy_interface != PHY_INTERFACE_MODE_RGMII_ID) { + dev_warn(&pdev->dev, "Outdated firmware reports incorrect PHY mode, overriding\n"); + priv->phy_interface = PHY_INTERFACE_MODE_RGMII_ID; + } + priv->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); if (!priv->phy_np) { dev_err(&pdev->dev, "missing required property 'phy-handle'\n"); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c index f8367c5b490b..fbb0ccf84afc 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c @@ -234,7 +234,8 @@ static int tegra_mgbe_probe(struct platform_device *pdev) res.addr = mgbe->regs; res.irq = irq; - mgbe->clks = devm_kzalloc(&pdev->dev, sizeof(*mgbe->clks), GFP_KERNEL); + mgbe->clks = devm_kcalloc(&pdev->dev, ARRAY_SIZE(mgbe_clks), + sizeof(*mgbe->clks), GFP_KERNEL); if (!mgbe->clks) return -ENOMEM; diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index e0ac1bcd9925..49f303353ecb 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -1567,12 +1567,16 @@ static int temac_probe(struct platform_device *pdev) } /* Error handle returned DMA RX and TX interrupts */ - if (lp->rx_irq < 0) - return dev_err_probe(&pdev->dev, lp->rx_irq, + if (lp->rx_irq <= 0) { + rc = lp->rx_irq ?: -EINVAL; + return dev_err_probe(&pdev->dev, rc, "could not get DMA RX irq\n"); - if (lp->tx_irq < 0) - return dev_err_probe(&pdev->dev, lp->tx_irq, + } + if (lp->tx_irq <= 0) { + rc = lp->tx_irq ?: -EINVAL; + return dev_err_probe(&pdev->dev, rc, "could not get DMA TX irq\n"); + } if (temac_np) { /* Retrieve the MAC address */ diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 984dfa5d6c11..144ec756c796 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -743,7 +743,7 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u u64_stats_update_begin(&rxsc_stats->syncp); rxsc_stats->stats.InPktsLate++; u64_stats_update_end(&rxsc_stats->syncp); - secy->netdev->stats.rx_dropped++; + DEV_STATS_INC(secy->netdev, rx_dropped); return false; } @@ -767,7 +767,7 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u rxsc_stats->stats.InPktsNotValid++; u64_stats_update_end(&rxsc_stats->syncp); this_cpu_inc(rx_sa->stats->InPktsNotValid); - secy->netdev->stats.rx_errors++; + DEV_STATS_INC(secy->netdev, rx_errors); return false; } @@ -1069,7 +1069,7 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb) u64_stats_update_begin(&secy_stats->syncp); secy_stats->stats.InPktsNoTag++; u64_stats_update_end(&secy_stats->syncp); - macsec->secy.netdev->stats.rx_dropped++; + DEV_STATS_INC(macsec->secy.netdev, rx_dropped); continue; } @@ -1179,7 +1179,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) u64_stats_update_begin(&secy_stats->syncp); secy_stats->stats.InPktsBadTag++; u64_stats_update_end(&secy_stats->syncp); - secy->netdev->stats.rx_errors++; + DEV_STATS_INC(secy->netdev, rx_errors); goto drop_nosa; } @@ -1196,7 +1196,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) u64_stats_update_begin(&rxsc_stats->syncp); rxsc_stats->stats.InPktsNotUsingSA++; u64_stats_update_end(&rxsc_stats->syncp); - secy->netdev->stats.rx_errors++; + DEV_STATS_INC(secy->netdev, rx_errors); if (active_rx_sa) 
this_cpu_inc(active_rx_sa->stats->InPktsNotUsingSA); goto drop_nosa; @@ -1230,7 +1230,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) u64_stats_update_begin(&rxsc_stats->syncp); rxsc_stats->stats.InPktsLate++; u64_stats_update_end(&rxsc_stats->syncp); - macsec->secy.netdev->stats.rx_dropped++; + DEV_STATS_INC(macsec->secy.netdev, rx_dropped); goto drop; } } @@ -1271,7 +1271,7 @@ deliver: if (ret == NET_RX_SUCCESS) count_rx(dev, len); else - macsec->secy.netdev->stats.rx_dropped++; + DEV_STATS_INC(macsec->secy.netdev, rx_dropped); rcu_read_unlock(); @@ -1308,7 +1308,7 @@ nosci: u64_stats_update_begin(&secy_stats->syncp); secy_stats->stats.InPktsNoSCI++; u64_stats_update_end(&secy_stats->syncp); - macsec->secy.netdev->stats.rx_errors++; + DEV_STATS_INC(macsec->secy.netdev, rx_errors); continue; } @@ -1327,7 +1327,7 @@ nosci: secy_stats->stats.InPktsUnknownSCI++; u64_stats_update_end(&secy_stats->syncp); } else { - macsec->secy.netdev->stats.rx_dropped++; + DEV_STATS_INC(macsec->secy.netdev, rx_dropped); } } @@ -3422,7 +3422,7 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb, if (!secy->operational) { kfree_skb(skb); - dev->stats.tx_dropped++; + DEV_STATS_INC(dev, tx_dropped); return NETDEV_TX_OK; } @@ -3430,7 +3430,7 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb, skb = macsec_encrypt(skb, dev); if (IS_ERR(skb)) { if (PTR_ERR(skb) != -EINPROGRESS) - dev->stats.tx_dropped++; + DEV_STATS_INC(dev, tx_dropped); return NETDEV_TX_OK; } @@ -3667,9 +3667,9 @@ static void macsec_get_stats64(struct net_device *dev, dev_fetch_sw_netstats(s, dev->tstats); - s->rx_dropped = dev->stats.rx_dropped; - s->tx_dropped = dev->stats.tx_dropped; - s->rx_errors = dev->stats.rx_errors; + s->rx_dropped = atomic_long_read(&dev->stats.__rx_dropped); + s->tx_dropped = atomic_long_read(&dev->stats.__tx_dropped); + s->rx_errors = atomic_long_read(&dev->stats.__rx_errors); } static int macsec_get_iflink(const struct net_device *dev) diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index c1f307d90518..8a77ec33b417 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c @@ -459,21 +459,27 @@ static int at803x_set_wol(struct phy_device *phydev, phy_write_mmd(phydev, MDIO_MMD_PCS, offsets[i], mac[(i * 2) + 1] | (mac[(i * 2)] << 8)); - /* Enable WOL function */ - ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, AT803X_PHY_MMD3_WOL_CTRL, - 0, AT803X_WOL_EN); - if (ret) - return ret; + /* Enable WOL function for 1588 */ + if (phydev->drv->phy_id == ATH8031_PHY_ID) { + ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, + AT803X_PHY_MMD3_WOL_CTRL, + 0, AT803X_WOL_EN); + if (ret) + return ret; + } /* Enable WOL interrupt */ ret = phy_modify(phydev, AT803X_INTR_ENABLE, 0, AT803X_INTR_ENABLE_WOL); if (ret) return ret; } else { - /* Disable WoL function */ - ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, AT803X_PHY_MMD3_WOL_CTRL, - AT803X_WOL_EN, 0); - if (ret) - return ret; + /* Disable WoL function for 1588 */ + if (phydev->drv->phy_id == ATH8031_PHY_ID) { + ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, + AT803X_PHY_MMD3_WOL_CTRL, + AT803X_WOL_EN, 0); + if (ret) + return ret; + } /* Disable WOL interrupt */ ret = phy_modify(phydev, AT803X_INTR_ENABLE, AT803X_INTR_ENABLE_WOL, 0); if (ret) @@ -508,11 +514,11 @@ static void at803x_get_wol(struct phy_device *phydev, wol->supported = WAKE_MAGIC; wol->wolopts = 0; - value = phy_read_mmd(phydev, MDIO_MMD_PCS, AT803X_PHY_MMD3_WOL_CTRL); + value = phy_read(phydev, AT803X_INTR_ENABLE); if (value < 0) return; - if (value & 
AT803X_WOL_EN) + if (value & AT803X_INTR_ENABLE_WOL) wol->wolopts |= WAKE_MAGIC; } @@ -858,9 +864,6 @@ static int at803x_probe(struct phy_device *phydev) if (phydev->drv->phy_id == ATH8031_PHY_ID) { int ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG); int mode_cfg; - struct ethtool_wolinfo wol = { - .wolopts = 0, - }; if (ccr < 0) return ccr; @@ -877,12 +880,14 @@ static int at803x_probe(struct phy_device *phydev) break; } - /* Disable WOL by default */ - ret = at803x_set_wol(phydev, &wol); - if (ret < 0) { - phydev_err(phydev, "failed to disable WOL on probe: %d\n", ret); + /* Disable WoL in 1588 register which is enabled + * by default + */ + ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, + AT803X_PHY_MMD3_WOL_CTRL, + AT803X_WOL_EN, 0); + if (ret) return ret; - } } return 0; @@ -2059,8 +2064,6 @@ static struct phy_driver at803x_driver[] = { .flags = PHY_POLL_CABLE_TEST, .config_init = at803x_config_init, .link_change_notify = at803x_link_change_notify, - .set_wol = at803x_set_wol, - .get_wol = at803x_get_wol, .suspend = at803x_suspend, .resume = at803x_resume, /* PHY_BASIC_FEATURES */ diff --git a/drivers/net/tap.c b/drivers/net/tap.c index 9137fb8c1c42..49d1d6acf95e 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -534,7 +534,7 @@ static int tap_open(struct inode *inode, struct file *file) q->sock.state = SS_CONNECTED; q->sock.file = file; q->sock.ops = &tap_socket_ops; - sock_init_data_uid(&q->sock, &q->sk, inode->i_uid); + sock_init_data_uid(&q->sock, &q->sk, current_fsuid()); q->sk.sk_write_space = tap_sock_write_space; q->sk.sk_destruct = tap_sock_destruct; q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index d75456adc62a..100339bc8b04 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1594,7 +1594,7 @@ static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile, if (zerocopy) return false; - if (SKB_DATA_ALIGN(len + TUN_RX_PAD) + + if (SKB_DATA_ALIGN(len + TUN_RX_PAD + XDP_PACKET_HEADROOM) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE) return false; @@ -3469,7 +3469,7 @@ static int tun_chr_open(struct inode *inode, struct file * file) tfile->socket.file = file; tfile->socket.ops = &tun_socket_ops; - sock_init_data_uid(&tfile->socket, &tfile->sk, inode->i_uid); + sock_init_data_uid(&tfile->socket, &tfile->sk, current_fsuid()); tfile->sk.sk_write_space = tun_sock_write_space; tfile->sk.sk_sndbuf = INT_MAX; diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index c00a89b24df9..6d61052353f0 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -618,6 +618,13 @@ static const struct usb_device_id products[] = { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x04DD, + .idProduct = 0x8005, /* A-300 */ + ZAURUS_FAKE_INTERFACE, + .driver_info = 0, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x04DD, .idProduct = 0x8006, /* B-500/SL-5600 */ ZAURUS_MASTER_INTERFACE, .driver_info = 0, @@ -625,11 +632,25 @@ static const struct usb_device_id products[] = { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x04DD, + .idProduct = 0x8006, /* B-500/SL-5600 */ + ZAURUS_FAKE_INTERFACE, + .driver_info = 0, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x04DD, .idProduct = 0x8007, /* C-700 */ ZAURUS_MASTER_INTERFACE, .driver_info = 0, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | 
USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x04DD, + .idProduct = 0x8007, /* C-700 */ + ZAURUS_FAKE_INTERFACE, + .driver_info = 0, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x04DD, .idProduct = 0x9031, /* C-750 C-760 */ diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index c458c030fadf..59cde06aa7f6 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -4224,8 +4224,6 @@ static void lan78xx_disconnect(struct usb_interface *intf) if (!dev) return; - set_bit(EVENT_DEV_DISCONNECT, &dev->flags); - netif_napi_del(&dev->napi); udev = interface_to_usbdev(intf); @@ -4233,6 +4231,8 @@ static void lan78xx_disconnect(struct usb_interface *intf) unregister_netdev(net); + timer_shutdown_sync(&dev->stat_monitor); + set_bit(EVENT_DEV_DISCONNECT, &dev->flags); cancel_delayed_work_sync(&dev->wq); phydev = net->phydev; @@ -4247,9 +4247,6 @@ static void lan78xx_disconnect(struct usb_interface *intf) usb_scuttle_anchored_urbs(&dev->deferred); - if (timer_pending(&dev->stat_monitor)) - del_timer_sync(&dev->stat_monitor); - lan78xx_unbind(dev, intf); lan78xx_free_tx_resources(dev); diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 417f7ea1fffa..344af3c5c836 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1423,6 +1423,7 @@ static const struct usb_device_id products[] = { {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0195, 4)}, /* Quectel EG95 */ {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */ + {QMI_QUIRK_SET_DTR(0x2c7c, 0x030e, 4)}, /* Quectel EM05GV2 */ {QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)}, /* Fibocom NL678 series */ {QMI_FIXED_INTF(0x0489, 0xe0b4, 0)}, /* Foxconn T77W968 LTE */ {QMI_FIXED_INTF(0x0489, 0xe0b5, 0)}, /* Foxconn T77W968 LTE with eSIM support*/ diff --git a/drivers/net/usb/zaurus.c b/drivers/net/usb/zaurus.c index 7984f2157d22..df3617c4c44e 100644 --- a/drivers/net/usb/zaurus.c +++ b/drivers/net/usb/zaurus.c @@ -289,11 +289,25 @@ static const struct usb_device_id products [] = { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x04DD, + .idProduct = 0x8005, /* A-300 */ + ZAURUS_FAKE_INTERFACE, + .driver_info = (unsigned long)&bogus_mdlm_info, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x04DD, .idProduct = 0x8006, /* B-500/SL-5600 */ ZAURUS_MASTER_INTERFACE, .driver_info = ZAURUS_PXA_INFO, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x04DD, + .idProduct = 0x8006, /* B-500/SL-5600 */ + ZAURUS_FAKE_INTERFACE, + .driver_info = (unsigned long)&bogus_mdlm_info, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x04DD, .idProduct = 0x8007, /* C-700 */ @@ -301,6 +315,13 @@ static const struct usb_device_id products [] = { .driver_info = ZAURUS_PXA_INFO, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x04DD, + .idProduct = 0x8007, /* C-700 */ + ZAURUS_FAKE_INTERFACE, + .driver_info = (unsigned long)&bogus_mdlm_info, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x04DD, .idProduct = 0x9031, /* C-750 C-760 */ diff --git a/drivers/net/vxlan/vxlan_vnifilter.c b/drivers/net/vxlan/vxlan_vnifilter.c index a3de081cda5e..c3ff30ab782e 100644 --- a/drivers/net/vxlan/vxlan_vnifilter.c +++ b/drivers/net/vxlan/vxlan_vnifilter.c @@ -713,6 +713,12 @@ 
static struct vxlan_vni_node *vxlan_vni_alloc(struct vxlan_dev *vxlan, return vninode; } +static void vxlan_vni_free(struct vxlan_vni_node *vninode) +{ + free_percpu(vninode->stats); + kfree(vninode); +} + static int vxlan_vni_add(struct vxlan_dev *vxlan, struct vxlan_vni_group *vg, u32 vni, union vxlan_addr *group, @@ -740,7 +746,7 @@ static int vxlan_vni_add(struct vxlan_dev *vxlan, &vninode->vnode, vxlan_vni_rht_params); if (err) { - kfree(vninode); + vxlan_vni_free(vninode); return err; } @@ -763,8 +769,7 @@ static void vxlan_vni_node_rcu_free(struct rcu_head *rcu) struct vxlan_vni_node *v; v = container_of(rcu, struct vxlan_vni_node, rcu); - free_percpu(v->stats); - kfree(v); + vxlan_vni_free(v); } static int vxlan_vni_del(struct vxlan_dev *vxlan, diff --git a/drivers/net/wireguard/allowedips.c b/drivers/net/wireguard/allowedips.c index 5bf7822c53f1..0ba714ca5185 100644 --- a/drivers/net/wireguard/allowedips.c +++ b/drivers/net/wireguard/allowedips.c @@ -6,7 +6,7 @@ #include "allowedips.h" #include "peer.h" -enum { MAX_ALLOWEDIPS_BITS = 128 }; +enum { MAX_ALLOWEDIPS_DEPTH = 129 }; static struct kmem_cache *node_cache; @@ -42,7 +42,7 @@ static void push_rcu(struct allowedips_node **stack, struct allowedips_node __rcu *p, unsigned int *len) { if (rcu_access_pointer(p)) { - if (WARN_ON(IS_ENABLED(DEBUG) && *len >= MAX_ALLOWEDIPS_BITS)) + if (WARN_ON(IS_ENABLED(DEBUG) && *len >= MAX_ALLOWEDIPS_DEPTH)) return; stack[(*len)++] = rcu_dereference_raw(p); } @@ -55,7 +55,7 @@ static void node_free_rcu(struct rcu_head *rcu) static void root_free_rcu(struct rcu_head *rcu) { - struct allowedips_node *node, *stack[MAX_ALLOWEDIPS_BITS] = { + struct allowedips_node *node, *stack[MAX_ALLOWEDIPS_DEPTH] = { container_of(rcu, struct allowedips_node, rcu) }; unsigned int len = 1; @@ -68,7 +68,7 @@ static void root_free_rcu(struct rcu_head *rcu) static void root_remove_peer_lists(struct allowedips_node *root) { - struct allowedips_node *node, *stack[MAX_ALLOWEDIPS_BITS] = { root }; + struct allowedips_node *node, *stack[MAX_ALLOWEDIPS_DEPTH] = { root }; unsigned int len = 1; while (len > 0 && (node = stack[--len])) { diff --git a/drivers/net/wireguard/selftest/allowedips.c b/drivers/net/wireguard/selftest/allowedips.c index 78ebe2892a78..3d1f64ff2e12 100644 --- a/drivers/net/wireguard/selftest/allowedips.c +++ b/drivers/net/wireguard/selftest/allowedips.c @@ -593,16 +593,20 @@ bool __init wg_allowedips_selftest(void) wg_allowedips_remove_by_peer(&t, a, &mutex); test_negative(4, a, 192, 168, 0, 1); - /* These will hit the WARN_ON(len >= MAX_ALLOWEDIPS_BITS) in free_node + /* These will hit the WARN_ON(len >= MAX_ALLOWEDIPS_DEPTH) in free_node * if something goes wrong. 
*/ - for (i = 0; i < MAX_ALLOWEDIPS_BITS; ++i) { - part = cpu_to_be64(~(1LLU << (i % 64))); - memset(&ip, 0xff, 16); - memcpy((u8 *)&ip + (i < 64) * 8, &part, 8); + for (i = 0; i < 64; ++i) { + part = cpu_to_be64(~0LLU << i); + memset(&ip, 0xff, 8); + memcpy((u8 *)&ip + 8, &part, 8); + wg_allowedips_insert_v6(&t, &ip, 128, a, &mutex); + memcpy(&ip, &part, 8); + memset((u8 *)&ip + 8, 0, 8); wg_allowedips_insert_v6(&t, &ip, 128, a, &mutex); } - + memset(&ip, 0, 16); + wg_allowedips_insert_v6(&t, &ip, 128, a, &mutex); wg_allowedips_free(&t, &mutex); wg_allowedips_init(&t); diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c index 1cebba7889d7..139da578831a 100644 --- a/drivers/net/wireless/ath/ath11k/ahb.c +++ b/drivers/net/wireless/ath/ath11k/ahb.c @@ -376,7 +376,6 @@ static void ath11k_ahb_ext_irq_enable(struct ath11k_base *ab) struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; if (!irq_grp->napi_enabled) { - dev_set_threaded(&irq_grp->napi_ndev, true); napi_enable(&irq_grp->napi); irq_grp->napi_enabled = true; } diff --git a/drivers/net/wireless/ath/ath11k/pcic.c b/drivers/net/wireless/ath/ath11k/pcic.c index c899616fbee4..c63083633b37 100644 --- a/drivers/net/wireless/ath/ath11k/pcic.c +++ b/drivers/net/wireless/ath/ath11k/pcic.c @@ -466,7 +466,6 @@ void ath11k_pcic_ext_irq_enable(struct ath11k_base *ab) struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; if (!irq_grp->napi_enabled) { - dev_set_threaded(&irq_grp->napi_ndev, true); napi_enable(&irq_grp->napi); irq_grp->napi_enabled = true; } diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c index 6512267ae4ca..4928e4e91660 100644 --- a/drivers/net/wireless/ath/ath12k/wmi.c +++ b/drivers/net/wireless/ath/ath12k/wmi.c @@ -2144,8 +2144,7 @@ int ath12k_wmi_send_scan_start_cmd(struct ath12k *ar, struct wmi_tlv *tlv; void *ptr; int i, ret, len; - u32 *tmp_ptr; - u8 extraie_len_with_pad = 0; + u32 *tmp_ptr, extraie_len_with_pad = 0; struct ath12k_wmi_hint_short_ssid_arg *s_ssid = NULL; struct ath12k_wmi_hint_bssid_arg *hint_bssid = NULL; diff --git a/drivers/net/wireless/ath/ath6kl/Makefile b/drivers/net/wireless/ath/ath6kl/Makefile index a75bfa9fd1cf..dc2b3b46781e 100644 --- a/drivers/net/wireless/ath/ath6kl/Makefile +++ b/drivers/net/wireless/ath/ath6kl/Makefile @@ -36,11 +36,6 @@ ath6kl_core-y += wmi.o ath6kl_core-y += core.o ath6kl_core-y += recovery.o -# FIXME: temporarily silence -Wdangling-pointer on non W=1+ builds -ifndef KBUILD_EXTRA_WARN -CFLAGS_htc_mbox.o += $(call cc-disable-warning, dangling-pointer) -endif - ath6kl_core-$(CONFIG_NL80211_TESTMODE) += testmode.o ath6kl_core-$(CONFIG_ATH6KL_TRACING) += trace.o diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index de8a2e27f49c..2a90bb24ba77 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -1456,6 +1456,10 @@ brcmf_run_escan(struct brcmf_cfg80211_info *cfg, struct brcmf_if *ifp, params_size -= BRCMF_SCAN_PARAMS_V2_FIXED_SIZE; params_size += BRCMF_SCAN_PARAMS_FIXED_SIZE; params_v1 = kzalloc(params_size, GFP_KERNEL); + if (!params_v1) { + err = -ENOMEM; + goto exit_params; + } params_v1->version = cpu_to_le32(BRCMF_ESCAN_REQ_VERSION); brcmf_scan_params_v2_to_v1(¶ms->params_v2_le, ¶ms_v1->params_le); kfree(params); @@ -1473,6 +1477,7 @@ brcmf_run_escan(struct brcmf_cfg80211_info *cfg, struct brcmf_if *ifp, bphy_err(drvr, 
"error (%d)\n", err); } +exit_params: kfree(params); exit: return err; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h index 792adaf880b4..bece26741d3a 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h @@ -398,7 +398,12 @@ struct brcmf_scan_params_le { * fixed parameter portion is assumed, otherwise * ssid in the fixed portion is ignored */ - __le16 channel_list[1]; /* list of chanspecs */ + union { + __le16 padding; /* Reserve space for at least 1 entry for abort + * which uses an on stack brcmf_scan_params_le + */ + DECLARE_FLEX_ARRAY(__le16, channel_list); /* chanspecs */ + }; }; struct brcmf_scan_params_v2_le { diff --git a/drivers/net/wireless/legacy/rayctl.h b/drivers/net/wireless/legacy/rayctl.h index 2b0f332043d7..1f3bde8ac73d 100644 --- a/drivers/net/wireless/legacy/rayctl.h +++ b/drivers/net/wireless/legacy/rayctl.h @@ -577,7 +577,7 @@ struct tx_msg { struct tib_structure tib; struct phy_header phy; struct mac_header mac; - UCHAR var[1]; + UCHAR var[]; }; /****** ECF Receive Control Structure (RCS) Area at Shared RAM offset 0x0800 */ diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c index 68e88224b8b1..ccedea7e8a50 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c @@ -128,12 +128,12 @@ mt7615_eeprom_parse_hw_band_cap(struct mt7615_dev *dev) case MT_EE_5GHZ: dev->mphy.cap.has_5ghz = true; break; - case MT_EE_2GHZ: - dev->mphy.cap.has_2ghz = true; - break; case MT_EE_DBDC: dev->dbdc_support = true; fallthrough; + case MT_EE_2GHZ: + dev->mphy.cap.has_2ghz = true; + break; default: dev->mphy.cap.has_2ghz = true; dev->mphy.cap.has_5ghz = true; diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c index 5e5c7bf51174..1584665fe3cb 100644 --- a/drivers/net/wireless/mediatek/mt76/usb.c +++ b/drivers/net/wireless/mediatek/mt76/usb.c @@ -286,8 +286,7 @@ static bool mt76u_check_sg(struct mt76_dev *dev) struct usb_device *udev = interface_to_usbdev(uintf); return (!disable_usb_sg && udev->bus->sg_tablesize > 0 && - (udev->bus->no_sg_constraint || - udev->speed == USB_SPEED_WIRELESS)); + udev->bus->no_sg_constraint); } static int diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c index b114babec698..c93e6250cb8b 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.c +++ b/drivers/net/wireless/realtek/rtw89/mac.c @@ -2524,7 +2524,7 @@ static int cmac_dma_init(struct rtw89_dev *rtwdev, u8 mac_idx) u32 reg; int ret; - if (chip_id != RTL8852A && chip_id != RTL8852B) + if (chip_id != RTL8852B) return 0; ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index c8d20cddf658..88f760a7cbc3 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -396,7 +396,7 @@ static void xenvif_get_requests(struct xenvif_queue *queue, struct gnttab_map_grant_ref *gop = queue->tx_map_ops + *map_ops; struct xen_netif_tx_request *txp = first; - nr_slots = shinfo->nr_frags + 1; + nr_slots = shinfo->nr_frags + frag_overflow + 1; copy_count(skb) = 0; XENVIF_TX_CB(skb)->split_mask = 0; @@ -462,8 +462,8 @@ static void xenvif_get_requests(struct xenvif_queue *queue, } } - for (shinfo->nr_frags = 
0; shinfo->nr_frags < nr_slots; - shinfo->nr_frags++, gop++) { + for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS; + shinfo->nr_frags++, gop++, nr_slots--) { index = pending_index(queue->pending_cons++); pending_idx = queue->pending_ring[index]; xenvif_tx_create_map_op(queue, pending_idx, txp, @@ -476,12 +476,12 @@ static void xenvif_get_requests(struct xenvif_queue *queue, txp++; } - if (frag_overflow) { + if (nr_slots > 0) { shinfo = skb_shinfo(nskb); frags = shinfo->frags; - for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow; + for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; shinfo->nr_frags++, txp++, gop++) { index = pending_index(queue->pending_cons++); pending_idx = queue->pending_ring[index]; @@ -492,6 +492,11 @@ static void xenvif_get_requests(struct xenvif_queue *queue, } skb_shinfo(skb)->frag_list = nskb; + } else if (nskb) { + /* A frag_list skb was allocated but it is no longer needed + * because enough slots were converted to copy ops above. + */ + kfree_skb(nskb); } (*copy_ops) = cop - queue->tx_copy_ops; diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 37b6fa746662..f3a01b79148c 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -3933,6 +3933,12 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl) */ nvme_mpath_clear_ctrl_paths(ctrl); + /* + * Unquiesce io queues so any pending IO won't hang, especially + * those submitted from scan work + */ + nvme_unquiesce_io_queues(ctrl); + /* prevent racing with ns scanning */ flush_work(&ctrl->scan_work); @@ -3942,10 +3948,8 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl) * removing the namespaces' disks; fail all the queues now to avoid * potentially having to clean up the failed sync later. */ - if (ctrl->state == NVME_CTRL_DEAD) { + if (ctrl->state == NVME_CTRL_DEAD) nvme_mark_namespaces_dead(ctrl); - nvme_unquiesce_io_queues(ctrl); - } /* this is a no-op when called from the controller reset handler */ nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING_NOIO); diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c index 5c3250f36ce7..d39f3219358b 100644 --- a/drivers/nvme/host/ioctl.c +++ b/drivers/nvme/host/ioctl.c @@ -786,11 +786,9 @@ int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd, if (!(ioucmd->flags & IORING_URING_CMD_POLLED)) return 0; - rcu_read_lock(); req = READ_ONCE(ioucmd->cookie); if (req && blk_rq_is_poll(req)) ret = blk_rq_poll(req, iob, poll_flags); - rcu_read_unlock(); return ret; } #ifdef CONFIG_NVME_MULTIPATH diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index baf69af7ea78..2f57da12d983 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -3402,7 +3402,8 @@ static const struct pci_device_id nvme_id_table[] = { { PCI_DEVICE(0x1d97, 0x2263), /* SPCC */ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, { PCI_DEVICE(0x144d, 0xa80b), /* Samsung PM9B1 256G and 512G */ - .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES | + NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x144d, 0xa809), /* Samsung MZALQ256HBJD 256G */ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, { PCI_DEVICE(0x144d, 0xa802), /* Samsung SM953 */ diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index d433b2ec07a6..337a624a537c 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -883,6 +883,7 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new) goto out_cleanup_tagset; if (!new) { + 
nvme_start_freeze(&ctrl->ctrl); nvme_unquiesce_io_queues(&ctrl->ctrl); if (!nvme_wait_freeze_timeout(&ctrl->ctrl, NVME_IO_TIMEOUT)) { /* @@ -891,6 +892,7 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new) * to be safe. */ ret = -ENODEV; + nvme_unfreeze(&ctrl->ctrl); goto out_wait_freeze_timed_out; } blk_mq_update_nr_hw_queues(ctrl->ctrl.tagset, @@ -940,7 +942,6 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl, bool remove) { if (ctrl->ctrl.queue_count > 1) { - nvme_start_freeze(&ctrl->ctrl); nvme_quiesce_io_queues(&ctrl->ctrl); nvme_sync_io_queues(&ctrl->ctrl); nvme_rdma_stop_io_queues(ctrl); diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 9ce417cd32a7..5b332d9f87fc 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -1868,6 +1868,7 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new) goto out_cleanup_connect_q; if (!new) { + nvme_start_freeze(ctrl); nvme_unquiesce_io_queues(ctrl); if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) { /* @@ -1876,6 +1877,7 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new) * to be safe. */ ret = -ENODEV; + nvme_unfreeze(ctrl); goto out_wait_freeze_timed_out; } blk_mq_update_nr_hw_queues(ctrl->tagset, @@ -1980,7 +1982,6 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl, if (ctrl->queue_count <= 1) return; nvme_quiesce_admin_queue(ctrl); - nvme_start_freeze(ctrl); nvme_quiesce_io_queues(ctrl); nvme_sync_io_queues(ctrl); nvme_tcp_stop_io_queues(ctrl); diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index bf3405f4289e..8b1dcd537020 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c @@ -121,6 +121,8 @@ module_param(sba_reserve_agpgart, int, 0444); MODULE_PARM_DESC(sba_reserve_agpgart, "Reserve half of IO pdir as AGPGART"); #endif +struct proc_dir_entry *proc_runway_root __ro_after_init; +struct proc_dir_entry *proc_mckinley_root __ro_after_init; /************************************ ** SBA register read and write support @@ -1968,11 +1970,15 @@ static int __init sba_driver_callback(struct parisc_device *dev) #ifdef CONFIG_PROC_FS switch (dev->id.hversion) { case PLUTO_MCKINLEY_PORT: + if (!proc_mckinley_root) + proc_mckinley_root = proc_mkdir("bus/mckinley", NULL); root = proc_mckinley_root; break; case ASTRO_RUNWAY_PORT: case IKE_MERCED_PORT: default: + if (!proc_runway_root) + proc_runway_root = proc_mkdir("bus/runway", NULL); root = proc_runway_root; break; } diff --git a/drivers/parport/parport_gsc.c b/drivers/parport/parport_gsc.c index 0dcc497b0449..5e4475254bd0 100644 --- a/drivers/parport/parport_gsc.c +++ b/drivers/parport/parport_gsc.c @@ -28,7 +28,6 @@ #include <linux/sysctl.h> #include <asm/io.h> -#include <asm/dma.h> #include <linux/uaccess.h> #include <asm/superio.h> @@ -226,9 +225,9 @@ static int parport_PS2_supported(struct parport *pb) /* --- Initialisation code -------------------------------- */ -struct parport *parport_gsc_probe_port(unsigned long base, +static struct parport *parport_gsc_probe_port(unsigned long base, unsigned long base_hi, int irq, - int dma, struct parisc_device *padev) + struct parisc_device *padev) { struct parport_gsc_private *priv; struct parport_operations *ops; @@ -250,12 +249,9 @@ struct parport *parport_gsc_probe_port(unsigned long base, } priv->ctr = 0xc; priv->ctr_writable = 0xff; - priv->dma_buf = NULL; - priv->dma_handle = 0; p->base = base; p->base_hi = base_hi; p->irq = irq; - p->dma = dma; p->modes = 
PARPORT_MODE_PCSPP | PARPORT_MODE_SAFEININT; p->ops = ops; p->private_data = priv; @@ -286,17 +282,9 @@ struct parport *parport_gsc_probe_port(unsigned long base, if (p->irq == PARPORT_IRQ_AUTO) { p->irq = PARPORT_IRQ_NONE; } - if (p->irq != PARPORT_IRQ_NONE) { + if (p->irq != PARPORT_IRQ_NONE) pr_cont(", irq %d", p->irq); - if (p->dma == PARPORT_DMA_AUTO) { - p->dma = PARPORT_DMA_NONE; - } - } - if (p->dma == PARPORT_DMA_AUTO) /* To use DMA, giving the irq - is mandatory (see above) */ - p->dma = PARPORT_DMA_NONE; - pr_cont(" ["); #define printmode(x) \ do { \ @@ -321,7 +309,6 @@ do { \ pr_warn("%s: irq %d in use, resorting to polled operation\n", p->name, p->irq); p->irq = PARPORT_IRQ_NONE; - p->dma = PARPORT_DMA_NONE; } } @@ -369,8 +356,7 @@ static int __init parport_init_chip(struct parisc_device *dev) pr_info("%s: enhanced parport-modes not supported\n", __func__); } - p = parport_gsc_probe_port(port, 0, dev->irq, - /* PARPORT_IRQ_NONE */ PARPORT_DMA_NONE, dev); + p = parport_gsc_probe_port(port, 0, dev->irq, dev); if (p) parport_count++; dev_set_drvdata(&dev->dev, p); @@ -382,16 +368,10 @@ static void __exit parport_remove_chip(struct parisc_device *dev) { struct parport *p = dev_get_drvdata(&dev->dev); if (p) { - struct parport_gsc_private *priv = p->private_data; struct parport_operations *ops = p->ops; parport_remove_port(p); - if (p->dma != PARPORT_DMA_NONE) - free_dma(p->dma); if (p->irq != PARPORT_IRQ_NONE) free_irq(p->irq, p); - if (priv->dma_buf) - dma_free_coherent(&priv->dev->dev, PAGE_SIZE, - priv->dma_buf, priv->dma_handle); kfree (p->private_data); parport_put_port(p); kfree (ops); /* hope no-one cached it */ diff --git a/drivers/parport/parport_gsc.h b/drivers/parport/parport_gsc.h index 9301217edf12..d447a568c257 100644 --- a/drivers/parport/parport_gsc.h +++ b/drivers/parport/parport_gsc.h @@ -63,8 +63,6 @@ struct parport_gsc_private { int writeIntrThreshold; /* buffer suitable for DMA, if DMA enabled */ - char *dma_buf; - dma_addr_t dma_handle; struct pci_dev *dev; }; @@ -199,9 +197,4 @@ extern void parport_gsc_inc_use_count(void); extern void parport_gsc_dec_use_count(void); -extern struct parport *parport_gsc_probe_port(unsigned long base, - unsigned long base_hi, - int irq, int dma, - struct parisc_device *padev); - #endif /* __DRIVERS_PARPORT_PARPORT_GSC_H */ diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index 5bc81cc0a2de..46b252bbe500 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c @@ -11,6 +11,7 @@ #include <linux/pci.h> #include <linux/errno.h> #include <linux/ioport.h> +#include <linux/of.h> #include <linux/proc_fs.h> #include <linux/slab.h> @@ -332,6 +333,7 @@ void __weak pcibios_bus_add_device(struct pci_dev *pdev) { } */ void pci_bus_add_device(struct pci_dev *dev) { + struct device_node *dn = dev->dev.of_node; int retval; /* @@ -344,7 +346,7 @@ void pci_bus_add_device(struct pci_dev *dev) pci_proc_attach_device(dev); pci_bridge_d3_update(dev); - dev->match_driver = true; + dev->match_driver = !dn || of_device_is_available(dn); retval = device_attach(&dev->dev); if (retval < 0 && retval != -EPROBE_DEFER) pci_warn(dev, "device attach failed (%d)\n", retval); diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig index 8d49bad7f847..0859be86e718 100644 --- a/drivers/pci/controller/Kconfig +++ b/drivers/pci/controller/Kconfig @@ -179,7 +179,6 @@ config PCI_MVEBU depends on MVEBU_MBUS depends on ARM depends on OF - depends on BROKEN select PCI_BRIDGE_EMUL help Add support for Marvell EBU PCIe controller. 
This PCIe controller diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c index cf61733bf78d..9952057c8819 100644 --- a/drivers/pci/controller/dwc/pcie-designware-host.c +++ b/drivers/pci/controller/dwc/pcie-designware-host.c @@ -485,20 +485,15 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp) if (ret) goto err_remove_edma; - if (dw_pcie_link_up(pci)) { - dw_pcie_print_link_status(pci); - } else { + if (!dw_pcie_link_up(pci)) { ret = dw_pcie_start_link(pci); if (ret) goto err_remove_edma; - - if (pci->ops && pci->ops->start_link) { - ret = dw_pcie_wait_for_link(pci); - if (ret) - goto err_stop_link; - } } + /* Ignore errors, the link may come up later */ + dw_pcie_wait_for_link(pci); + bridge->sysdata = pp; ret = pci_host_probe(bridge); diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c index c87848cd8686..1f2ee71da4da 100644 --- a/drivers/pci/controller/dwc/pcie-designware.c +++ b/drivers/pci/controller/dwc/pcie-designware.c @@ -644,20 +644,9 @@ void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index) dw_pcie_writel_atu(pci, dir, index, PCIE_ATU_REGION_CTRL2, 0); } -void dw_pcie_print_link_status(struct dw_pcie *pci) -{ - u32 offset, val; - - offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); - val = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA); - - dev_info(pci->dev, "PCIe Gen.%u x%u link up\n", - FIELD_GET(PCI_EXP_LNKSTA_CLS, val), - FIELD_GET(PCI_EXP_LNKSTA_NLW, val)); -} - int dw_pcie_wait_for_link(struct dw_pcie *pci) { + u32 offset, val; int retries; /* Check if the link is up or not */ @@ -673,7 +662,12 @@ int dw_pcie_wait_for_link(struct dw_pcie *pci) return -ETIMEDOUT; } - dw_pcie_print_link_status(pci); + offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); + val = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA); + + dev_info(pci->dev, "PCIe Gen.%u x%u link up\n", + FIELD_GET(PCI_EXP_LNKSTA_CLS, val), + FIELD_GET(PCI_EXP_LNKSTA_NLW, val)); return 0; } diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h index 615660640801..79713ce075cc 100644 --- a/drivers/pci/controller/dwc/pcie-designware.h +++ b/drivers/pci/controller/dwc/pcie-designware.h @@ -429,7 +429,6 @@ void dw_pcie_setup(struct dw_pcie *pci); void dw_pcie_iatu_detect(struct dw_pcie *pci); int dw_pcie_edma_detect(struct dw_pcie *pci); void dw_pcie_edma_remove(struct dw_pcie *pci); -void dw_pcie_print_link_status(struct dw_pcie *pci); static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val) { diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index 328d1e416014..601129772b2d 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -498,6 +498,7 @@ static void enable_slot(struct acpiphp_slot *slot, bool bridge) acpiphp_native_scan_bridge(dev); } } else { + LIST_HEAD(add_list); int max, pass; acpiphp_rescan_slot(slot); @@ -511,10 +512,15 @@ static void enable_slot(struct acpiphp_slot *slot, bool bridge) if (pass && dev->subordinate) { check_hotplug_bridge(slot, dev); pcibios_resource_survey_bus(dev->subordinate); + if (pci_is_root_bus(bus)) + __pci_bus_size_bridges(dev->subordinate, &add_list); } } } - pci_assign_unassigned_bridge_resources(bus->self); + if (pci_is_root_bus(bus)) + __pci_bus_assign_resources(bus, &add_list, NULL); + else + pci_assign_unassigned_bridge_resources(bus->self); } acpiphp_sanitize_bus(bus); diff --git a/drivers/pci/of.c 
b/drivers/pci/of.c index e51219f9f523..3c158b17dcb5 100644 --- a/drivers/pci/of.c +++ b/drivers/pci/of.c @@ -34,11 +34,6 @@ int pci_set_of_node(struct pci_dev *dev) if (!node) return 0; - if (!of_device_is_available(node)) { - of_node_put(node); - return -ENODEV; - } - device_set_node(&dev->dev, of_fwnode_handle(node)); return 0; } diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig index 8dba9596408f..aac670b90589 100644 --- a/drivers/phy/Kconfig +++ b/drivers/phy/Kconfig @@ -87,6 +87,7 @@ source "drivers/phy/motorola/Kconfig" source "drivers/phy/mscc/Kconfig" source "drivers/phy/qualcomm/Kconfig" source "drivers/phy/ralink/Kconfig" +source "drivers/phy/realtek/Kconfig" source "drivers/phy/renesas/Kconfig" source "drivers/phy/rockchip/Kconfig" source "drivers/phy/samsung/Kconfig" diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile index 54f312c10a40..ba7c100b14fc 100644 --- a/drivers/phy/Makefile +++ b/drivers/phy/Makefile @@ -26,6 +26,7 @@ obj-y += allwinner/ \ mscc/ \ qualcomm/ \ ralink/ \ + realtek/ \ renesas/ \ rockchip/ \ samsung/ \ diff --git a/drivers/phy/realtek/Kconfig b/drivers/phy/realtek/Kconfig new file mode 100644 index 000000000000..650e20ed69af --- /dev/null +++ b/drivers/phy/realtek/Kconfig @@ -0,0 +1,27 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Phy drivers for Realtek platforms +# +config PHY_RTK_RTD_USB2PHY + tristate "Realtek RTD USB2 PHY Transceiver Driver" + depends on USB_SUPPORT + select GENERIC_PHY + select USB_PHY + select USB_COMMON + help + Enable this to support Realtek SoC USB2 phy transceiver. + The DHC (digital home center) RTD series SoCs used the Synopsys + DWC3 USB IP. This driver will do the PHY initialization + of the parameters. + +config PHY_RTK_RTD_USB3PHY + tristate "Realtek RTD USB3 PHY Transceiver Driver" + depends on USB_SUPPORT + select GENERIC_PHY + select USB_PHY + select USB_COMMON + help + Enable this to support Realtek SoC USB3 phy transceiver. + The DHC (digital home center) RTD series SoCs used the Synopsys + DWC3 USB IP. This driver will do the PHY initialization + of the parameters. 
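
The drivers/pci/bus.c and drivers/pci/of.c hunks above stop throwing away the OF node of a PCI device whose devicetree status is "disabled": the node stays attached, and driver binding is instead gated at bus-add time through dev->match_driver. Below is a minimal, self-contained C sketch of that gating pattern only; fake_node, fake_dev and node_is_available() are illustrative stand-ins under the usual DT convention (missing status, "okay" or "ok" means available), not the kernel's struct pci_dev or of_device_is_available(), so treat it as a sketch of the idea rather than the in-tree implementation.

/*
 * Illustrative stand-alone sketch of the match_driver gating used by the
 * drivers/pci hunks above. All names here are hypothetical, not kernel APIs.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct fake_node {
	const char *status;	/* mirrors the DT "status" property */
};

struct fake_dev {
	struct fake_node *of_node;
	bool match_driver;
};

/* DT convention: no status property, or "okay"/"ok", means available */
static bool node_is_available(const struct fake_node *node)
{
	if (!node->status)
		return true;
	return !strcmp(node->status, "okay") || !strcmp(node->status, "ok");
}

/* Keep the node attached, but only allow driver matching when enabled */
static void fake_bus_add_device(struct fake_dev *dev)
{
	struct fake_node *dn = dev->of_node;

	dev->match_driver = !dn || node_is_available(dn);
}

int main(void)
{
	struct fake_node disabled = { .status = "disabled" };
	struct fake_dev dev = { .of_node = &disabled };

	fake_bus_add_device(&dev);
	printf("match_driver=%d\n", dev.match_driver);	/* prints 0 */
	return 0;
}

The design point the sketch tries to capture: a device marked "disabled" in the devicetree keeps its node (so other code can still look it up), but never has a driver bound to it, which is gentler than dropping the node entirely as the old pci_set_of_node() path did.
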
diff --git a/drivers/phy/realtek/Makefile b/drivers/phy/realtek/Makefile new file mode 100644 index 000000000000..ed7b47ff8a26 --- /dev/null +++ b/drivers/phy/realtek/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_PHY_RTK_RTD_USB2PHY) += phy-rtk-usb2.o +obj-$(CONFIG_PHY_RTK_RTD_USB3PHY) += phy-rtk-usb3.o diff --git a/drivers/phy/realtek/phy-rtk-usb2.c b/drivers/phy/realtek/phy-rtk-usb2.c new file mode 100644 index 000000000000..5e7ee060b404 --- /dev/null +++ b/drivers/phy/realtek/phy-rtk-usb2.c @@ -0,0 +1,1331 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * phy-rtk-usb2.c RTK usb2.0 PHY driver + * + * Copyright (C) 2023 Realtek Semiconductor Corporation + * + */ + +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_address.h> +#include <linux/uaccess.h> +#include <linux/debugfs.h> +#include <linux/nvmem-consumer.h> +#include <linux/regmap.h> +#include <linux/sys_soc.h> +#include <linux/mfd/syscon.h> +#include <linux/phy/phy.h> +#include <linux/usb.h> +#include <linux/usb/phy.h> +#include <linux/usb/hcd.h> + +/* GUSB2PHYACCn register */ +#define PHY_NEW_REG_REQ BIT(25) +#define PHY_VSTS_BUSY BIT(23) +#define PHY_VCTRL_SHIFT 8 +#define PHY_REG_DATA_MASK 0xff + +#define GET_LOW_NIBBLE(addr) ((addr) & 0x0f) +#define GET_HIGH_NIBBLE(addr) (((addr) & 0xf0) >> 4) + +#define EFUS_USB_DC_CAL_RATE 2 +#define EFUS_USB_DC_CAL_MAX 7 + +#define EFUS_USB_DC_DIS_RATE 1 +#define EFUS_USB_DC_DIS_MAX 7 + +#define MAX_PHY_DATA_SIZE 20 +#define OFFEST_PHY_READ 0x20 + +#define MAX_USB_PHY_NUM 4 +#define MAX_USB_PHY_PAGE0_DATA_SIZE 16 +#define MAX_USB_PHY_PAGE1_DATA_SIZE 16 +#define MAX_USB_PHY_PAGE2_DATA_SIZE 8 + +#define SET_PAGE_OFFSET 0xf4 +#define SET_PAGE_0 0x9b +#define SET_PAGE_1 0xbb +#define SET_PAGE_2 0xdb + +#define PAGE_START 0xe0 +#define PAGE0_0XE4 0xe4 +#define PAGE0_0XE6 0xe6 +#define PAGE0_0XE7 0xe7 +#define PAGE1_0XE0 0xe0 +#define PAGE1_0XE2 0xe2 + +#define SENSITIVITY_CTRL (BIT(4) | BIT(5) | BIT(6)) +#define ENABLE_AUTO_SENSITIVITY_CALIBRATION BIT(2) +#define DEFAULT_DC_DRIVING_VALUE (0x8) +#define DEFAULT_DC_DISCONNECTION_VALUE (0x6) +#define HS_CLK_SELECT BIT(6) + +struct phy_reg { + void __iomem *reg_wrap_vstatus; + void __iomem *reg_gusb2phyacc0; + int vstatus_index; +}; + +struct phy_data { + u8 addr; + u8 data; +}; + +struct phy_cfg { + int page0_size; + struct phy_data page0[MAX_USB_PHY_PAGE0_DATA_SIZE]; + int page1_size; + struct phy_data page1[MAX_USB_PHY_PAGE1_DATA_SIZE]; + int page2_size; + struct phy_data page2[MAX_USB_PHY_PAGE2_DATA_SIZE]; + + int num_phy; + + bool check_efuse; + int check_efuse_version; +#define CHECK_EFUSE_V1 1 +#define CHECK_EFUSE_V2 2 + int efuse_dc_driving_rate; + int efuse_dc_disconnect_rate; + int dc_driving_mask; + int dc_disconnect_mask; + bool usb_dc_disconnect_at_page0; + int driving_updated_for_dev_dis; + + bool do_toggle; + bool do_toggle_driving; + bool use_default_parameter; + bool is_double_sensitivity_mode; +}; + +struct phy_parameter { + struct phy_reg phy_reg; + + /* Get from efuse */ + s8 efuse_usb_dc_cal; + s8 efuse_usb_dc_dis; + + /* Get from dts */ + bool inverse_hstx_sync_clock; + u32 driving_level; + s32 driving_level_compensate; + s32 disconnection_compensate; +}; + +struct rtk_phy { + struct usb_phy phy; + struct device *dev; + + struct phy_cfg *phy_cfg; + int num_phy; + struct phy_parameter *phy_parameter; + + struct dentry *debug_dir; +}; + +/* mapping 0xE0 to 0 ... 
0xE7 to 7, 0xF0 to 8 ,,, 0xF7 to 15 */ +static inline int page_addr_to_array_index(u8 addr) +{ + return (int)((((addr) - PAGE_START) & 0x7) + + ((((addr) - PAGE_START) & 0x10) >> 1)); +} + +static inline u8 array_index_to_page_addr(int index) +{ + return ((((index) + PAGE_START) & 0x7) + + ((((index) & 0x8) << 1) + PAGE_START)); +} + +#define PHY_IO_TIMEOUT_USEC (50000) +#define PHY_IO_DELAY_US (100) + +static inline int utmi_wait_register(void __iomem *reg, u32 mask, u32 result) +{ + int ret; + unsigned int val; + + ret = read_poll_timeout(readl, val, ((val & mask) == result), + PHY_IO_DELAY_US, PHY_IO_TIMEOUT_USEC, false, reg); + if (ret) { + pr_err("%s can't program USB phy\n", __func__); + return -ETIMEDOUT; + } + + return 0; +} + +static char rtk_phy_read(struct phy_reg *phy_reg, char addr) +{ + void __iomem *reg_gusb2phyacc0 = phy_reg->reg_gusb2phyacc0; + unsigned int val; + int ret = 0; + + addr -= OFFEST_PHY_READ; + + /* polling until VBusy == 0 */ + ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0); + if (ret) + return (char)ret; + + /* VCtrl = low nibble of addr, and set PHY_NEW_REG_REQ */ + val = PHY_NEW_REG_REQ | (GET_LOW_NIBBLE(addr) << PHY_VCTRL_SHIFT); + writel(val, reg_gusb2phyacc0); + ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0); + if (ret) + return (char)ret; + + /* VCtrl = high nibble of addr, and set PHY_NEW_REG_REQ */ + val = PHY_NEW_REG_REQ | (GET_HIGH_NIBBLE(addr) << PHY_VCTRL_SHIFT); + writel(val, reg_gusb2phyacc0); + ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0); + if (ret) + return (char)ret; + + val = readl(reg_gusb2phyacc0); + + return (char)(val & PHY_REG_DATA_MASK); +} + +static int rtk_phy_write(struct phy_reg *phy_reg, char addr, char data) +{ + unsigned int val; + void __iomem *reg_wrap_vstatus = phy_reg->reg_wrap_vstatus; + void __iomem *reg_gusb2phyacc0 = phy_reg->reg_gusb2phyacc0; + int shift_bits = phy_reg->vstatus_index * 8; + int ret = 0; + + /* write data to VStatusOut2 (data output to phy) */ + writel((u32)data << shift_bits, reg_wrap_vstatus); + + ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0); + if (ret) + return ret; + + /* VCtrl = low nibble of addr, set PHY_NEW_REG_REQ */ + val = PHY_NEW_REG_REQ | (GET_LOW_NIBBLE(addr) << PHY_VCTRL_SHIFT); + + writel(val, reg_gusb2phyacc0); + ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0); + if (ret) + return ret; + + /* VCtrl = high nibble of addr, set PHY_NEW_REG_REQ */ + val = PHY_NEW_REG_REQ | (GET_HIGH_NIBBLE(addr) << PHY_VCTRL_SHIFT); + + writel(val, reg_gusb2phyacc0); + ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0); + if (ret) + return ret; + + return 0; +} + +static int rtk_phy_set_page(struct phy_reg *phy_reg, int page) +{ + switch (page) { + case 0: + return rtk_phy_write(phy_reg, SET_PAGE_OFFSET, SET_PAGE_0); + case 1: + return rtk_phy_write(phy_reg, SET_PAGE_OFFSET, SET_PAGE_1); + case 2: + return rtk_phy_write(phy_reg, SET_PAGE_OFFSET, SET_PAGE_2); + default: + pr_err("%s error page=%d\n", __func__, page); + } + + return -EINVAL; +} + +static u8 __updated_dc_disconnect_level_page0_0xe4(struct phy_cfg *phy_cfg, + struct phy_parameter *phy_parameter, u8 data) +{ + u8 ret; + s32 val; + s32 dc_disconnect_mask = phy_cfg->dc_disconnect_mask; + int offset = 4; + + val = (s32)((data >> offset) & dc_disconnect_mask) + + phy_parameter->efuse_usb_dc_dis + + phy_parameter->disconnection_compensate; + + if (val > dc_disconnect_mask) + val = dc_disconnect_mask; + else if (val < 0) + val = 0; + + ret = (data & 
(~(dc_disconnect_mask << offset))) | + (val & dc_disconnect_mask) << offset; + + return ret; +} + +/* updated disconnect level at page0 */ +static void update_dc_disconnect_level_at_page0(struct rtk_phy *rtk_phy, + struct phy_parameter *phy_parameter, bool update) +{ + struct phy_cfg *phy_cfg; + struct phy_reg *phy_reg; + struct phy_data *phy_data_page; + struct phy_data *phy_data; + u8 addr, data; + int offset = 4; + s32 dc_disconnect_mask; + int i; + + phy_cfg = rtk_phy->phy_cfg; + phy_reg = &phy_parameter->phy_reg; + + /* Set page 0 */ + phy_data_page = phy_cfg->page0; + rtk_phy_set_page(phy_reg, 0); + + i = page_addr_to_array_index(PAGE0_0XE4); + phy_data = phy_data_page + i; + if (!phy_data->addr) { + phy_data->addr = PAGE0_0XE4; + phy_data->data = rtk_phy_read(phy_reg, PAGE0_0XE4); + } + + addr = phy_data->addr; + data = phy_data->data; + dc_disconnect_mask = phy_cfg->dc_disconnect_mask; + + if (update) + data = __updated_dc_disconnect_level_page0_0xe4(phy_cfg, phy_parameter, data); + else + data = (data & ~(dc_disconnect_mask << offset)) | + (DEFAULT_DC_DISCONNECTION_VALUE << offset); + + if (rtk_phy_write(phy_reg, addr, data)) + dev_err(rtk_phy->dev, + "%s: Error to set page1 parameter addr=0x%x value=0x%x\n", + __func__, addr, data); +} + +static u8 __updated_dc_disconnect_level_page1_0xe2(struct phy_cfg *phy_cfg, + struct phy_parameter *phy_parameter, u8 data) +{ + u8 ret; + s32 val; + s32 dc_disconnect_mask = phy_cfg->dc_disconnect_mask; + + if (phy_cfg->check_efuse_version == CHECK_EFUSE_V1) { + val = (s32)(data & dc_disconnect_mask) + + phy_parameter->efuse_usb_dc_dis + + phy_parameter->disconnection_compensate; + } else { /* for CHECK_EFUSE_V2 or no efuse */ + if (phy_parameter->efuse_usb_dc_dis) + val = (s32)(phy_parameter->efuse_usb_dc_dis + + phy_parameter->disconnection_compensate); + else + val = (s32)((data & dc_disconnect_mask) + + phy_parameter->disconnection_compensate); + } + + if (val > dc_disconnect_mask) + val = dc_disconnect_mask; + else if (val < 0) + val = 0; + + ret = (data & (~dc_disconnect_mask)) | (val & dc_disconnect_mask); + + return ret; +} + +/* updated disconnect level at page1 */ +static void update_dc_disconnect_level_at_page1(struct rtk_phy *rtk_phy, + struct phy_parameter *phy_parameter, bool update) +{ + struct phy_cfg *phy_cfg; + struct phy_data *phy_data_page; + struct phy_data *phy_data; + struct phy_reg *phy_reg; + u8 addr, data; + s32 dc_disconnect_mask; + int i; + + phy_cfg = rtk_phy->phy_cfg; + phy_reg = &phy_parameter->phy_reg; + + /* Set page 1 */ + phy_data_page = phy_cfg->page1; + rtk_phy_set_page(phy_reg, 1); + + i = page_addr_to_array_index(PAGE1_0XE2); + phy_data = phy_data_page + i; + if (!phy_data->addr) { + phy_data->addr = PAGE1_0XE2; + phy_data->data = rtk_phy_read(phy_reg, PAGE1_0XE2); + } + + addr = phy_data->addr; + data = phy_data->data; + dc_disconnect_mask = phy_cfg->dc_disconnect_mask; + + if (update) + data = __updated_dc_disconnect_level_page1_0xe2(phy_cfg, phy_parameter, data); + else + data = (data & ~dc_disconnect_mask) | DEFAULT_DC_DISCONNECTION_VALUE; + + if (rtk_phy_write(phy_reg, addr, data)) + dev_err(rtk_phy->dev, + "%s: Error to set page1 parameter addr=0x%x value=0x%x\n", + __func__, addr, data); +} + +static void update_dc_disconnect_level(struct rtk_phy *rtk_phy, + struct phy_parameter *phy_parameter, bool update) +{ + struct phy_cfg *phy_cfg = rtk_phy->phy_cfg; + + if (phy_cfg->usb_dc_disconnect_at_page0) + update_dc_disconnect_level_at_page0(rtk_phy, phy_parameter, update); + else + 
update_dc_disconnect_level_at_page1(rtk_phy, phy_parameter, update); +} + +static u8 __update_dc_driving_page0_0xe4(struct phy_cfg *phy_cfg, + struct phy_parameter *phy_parameter, u8 data) +{ + s32 driving_level_compensate = phy_parameter->driving_level_compensate; + s32 dc_driving_mask = phy_cfg->dc_driving_mask; + s32 val; + u8 ret; + + if (phy_cfg->check_efuse_version == CHECK_EFUSE_V1) { + val = (s32)(data & dc_driving_mask) + driving_level_compensate + + phy_parameter->efuse_usb_dc_cal; + } else { /* for CHECK_EFUSE_V2 or no efuse */ + if (phy_parameter->efuse_usb_dc_cal) + val = (s32)((phy_parameter->efuse_usb_dc_cal & dc_driving_mask) + + driving_level_compensate); + else + val = (s32)(data & dc_driving_mask); + } + + if (val > dc_driving_mask) + val = dc_driving_mask; + else if (val < 0) + val = 0; + + ret = (data & (~dc_driving_mask)) | (val & dc_driving_mask); + + return ret; +} + +static void update_dc_driving_level(struct rtk_phy *rtk_phy, + struct phy_parameter *phy_parameter) +{ + struct phy_cfg *phy_cfg; + struct phy_reg *phy_reg; + + phy_reg = &phy_parameter->phy_reg; + phy_cfg = rtk_phy->phy_cfg; + if (!phy_cfg->page0[4].addr) { + rtk_phy_set_page(phy_reg, 0); + phy_cfg->page0[4].addr = PAGE0_0XE4; + phy_cfg->page0[4].data = rtk_phy_read(phy_reg, PAGE0_0XE4); + } + + if (phy_parameter->driving_level != DEFAULT_DC_DRIVING_VALUE) { + u32 dc_driving_mask; + u8 driving_level; + u8 data; + + data = phy_cfg->page0[4].data; + dc_driving_mask = phy_cfg->dc_driving_mask; + driving_level = data & dc_driving_mask; + + dev_dbg(rtk_phy->dev, "%s driving_level=%d => dts driving_level=%d\n", + __func__, driving_level, phy_parameter->driving_level); + + phy_cfg->page0[4].data = (data & (~dc_driving_mask)) | + (phy_parameter->driving_level & dc_driving_mask); + } + + phy_cfg->page0[4].data = __update_dc_driving_page0_0xe4(phy_cfg, + phy_parameter, + phy_cfg->page0[4].data); +} + +static void update_hs_clk_select(struct rtk_phy *rtk_phy, + struct phy_parameter *phy_parameter) +{ + struct phy_cfg *phy_cfg; + struct phy_reg *phy_reg; + + phy_cfg = rtk_phy->phy_cfg; + phy_reg = &phy_parameter->phy_reg; + + if (phy_parameter->inverse_hstx_sync_clock) { + if (!phy_cfg->page0[6].addr) { + rtk_phy_set_page(phy_reg, 0); + phy_cfg->page0[6].addr = PAGE0_0XE6; + phy_cfg->page0[6].data = rtk_phy_read(phy_reg, PAGE0_0XE6); + } + + phy_cfg->page0[6].data = phy_cfg->page0[6].data | HS_CLK_SELECT; + } +} + +static void do_rtk_phy_toggle(struct rtk_phy *rtk_phy, + int index, bool connect) +{ + struct phy_parameter *phy_parameter; + struct phy_cfg *phy_cfg; + struct phy_reg *phy_reg; + struct phy_data *phy_data_page; + u8 addr, data; + int i; + + phy_cfg = rtk_phy->phy_cfg; + phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index]; + phy_reg = &phy_parameter->phy_reg; + + if (!phy_cfg->do_toggle) + goto out; + + if (phy_cfg->is_double_sensitivity_mode) + goto do_toggle_driving; + + /* Set page 0 */ + rtk_phy_set_page(phy_reg, 0); + + addr = PAGE0_0XE7; + data = rtk_phy_read(phy_reg, addr); + + if (connect) + rtk_phy_write(phy_reg, addr, data & (~SENSITIVITY_CTRL)); + else + rtk_phy_write(phy_reg, addr, data | (SENSITIVITY_CTRL)); + +do_toggle_driving: + + if (!phy_cfg->do_toggle_driving) + goto do_toggle; + + /* Page 0 addr 0xE4 driving capability */ + + /* Set page 0 */ + phy_data_page = phy_cfg->page0; + rtk_phy_set_page(phy_reg, 0); + + i = page_addr_to_array_index(PAGE0_0XE4); + addr = phy_data_page[i].addr; + data = phy_data_page[i].data; + + if (connect) { + rtk_phy_write(phy_reg, 
addr, data); + } else { + u8 value; + s32 tmp; + s32 driving_updated = + phy_cfg->driving_updated_for_dev_dis; + s32 dc_driving_mask = phy_cfg->dc_driving_mask; + + tmp = (s32)(data & dc_driving_mask) + driving_updated; + + if (tmp > dc_driving_mask) + tmp = dc_driving_mask; + else if (tmp < 0) + tmp = 0; + + value = (data & (~dc_driving_mask)) | (tmp & dc_driving_mask); + + rtk_phy_write(phy_reg, addr, value); + } + +do_toggle: + /* restore dc disconnect level before toggle */ + update_dc_disconnect_level(rtk_phy, phy_parameter, false); + + /* Set page 1 */ + rtk_phy_set_page(phy_reg, 1); + + addr = PAGE1_0XE0; + data = rtk_phy_read(phy_reg, addr); + + rtk_phy_write(phy_reg, addr, data & + (~ENABLE_AUTO_SENSITIVITY_CALIBRATION)); + mdelay(1); + rtk_phy_write(phy_reg, addr, data | + (ENABLE_AUTO_SENSITIVITY_CALIBRATION)); + + /* update dc disconnect level after toggle */ + update_dc_disconnect_level(rtk_phy, phy_parameter, true); + +out: + return; +} + +static int do_rtk_phy_init(struct rtk_phy *rtk_phy, int index) +{ + struct phy_parameter *phy_parameter; + struct phy_cfg *phy_cfg; + struct phy_data *phy_data_page; + struct phy_reg *phy_reg; + int i; + + phy_cfg = rtk_phy->phy_cfg; + phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index]; + phy_reg = &phy_parameter->phy_reg; + + if (phy_cfg->use_default_parameter) { + dev_dbg(rtk_phy->dev, "%s phy#%d use default parameter\n", + __func__, index); + goto do_toggle; + } + + /* Set page 0 */ + phy_data_page = phy_cfg->page0; + rtk_phy_set_page(phy_reg, 0); + + for (i = 0; i < phy_cfg->page0_size; i++) { + struct phy_data *phy_data = phy_data_page + i; + u8 addr = phy_data->addr; + u8 data = phy_data->data; + + if (!addr) + continue; + + if (rtk_phy_write(phy_reg, addr, data)) { + dev_err(rtk_phy->dev, + "%s: Error to set page0 parameter addr=0x%x value=0x%x\n", + __func__, addr, data); + return -EINVAL; + } + } + + /* Set page 1 */ + phy_data_page = phy_cfg->page1; + rtk_phy_set_page(phy_reg, 1); + + for (i = 0; i < phy_cfg->page1_size; i++) { + struct phy_data *phy_data = phy_data_page + i; + u8 addr = phy_data->addr; + u8 data = phy_data->data; + + if (!addr) + continue; + + if (rtk_phy_write(phy_reg, addr, data)) { + dev_err(rtk_phy->dev, + "%s: Error to set page1 parameter addr=0x%x value=0x%x\n", + __func__, addr, data); + return -EINVAL; + } + } + + if (phy_cfg->page2_size == 0) + goto do_toggle; + + /* Set page 2 */ + phy_data_page = phy_cfg->page2; + rtk_phy_set_page(phy_reg, 2); + + for (i = 0; i < phy_cfg->page2_size; i++) { + struct phy_data *phy_data = phy_data_page + i; + u8 addr = phy_data->addr; + u8 data = phy_data->data; + + if (!addr) + continue; + + if (rtk_phy_write(phy_reg, addr, data)) { + dev_err(rtk_phy->dev, + "%s: Error to set page2 parameter addr=0x%x value=0x%x\n", + __func__, addr, data); + return -EINVAL; + } + } + +do_toggle: + do_rtk_phy_toggle(rtk_phy, index, false); + + return 0; +} + +static int rtk_phy_init(struct phy *phy) +{ + struct rtk_phy *rtk_phy = phy_get_drvdata(phy); + unsigned long phy_init_time = jiffies; + int i, ret = 0; + + if (!rtk_phy) + return -EINVAL; + + for (i = 0; i < rtk_phy->num_phy; i++) + ret = do_rtk_phy_init(rtk_phy, i); + + dev_dbg(rtk_phy->dev, "Initialized RTK USB 2.0 PHY (take %dms)\n", + jiffies_to_msecs(jiffies - phy_init_time)); + return ret; +} + +static int rtk_phy_exit(struct phy *phy) +{ + return 0; +} + +static const struct phy_ops ops = { + .init = rtk_phy_init, + .exit = rtk_phy_exit, + .owner = THIS_MODULE, +}; + +static void rtk_phy_toggle(struct 
usb_phy *usb2_phy, bool connect, int port) +{ + int index = port; + struct rtk_phy *rtk_phy = NULL; + + rtk_phy = dev_get_drvdata(usb2_phy->dev); + + if (index > rtk_phy->num_phy) { + dev_err(rtk_phy->dev, "%s: The port=%d is not in usb phy (num_phy=%d)\n", + __func__, index, rtk_phy->num_phy); + return; + } + + do_rtk_phy_toggle(rtk_phy, index, connect); +} + +static int rtk_phy_notify_port_status(struct usb_phy *x, int port, + u16 portstatus, u16 portchange) +{ + bool connect = false; + + pr_debug("%s port=%d portstatus=0x%x portchange=0x%x\n", + __func__, port, (int)portstatus, (int)portchange); + if (portstatus & USB_PORT_STAT_CONNECTION) + connect = true; + + if (portchange & USB_PORT_STAT_C_CONNECTION) + rtk_phy_toggle(x, connect, port); + + return 0; +} + +#ifdef CONFIG_DEBUG_FS +static struct dentry *create_phy_debug_root(void) +{ + struct dentry *phy_debug_root; + + phy_debug_root = debugfs_lookup("phy", usb_debug_root); + if (!phy_debug_root) + phy_debug_root = debugfs_create_dir("phy", usb_debug_root); + + return phy_debug_root; +} + +static int rtk_usb2_parameter_show(struct seq_file *s, void *unused) +{ + struct rtk_phy *rtk_phy = s->private; + struct phy_cfg *phy_cfg; + int i, index; + + phy_cfg = rtk_phy->phy_cfg; + + seq_puts(s, "Property:\n"); + seq_printf(s, " check_efuse: %s\n", + phy_cfg->check_efuse ? "Enable" : "Disable"); + seq_printf(s, " check_efuse_version: %d\n", + phy_cfg->check_efuse_version); + seq_printf(s, " efuse_dc_driving_rate: %d\n", + phy_cfg->efuse_dc_driving_rate); + seq_printf(s, " dc_driving_mask: 0x%x\n", + phy_cfg->dc_driving_mask); + seq_printf(s, " efuse_dc_disconnect_rate: %d\n", + phy_cfg->efuse_dc_disconnect_rate); + seq_printf(s, " dc_disconnect_mask: 0x%x\n", + phy_cfg->dc_disconnect_mask); + seq_printf(s, " usb_dc_disconnect_at_page0: %s\n", + phy_cfg->usb_dc_disconnect_at_page0 ? "true" : "false"); + seq_printf(s, " do_toggle: %s\n", + phy_cfg->do_toggle ? "Enable" : "Disable"); + seq_printf(s, " do_toggle_driving: %s\n", + phy_cfg->do_toggle_driving ? "Enable" : "Disable"); + seq_printf(s, " driving_updated_for_dev_dis: 0x%x\n", + phy_cfg->driving_updated_for_dev_dis); + seq_printf(s, " use_default_parameter: %s\n", + phy_cfg->use_default_parameter ? "Enable" : "Disable"); + seq_printf(s, " is_double_sensitivity_mode: %s\n", + phy_cfg->is_double_sensitivity_mode ? 
"Enable" : "Disable"); + + for (index = 0; index < rtk_phy->num_phy; index++) { + struct phy_parameter *phy_parameter; + struct phy_reg *phy_reg; + struct phy_data *phy_data_page; + + phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index]; + phy_reg = &phy_parameter->phy_reg; + + seq_printf(s, "PHY %d:\n", index); + + seq_puts(s, "Page 0:\n"); + /* Set page 0 */ + phy_data_page = phy_cfg->page0; + rtk_phy_set_page(phy_reg, 0); + + for (i = 0; i < phy_cfg->page0_size; i++) { + struct phy_data *phy_data = phy_data_page + i; + u8 addr = array_index_to_page_addr(i); + u8 data = phy_data->data; + u8 value = rtk_phy_read(phy_reg, addr); + + if (phy_data->addr) + seq_printf(s, " Page 0: addr=0x%x data=0x%02x ==> read value=0x%02x\n", + addr, data, value); + else + seq_printf(s, " Page 0: addr=0x%x data=none ==> read value=0x%02x\n", + addr, value); + } + + seq_puts(s, "Page 1:\n"); + /* Set page 1 */ + phy_data_page = phy_cfg->page1; + rtk_phy_set_page(phy_reg, 1); + + for (i = 0; i < phy_cfg->page1_size; i++) { + struct phy_data *phy_data = phy_data_page + i; + u8 addr = array_index_to_page_addr(i); + u8 data = phy_data->data; + u8 value = rtk_phy_read(phy_reg, addr); + + if (phy_data->addr) + seq_printf(s, " Page 1: addr=0x%x data=0x%02x ==> read value=0x%02x\n", + addr, data, value); + else + seq_printf(s, " Page 1: addr=0x%x data=none ==> read value=0x%02x\n", + addr, value); + } + + if (phy_cfg->page2_size == 0) + goto out; + + seq_puts(s, "Page 2:\n"); + /* Set page 2 */ + phy_data_page = phy_cfg->page2; + rtk_phy_set_page(phy_reg, 2); + + for (i = 0; i < phy_cfg->page2_size; i++) { + struct phy_data *phy_data = phy_data_page + i; + u8 addr = array_index_to_page_addr(i); + u8 data = phy_data->data; + u8 value = rtk_phy_read(phy_reg, addr); + + if (phy_data->addr) + seq_printf(s, " Page 2: addr=0x%x data=0x%02x ==> read value=0x%02x\n", + addr, data, value); + else + seq_printf(s, " Page 2: addr=0x%x data=none ==> read value=0x%02x\n", + addr, value); + } + +out: + seq_puts(s, "PHY Property:\n"); + seq_printf(s, " efuse_usb_dc_cal: %d\n", + (int)phy_parameter->efuse_usb_dc_cal); + seq_printf(s, " efuse_usb_dc_dis: %d\n", + (int)phy_parameter->efuse_usb_dc_dis); + seq_printf(s, " inverse_hstx_sync_clock: %s\n", + phy_parameter->inverse_hstx_sync_clock ? 
"Enable" : "Disable"); + seq_printf(s, " driving_level: %d\n", + phy_parameter->driving_level); + seq_printf(s, " driving_level_compensate: %d\n", + phy_parameter->driving_level_compensate); + seq_printf(s, " disconnection_compensate: %d\n", + phy_parameter->disconnection_compensate); + } + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(rtk_usb2_parameter); + +static inline void create_debug_files(struct rtk_phy *rtk_phy) +{ + struct dentry *phy_debug_root = NULL; + + phy_debug_root = create_phy_debug_root(); + if (!phy_debug_root) + return; + + rtk_phy->debug_dir = debugfs_create_dir(dev_name(rtk_phy->dev), + phy_debug_root); + if (!rtk_phy->debug_dir) + return; + + if (!debugfs_create_file("parameter", 0444, rtk_phy->debug_dir, rtk_phy, + &rtk_usb2_parameter_fops)) + goto file_error; + + return; + +file_error: + debugfs_remove_recursive(rtk_phy->debug_dir); +} + +static inline void remove_debug_files(struct rtk_phy *rtk_phy) +{ + debugfs_remove_recursive(rtk_phy->debug_dir); +} +#else +static inline void create_debug_files(struct rtk_phy *rtk_phy) { } +static inline void remove_debug_files(struct rtk_phy *rtk_phy) { } +#endif /* CONFIG_DEBUG_FS */ + +static int get_phy_data_by_efuse(struct rtk_phy *rtk_phy, + struct phy_parameter *phy_parameter, int index) +{ + struct phy_cfg *phy_cfg = rtk_phy->phy_cfg; + u8 value = 0; + struct nvmem_cell *cell; + struct soc_device_attribute rtk_soc_groot[] = { + { .family = "Realtek Groot",}, + { /* empty */ } }; + + if (!phy_cfg->check_efuse) + goto out; + + /* Read efuse for usb dc cal */ + cell = nvmem_cell_get(rtk_phy->dev, "usb-dc-cal"); + if (IS_ERR(cell)) { + dev_dbg(rtk_phy->dev, "%s no usb-dc-cal: %ld\n", + __func__, PTR_ERR(cell)); + } else { + unsigned char *buf; + size_t buf_size; + + buf = nvmem_cell_read(cell, &buf_size); + if (!IS_ERR(buf)) { + value = buf[0] & phy_cfg->dc_driving_mask; + kfree(buf); + } + nvmem_cell_put(cell); + } + + if (phy_cfg->check_efuse_version == CHECK_EFUSE_V1) { + int rate = phy_cfg->efuse_dc_driving_rate; + + if (value <= EFUS_USB_DC_CAL_MAX) + phy_parameter->efuse_usb_dc_cal = (int8_t)(value * rate); + else + phy_parameter->efuse_usb_dc_cal = -(int8_t) + ((EFUS_USB_DC_CAL_MAX & value) * rate); + + if (soc_device_match(rtk_soc_groot)) { + dev_dbg(rtk_phy->dev, "For groot IC we need a workaround to adjust efuse_usb_dc_cal\n"); + + /* We don't multiple dc_cal_rate=2 for positive dc cal compensate */ + if (value <= EFUS_USB_DC_CAL_MAX) + phy_parameter->efuse_usb_dc_cal = (int8_t)(value); + + /* We set max dc cal compensate is 0x8 if otp is 0x7 */ + if (value == 0x7) + phy_parameter->efuse_usb_dc_cal = (int8_t)(value + 1); + } + } else { /* for CHECK_EFUSE_V2 */ + phy_parameter->efuse_usb_dc_cal = value & phy_cfg->dc_driving_mask; + } + + /* Read efuse for usb dc disconnect level */ + value = 0; + cell = nvmem_cell_get(rtk_phy->dev, "usb-dc-dis"); + if (IS_ERR(cell)) { + dev_dbg(rtk_phy->dev, "%s no usb-dc-dis: %ld\n", + __func__, PTR_ERR(cell)); + } else { + unsigned char *buf; + size_t buf_size; + + buf = nvmem_cell_read(cell, &buf_size); + if (!IS_ERR(buf)) { + value = buf[0] & phy_cfg->dc_disconnect_mask; + kfree(buf); + } + nvmem_cell_put(cell); + } + + if (phy_cfg->check_efuse_version == CHECK_EFUSE_V1) { + int rate = phy_cfg->efuse_dc_disconnect_rate; + + if (value <= EFUS_USB_DC_DIS_MAX) + phy_parameter->efuse_usb_dc_dis = (int8_t)(value * rate); + else + phy_parameter->efuse_usb_dc_dis = -(int8_t) + ((EFUS_USB_DC_DIS_MAX & value) * rate); + } else { /* for CHECK_EFUSE_V2 */ + phy_parameter->efuse_usb_dc_dis = 
value & phy_cfg->dc_disconnect_mask; + } + +out: + return 0; +} + +static int parse_phy_data(struct rtk_phy *rtk_phy) +{ + struct device *dev = rtk_phy->dev; + struct device_node *np = dev->of_node; + struct phy_parameter *phy_parameter; + int ret = 0; + int index; + + rtk_phy->phy_parameter = devm_kzalloc(dev, sizeof(struct phy_parameter) * + rtk_phy->num_phy, GFP_KERNEL); + if (!rtk_phy->phy_parameter) + return -ENOMEM; + + for (index = 0; index < rtk_phy->num_phy; index++) { + phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index]; + + phy_parameter->phy_reg.reg_wrap_vstatus = of_iomap(np, 0); + phy_parameter->phy_reg.reg_gusb2phyacc0 = of_iomap(np, 1) + index; + phy_parameter->phy_reg.vstatus_index = index; + + if (of_property_read_bool(np, "realtek,inverse-hstx-sync-clock")) + phy_parameter->inverse_hstx_sync_clock = true; + else + phy_parameter->inverse_hstx_sync_clock = false; + + if (of_property_read_u32_index(np, "realtek,driving-level", + index, &phy_parameter->driving_level)) + phy_parameter->driving_level = DEFAULT_DC_DRIVING_VALUE; + + if (of_property_read_u32_index(np, "realtek,driving-level-compensate", + index, &phy_parameter->driving_level_compensate)) + phy_parameter->driving_level_compensate = 0; + + if (of_property_read_u32_index(np, "realtek,disconnection-compensate", + index, &phy_parameter->disconnection_compensate)) + phy_parameter->disconnection_compensate = 0; + + get_phy_data_by_efuse(rtk_phy, phy_parameter, index); + + update_dc_driving_level(rtk_phy, phy_parameter); + + update_hs_clk_select(rtk_phy, phy_parameter); + } + + return ret; +} + +static int rtk_usb2phy_probe(struct platform_device *pdev) +{ + struct rtk_phy *rtk_phy; + struct device *dev = &pdev->dev; + struct phy *generic_phy; + struct phy_provider *phy_provider; + const struct phy_cfg *phy_cfg; + int ret = 0; + + phy_cfg = of_device_get_match_data(dev); + if (!phy_cfg) { + dev_err(dev, "phy config are not assigned!\n"); + return -EINVAL; + } + + rtk_phy = devm_kzalloc(dev, sizeof(*rtk_phy), GFP_KERNEL); + if (!rtk_phy) + return -ENOMEM; + + rtk_phy->dev = &pdev->dev; + rtk_phy->phy.dev = rtk_phy->dev; + rtk_phy->phy.label = "rtk-usb2phy"; + rtk_phy->phy.notify_port_status = rtk_phy_notify_port_status; + + rtk_phy->phy_cfg = devm_kzalloc(dev, sizeof(*phy_cfg), GFP_KERNEL); + + memcpy(rtk_phy->phy_cfg, phy_cfg, sizeof(*phy_cfg)); + + rtk_phy->num_phy = phy_cfg->num_phy; + + ret = parse_phy_data(rtk_phy); + if (ret) + goto err; + + platform_set_drvdata(pdev, rtk_phy); + + generic_phy = devm_phy_create(rtk_phy->dev, NULL, &ops); + if (IS_ERR(generic_phy)) + return PTR_ERR(generic_phy); + + phy_set_drvdata(generic_phy, rtk_phy); + + phy_provider = devm_of_phy_provider_register(rtk_phy->dev, + of_phy_simple_xlate); + if (IS_ERR(phy_provider)) + return PTR_ERR(phy_provider); + + ret = usb_add_phy_dev(&rtk_phy->phy); + if (ret) + goto err; + + create_debug_files(rtk_phy); + +err: + return ret; +} + +static void rtk_usb2phy_remove(struct platform_device *pdev) +{ + struct rtk_phy *rtk_phy = platform_get_drvdata(pdev); + + remove_debug_files(rtk_phy); + + usb_remove_phy(&rtk_phy->phy); +} + +static const struct phy_cfg rtd1295_phy_cfg = { + .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE, + .page0 = { [0] = {0xe0, 0x90}, + [3] = {0xe3, 0x3a}, + [4] = {0xe4, 0x68}, + [6] = {0xe6, 0x91}, + [13] = {0xf5, 0x81}, + [15] = {0xf7, 0x02}, }, + .page1_size = 8, + .page1 = { /* default parameter */ }, + .page2_size = 0, + .page2 = { /* no parameter */ }, + .num_phy = 1, + .check_efuse = false, + 
.check_efuse_version = CHECK_EFUSE_V1, + .efuse_dc_driving_rate = 1, + .dc_driving_mask = 0xf, + .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE, + .dc_disconnect_mask = 0xf, + .usb_dc_disconnect_at_page0 = true, + .do_toggle = true, + .do_toggle_driving = false, + .driving_updated_for_dev_dis = 0xf, + .use_default_parameter = false, + .is_double_sensitivity_mode = false, +}; + +static const struct phy_cfg rtd1395_phy_cfg = { + .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE, + .page0 = { [4] = {0xe4, 0xac}, + [13] = {0xf5, 0x00}, + [15] = {0xf7, 0x02}, }, + .page1_size = 8, + .page1 = { /* default parameter */ }, + .page2_size = 0, + .page2 = { /* no parameter */ }, + .num_phy = 1, + .check_efuse = false, + .check_efuse_version = CHECK_EFUSE_V1, + .efuse_dc_driving_rate = 1, + .dc_driving_mask = 0xf, + .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE, + .dc_disconnect_mask = 0xf, + .usb_dc_disconnect_at_page0 = true, + .do_toggle = true, + .do_toggle_driving = false, + .driving_updated_for_dev_dis = 0xf, + .use_default_parameter = false, + .is_double_sensitivity_mode = false, +}; + +static const struct phy_cfg rtd1395_phy_cfg_2port = { + .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE, + .page0 = { [4] = {0xe4, 0xac}, + [13] = {0xf5, 0x00}, + [15] = {0xf7, 0x02}, }, + .page1_size = 8, + .page1 = { /* default parameter */ }, + .page2_size = 0, + .page2 = { /* no parameter */ }, + .num_phy = 2, + .check_efuse = false, + .check_efuse_version = CHECK_EFUSE_V1, + .efuse_dc_driving_rate = 1, + .dc_driving_mask = 0xf, + .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE, + .dc_disconnect_mask = 0xf, + .usb_dc_disconnect_at_page0 = true, + .do_toggle = true, + .do_toggle_driving = false, + .driving_updated_for_dev_dis = 0xf, + .use_default_parameter = false, + .is_double_sensitivity_mode = false, +}; + +static const struct phy_cfg rtd1619_phy_cfg = { + .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE, + .page0 = { [4] = {0xe4, 0x68}, }, + .page1_size = 8, + .page1 = { /* default parameter */ }, + .page2_size = 0, + .page2 = { /* no parameter */ }, + .num_phy = 1, + .check_efuse = true, + .check_efuse_version = CHECK_EFUSE_V1, + .efuse_dc_driving_rate = 1, + .dc_driving_mask = 0xf, + .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE, + .dc_disconnect_mask = 0xf, + .usb_dc_disconnect_at_page0 = true, + .do_toggle = true, + .do_toggle_driving = false, + .driving_updated_for_dev_dis = 0xf, + .use_default_parameter = false, + .is_double_sensitivity_mode = false, +}; + +static const struct phy_cfg rtd1319_phy_cfg = { + .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE, + .page0 = { [0] = {0xe0, 0x18}, + [4] = {0xe4, 0x6a}, + [7] = {0xe7, 0x71}, + [13] = {0xf5, 0x15}, + [15] = {0xf7, 0x32}, }, + .page1_size = 8, + .page1 = { [3] = {0xe3, 0x44}, }, + .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE, + .page2 = { [0] = {0xe0, 0x01}, }, + .num_phy = 1, + .check_efuse = true, + .check_efuse_version = CHECK_EFUSE_V1, + .efuse_dc_driving_rate = 1, + .dc_driving_mask = 0xf, + .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE, + .dc_disconnect_mask = 0xf, + .usb_dc_disconnect_at_page0 = true, + .do_toggle = true, + .do_toggle_driving = true, + .driving_updated_for_dev_dis = 0xf, + .use_default_parameter = false, + .is_double_sensitivity_mode = true, +}; + +static const struct phy_cfg rtd1312c_phy_cfg = { + .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE, + .page0 = { [0] = {0xe0, 0x14}, + [4] = {0xe4, 0x67}, + [5] = {0xe5, 0x55}, }, + .page1_size = 8, + .page1 = { [3] = {0xe3, 0x23}, + [6] = {0xe6, 0x58}, }, + .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE, 
+ .page2 = { /* default parameter */ }, + .num_phy = 1, + .check_efuse = true, + .check_efuse_version = CHECK_EFUSE_V1, + .efuse_dc_driving_rate = 1, + .dc_driving_mask = 0xf, + .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE, + .dc_disconnect_mask = 0xf, + .usb_dc_disconnect_at_page0 = true, + .do_toggle = true, + .do_toggle_driving = true, + .driving_updated_for_dev_dis = 0xf, + .use_default_parameter = false, + .is_double_sensitivity_mode = true, +}; + +static const struct phy_cfg rtd1619b_phy_cfg = { + .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE, + .page0 = { [0] = {0xe0, 0xa3}, + [4] = {0xe4, 0x88}, + [5] = {0xe5, 0x4f}, + [6] = {0xe6, 0x02}, }, + .page1_size = 8, + .page1 = { [3] = {0xe3, 0x64}, }, + .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE, + .page2 = { [7] = {0xe7, 0x45}, }, + .num_phy = 1, + .check_efuse = true, + .check_efuse_version = CHECK_EFUSE_V1, + .efuse_dc_driving_rate = EFUS_USB_DC_CAL_RATE, + .dc_driving_mask = 0x1f, + .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE, + .dc_disconnect_mask = 0xf, + .usb_dc_disconnect_at_page0 = false, + .do_toggle = true, + .do_toggle_driving = true, + .driving_updated_for_dev_dis = 0x8, + .use_default_parameter = false, + .is_double_sensitivity_mode = true, +}; + +static const struct phy_cfg rtd1319d_phy_cfg = { + .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE, + .page0 = { [0] = {0xe0, 0xa3}, + [4] = {0xe4, 0x8e}, + [5] = {0xe5, 0x4f}, + [6] = {0xe6, 0x02}, }, + .page1_size = MAX_USB_PHY_PAGE1_DATA_SIZE, + .page1 = { [14] = {0xf5, 0x1}, }, + .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE, + .page2 = { [7] = {0xe7, 0x44}, }, + .check_efuse = true, + .num_phy = 1, + .check_efuse_version = CHECK_EFUSE_V1, + .efuse_dc_driving_rate = EFUS_USB_DC_CAL_RATE, + .dc_driving_mask = 0x1f, + .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE, + .dc_disconnect_mask = 0xf, + .usb_dc_disconnect_at_page0 = false, + .do_toggle = true, + .do_toggle_driving = false, + .driving_updated_for_dev_dis = 0x8, + .use_default_parameter = false, + .is_double_sensitivity_mode = true, +}; + +static const struct phy_cfg rtd1315e_phy_cfg = { + .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE, + .page0 = { [0] = {0xe0, 0xa3}, + [4] = {0xe4, 0x8c}, + [5] = {0xe5, 0x4f}, + [6] = {0xe6, 0x02}, }, + .page1_size = MAX_USB_PHY_PAGE1_DATA_SIZE, + .page1 = { [3] = {0xe3, 0x7f}, + [14] = {0xf5, 0x01}, }, + .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE, + .page2 = { [7] = {0xe7, 0x44}, }, + .num_phy = 1, + .check_efuse = true, + .check_efuse_version = CHECK_EFUSE_V2, + .efuse_dc_driving_rate = EFUS_USB_DC_CAL_RATE, + .dc_driving_mask = 0x1f, + .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE, + .dc_disconnect_mask = 0xf, + .usb_dc_disconnect_at_page0 = false, + .do_toggle = true, + .do_toggle_driving = false, + .driving_updated_for_dev_dis = 0x8, + .use_default_parameter = false, + .is_double_sensitivity_mode = true, +}; + +static const struct of_device_id usbphy_rtk_dt_match[] = { + { .compatible = "realtek,rtd1295-usb2phy", .data = &rtd1295_phy_cfg }, + { .compatible = "realtek,rtd1312c-usb2phy", .data = &rtd1312c_phy_cfg }, + { .compatible = "realtek,rtd1315e-usb2phy", .data = &rtd1315e_phy_cfg }, + { .compatible = "realtek,rtd1319-usb2phy", .data = &rtd1319_phy_cfg }, + { .compatible = "realtek,rtd1319d-usb2phy", .data = &rtd1319d_phy_cfg }, + { .compatible = "realtek,rtd1395-usb2phy", .data = &rtd1395_phy_cfg }, + { .compatible = "realtek,rtd1395-usb2phy-2port", .data = &rtd1395_phy_cfg_2port }, + { .compatible = "realtek,rtd1619-usb2phy", .data = &rtd1619_phy_cfg }, + { .compatible = 
"realtek,rtd1619b-usb2phy", .data = &rtd1619b_phy_cfg }, + {}, +}; +MODULE_DEVICE_TABLE(of, usbphy_rtk_dt_match); + +static struct platform_driver rtk_usb2phy_driver = { + .probe = rtk_usb2phy_probe, + .remove_new = rtk_usb2phy_remove, + .driver = { + .name = "rtk-usb2phy", + .of_match_table = usbphy_rtk_dt_match, + }, +}; + +module_platform_driver(rtk_usb2phy_driver); + +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform: rtk-usb2phy"); +MODULE_AUTHOR("Stanley Chang <[email protected]>"); +MODULE_DESCRIPTION("Realtek usb 2.0 phy driver"); diff --git a/drivers/phy/realtek/phy-rtk-usb3.c b/drivers/phy/realtek/phy-rtk-usb3.c new file mode 100644 index 000000000000..7881f908aade --- /dev/null +++ b/drivers/phy/realtek/phy-rtk-usb3.c @@ -0,0 +1,767 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * phy-rtk-usb3.c RTK usb3.0 phy driver + * + * copyright (c) 2023 realtek semiconductor corporation + * + */ + +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_address.h> +#include <linux/uaccess.h> +#include <linux/debugfs.h> +#include <linux/nvmem-consumer.h> +#include <linux/regmap.h> +#include <linux/sys_soc.h> +#include <linux/mfd/syscon.h> +#include <linux/phy/phy.h> +#include <linux/usb.h> +#include <linux/usb/hcd.h> +#include <linux/usb/phy.h> + +#define USB_MDIO_CTRL_PHY_BUSY BIT(7) +#define USB_MDIO_CTRL_PHY_WRITE BIT(0) +#define USB_MDIO_CTRL_PHY_ADDR_SHIFT 8 +#define USB_MDIO_CTRL_PHY_DATA_SHIFT 16 + +#define MAX_USB_PHY_DATA_SIZE 0x30 +#define PHY_ADDR_0X09 0x09 +#define PHY_ADDR_0X0B 0x0b +#define PHY_ADDR_0X0D 0x0d +#define PHY_ADDR_0X10 0x10 +#define PHY_ADDR_0X1F 0x1f +#define PHY_ADDR_0X20 0x20 +#define PHY_ADDR_0X21 0x21 +#define PHY_ADDR_0X30 0x30 + +#define REG_0X09_FORCE_CALIBRATION BIT(9) +#define REG_0X0B_RX_OFFSET_RANGE_MASK 0xc +#define REG_0X0D_RX_DEBUG_TEST_EN BIT(6) +#define REG_0X10_DEBUG_MODE_SETTING 0x3c0 +#define REG_0X10_DEBUG_MODE_SETTING_MASK 0x3f8 +#define REG_0X1F_RX_OFFSET_CODE_MASK 0x1e + +#define USB_U3_TX_LFPS_SWING_TRIM_SHIFT 4 +#define USB_U3_TX_LFPS_SWING_TRIM_MASK 0xf +#define AMPLITUDE_CONTROL_COARSE_MASK 0xff +#define AMPLITUDE_CONTROL_FINE_MASK 0xffff +#define AMPLITUDE_CONTROL_COARSE_DEFAULT 0xff +#define AMPLITUDE_CONTROL_FINE_DEFAULT 0xffff + +#define PHY_ADDR_MAP_ARRAY_INDEX(addr) (addr) +#define ARRAY_INDEX_MAP_PHY_ADDR(index) (index) + +struct phy_reg { + void __iomem *reg_mdio_ctl; +}; + +struct phy_data { + u8 addr; + u16 data; +}; + +struct phy_cfg { + int param_size; + struct phy_data param[MAX_USB_PHY_DATA_SIZE]; + + bool check_efuse; + bool do_toggle; + bool do_toggle_once; + bool use_default_parameter; + bool check_rx_front_end_offset; +}; + +struct phy_parameter { + struct phy_reg phy_reg; + + /* Get from efuse */ + u8 efuse_usb_u3_tx_lfps_swing_trim; + + /* Get from dts */ + u32 amplitude_control_coarse; + u32 amplitude_control_fine; +}; + +struct rtk_phy { + struct usb_phy phy; + struct device *dev; + + struct phy_cfg *phy_cfg; + int num_phy; + struct phy_parameter *phy_parameter; + + struct dentry *debug_dir; +}; + +#define PHY_IO_TIMEOUT_USEC (50000) +#define PHY_IO_DELAY_US (100) + +static inline int utmi_wait_register(void __iomem *reg, u32 mask, u32 result) +{ + int ret; + unsigned int val; + + ret = read_poll_timeout(readl, val, ((val & mask) == result), + PHY_IO_DELAY_US, PHY_IO_TIMEOUT_USEC, false, reg); + if (ret) { + pr_err("%s can't program USB phy\n", __func__); + return -ETIMEDOUT; + } + + return 0; +} + +static int rtk_phy3_wait_vbusy(struct phy_reg *phy_reg) +{ + return 
utmi_wait_register(phy_reg->reg_mdio_ctl, USB_MDIO_CTRL_PHY_BUSY, 0); +} + +static u16 rtk_phy_read(struct phy_reg *phy_reg, char addr) +{ + unsigned int tmp; + u32 value; + + tmp = (addr << USB_MDIO_CTRL_PHY_ADDR_SHIFT); + + writel(tmp, phy_reg->reg_mdio_ctl); + + rtk_phy3_wait_vbusy(phy_reg); + + value = readl(phy_reg->reg_mdio_ctl); + value = value >> USB_MDIO_CTRL_PHY_DATA_SHIFT; + + return (u16)value; +} + +static int rtk_phy_write(struct phy_reg *phy_reg, char addr, u16 data) +{ + unsigned int val; + + val = USB_MDIO_CTRL_PHY_WRITE | + (addr << USB_MDIO_CTRL_PHY_ADDR_SHIFT) | + (data << USB_MDIO_CTRL_PHY_DATA_SHIFT); + + writel(val, phy_reg->reg_mdio_ctl); + + rtk_phy3_wait_vbusy(phy_reg); + + return 0; +} + +static void do_rtk_usb3_phy_toggle(struct rtk_phy *rtk_phy, int index, bool connect) +{ + struct phy_cfg *phy_cfg = rtk_phy->phy_cfg; + struct phy_reg *phy_reg; + struct phy_parameter *phy_parameter; + struct phy_data *phy_data; + u8 addr; + u16 data; + int i; + + phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index]; + phy_reg = &phy_parameter->phy_reg; + + if (!phy_cfg->do_toggle) + return; + + i = PHY_ADDR_MAP_ARRAY_INDEX(PHY_ADDR_0X09); + phy_data = phy_cfg->param + i; + addr = phy_data->addr; + data = phy_data->data; + + if (!addr && !data) { + addr = PHY_ADDR_0X09; + data = rtk_phy_read(phy_reg, addr); + phy_data->addr = addr; + phy_data->data = data; + } + + rtk_phy_write(phy_reg, addr, data & (~REG_0X09_FORCE_CALIBRATION)); + mdelay(1); + rtk_phy_write(phy_reg, addr, data | REG_0X09_FORCE_CALIBRATION); +} + +static int do_rtk_phy_init(struct rtk_phy *rtk_phy, int index) +{ + struct phy_cfg *phy_cfg; + struct phy_reg *phy_reg; + struct phy_parameter *phy_parameter; + int i = 0; + + phy_cfg = rtk_phy->phy_cfg; + phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index]; + phy_reg = &phy_parameter->phy_reg; + + if (phy_cfg->use_default_parameter) + goto do_toggle; + + for (i = 0; i < phy_cfg->param_size; i++) { + struct phy_data *phy_data = phy_cfg->param + i; + u8 addr = phy_data->addr; + u16 data = phy_data->data; + + if (!addr && !data) + continue; + + rtk_phy_write(phy_reg, addr, data); + } + +do_toggle: + if (phy_cfg->do_toggle_once) + phy_cfg->do_toggle = true; + + do_rtk_usb3_phy_toggle(rtk_phy, index, false); + + if (phy_cfg->do_toggle_once) { + u16 check_value = 0; + int count = 10; + u16 value_0x0d, value_0x10; + + /* Enable Debug mode by set 0x0D and 0x10 */ + value_0x0d = rtk_phy_read(phy_reg, PHY_ADDR_0X0D); + value_0x10 = rtk_phy_read(phy_reg, PHY_ADDR_0X10); + + rtk_phy_write(phy_reg, PHY_ADDR_0X0D, + value_0x0d | REG_0X0D_RX_DEBUG_TEST_EN); + rtk_phy_write(phy_reg, PHY_ADDR_0X10, + (value_0x10 & ~REG_0X10_DEBUG_MODE_SETTING_MASK) | + REG_0X10_DEBUG_MODE_SETTING); + + check_value = rtk_phy_read(phy_reg, PHY_ADDR_0X30); + + while (!(check_value & BIT(15))) { + check_value = rtk_phy_read(phy_reg, PHY_ADDR_0X30); + mdelay(1); + if (count-- < 0) + break; + } + + if (!(check_value & BIT(15))) + dev_info(rtk_phy->dev, "toggle fail addr=0x%02x, data=0x%04x\n", + PHY_ADDR_0X30, check_value); + + /* Disable Debug mode by set 0x0D and 0x10 to default*/ + rtk_phy_write(phy_reg, PHY_ADDR_0X0D, value_0x0d); + rtk_phy_write(phy_reg, PHY_ADDR_0X10, value_0x10); + + phy_cfg->do_toggle = false; + } + + if (phy_cfg->check_rx_front_end_offset) { + u16 rx_offset_code, rx_offset_range; + u16 code_mask = REG_0X1F_RX_OFFSET_CODE_MASK; + u16 range_mask = REG_0X0B_RX_OFFSET_RANGE_MASK; + bool do_update = false; + + rx_offset_code = 
rtk_phy_read(phy_reg, PHY_ADDR_0X1F); + if (((rx_offset_code & code_mask) == 0x0) || + ((rx_offset_code & code_mask) == code_mask)) + do_update = true; + + rx_offset_range = rtk_phy_read(phy_reg, PHY_ADDR_0X0B); + if (((rx_offset_range & range_mask) == range_mask) && do_update) { + dev_warn(rtk_phy->dev, "Don't update rx_offset_range (rx_offset_code=0x%x, rx_offset_range=0x%x)\n", + rx_offset_code, rx_offset_range); + do_update = false; + } + + if (do_update) { + u16 tmp1, tmp2; + + tmp1 = rx_offset_range & (~range_mask); + tmp2 = rx_offset_range & range_mask; + tmp2 += (1 << 2); + rx_offset_range = tmp1 | (tmp2 & range_mask); + rtk_phy_write(phy_reg, PHY_ADDR_0X0B, rx_offset_range); + goto do_toggle; + } + } + + return 0; +} + +static int rtk_phy_init(struct phy *phy) +{ + struct rtk_phy *rtk_phy = phy_get_drvdata(phy); + int ret = 0; + int i; + unsigned long phy_init_time = jiffies; + + for (i = 0; i < rtk_phy->num_phy; i++) + ret = do_rtk_phy_init(rtk_phy, i); + + dev_dbg(rtk_phy->dev, "Initialized RTK USB 3.0 PHY (take %dms)\n", + jiffies_to_msecs(jiffies - phy_init_time)); + + return ret; +} + +static int rtk_phy_exit(struct phy *phy) +{ + return 0; +} + +static const struct phy_ops ops = { + .init = rtk_phy_init, + .exit = rtk_phy_exit, + .owner = THIS_MODULE, +}; + +static void rtk_phy_toggle(struct usb_phy *usb3_phy, bool connect, int port) +{ + int index = port; + struct rtk_phy *rtk_phy = NULL; + + rtk_phy = dev_get_drvdata(usb3_phy->dev); + + if (index > rtk_phy->num_phy) { + dev_err(rtk_phy->dev, "%s: The port=%d is not in usb phy (num_phy=%d)\n", + __func__, index, rtk_phy->num_phy); + return; + } + + do_rtk_usb3_phy_toggle(rtk_phy, index, connect); +} + +static int rtk_phy_notify_port_status(struct usb_phy *x, int port, + u16 portstatus, u16 portchange) +{ + bool connect = false; + + pr_debug("%s port=%d portstatus=0x%x portchange=0x%x\n", + __func__, port, (int)portstatus, (int)portchange); + if (portstatus & USB_PORT_STAT_CONNECTION) + connect = true; + + if (portchange & USB_PORT_STAT_C_CONNECTION) + rtk_phy_toggle(x, connect, port); + + return 0; +} + +#ifdef CONFIG_DEBUG_FS +static struct dentry *create_phy_debug_root(void) +{ + struct dentry *phy_debug_root; + + phy_debug_root = debugfs_lookup("phy", usb_debug_root); + if (!phy_debug_root) + phy_debug_root = debugfs_create_dir("phy", usb_debug_root); + + return phy_debug_root; +} + +static int rtk_usb3_parameter_show(struct seq_file *s, void *unused) +{ + struct rtk_phy *rtk_phy = s->private; + struct phy_cfg *phy_cfg; + int i, index; + + phy_cfg = rtk_phy->phy_cfg; + + seq_puts(s, "Property:\n"); + seq_printf(s, " check_efuse: %s\n", + phy_cfg->check_efuse ? "Enable" : "Disable"); + seq_printf(s, " do_toggle: %s\n", + phy_cfg->do_toggle ? "Enable" : "Disable"); + seq_printf(s, " do_toggle_once: %s\n", + phy_cfg->do_toggle_once ? "Enable" : "Disable"); + seq_printf(s, " use_default_parameter: %s\n", + phy_cfg->use_default_parameter ? 
"Enable" : "Disable"); + + for (index = 0; index < rtk_phy->num_phy; index++) { + struct phy_reg *phy_reg; + struct phy_parameter *phy_parameter; + + phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index]; + phy_reg = &phy_parameter->phy_reg; + + seq_printf(s, "PHY %d:\n", index); + + for (i = 0; i < phy_cfg->param_size; i++) { + struct phy_data *phy_data = phy_cfg->param + i; + u8 addr = ARRAY_INDEX_MAP_PHY_ADDR(i); + u16 data = phy_data->data; + + if (!phy_data->addr && !data) + seq_printf(s, " addr = 0x%02x, data = none ==> read value = 0x%04x\n", + addr, rtk_phy_read(phy_reg, addr)); + else + seq_printf(s, " addr = 0x%02x, data = 0x%04x ==> read value = 0x%04x\n", + addr, data, rtk_phy_read(phy_reg, addr)); + } + + seq_puts(s, "PHY Property:\n"); + seq_printf(s, " efuse_usb_u3_tx_lfps_swing_trim: 0x%x\n", + (int)phy_parameter->efuse_usb_u3_tx_lfps_swing_trim); + seq_printf(s, " amplitude_control_coarse: 0x%x\n", + (int)phy_parameter->amplitude_control_coarse); + seq_printf(s, " amplitude_control_fine: 0x%x\n", + (int)phy_parameter->amplitude_control_fine); + } + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(rtk_usb3_parameter); + +static inline void create_debug_files(struct rtk_phy *rtk_phy) +{ + struct dentry *phy_debug_root = NULL; + + phy_debug_root = create_phy_debug_root(); + + if (!phy_debug_root) + return; + + rtk_phy->debug_dir = debugfs_create_dir(dev_name(rtk_phy->dev), phy_debug_root); + if (!rtk_phy->debug_dir) + return; + + if (!debugfs_create_file("parameter", 0444, rtk_phy->debug_dir, rtk_phy, + &rtk_usb3_parameter_fops)) + goto file_error; + + return; + +file_error: + debugfs_remove_recursive(rtk_phy->debug_dir); +} + +static inline void remove_debug_files(struct rtk_phy *rtk_phy) +{ + debugfs_remove_recursive(rtk_phy->debug_dir); +} +#else +static inline void create_debug_files(struct rtk_phy *rtk_phy) { } +static inline void remove_debug_files(struct rtk_phy *rtk_phy) { } +#endif /* CONFIG_DEBUG_FS */ + +static int get_phy_data_by_efuse(struct rtk_phy *rtk_phy, + struct phy_parameter *phy_parameter, int index) +{ + struct phy_cfg *phy_cfg = rtk_phy->phy_cfg; + u8 value = 0; + struct nvmem_cell *cell; + + if (!phy_cfg->check_efuse) + goto out; + + cell = nvmem_cell_get(rtk_phy->dev, "usb_u3_tx_lfps_swing_trim"); + if (IS_ERR(cell)) { + dev_dbg(rtk_phy->dev, "%s no usb_u3_tx_lfps_swing_trim: %ld\n", + __func__, PTR_ERR(cell)); + } else { + unsigned char *buf; + size_t buf_size; + + buf = nvmem_cell_read(cell, &buf_size); + if (!IS_ERR(buf)) { + value = buf[0] & USB_U3_TX_LFPS_SWING_TRIM_MASK; + kfree(buf); + } + nvmem_cell_put(cell); + } + + if (value > 0 && value < 0x8) + phy_parameter->efuse_usb_u3_tx_lfps_swing_trim = 0x8; + else + phy_parameter->efuse_usb_u3_tx_lfps_swing_trim = (u8)value; + +out: + return 0; +} + +static void update_amplitude_control_value(struct rtk_phy *rtk_phy, + struct phy_parameter *phy_parameter) +{ + struct phy_cfg *phy_cfg; + struct phy_reg *phy_reg; + + phy_reg = &phy_parameter->phy_reg; + phy_cfg = rtk_phy->phy_cfg; + + if (phy_parameter->amplitude_control_coarse != AMPLITUDE_CONTROL_COARSE_DEFAULT) { + u16 val_mask = AMPLITUDE_CONTROL_COARSE_MASK; + u16 data; + + if (!phy_cfg->param[PHY_ADDR_0X20].addr && !phy_cfg->param[PHY_ADDR_0X20].data) { + phy_cfg->param[PHY_ADDR_0X20].addr = PHY_ADDR_0X20; + data = rtk_phy_read(phy_reg, PHY_ADDR_0X20); + } else { + data = phy_cfg->param[PHY_ADDR_0X20].data; + } + + data &= (~val_mask); + data |= (phy_parameter->amplitude_control_coarse & val_mask); + + phy_cfg->param[PHY_ADDR_0X20].data 
= data; + } + + if (phy_parameter->efuse_usb_u3_tx_lfps_swing_trim) { + u8 efuse_val = phy_parameter->efuse_usb_u3_tx_lfps_swing_trim; + u16 val_mask = USB_U3_TX_LFPS_SWING_TRIM_MASK; + int val_shift = USB_U3_TX_LFPS_SWING_TRIM_SHIFT; + u16 data; + + if (!phy_cfg->param[PHY_ADDR_0X20].addr && !phy_cfg->param[PHY_ADDR_0X20].data) { + phy_cfg->param[PHY_ADDR_0X20].addr = PHY_ADDR_0X20; + data = rtk_phy_read(phy_reg, PHY_ADDR_0X20); + } else { + data = phy_cfg->param[PHY_ADDR_0X20].data; + } + + data &= ~(val_mask << val_shift); + data |= ((efuse_val & val_mask) << val_shift); + + phy_cfg->param[PHY_ADDR_0X20].data = data; + } + + if (phy_parameter->amplitude_control_fine != AMPLITUDE_CONTROL_FINE_DEFAULT) { + u16 val_mask = AMPLITUDE_CONTROL_FINE_MASK; + + if (!phy_cfg->param[PHY_ADDR_0X21].addr && !phy_cfg->param[PHY_ADDR_0X21].data) + phy_cfg->param[PHY_ADDR_0X21].addr = PHY_ADDR_0X21; + + phy_cfg->param[PHY_ADDR_0X21].data = + phy_parameter->amplitude_control_fine & val_mask; + } +} + +static int parse_phy_data(struct rtk_phy *rtk_phy) +{ + struct device *dev = rtk_phy->dev; + struct phy_parameter *phy_parameter; + int ret = 0; + int index; + + rtk_phy->phy_parameter = devm_kzalloc(dev, sizeof(struct phy_parameter) * + rtk_phy->num_phy, GFP_KERNEL); + if (!rtk_phy->phy_parameter) + return -ENOMEM; + + for (index = 0; index < rtk_phy->num_phy; index++) { + phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index]; + + phy_parameter->phy_reg.reg_mdio_ctl = of_iomap(dev->of_node, 0) + index; + + /* Amplitude control address 0x20 bit 0 to bit 7 */ + if (of_property_read_u32(dev->of_node, "realtek,amplitude-control-coarse-tuning", + &phy_parameter->amplitude_control_coarse)) + phy_parameter->amplitude_control_coarse = AMPLITUDE_CONTROL_COARSE_DEFAULT; + + /* Amplitude control address 0x21 bit 0 to bit 16 */ + if (of_property_read_u32(dev->of_node, "realtek,amplitude-control-fine-tuning", + &phy_parameter->amplitude_control_fine)) + phy_parameter->amplitude_control_fine = AMPLITUDE_CONTROL_FINE_DEFAULT; + + get_phy_data_by_efuse(rtk_phy, phy_parameter, index); + + update_amplitude_control_value(rtk_phy, phy_parameter); + } + + return ret; +} + +static int rtk_usb3phy_probe(struct platform_device *pdev) +{ + struct rtk_phy *rtk_phy; + struct device *dev = &pdev->dev; + struct phy *generic_phy; + struct phy_provider *phy_provider; + const struct phy_cfg *phy_cfg; + int ret; + + phy_cfg = of_device_get_match_data(dev); + if (!phy_cfg) { + dev_err(dev, "phy config are not assigned!\n"); + return -EINVAL; + } + + rtk_phy = devm_kzalloc(dev, sizeof(*rtk_phy), GFP_KERNEL); + if (!rtk_phy) + return -ENOMEM; + + rtk_phy->dev = &pdev->dev; + rtk_phy->phy.dev = rtk_phy->dev; + rtk_phy->phy.label = "rtk-usb3phy"; + rtk_phy->phy.notify_port_status = rtk_phy_notify_port_status; + + rtk_phy->phy_cfg = devm_kzalloc(dev, sizeof(*phy_cfg), GFP_KERNEL); + + memcpy(rtk_phy->phy_cfg, phy_cfg, sizeof(*phy_cfg)); + + rtk_phy->num_phy = 1; + + ret = parse_phy_data(rtk_phy); + if (ret) + goto err; + + platform_set_drvdata(pdev, rtk_phy); + + generic_phy = devm_phy_create(rtk_phy->dev, NULL, &ops); + if (IS_ERR(generic_phy)) + return PTR_ERR(generic_phy); + + phy_set_drvdata(generic_phy, rtk_phy); + + phy_provider = devm_of_phy_provider_register(rtk_phy->dev, of_phy_simple_xlate); + if (IS_ERR(phy_provider)) + return PTR_ERR(phy_provider); + + ret = usb_add_phy_dev(&rtk_phy->phy); + if (ret) + goto err; + + create_debug_files(rtk_phy); + +err: + return ret; +} + +static void rtk_usb3phy_remove(struct 
platform_device *pdev) +{ + struct rtk_phy *rtk_phy = platform_get_drvdata(pdev); + + remove_debug_files(rtk_phy); + + usb_remove_phy(&rtk_phy->phy); +} + +static const struct phy_cfg rtd1295_phy_cfg = { + .param_size = MAX_USB_PHY_DATA_SIZE, + .param = { [0] = {0x01, 0x4008}, [1] = {0x01, 0xe046}, + [2] = {0x02, 0x6046}, [3] = {0x03, 0x2779}, + [4] = {0x04, 0x72f5}, [5] = {0x05, 0x2ad3}, + [6] = {0x06, 0x000e}, [7] = {0x07, 0x2e00}, + [8] = {0x08, 0x3591}, [9] = {0x09, 0x525c}, + [10] = {0x0a, 0xa600}, [11] = {0x0b, 0xa904}, + [12] = {0x0c, 0xc000}, [13] = {0x0d, 0xef1c}, + [14] = {0x0e, 0x2000}, [15] = {0x0f, 0x0000}, + [16] = {0x10, 0x000c}, [17] = {0x11, 0x4c00}, + [18] = {0x12, 0xfc00}, [19] = {0x13, 0x0c81}, + [20] = {0x14, 0xde01}, [21] = {0x15, 0x0000}, + [22] = {0x16, 0x0000}, [23] = {0x17, 0x0000}, + [24] = {0x18, 0x0000}, [25] = {0x19, 0x4004}, + [26] = {0x1a, 0x1260}, [27] = {0x1b, 0xff00}, + [28] = {0x1c, 0xcb00}, [29] = {0x1d, 0xa03f}, + [30] = {0x1e, 0xc2e0}, [31] = {0x1f, 0x2807}, + [32] = {0x20, 0x947a}, [33] = {0x21, 0x88aa}, + [34] = {0x22, 0x0057}, [35] = {0x23, 0xab66}, + [36] = {0x24, 0x0800}, [37] = {0x25, 0x0000}, + [38] = {0x26, 0x040a}, [39] = {0x27, 0x01d6}, + [40] = {0x28, 0xf8c2}, [41] = {0x29, 0x3080}, + [42] = {0x2a, 0x3082}, [43] = {0x2b, 0x2078}, + [44] = {0x2c, 0xffff}, [45] = {0x2d, 0xffff}, + [46] = {0x2e, 0x0000}, [47] = {0x2f, 0x0040}, }, + .check_efuse = false, + .do_toggle = true, + .do_toggle_once = false, + .use_default_parameter = false, + .check_rx_front_end_offset = false, +}; + +static const struct phy_cfg rtd1619_phy_cfg = { + .param_size = MAX_USB_PHY_DATA_SIZE, + .param = { [8] = {0x08, 0x3591}, + [38] = {0x26, 0x840b}, + [40] = {0x28, 0xf842}, }, + .check_efuse = false, + .do_toggle = true, + .do_toggle_once = false, + .use_default_parameter = false, + .check_rx_front_end_offset = false, +}; + +static const struct phy_cfg rtd1319_phy_cfg = { + .param_size = MAX_USB_PHY_DATA_SIZE, + .param = { [1] = {0x01, 0xac86}, + [6] = {0x06, 0x0003}, + [9] = {0x09, 0x924c}, + [10] = {0x0a, 0xa608}, + [11] = {0x0b, 0xb905}, + [14] = {0x0e, 0x2010}, + [32] = {0x20, 0x705a}, + [33] = {0x21, 0xf645}, + [34] = {0x22, 0x0013}, + [35] = {0x23, 0xcb66}, + [41] = {0x29, 0xff00}, }, + .check_efuse = true, + .do_toggle = true, + .do_toggle_once = false, + .use_default_parameter = false, + .check_rx_front_end_offset = false, +}; + +static const struct phy_cfg rtd1619b_phy_cfg = { + .param_size = MAX_USB_PHY_DATA_SIZE, + .param = { [1] = {0x01, 0xac8c}, + [6] = {0x06, 0x0017}, + [9] = {0x09, 0x724c}, + [10] = {0x0a, 0xb610}, + [11] = {0x0b, 0xb90d}, + [13] = {0x0d, 0xef2a}, + [15] = {0x0f, 0x9050}, + [16] = {0x10, 0x000c}, + [32] = {0x20, 0x70ff}, + [34] = {0x22, 0x0013}, + [35] = {0x23, 0xdb66}, + [38] = {0x26, 0x8609}, + [41] = {0x29, 0xff13}, + [42] = {0x2a, 0x3070}, }, + .check_efuse = true, + .do_toggle = false, + .do_toggle_once = true, + .use_default_parameter = false, + .check_rx_front_end_offset = false, +}; + +static const struct phy_cfg rtd1319d_phy_cfg = { + .param_size = MAX_USB_PHY_DATA_SIZE, + .param = { [1] = {0x01, 0xac89}, + [4] = {0x04, 0xf2f5}, + [6] = {0x06, 0x0017}, + [9] = {0x09, 0x424c}, + [10] = {0x0a, 0x9610}, + [11] = {0x0b, 0x9901}, + [12] = {0x0c, 0xf000}, + [13] = {0x0d, 0xef2a}, + [14] = {0x0e, 0x1000}, + [15] = {0x0f, 0x9050}, + [32] = {0x20, 0x7077}, + [35] = {0x23, 0x0b62}, + [37] = {0x25, 0x10ec}, + [42] = {0x2a, 0x3070}, }, + .check_efuse = true, + .do_toggle = false, + .do_toggle_once = true, + .use_default_parameter = false, + 
.check_rx_front_end_offset = true, +}; + +static const struct of_device_id usbphy_rtk_dt_match[] = { + { .compatible = "realtek,rtd1295-usb3phy", .data = &rtd1295_phy_cfg }, + { .compatible = "realtek,rtd1319-usb3phy", .data = &rtd1319_phy_cfg }, + { .compatible = "realtek,rtd1319d-usb3phy", .data = &rtd1319d_phy_cfg }, + { .compatible = "realtek,rtd1619-usb3phy", .data = &rtd1619_phy_cfg }, + { .compatible = "realtek,rtd1619b-usb3phy", .data = &rtd1619b_phy_cfg }, + {}, +}; +MODULE_DEVICE_TABLE(of, usbphy_rtk_dt_match); + +static struct platform_driver rtk_usb3phy_driver = { + .probe = rtk_usb3phy_probe, + .remove_new = rtk_usb3phy_remove, + .driver = { + .name = "rtk-usb3phy", + .of_match_table = usbphy_rtk_dt_match, + }, +}; + +module_platform_driver(rtk_usb3phy_driver); + +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform: rtk-usb3phy"); +MODULE_AUTHOR("Stanley Chang <[email protected]>"); +MODULE_DESCRIPTION("Realtek usb 3.0 phy driver"); diff --git a/drivers/platform/x86/amd/pmf/sps.c b/drivers/platform/x86/amd/pmf/sps.c index ab69d517a36a..a70e67749be3 100644 --- a/drivers/platform/x86/amd/pmf/sps.c +++ b/drivers/platform/x86/amd/pmf/sps.c @@ -176,7 +176,8 @@ int amd_pmf_get_pprof_modes(struct amd_pmf_dev *pmf) int amd_pmf_power_slider_update_event(struct amd_pmf_dev *dev) { - u8 mode, flag = 0; + u8 flag = 0; + int mode; int src; mode = amd_pmf_get_pprof_modes(dev); diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c index 1f59ac55c5f7..a95004e3d80b 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c +++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c @@ -335,8 +335,8 @@ static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn node = dev_to_node(&_pci_dev->dev); if (node == NUMA_NO_NODE) { - pr_info("Fail to get numa node for CPU:%d bus:%d dev:%d fn:%d\n", - cpu, bus_no, dev, fn); + pr_info_once("Fail to get numa node for CPU:%d bus:%d dev:%d fn:%d\n", + cpu, bus_no, dev, fn); continue; } diff --git a/drivers/platform/x86/lenovo-ymc.c b/drivers/platform/x86/lenovo-ymc.c index 41676188b373..f360370d5002 100644 --- a/drivers/platform/x86/lenovo-ymc.c +++ b/drivers/platform/x86/lenovo-ymc.c @@ -24,6 +24,10 @@ static bool ec_trigger __read_mostly; module_param(ec_trigger, bool, 0444); MODULE_PARM_DESC(ec_trigger, "Enable EC triggering work-around to force emitting tablet mode events"); +static bool force; +module_param(force, bool, 0444); +MODULE_PARM_DESC(force, "Force loading on boards without a convertible DMI chassis-type"); + static const struct dmi_system_id ec_trigger_quirk_dmi_table[] = { { /* Lenovo Yoga 7 14ARB7 */ @@ -35,6 +39,20 @@ static const struct dmi_system_id ec_trigger_quirk_dmi_table[] = { { } }; +static const struct dmi_system_id allowed_chasis_types_dmi_table[] = { + { + .matches = { + DMI_EXACT_MATCH(DMI_CHASSIS_TYPE, "31" /* Convertible */), + }, + }, + { + .matches = { + DMI_EXACT_MATCH(DMI_CHASSIS_TYPE, "32" /* Detachable */), + }, + }, + { } +}; + struct lenovo_ymc_private { struct input_dev *input_dev; struct acpi_device *ec_acpi_dev; @@ -111,6 +129,13 @@ static int lenovo_ymc_probe(struct wmi_device *wdev, const void *ctx) struct input_dev *input_dev; int err; + if (!dmi_check_system(allowed_chasis_types_dmi_table)) { + if (force) + dev_info(&wdev->dev, "Force loading Lenovo YMC support\n"); + else + return -ENODEV; + } + ec_trigger |= dmi_check_system(ec_trigger_quirk_dmi_table); priv = devm_kzalloc(&wdev->dev, 
sizeof(*priv), GFP_KERNEL); diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c index 67367f010139..7d33977d9c60 100644 --- a/drivers/platform/x86/mlx-platform.c +++ b/drivers/platform/x86/mlx-platform.c @@ -62,10 +62,6 @@ #define MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET 0x37 #define MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET 0x3a #define MLXPLAT_CPLD_LPC_REG_AGGR_MASK_OFFSET 0x3b -#define MLXPLAT_CPLD_LPC_REG_DBG1_OFFSET 0x3c -#define MLXPLAT_CPLD_LPC_REG_DBG2_OFFSET 0x3d -#define MLXPLAT_CPLD_LPC_REG_DBG3_OFFSET 0x3e -#define MLXPLAT_CPLD_LPC_REG_DBG4_OFFSET 0x3f #define MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET 0x40 #define MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET 0x41 #define MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET 0x42 @@ -126,6 +122,10 @@ #define MLXPLAT_CPLD_LPC_REG_LC_SD_EVENT_OFFSET 0xaa #define MLXPLAT_CPLD_LPC_REG_LC_SD_MASK_OFFSET 0xab #define MLXPLAT_CPLD_LPC_REG_LC_PWR_ON 0xb2 +#define MLXPLAT_CPLD_LPC_REG_DBG1_OFFSET 0xb6 +#define MLXPLAT_CPLD_LPC_REG_DBG2_OFFSET 0xb7 +#define MLXPLAT_CPLD_LPC_REG_DBG3_OFFSET 0xb8 +#define MLXPLAT_CPLD_LPC_REG_DBG4_OFFSET 0xb9 #define MLXPLAT_CPLD_LPC_REG_GP4_RO_OFFSET 0xc2 #define MLXPLAT_CPLD_LPC_REG_SPI_CHNL_SELECT 0xc3 #define MLXPLAT_CPLD_LPC_REG_WD_CLEAR_OFFSET 0xc7 @@ -222,7 +222,7 @@ MLXPLAT_CPLD_AGGR_MASK_LC_SDWN) #define MLXPLAT_CPLD_LOW_AGGR_MASK_LOW 0xc1 #define MLXPLAT_CPLD_LOW_AGGR_MASK_ASIC2 BIT(2) -#define MLXPLAT_CPLD_LOW_AGGR_MASK_PWR_BUT BIT(4) +#define MLXPLAT_CPLD_LOW_AGGR_MASK_PWR_BUT GENMASK(5, 4) #define MLXPLAT_CPLD_LOW_AGGR_MASK_I2C BIT(6) #define MLXPLAT_CPLD_PSU_MASK GENMASK(1, 0) #define MLXPLAT_CPLD_PWR_MASK GENMASK(1, 0) @@ -237,7 +237,7 @@ #define MLXPLAT_CPLD_GWP_MASK GENMASK(0, 0) #define MLXPLAT_CPLD_EROT_MASK GENMASK(1, 0) #define MLXPLAT_CPLD_PWR_BUTTON_MASK BIT(0) -#define MLXPLAT_CPLD_LATCH_RST_MASK BIT(5) +#define MLXPLAT_CPLD_LATCH_RST_MASK BIT(6) #define MLXPLAT_CPLD_THERMAL1_PDB_MASK BIT(3) #define MLXPLAT_CPLD_THERMAL2_PDB_MASK BIT(4) #define MLXPLAT_CPLD_INTRUSION_MASK BIT(6) @@ -2356,7 +2356,7 @@ mlxplat_mlxcpld_l1_switch_pwr_events_handler(void *handle, enum mlxreg_hotplug_k u8 action) { dev_info(&mlxplat_dev->dev, "System shutdown due to short press of power button"); - kernel_halt(); + kernel_power_off(); return 0; } @@ -2475,7 +2475,7 @@ static struct mlxreg_core_item mlxplat_mlxcpld_l1_switch_events_items[] = { .reg = MLXPLAT_CPLD_LPC_REG_PWRB_OFFSET, .mask = MLXPLAT_CPLD_PWR_BUTTON_MASK, .count = ARRAY_SIZE(mlxplat_mlxcpld_l1_switch_pwr_events_items_data), - .inversed = 0, + .inversed = 1, .health = false, }, { @@ -2484,7 +2484,7 @@ static struct mlxreg_core_item mlxplat_mlxcpld_l1_switch_events_items[] = { .reg = MLXPLAT_CPLD_LPC_REG_BRD_OFFSET, .mask = MLXPLAT_CPLD_L1_CHA_HEALTH_MASK, .count = ARRAY_SIZE(mlxplat_mlxcpld_l1_switch_health_events_items_data), - .inversed = 0, + .inversed = 1, .health = false, .ind = 8, }, @@ -3677,7 +3677,7 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = { { .label = "latch_reset", .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET, - .mask = GENMASK(7, 0) & ~BIT(5), + .mask = GENMASK(7, 0) & ~BIT(6), .mode = 0200, }, { @@ -6238,8 +6238,6 @@ static void mlxplat_i2c_mux_topolgy_exit(struct mlxplat_priv *priv) if (priv->pdev_mux[i]) platform_device_unregister(priv->pdev_mux[i]); } - - mlxplat_post_exit(); } static int mlxplat_i2c_main_complition_notify(void *handle, int id) @@ -6369,6 +6367,7 @@ static void __exit mlxplat_exit(void) pm_power_off = NULL; mlxplat_pre_exit(priv); mlxplat_i2c_main_exit(priv); + mlxplat_post_exit(); } 
module_exit(mlxplat_exit); diff --git a/drivers/platform/x86/msi-ec.c b/drivers/platform/x86/msi-ec.c index ff93986e3d35..f26a3121092f 100644 --- a/drivers/platform/x86/msi-ec.c +++ b/drivers/platform/x86/msi-ec.c @@ -27,15 +27,15 @@ #include <linux/seq_file.h> #include <linux/string.h> -static const char *const SM_ECO_NAME = "eco"; -static const char *const SM_COMFORT_NAME = "comfort"; -static const char *const SM_SPORT_NAME = "sport"; -static const char *const SM_TURBO_NAME = "turbo"; - -static const char *const FM_AUTO_NAME = "auto"; -static const char *const FM_SILENT_NAME = "silent"; -static const char *const FM_BASIC_NAME = "basic"; -static const char *const FM_ADVANCED_NAME = "advanced"; +#define SM_ECO_NAME "eco" +#define SM_COMFORT_NAME "comfort" +#define SM_SPORT_NAME "sport" +#define SM_TURBO_NAME "turbo" + +#define FM_AUTO_NAME "auto" +#define FM_SILENT_NAME "silent" +#define FM_BASIC_NAME "basic" +#define FM_ADVANCED_NAME "advanced" static const char * const ALLOWED_FW_0[] __initconst = { "14C1EMS1.012", diff --git a/drivers/platform/x86/serial-multi-instantiate.c b/drivers/platform/x86/serial-multi-instantiate.c index 2c2abf69f049..8158e3cf5d6d 100644 --- a/drivers/platform/x86/serial-multi-instantiate.c +++ b/drivers/platform/x86/serial-multi-instantiate.c @@ -329,6 +329,19 @@ static const struct smi_node cs35l41_hda = { .bus_type = SMI_AUTO_DETECT, }; +static const struct smi_node cs35l56_hda = { + .instances = { + { "cs35l56-hda", IRQ_RESOURCE_AUTO, 0 }, + { "cs35l56-hda", IRQ_RESOURCE_AUTO, 0 }, + { "cs35l56-hda", IRQ_RESOURCE_AUTO, 0 }, + { "cs35l56-hda", IRQ_RESOURCE_AUTO, 0 }, + /* a 5th entry is an alias address, not a real device */ + { "cs35l56-hda_dummy_dev" }, + {} + }, + .bus_type = SMI_AUTO_DETECT, +}; + /* * Note new device-ids must also be added to ignore_serial_bus_ids in * drivers/acpi/scan.c: acpi_device_enumeration_by_parent(). 
@@ -337,6 +350,7 @@ static const struct acpi_device_id smi_acpi_ids[] = { { "BSG1160", (unsigned long)&bsg1160_data }, { "BSG2150", (unsigned long)&bsg2150_data }, { "CSC3551", (unsigned long)&cs35l41_hda }, + { "CSC3556", (unsigned long)&cs35l56_hda }, { "INT3515", (unsigned long)&int3515_data }, /* Non-conforming _HID for Cirrus Logic already released */ { "CLSA0100", (unsigned long)&cs35l41_hda }, diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c index 4e646e5e48f6..8fac57b28f8a 100644 --- a/drivers/powercap/intel_rapl_common.c +++ b/drivers/powercap/intel_rapl_common.c @@ -818,7 +818,7 @@ static int rapl_read_data_raw(struct rapl_domain *rd, return -EINVAL; ra.reg = rd->regs[rpi->id]; - if (!ra.reg) + if (!ra.reg.val) return -EINVAL; /* non-hardware data are collected by the polling thread */ @@ -830,7 +830,7 @@ static int rapl_read_data_raw(struct rapl_domain *rd, ra.mask = rpi->mask; if (rd->rp->priv->read_raw(get_rid(rd->rp), &ra)) { - pr_debug("failed to read reg 0x%llx for %s:%s\n", ra.reg, rd->rp->name, rd->name); + pr_debug("failed to read reg 0x%llx for %s:%s\n", ra.reg.val, rd->rp->name, rd->name); return -EIO; } @@ -920,7 +920,7 @@ static int rapl_check_unit_core(struct rapl_domain *rd) ra.mask = ~0; if (rd->rp->priv->read_raw(get_rid(rd->rp), &ra)) { pr_err("Failed to read power unit REG 0x%llx on %s:%s, exit.\n", - ra.reg, rd->rp->name, rd->name); + ra.reg.val, rd->rp->name, rd->name); return -ENODEV; } @@ -948,7 +948,7 @@ static int rapl_check_unit_atom(struct rapl_domain *rd) ra.mask = ~0; if (rd->rp->priv->read_raw(get_rid(rd->rp), &ra)) { pr_err("Failed to read power unit REG 0x%llx on %s:%s, exit.\n", - ra.reg, rd->rp->name, rd->name); + ra.reg.val, rd->rp->name, rd->name); return -ENODEV; } @@ -1135,7 +1135,7 @@ static int rapl_check_unit_tpmi(struct rapl_domain *rd) ra.mask = ~0; if (rd->rp->priv->read_raw(get_rid(rd->rp), &ra)) { pr_err("Failed to read power unit REG 0x%llx on %s:%s, exit.\n", - ra.reg, rd->rp->name, rd->name); + ra.reg.val, rd->rp->name, rd->name); return -ENODEV; } @@ -1411,8 +1411,8 @@ static int rapl_get_domain_unit(struct rapl_domain *rd) struct rapl_defaults *defaults = get_defaults(rd->rp); int ret; - if (!rd->regs[RAPL_DOMAIN_REG_UNIT]) { - if (!rd->rp->priv->reg_unit) { + if (!rd->regs[RAPL_DOMAIN_REG_UNIT].val) { + if (!rd->rp->priv->reg_unit.val) { pr_err("No valid Unit register found\n"); return -ENODEV; } diff --git a/drivers/powercap/intel_rapl_msr.c b/drivers/powercap/intel_rapl_msr.c index 569e25eab1e1..dd471021f237 100644 --- a/drivers/powercap/intel_rapl_msr.c +++ b/drivers/powercap/intel_rapl_msr.c @@ -34,28 +34,32 @@ static struct rapl_if_priv *rapl_msr_priv; static struct rapl_if_priv rapl_msr_priv_intel = { .type = RAPL_IF_MSR, - .reg_unit = MSR_RAPL_POWER_UNIT, - .regs[RAPL_DOMAIN_PACKAGE] = { - MSR_PKG_POWER_LIMIT, MSR_PKG_ENERGY_STATUS, MSR_PKG_PERF_STATUS, 0, MSR_PKG_POWER_INFO }, - .regs[RAPL_DOMAIN_PP0] = { - MSR_PP0_POWER_LIMIT, MSR_PP0_ENERGY_STATUS, 0, MSR_PP0_POLICY, 0 }, - .regs[RAPL_DOMAIN_PP1] = { - MSR_PP1_POWER_LIMIT, MSR_PP1_ENERGY_STATUS, 0, MSR_PP1_POLICY, 0 }, - .regs[RAPL_DOMAIN_DRAM] = { - MSR_DRAM_POWER_LIMIT, MSR_DRAM_ENERGY_STATUS, MSR_DRAM_PERF_STATUS, 0, MSR_DRAM_POWER_INFO }, - .regs[RAPL_DOMAIN_PLATFORM] = { - MSR_PLATFORM_POWER_LIMIT, MSR_PLATFORM_ENERGY_STATUS, 0, 0, 0}, + .reg_unit.msr = MSR_RAPL_POWER_UNIT, + .regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_LIMIT].msr = MSR_PKG_POWER_LIMIT, + .regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_STATUS].msr = 
MSR_PKG_ENERGY_STATUS, + .regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_PERF].msr = MSR_PKG_PERF_STATUS, + .regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_INFO].msr = MSR_PKG_POWER_INFO, + .regs[RAPL_DOMAIN_PP0][RAPL_DOMAIN_REG_LIMIT].msr = MSR_PP0_POWER_LIMIT, + .regs[RAPL_DOMAIN_PP0][RAPL_DOMAIN_REG_STATUS].msr = MSR_PP0_ENERGY_STATUS, + .regs[RAPL_DOMAIN_PP0][RAPL_DOMAIN_REG_POLICY].msr = MSR_PP0_POLICY, + .regs[RAPL_DOMAIN_PP1][RAPL_DOMAIN_REG_LIMIT].msr = MSR_PP1_POWER_LIMIT, + .regs[RAPL_DOMAIN_PP1][RAPL_DOMAIN_REG_STATUS].msr = MSR_PP1_ENERGY_STATUS, + .regs[RAPL_DOMAIN_PP1][RAPL_DOMAIN_REG_POLICY].msr = MSR_PP1_POLICY, + .regs[RAPL_DOMAIN_DRAM][RAPL_DOMAIN_REG_LIMIT].msr = MSR_DRAM_POWER_LIMIT, + .regs[RAPL_DOMAIN_DRAM][RAPL_DOMAIN_REG_STATUS].msr = MSR_DRAM_ENERGY_STATUS, + .regs[RAPL_DOMAIN_DRAM][RAPL_DOMAIN_REG_PERF].msr = MSR_DRAM_PERF_STATUS, + .regs[RAPL_DOMAIN_DRAM][RAPL_DOMAIN_REG_INFO].msr = MSR_DRAM_POWER_INFO, + .regs[RAPL_DOMAIN_PLATFORM][RAPL_DOMAIN_REG_LIMIT].msr = MSR_PLATFORM_POWER_LIMIT, + .regs[RAPL_DOMAIN_PLATFORM][RAPL_DOMAIN_REG_STATUS].msr = MSR_PLATFORM_ENERGY_STATUS, .limits[RAPL_DOMAIN_PACKAGE] = BIT(POWER_LIMIT2), .limits[RAPL_DOMAIN_PLATFORM] = BIT(POWER_LIMIT2), }; static struct rapl_if_priv rapl_msr_priv_amd = { .type = RAPL_IF_MSR, - .reg_unit = MSR_AMD_RAPL_POWER_UNIT, - .regs[RAPL_DOMAIN_PACKAGE] = { - 0, MSR_AMD_PKG_ENERGY_STATUS, 0, 0, 0 }, - .regs[RAPL_DOMAIN_PP0] = { - 0, MSR_AMD_CORE_ENERGY_STATUS, 0, 0, 0 }, + .reg_unit.msr = MSR_AMD_RAPL_POWER_UNIT, + .regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_STATUS].msr = MSR_AMD_PKG_ENERGY_STATUS, + .regs[RAPL_DOMAIN_PP0][RAPL_DOMAIN_REG_STATUS].msr = MSR_AMD_CORE_ENERGY_STATUS, }; /* Handles CPU hotplug on multi-socket systems. @@ -99,10 +103,8 @@ static int rapl_cpu_down_prep(unsigned int cpu) static int rapl_msr_read_raw(int cpu, struct reg_action *ra) { - u32 msr = (u32)ra->reg; - - if (rdmsrl_safe_on_cpu(cpu, msr, &ra->value)) { - pr_debug("failed to read msr 0x%x on cpu %d\n", msr, cpu); + if (rdmsrl_safe_on_cpu(cpu, ra->reg.msr, &ra->value)) { + pr_debug("failed to read msr 0x%x on cpu %d\n", ra->reg.msr, cpu); return -EIO; } ra->value &= ra->mask; @@ -112,17 +114,16 @@ static int rapl_msr_read_raw(int cpu, struct reg_action *ra) static void rapl_msr_update_func(void *info) { struct reg_action *ra = info; - u32 msr = (u32)ra->reg; u64 val; - ra->err = rdmsrl_safe(msr, &val); + ra->err = rdmsrl_safe(ra->reg.msr, &val); if (ra->err) return; val &= ~ra->mask; val |= ra->value; - ra->err = wrmsrl_safe(msr, val); + ra->err = wrmsrl_safe(ra->reg.msr, val); } static int rapl_msr_write_raw(int cpu, struct reg_action *ra) @@ -171,7 +172,7 @@ static int rapl_msr_probe(struct platform_device *pdev) if (id) { rapl_msr_priv->limits[RAPL_DOMAIN_PACKAGE] |= BIT(POWER_LIMIT4); - rapl_msr_priv->regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_PL4] = + rapl_msr_priv->regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_PL4].msr = MSR_VR_CURRENT_CONFIG; pr_info("PL4 support detected.\n"); } diff --git a/drivers/powercap/intel_rapl_tpmi.c b/drivers/powercap/intel_rapl_tpmi.c index 4f4f13ded225..891c90fefd8b 100644 --- a/drivers/powercap/intel_rapl_tpmi.c +++ b/drivers/powercap/intel_rapl_tpmi.c @@ -59,10 +59,10 @@ static struct powercap_control_type *tpmi_control_type; static int tpmi_rapl_read_raw(int id, struct reg_action *ra) { - if (!ra->reg) + if (!ra->reg.mmio) return -EINVAL; - ra->value = readq((void __iomem *)ra->reg); + ra->value = readq(ra->reg.mmio); ra->value &= ra->mask; return 0; @@ -72,15 +72,15 @@ static int 
tpmi_rapl_write_raw(int id, struct reg_action *ra) { u64 val; - if (!ra->reg) + if (!ra->reg.mmio) return -EINVAL; - val = readq((void __iomem *)ra->reg); + val = readq(ra->reg.mmio); val &= ~ra->mask; val |= ra->value; - writeq(val, (void __iomem *)ra->reg); + writeq(val, ra->reg.mmio); return 0; } @@ -138,8 +138,7 @@ static int parse_one_domain(struct tpmi_rapl_package *trp, u32 offset) enum tpmi_rapl_register reg_index; enum rapl_domain_reg_id reg_id; int tpmi_domain_size, tpmi_domain_flags; - u64 *tpmi_rapl_regs = trp->base + offset; - u64 tpmi_domain_header = readq((void __iomem *)tpmi_rapl_regs); + u64 tpmi_domain_header = readq(trp->base + offset); /* Domain Parent bits are ignored for now */ tpmi_domain_version = tpmi_domain_header & 0xff; @@ -180,7 +179,7 @@ static int parse_one_domain(struct tpmi_rapl_package *trp, u32 offset) return -EINVAL; } - if (trp->priv.regs[domain_type][RAPL_DOMAIN_REG_UNIT]) { + if (trp->priv.regs[domain_type][RAPL_DOMAIN_REG_UNIT].mmio) { pr_warn(FW_BUG "Duplicate Domain type %d\n", tpmi_domain_type); return -EINVAL; } @@ -218,7 +217,7 @@ static int parse_one_domain(struct tpmi_rapl_package *trp, u32 offset) default: continue; } - trp->priv.regs[domain_type][reg_id] = (u64)&tpmi_rapl_regs[reg_index]; + trp->priv.regs[domain_type][reg_id].mmio = trp->base + offset + reg_index * 8; } return 0; diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 1d195429753d..613eab729704 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -716,7 +716,6 @@ struct qeth_card_info { u16 chid; u8 ids_valid:1; /* cssid,iid,chid */ u8 dev_addr_is_registered:1; - u8 open_when_online:1; u8 promisc_mode:1; u8 use_v1_blkt:1; u8 is_vm_nic:1; diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 1d5b207c2b9e..cd783290bde5 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -5373,8 +5373,6 @@ int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc, qeth_clear_ipacmd_list(card); rtnl_lock(); - card->info.open_when_online = card->dev->flags & IFF_UP; - dev_close(card->dev); netif_device_detach(card->dev); netif_carrier_off(card->dev); rtnl_unlock(); diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 9f13ed170a43..75910c0bcc2b 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -2388,9 +2388,12 @@ static int qeth_l2_set_online(struct qeth_card *card, bool carrier_ok) qeth_enable_hw_features(dev); qeth_l2_enable_brport_features(card); - if (card->info.open_when_online) { - card->info.open_when_online = 0; - dev_open(dev, NULL); + if (netif_running(dev)) { + local_bh_disable(); + napi_schedule(&card->napi); + /* kick-start the NAPI softirq: */ + local_bh_enable(); + qeth_l2_set_rx_mode(dev); } rtnl_unlock(); } diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index af4e60d2917e..b92a32b4b114 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -2018,9 +2018,11 @@ static int qeth_l3_set_online(struct qeth_card *card, bool carrier_ok) netif_device_attach(dev); qeth_enable_hw_features(dev); - if (card->info.open_when_online) { - card->info.open_when_online = 0; - dev_open(dev, NULL); + if (netif_running(dev)) { + local_bh_disable(); + napi_schedule(&card->napi); + /* kick-start the NAPI softirq: */ + local_bh_enable(); } rtnl_unlock(); } diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 
f21307537829..4f0d0e55f0d4 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -534,8 +534,7 @@ static void zfcp_fc_adisc_handler(void *data) /* re-init to undo drop from zfcp_fc_adisc() */ port->d_id = ntoh24(adisc_resp->adisc_port_id); - /* port is good, unblock rport without going through erp */ - zfcp_scsi_schedule_rport_register(port); + /* port is still good, nothing to do */ out: atomic_andnot(ZFCP_STATUS_PORT_LINK_TEST, &port->status); put_device(&port->dev); @@ -595,9 +594,6 @@ void zfcp_fc_link_test_work(struct work_struct *work) int retval; set_worker_desc("zadisc%16llx", port->wwpn); /* < WORKER_DESC_LEN=24 */ - get_device(&port->dev); - port->rport_task = RPORT_DEL; - zfcp_scsi_rport_work(&port->rport_work); /* only issue one test command at one time per port */ if (atomic_read(&port->status) & ZFCP_STATUS_PORT_LINK_TEST) diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c index e1e4f9d10887..857be0f3ae5b 100644 --- a/drivers/scsi/53c700.c +++ b/drivers/scsi/53c700.c @@ -1598,7 +1598,7 @@ NCR_700_intr(int irq, void *dev_id) printk("scsi%d (%d:%d) PHASE MISMATCH IN SEND MESSAGE %d remain, return %p[%04x], phase %s\n", host->host_no, pun, lun, count, (void *)temp, temp - hostdata->pScript, sbcl_to_string(NCR_700_readb(host, SBCL_REG))); #endif resume_offset = hostdata->pScript + Ent_SendMessagePhaseMismatch; - } else if(dsp >= to32bit(&slot->pSG[0].ins) && + } else if (slot && dsp >= to32bit(&slot->pSG[0].ins) && dsp <= to32bit(&slot->pSG[NCR_700_SG_SEGMENTS].ins)) { int data_transfer = NCR_700_readl(host, DBC_REG) & 0xffffff; int SGcount = (dsp - to32bit(&slot->pSG[0].ins))/sizeof(struct NCR_700_SG_List); diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h index d82de34f6fd7..e51e92f932fa 100644 --- a/drivers/scsi/fnic/fnic.h +++ b/drivers/scsi/fnic/fnic.h @@ -27,7 +27,7 @@ #define DRV_NAME "fnic" #define DRV_DESCRIPTION "Cisco FCoE HBA Driver" -#define DRV_VERSION "1.6.0.54" +#define DRV_VERSION "1.6.0.55" #define PFX DRV_NAME ": " #define DFX DRV_NAME "%d: " diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index 26dbd347156e..be89ce96df46 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c @@ -2139,7 +2139,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic, bool new_sc) { - int ret = SUCCESS; + int ret = 0; struct fnic_pending_aborts_iter_data iter_data = { .fnic = fnic, .lun_dev = lr_sc->device, @@ -2159,9 +2159,11 @@ static int fnic_clean_pending_aborts(struct fnic *fnic, /* walk again to check, if IOs are still pending in fw */ if (fnic_is_abts_pending(fnic, lr_sc)) - ret = FAILED; + ret = 1; clean_pending_aborts_end: + FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, + "%s: exit status: %d\n", __func__, ret); return ret; } diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index a62e091894f6..d26941b131fd 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -109,8 +109,6 @@ lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba, } } -#define LPFC_INVALID_REFTAG ((u32)-1) - /** * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread * @phba: The Hba for which this call is being executed. 
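Aside, for orientation only: the lpfc hunks above and below read a command's T10 protection (DIF) parameters through the generic SCSI midlayer helpers before building their BPL/SGL entries. A rough illustrative sketch of those accessors follows; the helper function name and log prefix are hypothetical and are not part of this series.

#include <linux/kernel.h>
#include <scsi/scsi_cmnd.h>

/*
 * Hypothetical helper: dump the protection-information parameters the
 * midlayer exposes for a command.  The accessors used here
 * (scsi_prot_ref_tag(), scsi_prot_interval(), scsi_prot_sg_count())
 * are the same ones the lpfc hunks in this series rely on.
 */
static void demo_show_prot_info(struct scsi_cmnd *sc)
{
	u32 reftag = scsi_prot_ref_tag(sc);		/* initial reference tag */
	unsigned int blksize = scsi_prot_interval(sc);	/* bytes per protection interval */
	unsigned int prot_sgs = scsi_prot_sg_count(sc);	/* protection S/G entries, 0 if none */

	if (scsi_get_prot_op(sc) == SCSI_PROT_NORMAL)
		return;		/* no DIF/DIX processing requested */

	pr_debug("demo: reftag=0x%x interval=%u prot_sg_count=%u\n",
		 reftag, blksize, prot_sgs);
}
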
@@ -978,8 +976,6 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, sgpe = scsi_prot_sglist(sc); lba = scsi_prot_ref_tag(sc); - if (lba == LPFC_INVALID_REFTAG) - return 0; /* First check if we need to match the LBA */ if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) { @@ -1560,8 +1556,6 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc, /* extract some info from the scsi command for pde*/ reftag = scsi_prot_ref_tag(sc); - if (reftag == LPFC_INVALID_REFTAG) - goto out; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1); @@ -1723,8 +1717,6 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, /* extract some info from the scsi command */ blksize = scsi_prot_interval(sc); reftag = scsi_prot_ref_tag(sc); - if (reftag == LPFC_INVALID_REFTAG) - goto out; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1); @@ -1953,8 +1945,6 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc, /* extract some info from the scsi command for pde*/ reftag = scsi_prot_ref_tag(sc); - if (reftag == LPFC_INVALID_REFTAG) - goto out; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1); @@ -2154,8 +2144,6 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, /* extract some info from the scsi command */ blksize = scsi_prot_interval(sc); reftag = scsi_prot_ref_tag(sc); - if (reftag == LPFC_INVALID_REFTAG) - goto out; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1); @@ -2746,8 +2734,6 @@ lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) src = (struct scsi_dif_tuple *)sg_virt(sgpe); start_ref_tag = scsi_prot_ref_tag(cmd); - if (start_ref_tag == LPFC_INVALID_REFTAG) - goto out; start_app_tag = src->app_tag; len = sgpe->length; while (src && protsegcnt) { @@ -3493,11 +3479,11 @@ err: scsi_cmnd->sc_data_direction); lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, - "9084 Cannot setup S/G List for HBA" - "IO segs %d/%d SGL %d SCSI %d: %d %d\n", + "9084 Cannot setup S/G List for HBA " + "IO segs %d/%d SGL %d SCSI %d: %d %d %d\n", lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt, phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt, - prot_group_type, num_sge); + prot_group_type, num_sge, ret); lpfc_cmd->seg_cnt = 0; lpfc_cmd->prot_seg_cnt = 0; diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index 2e886c1d867d..4995e1ef4e0e 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -1181,7 +1181,8 @@ static int pm8001_pci_probe(struct pci_dev *pdev, pm80xx_set_thermal_config(pm8001_ha); } - if (pm8001_init_sas_add(pm8001_ha)) + rc = pm8001_init_sas_add(pm8001_ha); + if (rc) goto err_out_shost; /* phy setting support for motherboard controller */ rc = pm8001_configure_phy_settings(pm8001_ha); diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index 2a31ddc99dde..7825765c936c 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -31,6 +31,7 @@ static void qedf_remove(struct pci_dev *pdev); static void qedf_shutdown(struct pci_dev *pdev); static void qedf_schedule_recovery_handler(void *dev); static void qedf_recovery_handler(struct work_struct *work); +static int qedf_suspend(struct pci_dev *pdev, pm_message_t state); /* * Driver module parameters. 
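Aside, for orientation only: the qedf change here (and the matching qedi change further down) registers a legacy PCI .suspend callback whose only job is to refuse suspend, so the PM core aborts system sleep rather than suspending a device the driver cannot restore. A minimal stand-alone sketch of that pattern is shown below; the "foo" driver name is a placeholder, not part of this series.

#include <linux/module.h>
#include <linux/pci.h>

/*
 * Illustrative only: a legacy PCI power-management hook that always
 * rejects suspend, mirroring the qedf/qedi additions in this series.
 */
static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
{
	if (!pdev)
		return -ENODEV;

	dev_warn(&pdev->dev, "suspend is not supported by this device\n");

	/* Returning an error causes the PM core to abort the system suspend. */
	return -EPERM;
}

static struct pci_driver foo_pci_driver = {
	.name    = "foo",
	/* .id_table, .probe, .remove, .shutdown omitted in this sketch */
	.suspend = foo_suspend,
};
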
@@ -3271,6 +3272,7 @@ static struct pci_driver qedf_pci_driver = { .probe = qedf_probe, .remove = qedf_remove, .shutdown = qedf_shutdown, + .suspend = qedf_suspend, }; static int __qedf_probe(struct pci_dev *pdev, int mode) @@ -4000,6 +4002,22 @@ static void qedf_shutdown(struct pci_dev *pdev) __qedf_remove(pdev, QEDF_MODE_NORMAL); } +static int qedf_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct qedf_ctx *qedf; + + if (!pdev) { + QEDF_ERR(NULL, "pdev is NULL.\n"); + return -ENODEV; + } + + qedf = pci_get_drvdata(pdev); + + QEDF_ERR(&qedf->dbg_ctx, "%s: Device does not support suspend operation\n", __func__); + + return -EPERM; +} + /* * Recovery handler code */ diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index 450522b204d6..cd0180b1f5b9 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c @@ -69,6 +69,7 @@ static struct nvm_iscsi_block *qedi_get_nvram_block(struct qedi_ctx *qedi); static void qedi_recovery_handler(struct work_struct *work); static void qedi_schedule_hw_err_handler(void *dev, enum qed_hw_err_type err_type); +static int qedi_suspend(struct pci_dev *pdev, pm_message_t state); static int qedi_iscsi_event_cb(void *context, u8 fw_event_code, void *fw_handle) { @@ -1976,8 +1977,9 @@ static int qedi_cpu_offline(unsigned int cpu) struct qedi_percpu_s *p = this_cpu_ptr(&qedi_percpu); struct qedi_work *work, *tmp; struct task_struct *thread; + unsigned long flags; - spin_lock_bh(&p->p_work_lock); + spin_lock_irqsave(&p->p_work_lock, flags); thread = p->iothread; p->iothread = NULL; @@ -1988,7 +1990,7 @@ static int qedi_cpu_offline(unsigned int cpu) kfree(work); } - spin_unlock_bh(&p->p_work_lock); + spin_unlock_irqrestore(&p->p_work_lock, flags); if (thread) kthread_stop(thread); return 0; @@ -2510,6 +2512,22 @@ static void qedi_shutdown(struct pci_dev *pdev) __qedi_remove(pdev, QEDI_MODE_SHUTDOWN); } +static int qedi_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct qedi_ctx *qedi; + + if (!pdev) { + QEDI_ERR(NULL, "pdev is NULL.\n"); + return -ENODEV; + } + + qedi = pci_get_drvdata(pdev); + + QEDI_ERR(&qedi->dbg_ctx, "%s: Device does not support suspend operation\n", __func__); + + return -EPERM; +} + static int __qedi_probe(struct pci_dev *pdev, int mode) { struct qedi_ctx *qedi; @@ -2868,6 +2886,7 @@ static struct pci_driver qedi_pci_driver = { .remove = qedi_remove, .shutdown = qedi_shutdown, .err_handler = &qedi_err_handler, + .suspend = qedi_suspend, }; static int __init qedi_init(void) diff --git a/drivers/scsi/raid_class.c b/drivers/scsi/raid_class.c index 898a0bdf8df6..711252e52d8e 100644 --- a/drivers/scsi/raid_class.c +++ b/drivers/scsi/raid_class.c @@ -248,6 +248,7 @@ int raid_component_add(struct raid_template *r,struct device *raid_dev, return 0; err_out: + put_device(&rc->dev); list_del(&rc->node); rd->component_count--; put_device(component_dev); diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c index 4a6eb1741be0..41f23cd0bfb4 100644 --- a/drivers/scsi/scsi_proc.c +++ b/drivers/scsi/scsi_proc.c @@ -406,7 +406,7 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf, size_t length, loff_t *ppos) { int host, channel, id, lun; - char *buffer, *p; + char *buffer, *end, *p; int err; if (!buf || length > PAGE_SIZE) @@ -421,10 +421,14 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf, goto out; err = -EINVAL; - if (length < PAGE_SIZE) - buffer[length] = '\0'; - else if (buffer[PAGE_SIZE-1]) - goto out; + if (length < PAGE_SIZE) 
{ + end = buffer + length; + *end = '\0'; + } else { + end = buffer + PAGE_SIZE - 1; + if (*end) + goto out; + } /* * Usage: echo "scsi add-single-device 0 1 2 3" >/proc/scsi/scsi @@ -433,10 +437,10 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf, if (!strncmp("scsi add-single-device", buffer, 22)) { p = buffer + 23; - host = simple_strtoul(p, &p, 0); - channel = simple_strtoul(p + 1, &p, 0); - id = simple_strtoul(p + 1, &p, 0); - lun = simple_strtoul(p + 1, &p, 0); + host = (p < end) ? simple_strtoul(p, &p, 0) : 0; + channel = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; + id = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; + lun = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; err = scsi_add_single_device(host, channel, id, lun); @@ -447,10 +451,10 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf, } else if (!strncmp("scsi remove-single-device", buffer, 25)) { p = buffer + 26; - host = simple_strtoul(p, &p, 0); - channel = simple_strtoul(p + 1, &p, 0); - id = simple_strtoul(p + 1, &p, 0); - lun = simple_strtoul(p + 1, &p, 0); + host = (p < end) ? simple_strtoul(p, &p, 0) : 0; + channel = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; + id = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; + lun = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; err = scsi_remove_single_device(host, channel, id, lun); } diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 68b12afa0721..3c668cfb146d 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -3876,7 +3876,7 @@ static int sd_suspend_runtime(struct device *dev) static int sd_resume(struct device *dev) { struct scsi_disk *sdkp = dev_get_drvdata(dev); - int ret; + int ret = 0; if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */ return 0; @@ -3884,8 +3884,11 @@ static int sd_resume(struct device *dev) if (!sdkp->device->manage_start_stop) return 0; - sd_printk(KERN_NOTICE, sdkp, "Starting disk\n"); - ret = sd_start_stop_device(sdkp, 1); + if (!sdkp->device->no_start_on_resume) { + sd_printk(KERN_NOTICE, sdkp, "Starting disk\n"); + ret = sd_start_stop_device(sdkp, 1); + } + if (!ret) opal_unlock_from_suspend(sdkp->opal_dev); return ret; diff --git a/drivers/scsi/snic/snic_disc.c b/drivers/scsi/snic/snic_disc.c index 3e2e5783924d..e429ad23c396 100644 --- a/drivers/scsi/snic/snic_disc.c +++ b/drivers/scsi/snic/snic_disc.c @@ -303,6 +303,7 @@ snic_tgt_create(struct snic *snic, struct snic_tgt_id *tgtid) "Snic Tgt: device_add, with err = %d\n", ret); + put_device(&tgt->dev); put_device(&snic->shost->shost_gendev); spin_lock_irqsave(snic->shost->host_lock, flags); list_del(&tgt->list); diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 7f12d931fe7c..047ffaf7d42a 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -366,6 +366,7 @@ static void storvsc_on_channel_callback(void *context); #define STORVSC_FC_MAX_LUNS_PER_TARGET 255 #define STORVSC_FC_MAX_TARGETS 128 #define STORVSC_FC_MAX_CHANNELS 8 +#define STORVSC_FC_MAX_XFER_SIZE ((u32)(512 * 1024)) #define STORVSC_IDE_MAX_LUNS_PER_TARGET 64 #define STORVSC_IDE_MAX_TARGETS 1 @@ -1673,10 +1674,6 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd) */ static enum scsi_timeout_action storvsc_eh_timed_out(struct scsi_cmnd *scmnd) { -#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) - if (scmnd->device->host->transportt == fc_transport_template) - return fc_eh_timed_out(scmnd); -#endif return SCSI_EH_RESET_TIMER; } @@ -2006,6 +2003,9 @@ static int storvsc_probe(struct 
hv_device *device, * protecting it from any weird value. */ max_xfer_bytes = round_down(stor_device->max_transfer_bytes, HV_HYP_PAGE_SIZE); + if (is_fc) + max_xfer_bytes = min(max_xfer_bytes, STORVSC_FC_MAX_XFER_SIZE); + /* max_hw_sectors_kb */ host->max_sectors = max_xfer_bytes >> 9; /* diff --git a/drivers/soc/imx/imx8mp-blk-ctrl.c b/drivers/soc/imx/imx8mp-blk-ctrl.c index 870aecc0202a..1c1fcab4979a 100644 --- a/drivers/soc/imx/imx8mp-blk-ctrl.c +++ b/drivers/soc/imx/imx8mp-blk-ctrl.c @@ -164,7 +164,7 @@ static int imx8mp_hsio_blk_ctrl_probe(struct imx8mp_blk_ctrl *bc) clk_hsio_pll->hw.init = &init; hw = &clk_hsio_pll->hw; - ret = devm_clk_hw_register(bc->dev, hw); + ret = devm_clk_hw_register(bc->bus_power_dev, hw); if (ret) return ret; diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c index 013f1633f082..2f00fc3bf274 100644 --- a/drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c +++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c @@ -57,10 +57,10 @@ static int rapl_mmio_cpu_down_prep(unsigned int cpu) static int rapl_mmio_read_raw(int cpu, struct reg_action *ra) { - if (!ra->reg) + if (!ra->reg.mmio) return -EINVAL; - ra->value = readq((void __iomem *)ra->reg); + ra->value = readq(ra->reg.mmio); ra->value &= ra->mask; return 0; } @@ -69,13 +69,13 @@ static int rapl_mmio_write_raw(int cpu, struct reg_action *ra) { u64 val; - if (!ra->reg) + if (!ra->reg.mmio) return -EINVAL; - val = readq((void __iomem *)ra->reg); + val = readq(ra->reg.mmio); val &= ~ra->mask; val |= ra->value; - writeq(val, (void __iomem *)ra->reg); + writeq(val, ra->reg.mmio); return 0; } @@ -92,13 +92,13 @@ int proc_thermal_rapl_add(struct pci_dev *pdev, struct proc_thermal_device *proc for (domain = RAPL_DOMAIN_PACKAGE; domain < RAPL_DOMAIN_MAX; domain++) { for (reg = RAPL_DOMAIN_REG_LIMIT; reg < RAPL_DOMAIN_REG_MAX; reg++) if (rapl_regs->regs[domain][reg]) - rapl_mmio_priv.regs[domain][reg] = - (u64)proc_priv->mmio_base + + rapl_mmio_priv.regs[domain][reg].mmio = + proc_priv->mmio_base + rapl_regs->regs[domain][reg]; rapl_mmio_priv.limits[domain] = rapl_regs->limits[domain]; } rapl_mmio_priv.type = RAPL_IF_MMIO; - rapl_mmio_priv.reg_unit = (u64)proc_priv->mmio_base + rapl_regs->reg_unit; + rapl_mmio_priv.reg_unit.mmio = proc_priv->mmio_base + rapl_regs->reg_unit; rapl_mmio_priv.read_raw = rapl_mmio_read_raw; rapl_mmio_priv.write_raw = rapl_mmio_write_raw; diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c index ffbae134589b..dd0a1ef8cf12 100644 --- a/drivers/thunderbolt/tb.c +++ b/drivers/thunderbolt/tb.c @@ -1964,6 +1964,8 @@ unlock: pm_runtime_mark_last_busy(&tb->dev); pm_runtime_put_autosuspend(&tb->dev); + + kfree(ev); } static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port) diff --git a/drivers/thunderbolt/tmu.c b/drivers/thunderbolt/tmu.c index c789024d7ffe..747f88703d5c 100644 --- a/drivers/thunderbolt/tmu.c +++ b/drivers/thunderbolt/tmu.c @@ -579,7 +579,9 @@ int tb_switch_tmu_disable(struct tb_switch *sw) * uni-directional mode and we don't want to change it's TMU * mode. 
*/ - tb_switch_tmu_rate_write(sw, tmu_rates[TB_SWITCH_TMU_MODE_OFF]); + ret = tb_switch_tmu_rate_write(sw, tmu_rates[TB_SWITCH_TMU_MODE_OFF]); + if (ret) + return ret; tb_port_tmu_time_sync_disable(up); ret = tb_port_tmu_time_sync_disable(down); diff --git a/drivers/ufs/host/ufs-renesas.c b/drivers/ufs/host/ufs-renesas.c index f8a5e79ed3b4..ab0652d8705a 100644 --- a/drivers/ufs/host/ufs-renesas.c +++ b/drivers/ufs/host/ufs-renesas.c @@ -359,7 +359,7 @@ static int ufs_renesas_init(struct ufs_hba *hba) { struct ufs_renesas_priv *priv; - priv = devm_kmalloc(hba->dev, sizeof(*priv), GFP_KERNEL); + priv = devm_kzalloc(hba->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; ufshcd_set_variant(hba, priv); diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c index aa0111b365bb..11a5b3437c32 100644 --- a/drivers/usb/cdns3/cdns3-gadget.c +++ b/drivers/usb/cdns3/cdns3-gadget.c @@ -61,6 +61,7 @@ #include <linux/module.h> #include <linux/dmapool.h> #include <linux/iopoll.h> +#include <linux/property.h> #include "core.h" #include "gadget-export.h" diff --git a/drivers/usb/cdns3/cdns3-plat.c b/drivers/usb/cdns3/cdns3-plat.c index 884e2301237f..2c1aca84f226 100644 --- a/drivers/usb/cdns3/cdns3-plat.c +++ b/drivers/usb/cdns3/cdns3-plat.c @@ -15,6 +15,7 @@ #include <linux/module.h> #include <linux/irq.h> #include <linux/kernel.h> +#include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> @@ -255,9 +256,10 @@ static int cdns3_controller_resume(struct device *dev, pm_message_t msg) cdns3_set_platform_suspend(cdns->dev, false, false); spin_lock_irqsave(&cdns->lock, flags); - cdns_resume(cdns, !PMSG_IS_AUTO(msg)); + cdns_resume(cdns); cdns->in_lpm = false; spin_unlock_irqrestore(&cdns->lock, flags); + cdns_set_active(cdns, !PMSG_IS_AUTO(msg)); if (cdns->wakeup_pending) { cdns->wakeup_pending = false; enable_irq(cdns->wakeup_irq); diff --git a/drivers/usb/cdns3/cdns3-starfive.c b/drivers/usb/cdns3/cdns3-starfive.c index fc1f003b145d..a7265b86e427 100644 --- a/drivers/usb/cdns3/cdns3-starfive.c +++ b/drivers/usb/cdns3/cdns3-starfive.c @@ -166,7 +166,7 @@ static int cdns_starfive_remove_core(struct device *dev, void *c) return 0; } -static int cdns_starfive_remove(struct platform_device *pdev) +static void cdns_starfive_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct cdns_starfive *data = dev_get_drvdata(dev); @@ -178,8 +178,6 @@ static int cdns_starfive_remove(struct platform_device *pdev) pm_runtime_put_noidle(dev); cdns_clk_rst_deinit(data); platform_set_drvdata(pdev, NULL); - - return 0; } #ifdef CONFIG_PM @@ -232,7 +230,7 @@ MODULE_DEVICE_TABLE(of, cdns_starfive_of_match); static struct platform_driver cdns_starfive_driver = { .probe = cdns_starfive_probe, - .remove = cdns_starfive_remove, + .remove_new = cdns_starfive_remove, .driver = { .name = "cdns3-starfive", .of_match_table = cdns_starfive_of_match, diff --git a/drivers/usb/cdns3/cdns3-ti.c b/drivers/usb/cdns3/cdns3-ti.c index 81b9132e3aaa..5945c4b1e11f 100644 --- a/drivers/usb/cdns3/cdns3-ti.c +++ b/drivers/usb/cdns3/cdns3-ti.c @@ -15,6 +15,7 @@ #include <linux/io.h> #include <linux/of_platform.h> #include <linux/pm_runtime.h> +#include <linux/property.h> /* USB Wrapper register offsets */ #define USBSS_PID 0x0 diff --git a/drivers/usb/cdns3/cdnsp-pci.c b/drivers/usb/cdns3/cdnsp-pci.c index 7b151f5af3cc..0725668ffea4 100644 --- a/drivers/usb/cdns3/cdnsp-pci.c +++ b/drivers/usb/cdns3/cdnsp-pci.c @@ -208,8 +208,9 @@ static int __maybe_unused 
cdnsp_pci_resume(struct device *dev) int ret; spin_lock_irqsave(&cdns->lock, flags); - ret = cdns_resume(cdns, 1); + ret = cdns_resume(cdns); spin_unlock_irqrestore(&cdns->lock, flags); + cdns_set_active(cdns, 1); return ret; } diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c index dbcdf3b24b47..33548771a0d3 100644 --- a/drivers/usb/cdns3/core.c +++ b/drivers/usb/cdns3/core.c @@ -14,6 +14,7 @@ #include <linux/dma-mapping.h> #include <linux/module.h> #include <linux/kernel.h> +#include <linux/of.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/io.h> @@ -522,9 +523,8 @@ int cdns_suspend(struct cdns *cdns) } EXPORT_SYMBOL_GPL(cdns_suspend); -int cdns_resume(struct cdns *cdns, u8 set_active) +int cdns_resume(struct cdns *cdns) { - struct device *dev = cdns->dev; enum usb_role real_role; bool role_changed = false; int ret = 0; @@ -556,15 +556,23 @@ int cdns_resume(struct cdns *cdns, u8 set_active) if (cdns->roles[cdns->role]->resume) cdns->roles[cdns->role]->resume(cdns, cdns_power_is_lost(cdns)); + return 0; +} +EXPORT_SYMBOL_GPL(cdns_resume); + +void cdns_set_active(struct cdns *cdns, u8 set_active) +{ + struct device *dev = cdns->dev; + if (set_active) { pm_runtime_disable(dev); pm_runtime_set_active(dev); pm_runtime_enable(dev); } - return 0; + return; } -EXPORT_SYMBOL_GPL(cdns_resume); +EXPORT_SYMBOL_GPL(cdns_set_active); #endif /* CONFIG_PM_SLEEP */ MODULE_AUTHOR("Peter Chen <[email protected]>"); diff --git a/drivers/usb/cdns3/core.h b/drivers/usb/cdns3/core.h index 2d332a788871..4a4dbc2c1561 100644 --- a/drivers/usb/cdns3/core.h +++ b/drivers/usb/cdns3/core.h @@ -125,10 +125,13 @@ int cdns_init(struct cdns *cdns); int cdns_remove(struct cdns *cdns); #ifdef CONFIG_PM_SLEEP -int cdns_resume(struct cdns *cdns, u8 set_active); +int cdns_resume(struct cdns *cdns); int cdns_suspend(struct cdns *cdns); +void cdns_set_active(struct cdns *cdns, u8 set_active); #else /* CONFIG_PM_SLEEP */ -static inline int cdns_resume(struct cdns *cdns, u8 set_active) +static inline int cdns_resume(struct cdns *cdns) +{ return 0; } +static inline int cdns_set_active(struct cdns *cdns, u8 set_active) { return 0; } static inline int cdns_suspend(struct cdns *cdns) { return 0; } diff --git a/drivers/usb/cdns3/drd.c b/drivers/usb/cdns3/drd.c index d00ff98dffab..04b6d12f2b9a 100644 --- a/drivers/usb/cdns3/drd.c +++ b/drivers/usb/cdns3/drd.c @@ -196,6 +196,7 @@ int cdns_drd_host_on(struct cdns *cdns) if (ret) dev_err(cdns->dev, "timeout waiting for xhci_ready\n"); + phy_set_mode(cdns->usb2_phy, PHY_MODE_USB_HOST); phy_set_mode(cdns->usb3_phy, PHY_MODE_USB_HOST); return ret; } @@ -216,6 +217,7 @@ void cdns_drd_host_off(struct cdns *cdns) readl_poll_timeout_atomic(&cdns->otg_regs->state, val, !(val & OTGSTATE_HOST_STATE_MASK), 1, 2000000); + phy_set_mode(cdns->usb2_phy, PHY_MODE_INVALID); phy_set_mode(cdns->usb3_phy, PHY_MODE_INVALID); } @@ -248,6 +250,7 @@ int cdns_drd_gadget_on(struct cdns *cdns) return ret; } + phy_set_mode(cdns->usb2_phy, PHY_MODE_USB_DEVICE); phy_set_mode(cdns->usb3_phy, PHY_MODE_USB_DEVICE); return 0; } @@ -273,6 +276,7 @@ void cdns_drd_gadget_off(struct cdns *cdns) readl_poll_timeout_atomic(&cdns->otg_regs->state, val, !(val & OTGSTATE_DEV_STATE_MASK), 1, 2000000); + phy_set_mode(cdns->usb2_phy, PHY_MODE_INVALID); phy_set_mode(cdns->usb3_phy, PHY_MODE_INVALID); } EXPORT_SYMBOL_GPL(cdns_drd_gadget_off); diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h index f210b7489fd5..d9bb3d3f026e 100644 --- a/drivers/usb/chipidea/ci.h +++ 
b/drivers/usb/chipidea/ci.h @@ -257,6 +257,7 @@ struct ci_hdrc { bool id_event; bool b_sess_valid_event; bool imx28_write_fix; + bool has_portsc_pec_bug; bool supports_runtime_pm; bool in_lpm; bool wakeup_int; @@ -281,8 +282,19 @@ static inline int ci_role_start(struct ci_hdrc *ci, enum ci_role role) return -ENXIO; ret = ci->roles[role]->start(ci); - if (!ret) - ci->role = role; + if (ret) + return ret; + + ci->role = role; + + if (ci->usb_phy) { + if (role == CI_ROLE_HOST) + usb_phy_set_event(ci->usb_phy, USB_EVENT_ID); + else + /* in device mode but vbus is invalid*/ + usb_phy_set_event(ci->usb_phy, USB_EVENT_NONE); + } + return ret; } @@ -296,6 +308,9 @@ static inline void ci_role_stop(struct ci_hdrc *ci) ci->role = CI_ROLE_END; ci->roles[role]->stop(ci); + + if (ci->usb_phy) + usb_phy_set_event(ci->usb_phy, USB_EVENT_NONE); } static inline enum usb_role ci_role_to_usb_role(struct ci_hdrc *ci) diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c index 336ef6dd8e7d..e28bb2f2612d 100644 --- a/drivers/usb/chipidea/ci_hdrc_imx.c +++ b/drivers/usb/chipidea/ci_hdrc_imx.c @@ -6,6 +6,7 @@ */ #include <linux/module.h> +#include <linux/of.h> #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> @@ -67,11 +68,13 @@ static const struct ci_hdrc_imx_platform_flag imx7d_usb_data = { static const struct ci_hdrc_imx_platform_flag imx7ulp_usb_data = { .flags = CI_HDRC_SUPPORTS_RUNTIME_PM | + CI_HDRC_HAS_PORTSC_PEC_MISSED | CI_HDRC_PMQOS, }; static const struct ci_hdrc_imx_platform_flag imx8ulp_usb_data = { - .flags = CI_HDRC_SUPPORTS_RUNTIME_PM, + .flags = CI_HDRC_SUPPORTS_RUNTIME_PM | + CI_HDRC_HAS_PORTSC_PEC_MISSED, }; static const struct of_device_id ci_hdrc_imx_dt_ids[] = { @@ -175,10 +178,15 @@ static struct imx_usbmisc_data *usbmisc_get_init_data(struct device *dev) if (of_usb_get_phy_mode(np) == USBPHY_INTERFACE_MODE_ULPI) data->ulpi = 1; - of_property_read_u32(np, "samsung,picophy-pre-emp-curr-control", - &data->emp_curr_control); - of_property_read_u32(np, "samsung,picophy-dc-vol-level-adjust", - &data->dc_vol_level_adjust); + if (of_property_read_u32(np, "samsung,picophy-pre-emp-curr-control", + &data->emp_curr_control)) + data->emp_curr_control = -1; + if (of_property_read_u32(np, "samsung,picophy-dc-vol-level-adjust", + &data->dc_vol_level_adjust)) + data->dc_vol_level_adjust = -1; + if (of_property_read_u32(np, "fsl,picophy-rise-fall-time-adjust", + &data->rise_fall_time_adjust)) + data->rise_fall_time_adjust = -1; return data; } diff --git a/drivers/usb/chipidea/ci_hdrc_imx.h b/drivers/usb/chipidea/ci_hdrc_imx.h index 7135b9a5d913..88b8da79d518 100644 --- a/drivers/usb/chipidea/ci_hdrc_imx.h +++ b/drivers/usb/chipidea/ci_hdrc_imx.h @@ -28,6 +28,7 @@ struct imx_usbmisc_data { enum usb_dr_mode available_role; /* runtime usb dr mode */ int emp_curr_control; int dc_vol_level_adjust; + int rise_fall_time_adjust; }; int imx_usbmisc_init(struct imx_usbmisc_data *data); diff --git a/drivers/usb/chipidea/ci_hdrc_tegra.c b/drivers/usb/chipidea/ci_hdrc_tegra.c index ca36d11a69ea..8e78bf643e25 100644 --- a/drivers/usb/chipidea/ci_hdrc_tegra.c +++ b/drivers/usb/chipidea/ci_hdrc_tegra.c @@ -6,7 +6,8 @@ #include <linux/clk.h> #include <linux/io.h> #include <linux/module.h> -#include <linux/of_device.h> +#include <linux/of.h> +#include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/reset.h> diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c index 51994d655b82..7ac39a281b8c 100644 --- 
a/drivers/usb/chipidea/core.c +++ b/drivers/usb/chipidea/core.c @@ -1028,8 +1028,7 @@ static int ci_hdrc_probe(struct platform_device *pdev) return -ENODEV; } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - base = devm_ioremap_resource(dev, res); + base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(base)) return PTR_ERR(base); @@ -1045,6 +1044,8 @@ static int ci_hdrc_probe(struct platform_device *pdev) CI_HDRC_IMX28_WRITE_FIX); ci->supports_runtime_pm = !!(ci->platdata->flags & CI_HDRC_SUPPORTS_RUNTIME_PM); + ci->has_portsc_pec_bug = !!(ci->platdata->flags & + CI_HDRC_HAS_PORTSC_PEC_MISSED); platform_set_drvdata(pdev, ci); ret = hw_device_init(ci, base); diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c index ebe7400243b1..08af26b762a2 100644 --- a/drivers/usb/chipidea/host.c +++ b/drivers/usb/chipidea/host.c @@ -151,6 +151,7 @@ static int host_start(struct ci_hdrc *ci) ehci->has_hostpc = ci->hw_bank.lpm; ehci->has_tdi_phy_lpm = ci->hw_bank.lpm; ehci->imx28_write_fix = ci->imx28_write_fix; + ehci->has_ci_pec_bug = ci->has_portsc_pec_bug; priv = (struct ehci_ci_priv *)ehci->priv; priv->reg_vbus = NULL; diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c index 54c09245ad05..0b7bd3c643c3 100644 --- a/drivers/usb/chipidea/udc.c +++ b/drivers/usb/chipidea/udc.c @@ -1463,7 +1463,7 @@ static int ep_disable(struct usb_ep *ep) */ static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags) { - struct ci_hw_req *hwreq = NULL; + struct ci_hw_req *hwreq; if (ep == NULL) return NULL; @@ -1718,6 +1718,13 @@ static int ci_udc_vbus_session(struct usb_gadget *_gadget, int is_active) ret = ci->platdata->notify_event(ci, CI_HDRC_CONTROLLER_VBUS_EVENT); + if (ci->usb_phy) { + if (is_active) + usb_phy_set_event(ci->usb_phy, USB_EVENT_VBUS); + else + usb_phy_set_event(ci->usb_phy, USB_EVENT_NONE); + } + if (ci->driver) ci_hdrc_gadget_connect(_gadget, is_active); @@ -2034,6 +2041,9 @@ static irqreturn_t udc_irq(struct ci_hdrc *ci) if (USBi_PCI & intr) { ci->gadget.speed = hw_port_is_high_speed(ci) ? 
USB_SPEED_HIGH : USB_SPEED_FULL; + if (ci->usb_phy) + usb_phy_set_event(ci->usb_phy, + USB_EVENT_ENUMERATED); if (ci->suspended) { if (ci->driver->resume) { spin_unlock(&ci->lock); diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c index 9ee9621e2ccc..173c78afd502 100644 --- a/drivers/usb/chipidea/usbmisc_imx.c +++ b/drivers/usb/chipidea/usbmisc_imx.c @@ -4,10 +4,11 @@ */ #include <linux/module.h> -#include <linux/of_platform.h> +#include <linux/of.h> #include <linux/err.h> #include <linux/io.h> #include <linux/delay.h> +#include <linux/platform_device.h> #include <linux/usb/otg.h> #include "ci_hdrc_imx.h" @@ -130,6 +131,8 @@ #define MX7D_USB_OTG_PHY_CFG1 0x30 #define TXPREEMPAMPTUNE0_BIT 28 #define TXPREEMPAMPTUNE0_MASK (3 << 28) +#define TXRISETUNE0_BIT 24 +#define TXRISETUNE0_MASK (3 << 24) #define TXVREFTUNE0_BIT 20 #define TXVREFTUNE0_MASK (0xf << 20) @@ -659,18 +662,27 @@ static int usbmisc_imx7d_init(struct imx_usbmisc_data *data) usbmisc->base + MX7D_USBNC_USB_CTRL2); /* PHY tuning for signal quality */ reg = readl(usbmisc->base + MX7D_USB_OTG_PHY_CFG1); - if (data->emp_curr_control && data->emp_curr_control <= + if (data->emp_curr_control >= 0 && + data->emp_curr_control <= (TXPREEMPAMPTUNE0_MASK >> TXPREEMPAMPTUNE0_BIT)) { reg &= ~TXPREEMPAMPTUNE0_MASK; reg |= (data->emp_curr_control << TXPREEMPAMPTUNE0_BIT); } - if (data->dc_vol_level_adjust && data->dc_vol_level_adjust <= + if (data->dc_vol_level_adjust >= 0 && + data->dc_vol_level_adjust <= (TXVREFTUNE0_MASK >> TXVREFTUNE0_BIT)) { reg &= ~TXVREFTUNE0_MASK; reg |= (data->dc_vol_level_adjust << TXVREFTUNE0_BIT); } + if (data->rise_fall_time_adjust >= 0 && + data->rise_fall_time_adjust <= + (TXRISETUNE0_MASK >> TXRISETUNE0_BIT)) { + reg &= ~TXRISETUNE0_MASK; + reg |= (data->rise_fall_time_adjust << TXRISETUNE0_BIT); + } + writel(reg, usbmisc->base + MX7D_USB_OTG_PHY_CFG1); } diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 11da5fb284d0..9b34199474c4 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -28,6 +28,7 @@ #include <linux/serial.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> +#include <linux/tty_ldisc.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/uaccess.h> @@ -324,8 +325,17 @@ static void acm_process_notification(struct acm *acm, unsigned char *buf) if (difference & USB_CDC_SERIAL_STATE_DSR) acm->iocount.dsr++; - if (difference & USB_CDC_SERIAL_STATE_DCD) + if (difference & USB_CDC_SERIAL_STATE_DCD) { + if (acm->port.tty) { + struct tty_ldisc *ld = tty_ldisc_ref(acm->port.tty); + if (ld) { + if (ld->ops->dcd_change) + ld->ops->dcd_change(acm->port.tty, newctrl & USB_CDC_SERIAL_STATE_DCD); + tty_ldisc_deref(ld); + } + } acm->iocount.dcd++; + } if (newctrl & USB_CDC_SERIAL_STATE_BREAK) { acm->iocount.brk++; tty_insert_flip_char(&acm->port, 0, TTY_BREAK); diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c index c9bdeb4ddcb5..b84efae26e15 100644 --- a/drivers/usb/common/common.c +++ b/drivers/usb/common/common.c @@ -11,6 +11,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> +#include <linux/platform_device.h> #include <linux/usb/ch9.h> #include <linux/usb/of.h> #include <linux/usb/otg.h> diff --git a/drivers/usb/common/usb-conn-gpio.c b/drivers/usb/common/usb-conn-gpio.c index 766005d20bae..501e8bc9738e 100644 --- a/drivers/usb/common/usb-conn-gpio.c +++ b/drivers/usb/common/usb-conn-gpio.c @@ -42,6 +42,7 @@ struct usb_conn_info { struct 
power_supply_desc desc; struct power_supply *charger; + bool initial_detection; }; /* @@ -86,11 +87,13 @@ static void usb_conn_detect_cable(struct work_struct *work) dev_dbg(info->dev, "role %s -> %s, gpios: id %d, vbus %d\n", usb_role_string(info->last_role), usb_role_string(role), id, vbus); - if (info->last_role == role) { + if (!info->initial_detection && info->last_role == role) { dev_warn(info->dev, "repeated role: %s\n", usb_role_string(role)); return; } + info->initial_detection = false; + if (info->last_role == USB_ROLE_HOST && info->vbus) regulator_disable(info->vbus); @@ -258,6 +261,7 @@ static int usb_conn_probe(struct platform_device *pdev) device_set_wakeup_capable(&pdev->dev, true); /* Perform initial detection */ + info->initial_detection = true; usb_conn_queue_dwork(info, 0); return 0; diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index 725b8dbcfe5f..b19e38d5fd10 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -1051,9 +1051,6 @@ int usb_get_bos_descriptor(struct usb_device *dev) } switch (cap_type) { - case USB_CAP_TYPE_WIRELESS_USB: - /* Wireless USB cap descriptor is handled by wusb */ - break; case USB_CAP_TYPE_EXT: dev->bos->ext_cap = (struct usb_ext_cap_descriptor *)buffer; diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c index 2c14a9636056..a247da73f34d 100644 --- a/drivers/usb/core/devices.c +++ b/drivers/usb/core/devices.c @@ -424,7 +424,6 @@ static ssize_t usb_device_dump(char __user **buffer, size_t *nbytes, case USB_SPEED_UNKNOWN: /* usb 1.1 root hub code */ case USB_SPEED_FULL: speed = "12"; break; - case USB_SPEED_WIRELESS: /* Wireless has no real fixed speed */ case USB_SPEED_HIGH: speed = "480"; break; case USB_SPEED_SUPER: diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c index c4ed3310e069..a88ced93b5e7 100644 --- a/drivers/usb/core/file.c +++ b/drivers/usb/core/file.c @@ -29,7 +29,6 @@ #define MAX_USB_MINORS 256 static const struct file_operations *usb_minors[MAX_USB_MINORS]; static DECLARE_RWSEM(minor_rwsem); -static DEFINE_MUTEX(init_usb_class_mutex); static int usb_open(struct inode *inode, struct file *file) { @@ -57,11 +56,6 @@ static const struct file_operations usb_fops = { .llseek = noop_llseek, }; -static struct usb_class { - struct kref kref; - struct class *class; -} *usb_class; - static char *usb_devnode(const struct device *dev, umode_t *mode) { struct usb_class_driver *drv; @@ -72,50 +66,10 @@ static char *usb_devnode(const struct device *dev, umode_t *mode) return drv->devnode(dev, mode); } -static int init_usb_class(void) -{ - int result = 0; - - if (usb_class != NULL) { - kref_get(&usb_class->kref); - goto exit; - } - - usb_class = kmalloc(sizeof(*usb_class), GFP_KERNEL); - if (!usb_class) { - result = -ENOMEM; - goto exit; - } - - kref_init(&usb_class->kref); - usb_class->class = class_create("usbmisc"); - if (IS_ERR(usb_class->class)) { - result = PTR_ERR(usb_class->class); - printk(KERN_ERR "class_create failed for usb devices\n"); - kfree(usb_class); - usb_class = NULL; - goto exit; - } - usb_class->class->devnode = usb_devnode; - -exit: - return result; -} - -static void release_usb_class(struct kref *kref) -{ - /* Ok, we cheat as we know we only have one usb_class */ - class_destroy(usb_class->class); - kfree(usb_class); - usb_class = NULL; -} - -static void destroy_usb_class(void) -{ - mutex_lock(&init_usb_class_mutex); - kref_put(&usb_class->kref, release_usb_class); - mutex_unlock(&init_usb_class_mutex); -} +const struct class usbmisc_class = { + 
.name = "usbmisc", + .devnode = usb_devnode, +}; int usb_major_init(void) { @@ -156,7 +110,7 @@ void usb_major_cleanup(void) int usb_register_dev(struct usb_interface *intf, struct usb_class_driver *class_driver) { - int retval; + int retval = 0; int minor_base = class_driver->minor_base; int minor; char name[20]; @@ -175,13 +129,6 @@ int usb_register_dev(struct usb_interface *intf, if (intf->minor >= 0) return -EADDRINUSE; - mutex_lock(&init_usb_class_mutex); - retval = init_usb_class(); - mutex_unlock(&init_usb_class_mutex); - - if (retval) - return retval; - dev_dbg(&intf->dev, "looking for a minor, starting at %d\n", minor_base); down_write(&minor_rwsem); @@ -200,7 +147,7 @@ int usb_register_dev(struct usb_interface *intf, /* create a usb class device for this usb interface */ snprintf(name, sizeof(name), class_driver->name, minor - minor_base); - intf->usb_dev = device_create(usb_class->class, &intf->dev, + intf->usb_dev = device_create(&usbmisc_class, &intf->dev, MKDEV(USB_MAJOR, minor), class_driver, "%s", kbasename(name)); if (IS_ERR(intf->usb_dev)) { @@ -234,7 +181,7 @@ void usb_deregister_dev(struct usb_interface *intf, return; dev_dbg(&intf->dev, "removing %d minor\n", intf->minor); - device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor)); + device_destroy(&usbmisc_class, MKDEV(USB_MAJOR, intf->minor)); down_write(&minor_rwsem); usb_minors[intf->minor] = NULL; @@ -242,6 +189,5 @@ void usb_deregister_dev(struct usb_interface *intf, intf->usb_dev = NULL; intf->minor = -1; - destroy_usb_class(); } EXPORT_SYMBOL_GPL(usb_deregister_dev); diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 8300baedafd2..12b6dfeaf658 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -156,27 +156,6 @@ static const u8 usb3_rh_dev_descriptor[18] = { 0x01 /* __u8 bNumConfigurations; */ }; -/* usb 2.5 (wireless USB 1.0) root hub device descriptor */ -static const u8 usb25_rh_dev_descriptor[18] = { - 0x12, /* __u8 bLength; */ - USB_DT_DEVICE, /* __u8 bDescriptorType; Device */ - 0x50, 0x02, /* __le16 bcdUSB; v2.5 */ - - 0x09, /* __u8 bDeviceClass; HUB_CLASSCODE */ - 0x00, /* __u8 bDeviceSubClass; */ - 0x00, /* __u8 bDeviceProtocol; [ usb 2.0 no TT ] */ - 0xFF, /* __u8 bMaxPacketSize0; always 0xFF (WUSB Spec 7.4.1). 
*/ - - 0x6b, 0x1d, /* __le16 idVendor; Linux Foundation 0x1d6b */ - 0x02, 0x00, /* __le16 idProduct; device 0x0002 */ - KERNEL_VER, KERNEL_REL, /* __le16 bcdDevice */ - - 0x03, /* __u8 iManufacturer; */ - 0x02, /* __u8 iProduct; */ - 0x01, /* __u8 iSerialNumber; */ - 0x01 /* __u8 bNumConfigurations; */ -}; - /* usb 2.0 root hub device descriptor */ static const u8 usb2_rh_dev_descriptor[18] = { 0x12, /* __u8 bLength; */ @@ -368,7 +347,7 @@ static const u8 ss_rh_config_descriptor[] = { }; /* authorized_default behaviour: - * -1 is authorized for all devices except wireless (old behaviour) + * -1 is authorized for all devices (leftover from wireless USB) * 0 is unauthorized for all devices * 1 is authorized for all devices * 2 is authorized for internal devices @@ -383,7 +362,7 @@ module_param(authorized_default, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(authorized_default, "Default USB device authorization: 0 is not authorized, 1 is " "authorized, 2 is authorized for internal devices, -1 is " - "authorized except for wireless USB (default, old behaviour)"); + "authorized (default, same as 1)"); /*-------------------------------------------------------------------------*/ /** @@ -578,9 +557,6 @@ static int rh_call_control (struct usb_hcd *hcd, struct urb *urb) case HCD_USB3: bufp = usb3_rh_dev_descriptor; break; - case HCD_USB25: - bufp = usb25_rh_dev_descriptor; - break; case HCD_USB2: bufp = usb2_rh_dev_descriptor; break; @@ -602,7 +578,6 @@ static int rh_call_control (struct usb_hcd *hcd, struct urb *urb) bufp = ss_rh_config_descriptor; len = sizeof ss_rh_config_descriptor; break; - case HCD_USB25: case HCD_USB2: bufp = hs_rh_config_descriptor; len = sizeof hs_rh_config_descriptor; @@ -983,6 +958,7 @@ static int register_root_hub(struct usb_hcd *hcd) { struct device *parent_dev = hcd->self.controller; struct usb_device *usb_dev = hcd->self.root_hub; + struct usb_device_descriptor *descr; const int devnum = 1; int retval; @@ -994,13 +970,16 @@ static int register_root_hub(struct usb_hcd *hcd) mutex_lock(&usb_bus_idr_lock); usb_dev->ep0.desc.wMaxPacketSize = cpu_to_le16(64); - retval = usb_get_device_descriptor(usb_dev, USB_DT_DEVICE_SIZE); - if (retval != sizeof usb_dev->descriptor) { + descr = usb_get_device_descriptor(usb_dev); + if (IS_ERR(descr)) { + retval = PTR_ERR(descr); mutex_unlock(&usb_bus_idr_lock); dev_dbg (parent_dev, "can't read %s device descriptor %d\n", dev_name(&usb_dev->dev), retval); - return (retval < 0) ? retval : -EMSGSIZE; + return retval; } + usb_dev->descriptor = *descr; + kfree(descr); if (le16_to_cpu(usb_dev->descriptor.bcdUSB) >= 0x0201) { retval = usb_get_bos_descriptor(usb_dev); @@ -2844,18 +2823,14 @@ int usb_add_hcd(struct usb_hcd *hcd, hcd->dev_policy = USB_DEVICE_AUTHORIZE_NONE; break; - case USB_AUTHORIZE_ALL: - hcd->dev_policy = USB_DEVICE_AUTHORIZE_ALL; - break; - case USB_AUTHORIZE_INTERNAL: hcd->dev_policy = USB_DEVICE_AUTHORIZE_INTERNAL; break; + case USB_AUTHORIZE_ALL: case USB_AUTHORIZE_WIRED: default: - hcd->dev_policy = hcd->wireless ? 
- USB_DEVICE_AUTHORIZE_NONE : USB_DEVICE_AUTHORIZE_ALL; + hcd->dev_policy = USB_DEVICE_AUTHORIZE_ALL; break; } @@ -2899,9 +2874,6 @@ int usb_add_hcd(struct usb_hcd *hcd, case HCD_USB2: rhdev->speed = USB_SPEED_HIGH; break; - case HCD_USB25: - rhdev->speed = USB_SPEED_WIRELESS; - break; case HCD_USB3: rhdev->speed = USB_SPEED_SUPER; break; diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index a739403a9e45..3c54b218301c 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -614,6 +614,29 @@ static int hub_ext_port_status(struct usb_hub *hub, int port1, int type, ret = 0; } mutex_unlock(&hub->status_mutex); + + /* + * There is no need to lock status_mutex here, because status_mutex + * protects hub->status, and the phy driver only checks the port + * status without changing the status. + */ + if (!ret) { + struct usb_device *hdev = hub->hdev; + + /* + * Only roothub will be notified of port state changes, + * since the USB PHY only cares about changes at the next + * level. + */ + if (is_root_hub(hdev)) { + struct usb_hcd *hcd = bus_to_hcd(hdev->bus); + + if (hcd->usb_phy) + usb_phy_notify_port_status(hcd->usb_phy, + port1 - 1, *status, *change); + } + } + return ret; } @@ -2117,22 +2140,6 @@ EXPORT_SYMBOL_GPL(usb_set_device_state); * USB-3.0 buses the address is assigned by the controller hardware * and it usually is not the same as the device number. * - * WUSB devices are simple: they have no hubs behind, so the mapping - * device <-> virtual port number becomes 1:1. Why? to simplify the - * life of the device connection logic in - * drivers/usb/wusbcore/devconnect.c. When we do the initial secret - * handshake we need to assign a temporary address in the unauthorized - * space. For simplicity we use the first virtual port number found to - * be free [drivers/usb/wusbcore/devconnect.c:wusbhc_devconnect_ack()] - * and that becomes it's address [X < 128] or its unauthorized address - * [X | 0x80]. - * - * We add 1 as an offset to the one-based USB-stack port number - * (zero-based wusb virtual port index) for two reasons: (a) dev addr - * 0 is reserved by USB for default address; (b) Linux's USB stack - * uses always #1 for the root hub of the controller. So USB stack's - * port #1, which is wusb virtual-port #0 has address #2. - * * Devices connected under xHCI are not as simple. The host controller * supports virtualization, so the hardware assigns device addresses and * the HCD must setup data structures before issuing a set address @@ -2145,19 +2152,13 @@ static void choose_devnum(struct usb_device *udev) /* be safe when more hub events are proceed in parallel */ mutex_lock(&bus->devnum_next_mutex); - if (udev->wusb) { - devnum = udev->portnum + 1; - BUG_ON(test_bit(devnum, bus->devmap.devicemap)); - } else { - /* Try to allocate the next devnum beginning at - * bus->devnum_next. */ - devnum = find_next_zero_bit(bus->devmap.devicemap, 128, - bus->devnum_next); - if (devnum >= 128) - devnum = find_next_zero_bit(bus->devmap.devicemap, - 128, 1); - bus->devnum_next = (devnum >= 127 ? 1 : devnum + 1); - } + + /* Try to allocate the next devnum beginning at bus->devnum_next. */ + devnum = find_next_zero_bit(bus->devmap.devicemap, 128, + bus->devnum_next); + if (devnum >= 128) + devnum = find_next_zero_bit(bus->devmap.devicemap, 128, 1); + bus->devnum_next = (devnum >= 127 ? 
1 : devnum + 1); if (devnum < 128) { set_bit(devnum, bus->devmap.devicemap); udev->devnum = devnum; @@ -2175,9 +2176,7 @@ static void release_devnum(struct usb_device *udev) static void update_devnum(struct usb_device *udev, int devnum) { - /* The address for a WUSB device is managed by wusbcore. */ - if (!udev->wusb) - udev->devnum = devnum; + udev->devnum = devnum; if (!udev->devaddr) udev->devaddr = (u8)devnum; } @@ -2670,15 +2669,6 @@ int usb_authorize_device(struct usb_device *usb_dev) goto error_autoresume; } - if (usb_dev->wusb) { - result = usb_get_device_descriptor(usb_dev, sizeof(usb_dev->descriptor)); - if (result < 0) { - dev_err(&usb_dev->dev, "can't re-read device descriptor for " - "authorization: %d\n", result); - goto error_device_descriptor; - } - } - usb_dev->authorized = 1; /* Choose and set the configuration. This registers the interfaces * with the driver core and lets interface drivers bind to them. @@ -2695,7 +2685,6 @@ int usb_authorize_device(struct usb_device *usb_dev) } dev_info(&usb_dev->dev, "authorized to connect\n"); -error_device_descriptor: usb_autosuspend_device(usb_dev); error_autoresume: out_authorized: @@ -2778,17 +2767,6 @@ out: return USB_SSP_GEN_UNKNOWN; } -/* Returns 1 if @hub is a WUSB root hub, 0 otherwise */ -static unsigned hub_is_wusb(struct usb_hub *hub) -{ - struct usb_hcd *hcd; - if (hub->hdev->parent != NULL) /* not a root hub? */ - return 0; - hcd = bus_to_hcd(hub->hdev->bus); - return hcd->wireless; -} - - #ifdef CONFIG_USB_FEW_INIT_RETRIES #define PORT_RESET_TRIES 2 #define SET_ADDRESS_TRIES 1 @@ -2941,9 +2919,7 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1, udev->tx_lanes = 1; udev->ssp_rate = USB_SSP_GEN_UNKNOWN; } - if (hub_is_wusb(hub)) - udev->speed = USB_SPEED_WIRELESS; - else if (udev->ssp_rate != USB_SSP_GEN_UNKNOWN) + if (udev->ssp_rate != USB_SSP_GEN_UNKNOWN) udev->speed = USB_SPEED_SUPER_PLUS; else if (hub_is_superspeed(hub->hdev)) udev->speed = USB_SPEED_SUPER; @@ -4718,6 +4694,67 @@ static int hub_enable_device(struct usb_device *udev) return hcd->driver->enable_device(hcd, udev); } +/* + * Get the bMaxPacketSize0 value during initialization by reading the + * device's device descriptor. Since we don't already know this value, + * the transfer is unsafe and it ignores I/O errors, only testing for + * reasonable received values. + * + * For "old scheme" initialization, size will be 8 so we read just the + * start of the device descriptor, which should work okay regardless of + * the actual bMaxPacketSize0 value. For "new scheme" initialization, + * size will be 64 (and buf will point to a sufficiently large buffer), + * which might not be kosher according to the USB spec but it's what + * Windows does and what many devices expect. + * + * Returns: bMaxPacketSize0 or a negative error code. + */ +static int get_bMaxPacketSize0(struct usb_device *udev, + struct usb_device_descriptor *buf, int size, bool first_time) +{ + int i, rc; + + /* + * Retry on all errors; some devices are flakey. + * 255 is for WUSB devices, we actually need to use + * 512 (WUSB1.0[4.8.1]). 
+ */ + for (i = 0; i < GET_MAXPACKET0_TRIES; ++i) { + /* Start with invalid values in case the transfer fails */ + buf->bDescriptorType = buf->bMaxPacketSize0 = 0; + rc = usb_control_msg(udev, usb_rcvaddr0pipe(), + USB_REQ_GET_DESCRIPTOR, USB_DIR_IN, + USB_DT_DEVICE << 8, 0, + buf, size, + initial_descriptor_timeout); + switch (buf->bMaxPacketSize0) { + case 8: case 16: case 32: case 64: case 9: + if (buf->bDescriptorType == USB_DT_DEVICE) { + rc = buf->bMaxPacketSize0; + break; + } + fallthrough; + default: + if (rc >= 0) + rc = -EPROTO; + break; + } + + /* + * Some devices time out if they are powered on + * when already connected. They need a second + * reset, so return early. But only on the first + * attempt, lest we get into a time-out/reset loop. + */ + if (rc > 0 || (rc == -ETIMEDOUT && first_time && + udev->speed > USB_SPEED_FULL)) + break; + } + return rc; +} + +#define GET_DESCRIPTOR_BUFSIZE 64 + /* Reset device, (re)assign address, get device descriptor. * Device connection must be stable, no more debouncing needed. * Returns device in USB_STATE_ADDRESS, except on error. @@ -4727,10 +4764,17 @@ static int hub_enable_device(struct usb_device *udev) * the port lock. For a newly detected device that is not accessible * through any global pointers, it's not necessary to lock the device, * but it is still necessary to lock the port. + * + * For a newly detected device, @dev_descr must be NULL. The device + * descriptor retrieved from the device will then be stored in + * @udev->descriptor. For an already existing device, @dev_descr + * must be non-NULL. The device descriptor will be stored there, + * not in @udev->descriptor, because descriptors for registered + * devices are meant to be immutable. */ static int hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1, - int retry_counter) + int retry_counter, struct usb_device_descriptor *dev_descr) { struct usb_device *hdev = hub->hdev; struct usb_hcd *hcd = bus_to_hcd(hdev->bus); @@ -4742,6 +4786,13 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1, int devnum = udev->devnum; const char *driver_name; bool do_new_scheme; + const bool initial = !dev_descr; + int maxp0; + struct usb_device_descriptor *buf, *descr; + + buf = kmalloc(GET_DESCRIPTOR_BUFSIZE, GFP_NOIO); + if (!buf) + return -ENOMEM; /* root hub ports have a slightly longer reset period * (from USB 2.0 spec, section 7.1.7.5) @@ -4774,38 +4825,34 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1, } oldspeed = udev->speed; - /* USB 2.0 section 5.5.3 talks about ep0 maxpacket ... - * it's fixed size except for full speed devices. - * For Wireless USB devices, ep0 max packet is always 512 (tho - * reported as 0xff in the device descriptor). WUSB1.0[4.8.1]. - */ - switch (udev->speed) { - case USB_SPEED_SUPER_PLUS: - case USB_SPEED_SUPER: - case USB_SPEED_WIRELESS: /* fixed at 512 */ - udev->ep0.desc.wMaxPacketSize = cpu_to_le16(512); - break; - case USB_SPEED_HIGH: /* fixed at 64 */ - udev->ep0.desc.wMaxPacketSize = cpu_to_le16(64); - break; - case USB_SPEED_FULL: /* 8, 16, 32, or 64 */ - /* to determine the ep0 maxpacket size, try to read - * the device descriptor to get bMaxPacketSize0 and - * then correct our initial guess. + if (initial) { + /* USB 2.0 section 5.5.3 talks about ep0 maxpacket ... + * it's fixed size except for full speed devices. 
*/ - udev->ep0.desc.wMaxPacketSize = cpu_to_le16(64); - break; - case USB_SPEED_LOW: /* fixed at 8 */ - udev->ep0.desc.wMaxPacketSize = cpu_to_le16(8); - break; - default: - goto fail; + switch (udev->speed) { + case USB_SPEED_SUPER_PLUS: + case USB_SPEED_SUPER: + udev->ep0.desc.wMaxPacketSize = cpu_to_le16(512); + break; + case USB_SPEED_HIGH: /* fixed at 64 */ + udev->ep0.desc.wMaxPacketSize = cpu_to_le16(64); + break; + case USB_SPEED_FULL: /* 8, 16, 32, or 64 */ + /* to determine the ep0 maxpacket size, try to read + * the device descriptor to get bMaxPacketSize0 and + * then correct our initial guess. + */ + udev->ep0.desc.wMaxPacketSize = cpu_to_le16(64); + break; + case USB_SPEED_LOW: /* fixed at 8 */ + udev->ep0.desc.wMaxPacketSize = cpu_to_le16(8); + break; + default: + goto fail; + } } - if (udev->speed == USB_SPEED_WIRELESS) - speed = "variable speed Wireless"; - else - speed = usb_speed_string(udev->speed); + speed = usb_speed_string(udev->speed); /* * The controller driver may be NULL if the controller device @@ -4822,22 +4869,24 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1, if (udev->speed < USB_SPEED_SUPER) dev_info(&udev->dev, "%s %s USB device number %d using %s\n", - (udev->config) ? "reset" : "new", speed, + (initial ? "new" : "reset"), speed, devnum, driver_name); - /* Set up TT records, if needed */ - if (hdev->tt) { - udev->tt = hdev->tt; - udev->ttport = hdev->ttport; - } else if (udev->speed != USB_SPEED_HIGH - && hdev->speed == USB_SPEED_HIGH) { - if (!hub->tt.hub) { - dev_err(&udev->dev, "parent hub has no TT\n"); - retval = -EINVAL; - goto fail; + if (initial) { + /* Set up TT records, if needed */ + if (hdev->tt) { + udev->tt = hdev->tt; + udev->ttport = hdev->ttport; + } else if (udev->speed != USB_SPEED_HIGH + && hdev->speed == USB_SPEED_HIGH) { + if (!hub->tt.hub) { + dev_err(&udev->dev, "parent hub has no TT\n"); + retval = -EINVAL; + goto fail; + } + udev->tt = &hub->tt; + udev->ttport = port1; } - udev->tt = &hub->tt; - udev->ttport = port1; } /* Why interleave GET_DESCRIPTOR and SET_ADDRESS this way? @@ -4861,9 +4910,6 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1, } if (do_new_scheme) { - struct usb_device_descriptor *buf; - int r = 0; - retval = hub_enable_device(udev); if (retval < 0) { dev_err(&udev->dev, @@ -4872,52 +4918,14 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1, goto fail; } -#define GET_DESCRIPTOR_BUFSIZE 64 - buf = kmalloc(GET_DESCRIPTOR_BUFSIZE, GFP_NOIO); - if (!buf) { - retval = -ENOMEM; - continue; - } - - /* Retry on all errors; some devices are flakey. - * 255 is for WUSB devices, we actually need to use - * 512 (WUSB1.0[4.8.1]). - */ - for (operations = 0; operations < GET_MAXPACKET0_TRIES; - ++operations) { - buf->bMaxPacketSize0 = 0; - r = usb_control_msg(udev, usb_rcvaddr0pipe(), - USB_REQ_GET_DESCRIPTOR, USB_DIR_IN, - USB_DT_DEVICE << 8, 0, - buf, GET_DESCRIPTOR_BUFSIZE, - initial_descriptor_timeout); - switch (buf->bMaxPacketSize0) { - case 8: case 16: case 32: case 64: case 255: - if (buf->bDescriptorType == - USB_DT_DEVICE) { - r = 0; - break; - } - fallthrough; - default: - if (r == 0) - r = -EPROTO; - break; - } - /* - * Some devices time out if they are powered on - * when already connected. They need a second - * reset. 
But only on the first attempt, - * lest we get into a time out/reset loop - */ - if (r == 0 || (r == -ETIMEDOUT && - retries == 0 && - udev->speed > USB_SPEED_FULL)) - break; + maxp0 = get_bMaxPacketSize0(udev, buf, + GET_DESCRIPTOR_BUFSIZE, retries == 0); + if (maxp0 > 0 && !initial && + maxp0 != udev->descriptor.bMaxPacketSize0) { + dev_err(&udev->dev, "device reset changed ep0 maxpacket size!\n"); + retval = -ENODEV; + goto fail; } - udev->descriptor.bMaxPacketSize0 = - buf->bMaxPacketSize0; - kfree(buf); retval = hub_port_reset(hub, port1, udev, delay, false); if (retval < 0) /* error or disconnect */ @@ -4928,71 +4936,68 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1, retval = -ENODEV; goto fail; } - if (r) { - if (r != -ENODEV) + if (maxp0 < 0) { + if (maxp0 != -ENODEV) dev_err(&udev->dev, "device descriptor read/64, error %d\n", - r); - retval = -EMSGSIZE; + maxp0); + retval = maxp0; continue; } -#undef GET_DESCRIPTOR_BUFSIZE + } + + for (operations = 0; operations < SET_ADDRESS_TRIES; ++operations) { + retval = hub_set_address(udev, devnum); + if (retval >= 0) + break; + msleep(200); + } + if (retval < 0) { + if (retval != -ENODEV) + dev_err(&udev->dev, "device not accepting address %d, error %d\n", + devnum, retval); + goto fail; + } + if (udev->speed >= USB_SPEED_SUPER) { + devnum = udev->devnum; + dev_info(&udev->dev, + "%s SuperSpeed%s%s USB device number %d using %s\n", + (udev->config) ? "reset" : "new", + (udev->speed == USB_SPEED_SUPER_PLUS) ? + " Plus" : "", + (udev->ssp_rate == USB_SSP_GEN_2x2) ? + " Gen 2x2" : + (udev->ssp_rate == USB_SSP_GEN_2x1) ? + " Gen 2x1" : + (udev->ssp_rate == USB_SSP_GEN_1x2) ? + " Gen 1x2" : "", + devnum, driver_name); } /* - * If device is WUSB, we already assigned an - * unauthorized address in the Connect Ack sequence; - * authorization will assign the final address. + * cope with hardware quirkiness: + * - let SET_ADDRESS settle, some device hardware wants it + * - read ep0 maxpacket even for high and low speed, */ - if (udev->wusb == 0) { - for (operations = 0; operations < SET_ADDRESS_TRIES; ++operations) { - retval = hub_set_address(udev, devnum); - if (retval >= 0) - break; - msleep(200); - } - if (retval < 0) { - if (retval != -ENODEV) - dev_err(&udev->dev, "device not accepting address %d, error %d\n", - devnum, retval); - goto fail; - } - if (udev->speed >= USB_SPEED_SUPER) { - devnum = udev->devnum; - dev_info(&udev->dev, - "%s SuperSpeed%s%s USB device number %d using %s\n", - (udev->config) ? "reset" : "new", - (udev->speed == USB_SPEED_SUPER_PLUS) ? - " Plus" : "", - (udev->ssp_rate == USB_SSP_GEN_2x2) ? - " Gen 2x2" : - (udev->ssp_rate == USB_SSP_GEN_2x1) ? - " Gen 2x1" : - (udev->ssp_rate == USB_SSP_GEN_1x2) ? 
- " Gen 1x2" : "", - devnum, driver_name); - } + msleep(10); - /* cope with hardware quirkiness: - * - let SET_ADDRESS settle, some device hardware wants it - * - read ep0 maxpacket even for high and low speed, - */ - msleep(10); - if (do_new_scheme) - break; - } + if (do_new_scheme) + break; - retval = usb_get_device_descriptor(udev, 8); - if (retval < 8) { + maxp0 = get_bMaxPacketSize0(udev, buf, 8, retries == 0); + if (maxp0 < 0) { + retval = maxp0; if (retval != -ENODEV) dev_err(&udev->dev, "device descriptor read/8, error %d\n", retval); - if (retval >= 0) - retval = -EMSGSIZE; } else { u32 delay; - retval = 0; + if (!initial && maxp0 != udev->descriptor.bMaxPacketSize0) { + dev_err(&udev->dev, "device reset changed ep0 maxpacket size!\n"); + retval = -ENODEV; + goto fail; + } delay = udev->parent->hub_delay; udev->hub_delay = min_t(u32, delay, @@ -5011,54 +5016,67 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1, goto fail; /* - * Some superspeed devices have finished the link training process - * and attached to a superspeed hub port, but the device descriptor - * got from those devices show they aren't superspeed devices. Warm - * reset the port attached by the devices can fix them. + * Check the ep0 maxpacket guess and correct it if necessary. + * maxp0 is the value stored in the device descriptor; + * i is the value it encodes (logarithmic for SuperSpeed or greater). */ - if ((udev->speed >= USB_SPEED_SUPER) && - (le16_to_cpu(udev->descriptor.bcdUSB) < 0x0300)) { - dev_err(&udev->dev, "got a wrong device descriptor, " - "warm reset device\n"); - hub_port_reset(hub, port1, udev, - HUB_BH_RESET_TIME, true); - retval = -EINVAL; - goto fail; - } - - if (udev->descriptor.bMaxPacketSize0 == 0xff || - udev->speed >= USB_SPEED_SUPER) - i = 512; - else - i = udev->descriptor.bMaxPacketSize0; - if (usb_endpoint_maxp(&udev->ep0.desc) != i) { - if (udev->speed == USB_SPEED_LOW || - !(i == 8 || i == 16 || i == 32 || i == 64)) { - dev_err(&udev->dev, "Invalid ep0 maxpacket: %d\n", i); - retval = -EMSGSIZE; - goto fail; - } + i = maxp0; + if (udev->speed >= USB_SPEED_SUPER) { + if (maxp0 <= 16) + i = 1 << maxp0; + else + i = 0; /* Invalid */ + } + if (usb_endpoint_maxp(&udev->ep0.desc) == i) { + ; /* Initial ep0 maxpacket guess is right */ + } else if ((udev->speed == USB_SPEED_FULL || + udev->speed == USB_SPEED_HIGH) && + (i == 8 || i == 16 || i == 32 || i == 64)) { + /* Initial guess is wrong; use the descriptor's value */ if (udev->speed == USB_SPEED_FULL) dev_dbg(&udev->dev, "ep0 maxpacket = %d\n", i); else dev_warn(&udev->dev, "Using ep0 maxpacket: %d\n", i); udev->ep0.desc.wMaxPacketSize = cpu_to_le16(i); usb_ep0_reinit(udev); + } else { + /* Initial guess is wrong and descriptor's value is invalid */ + dev_err(&udev->dev, "Invalid ep0 maxpacket: %d\n", maxp0); + retval = -EMSGSIZE; + goto fail; } - retval = usb_get_device_descriptor(udev, USB_DT_DEVICE_SIZE); - if (retval < (signed)sizeof(udev->descriptor)) { + descr = usb_get_device_descriptor(udev); + if (IS_ERR(descr)) { + retval = PTR_ERR(descr); if (retval != -ENODEV) dev_err(&udev->dev, "device descriptor read/all, error %d\n", retval); - if (retval >= 0) - retval = -ENOMSG; + goto fail; + } + if (initial) + udev->descriptor = *descr; + else + *dev_descr = *descr; + kfree(descr); + + /* + * Some superspeed devices have finished the link training process + * and attached to a superspeed hub port, but the device descriptor + * got from those devices show they aren't superspeed devices. 
Warm + * reset the port attached by the devices can fix them. + */ + if ((udev->speed >= USB_SPEED_SUPER) && + (le16_to_cpu(udev->descriptor.bcdUSB) < 0x0300)) { + dev_err(&udev->dev, "got a wrong device descriptor, warm reset device\n"); + hub_port_reset(hub, port1, udev, HUB_BH_RESET_TIME, true); + retval = -EINVAL; goto fail; } usb_detect_quirks(udev); - if (udev->wusb == 0 && le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0201) { + if (le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0201) { retval = usb_get_bos_descriptor(udev); if (!retval) { udev->lpm_capable = usb_device_supports_lpm(udev); @@ -5078,6 +5096,7 @@ fail: hub_port_disable(hub, port1, 0); update_devnum(udev, devnum); /* for disconnect processing */ } + kfree(buf); return retval; } @@ -5158,7 +5177,7 @@ hub_power_remaining(struct usb_hub *hub) static int descriptors_changed(struct usb_device *udev, - struct usb_device_descriptor *old_device_descriptor, + struct usb_device_descriptor *new_device_descriptor, struct usb_host_bos *old_bos) { int changed = 0; @@ -5169,8 +5188,8 @@ static int descriptors_changed(struct usb_device *udev, int length; char *buf; - if (memcmp(&udev->descriptor, old_device_descriptor, - sizeof(*old_device_descriptor)) != 0) + if (memcmp(&udev->descriptor, new_device_descriptor, + sizeof(*new_device_descriptor)) != 0) return 1; if ((old_bos && !udev->bos) || (!old_bos && udev->bos)) @@ -5333,7 +5352,6 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, usb_set_device_state(udev, USB_STATE_POWERED); udev->bus_mA = hub->mA_per_port; udev->level = hdev->level + 1; - udev->wusb = hub_is_wusb(hub); /* Devices connected to SuperSpeed hubs are USB 3.0 or later */ if (hub_is_superspeed(hub->hdev)) @@ -5348,7 +5366,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, } /* reset (non-USB 3.0 devices) and get descriptor */ - status = hub_port_init(hub, udev, port1, i); + status = hub_port_init(hub, udev, port1, i, NULL); if (status < 0) goto loop; @@ -5495,9 +5513,8 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1, { struct usb_port *port_dev = hub->ports[port1 - 1]; struct usb_device *udev = port_dev->child; - struct usb_device_descriptor descriptor; + struct usb_device_descriptor *descr; int status = -ENODEV; - int retval; dev_dbg(&port_dev->dev, "status %04x, change %04x, %s\n", portstatus, portchange, portspeed(hub, portstatus)); @@ -5524,23 +5541,20 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1, * changed device descriptors before resuscitating the * device. 
*/ - descriptor = udev->descriptor; - retval = usb_get_device_descriptor(udev, - sizeof(udev->descriptor)); - if (retval < 0) { + descr = usb_get_device_descriptor(udev); + if (IS_ERR(descr)) { dev_dbg(&udev->dev, - "can't read device descriptor %d\n", - retval); + "can't read device descriptor %ld\n", + PTR_ERR(descr)); } else { - if (descriptors_changed(udev, &descriptor, + if (descriptors_changed(udev, descr, udev->bos)) { dev_dbg(&udev->dev, "device descriptor has changed\n"); - /* for disconnect() calls */ - udev->descriptor = descriptor; } else { status = 0; /* Nothing to do */ } + kfree(descr); } #ifdef CONFIG_PM } else if (udev->state == USB_STATE_SUSPENDED && @@ -5982,7 +5996,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev) struct usb_device *parent_hdev = udev->parent; struct usb_hub *parent_hub; struct usb_hcd *hcd = bus_to_hcd(udev->bus); - struct usb_device_descriptor descriptor = udev->descriptor; + struct usb_device_descriptor descriptor; struct usb_host_bos *bos; int i, j, ret = 0; int port1 = udev->portnum; @@ -6018,7 +6032,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev) /* ep0 maxpacket size may change; let the HCD know about it. * Other endpoints will be handled by re-enumeration. */ usb_ep0_reinit(udev); - ret = hub_port_init(parent_hub, udev, port1, i); + ret = hub_port_init(parent_hub, udev, port1, i, &descriptor); if (ret >= 0 || ret == -ENOTCONN || ret == -ENODEV) break; } @@ -6030,7 +6044,6 @@ static int usb_reset_and_verify_device(struct usb_device *udev) /* Device might have changed firmware (DFU or similar) */ if (descriptors_changed(udev, &descriptor, bos)) { dev_info(&udev->dev, "device firmware changed\n"); - udev->descriptor = descriptor; /* for disconnect() calls */ goto re_enumerate; } diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index b5811620f1de..077dfe48d01c 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -9,6 +9,7 @@ #include <linux/pci.h> /* for scatterlist macros */ #include <linux/usb.h> #include <linux/module.h> +#include <linux/of.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/timer.h> @@ -1040,40 +1041,35 @@ char *usb_cache_string(struct usb_device *udev, int index) EXPORT_SYMBOL_GPL(usb_cache_string); /* - * usb_get_device_descriptor - (re)reads the device descriptor (usbcore) - * @dev: the device whose device descriptor is being updated - * @size: how much of the descriptor to read + * usb_get_device_descriptor - read the device descriptor + * @udev: the device whose device descriptor should be read * * Context: task context, might sleep. * - * Updates the copy of the device descriptor stored in the device structure, - * which dedicates space for this purpose. - * * Not exported, only for use by the core. If drivers really want to read * the device descriptor directly, they can call usb_get_descriptor() with * type = USB_DT_DEVICE and index = 0. * - * This call is synchronous, and may not be used in an interrupt context. - * - * Return: The number of bytes received on success, or else the status code - * returned by the underlying usb_control_msg() call. + * Returns: a pointer to a dynamically allocated usb_device_descriptor + * structure (which the caller must deallocate), or an ERR_PTR value. 
*/ -int usb_get_device_descriptor(struct usb_device *dev, unsigned int size) +struct usb_device_descriptor *usb_get_device_descriptor(struct usb_device *udev) { struct usb_device_descriptor *desc; int ret; - if (size > sizeof(*desc)) - return -EINVAL; desc = kmalloc(sizeof(*desc), GFP_NOIO); if (!desc) - return -ENOMEM; + return ERR_PTR(-ENOMEM); + + ret = usb_get_descriptor(udev, USB_DT_DEVICE, 0, desc, sizeof(*desc)); + if (ret == sizeof(*desc)) + return desc; - ret = usb_get_descriptor(dev, USB_DT_DEVICE, 0, desc, size); if (ret >= 0) - memcpy(&dev->descriptor, desc, size); + ret = -EMSGSIZE; kfree(desc); - return ret; + return ERR_PTR(ret); } /* diff --git a/drivers/usb/core/of.c b/drivers/usb/core/of.c index 617e92569b2c..db4ccf9ce3d9 100644 --- a/drivers/usb/core/of.c +++ b/drivers/usb/core/of.c @@ -8,7 +8,6 @@ */ #include <linux/of.h> -#include <linux/of_platform.h> #include <linux/usb/of.h> /** diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c index 323dc02becbe..5d21718afb05 100644 --- a/drivers/usb/core/sysfs.c +++ b/drivers/usb/core/sysfs.c @@ -161,9 +161,6 @@ static ssize_t speed_show(struct device *dev, struct device_attribute *attr, case USB_SPEED_HIGH: speed = "480"; break; - case USB_SPEED_WIRELESS: - speed = "480"; - break; case USB_SPEED_SUPER: speed = "5000"; break; diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c index 9f3c54032556..7576920e2d5a 100644 --- a/drivers/usb/core/urb.c +++ b/drivers/usb/core/urb.c @@ -480,8 +480,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags) urb->iso_frame_desc[n].status = -EXDEV; urb->iso_frame_desc[n].actual_length = 0; } - } else if (urb->num_sgs && !urb->dev->bus->no_sg_constraint && - dev->speed != USB_SPEED_WIRELESS) { + } else if (urb->num_sgs && !urb->dev->bus->no_sg_constraint) { struct scatterlist *sg; int i; @@ -540,17 +539,9 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags) case USB_ENDPOINT_XFER_ISOC: case USB_ENDPOINT_XFER_INT: /* too small? */ - switch (dev->speed) { - case USB_SPEED_WIRELESS: - if ((urb->interval < 6) - && (xfertype == USB_ENDPOINT_XFER_INT)) - return -EINVAL; - fallthrough; - default: - if (urb->interval <= 0) - return -EINVAL; - break; - } + if (urb->interval <= 0) + return -EINVAL; + /* too big? 
*/ switch (dev->speed) { case USB_SPEED_SUPER_PLUS: @@ -560,10 +551,6 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags) return -EINVAL; max = 1 << 15; break; - case USB_SPEED_WIRELESS: - if (urb->interval > 16) - return -EINVAL; - break; case USB_SPEED_HIGH: /* units are microframes */ /* NOTE usb handles 2^15 */ if (urb->interval > (1024 * 8)) @@ -587,10 +574,8 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags) default: return -EINVAL; } - if (dev->speed != USB_SPEED_WIRELESS) { - /* Round down to a power of 2, no more than max */ - urb->interval = min(max, 1 << ilog2(urb->interval)); - } + /* Round down to a power of 2, no more than max */ + urb->interval = min(max, 1 << ilog2(urb->interval)); } return usb_hcd_submit_urb(urb, mem_flags); diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c index 901ec732321c..2a938cf47ccd 100644 --- a/drivers/usb/core/usb.c +++ b/drivers/usb/core/usb.c @@ -25,6 +25,7 @@ #include <linux/module.h> #include <linux/moduleparam.h> +#include <linux/of.h> #include <linux/string.h> #include <linux/bitops.h> #include <linux/slab.h> @@ -601,14 +602,6 @@ struct device_type usb_device_type = { #endif }; - -/* Returns 1 if @usb_bus is WUSB, 0 otherwise */ -static unsigned usb_bus_is_wusb(struct usb_bus *bus) -{ - struct usb_hcd *hcd = bus_to_hcd(bus); - return hcd->wireless; -} - static bool usb_dev_authorized(struct usb_device *dev, struct usb_hcd *hcd) { struct usb_hub *hub; @@ -652,7 +645,6 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent, { struct usb_device *dev; struct usb_hcd *usb_hcd = bus_to_hcd(bus); - unsigned root_hub = 0; unsigned raw_port = port1; dev = kzalloc(sizeof(*dev), GFP_KERNEL); @@ -702,7 +694,6 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent, dev->dev.parent = bus->controller; device_set_of_node_from_dev(&dev->dev, bus->sysdev); dev_set_name(&dev->dev, "usb%d", bus->busnum); - root_hub = 1; } else { /* match any labeling on the hubs; it's one-based */ if (parent->devpath[0] == '0') { @@ -748,9 +739,6 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent, #endif dev->authorized = usb_dev_authorized(dev, usb_hcd); - if (!root_hub) - dev->wusb = usb_bus_is_wusb(bus) ? 
1 : 0; - return dev; } EXPORT_SYMBOL_GPL(usb_alloc_dev); @@ -1101,6 +1089,9 @@ static int __init usb_init(void) retval = usb_major_init(); if (retval) goto major_init_failed; + retval = class_register(&usbmisc_class); + if (retval) + goto class_register_failed; retval = usb_register(&usbfs_driver); if (retval) goto driver_register_failed; @@ -1120,6 +1111,8 @@ hub_init_failed: usb_devio_init_failed: usb_deregister(&usbfs_driver); driver_register_failed: + class_unregister(&usbmisc_class); +class_register_failed: usb_major_cleanup(); major_init_failed: bus_unregister_notifier(&usb_bus_type, &usb_bus_nb); @@ -1147,6 +1140,7 @@ static void __exit usb_exit(void) usb_deregister(&usbfs_driver); usb_devio_cleanup(); usb_hub_cleanup(); + class_unregister(&usbmisc_class); bus_unregister_notifier(&usb_bus_type, &usb_bus_nb); bus_unregister(&usb_bus_type); usb_acpi_unregister(); diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h index ffe3f6818e9c..60363153fc3f 100644 --- a/drivers/usb/core/usb.h +++ b/drivers/usb/core/usb.h @@ -43,8 +43,8 @@ extern bool usb_endpoint_is_ignored(struct usb_device *udev, struct usb_endpoint_descriptor *epd); extern int usb_remove_device(struct usb_device *udev); -extern int usb_get_device_descriptor(struct usb_device *dev, - unsigned int size); +extern struct usb_device_descriptor *usb_get_device_descriptor( + struct usb_device *udev); extern int usb_set_isoch_delay(struct usb_device *dev); extern int usb_get_bos_descriptor(struct usb_device *dev); extern void usb_release_bos_descriptor(struct usb_device *dev); @@ -141,6 +141,7 @@ static inline int usb_disable_usb2_hardware_lpm(struct usb_device *udev) #endif +extern const struct class usbmisc_class; extern const struct bus_type usb_bus_type; extern struct mutex usb_port_peer_mutex; extern struct device_type usb_device_type; diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index 8b15742d9e8a..b517a7216de2 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -22,7 +22,6 @@ #include <linux/delay.h> #include <linux/io.h> #include <linux/slab.h> -#include <linux/of_platform.h> #include <linux/usb/ch9.h> #include <linux/usb/gadget.h> diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c index 0a806f80217e..b1d48019e944 100644 --- a/drivers/usb/dwc2/platform.c +++ b/drivers/usb/dwc2/platform.c @@ -11,7 +11,7 @@ #include <linux/clk.h> #include <linux/device.h> #include <linux/dma-mapping.h> -#include <linux/of_device.h> +#include <linux/of.h> #include <linux/mutex.h> #include <linux/platform_device.h> #include <linux/phy/phy.h> diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig index be954a9abbe0..98efcbb76c88 100644 --- a/drivers/usb/dwc3/Kconfig +++ b/drivers/usb/dwc3/Kconfig @@ -168,4 +168,14 @@ config USB_DWC3_AM62 The Designware Core USB3 IP is programmed to operate in in USB 2.0 mode only. Say 'Y' or 'M' here if you have one such device + +config USB_DWC3_OCTEON + tristate "Cavium Octeon Platforms" + depends on CAVIUM_OCTEON_SOC || COMPILE_TEST + default USB_DWC3 + help + Support Cavium Octeon platforms with DesignWare Core USB3 IP. + Only the host mode is currently supported. + Say 'Y' or 'M' here if you have one such device. 
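
Note: with COMPILE_TEST the relocated glue can now be built on targets where unsigned long is only 32 bits wide, which is presumably why the dwc3-octeon.c hunks below convert the 64-bit UCTL register fields from BIT()/GENMASK() to the _ULL variants. A minimal illustration of that pattern (editor's sketch; the helper name is invented, the field position is taken from the driver, the code is not part of this patch):

#include <linux/types.h>
#include <linux/bits.h>
#include <linux/bitfield.h>

/*
 * Insert a 3-bit clock-divider select into a 64-bit UCTL_CTL value.
 * GENMASK_ULL()/FIELD_PREP() keep the shift well-defined even when
 * unsigned long is 32 bits (e.g. under COMPILE_TEST on 32-bit arches).
 */
static u64 uctl_set_clkdiv(u64 uctl_ctl, unsigned int div)
{
	uctl_ctl &= ~GENMASK_ULL(26, 24);		/* H_CLKDIV_SEL field */
	uctl_ctl |= FIELD_PREP(GENMASK_ULL(26, 24), div);
	return uctl_ctl;
}
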
+ endif diff --git a/drivers/usb/dwc3/Makefile b/drivers/usb/dwc3/Makefile index 9f66bd82b639..fe1493d4bbe5 100644 --- a/drivers/usb/dwc3/Makefile +++ b/drivers/usb/dwc3/Makefile @@ -54,3 +54,4 @@ obj-$(CONFIG_USB_DWC3_ST) += dwc3-st.o obj-$(CONFIG_USB_DWC3_QCOM) += dwc3-qcom.o obj-$(CONFIG_USB_DWC3_IMX8MP) += dwc3-imx8mp.o obj-$(CONFIG_USB_DWC3_XILINX) += dwc3-xilinx.o +obj-$(CONFIG_USB_DWC3_OCTEON) += dwc3-octeon.o diff --git a/drivers/usb/dwc3/dwc3-am62.c b/drivers/usb/dwc3/dwc3-am62.c index 1755f2f848c5..94dcbc443cf2 100644 --- a/drivers/usb/dwc3/dwc3-am62.c +++ b/drivers/usb/dwc3/dwc3-am62.c @@ -102,7 +102,7 @@ #define DWC3_AM62_AUTOSUSPEND_DELAY 100 -struct dwc3_data { +struct dwc3_am62 { struct device *dev; void __iomem *usbss; struct clk *usb2_refclk; @@ -129,19 +129,19 @@ static const int dwc3_ti_rate_table[] = { /* in KHZ */ 52000, }; -static inline u32 dwc3_ti_readl(struct dwc3_data *data, u32 offset) +static inline u32 dwc3_ti_readl(struct dwc3_am62 *am62, u32 offset) { - return readl((data->usbss) + offset); + return readl((am62->usbss) + offset); } -static inline void dwc3_ti_writel(struct dwc3_data *data, u32 offset, u32 value) +static inline void dwc3_ti_writel(struct dwc3_am62 *am62, u32 offset, u32 value) { - writel(value, (data->usbss) + offset); + writel(value, (am62->usbss) + offset); } -static int phy_syscon_pll_refclk(struct dwc3_data *data) +static int phy_syscon_pll_refclk(struct dwc3_am62 *am62) { - struct device *dev = data->dev; + struct device *dev = am62->dev; struct device_node *node = dev->of_node; struct of_phandle_args args; struct regmap *syscon; @@ -153,16 +153,16 @@ static int phy_syscon_pll_refclk(struct dwc3_data *data) return PTR_ERR(syscon); } - data->syscon = syscon; + am62->syscon = syscon; ret = of_parse_phandle_with_fixed_args(node, "ti,syscon-phy-pll-refclk", 1, 0, &args); if (ret) return ret; - data->offset = args.args[0]; + am62->offset = args.args[0]; - ret = regmap_update_bits(data->syscon, data->offset, PHY_PLL_REFCLK_MASK, data->rate_code); + ret = regmap_update_bits(am62->syscon, am62->offset, PHY_PLL_REFCLK_MASK, am62->rate_code); if (ret) { dev_err(dev, "failed to set phy pll reference clock rate\n"); return ret; @@ -175,32 +175,32 @@ static int dwc3_ti_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *node = pdev->dev.of_node; - struct dwc3_data *data; + struct dwc3_am62 *am62; int i, ret; unsigned long rate; u32 reg; - data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); - if (!data) + am62 = devm_kzalloc(dev, sizeof(*am62), GFP_KERNEL); + if (!am62) return -ENOMEM; - data->dev = dev; - platform_set_drvdata(pdev, data); + am62->dev = dev; + platform_set_drvdata(pdev, am62); - data->usbss = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(data->usbss)) { + am62->usbss = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(am62->usbss)) { dev_err(dev, "can't map IOMEM resource\n"); - return PTR_ERR(data->usbss); + return PTR_ERR(am62->usbss); } - data->usb2_refclk = devm_clk_get(dev, "ref"); - if (IS_ERR(data->usb2_refclk)) { + am62->usb2_refclk = devm_clk_get(dev, "ref"); + if (IS_ERR(am62->usb2_refclk)) { dev_err(dev, "can't get usb2_refclk\n"); - return PTR_ERR(data->usb2_refclk); + return PTR_ERR(am62->usb2_refclk); } /* Calculate the rate code */ - rate = clk_get_rate(data->usb2_refclk); + rate = clk_get_rate(am62->usb2_refclk); rate /= 1000; // To KHz for (i = 0; i < ARRAY_SIZE(dwc3_ti_rate_table); i++) { if (dwc3_ti_rate_table[i] == rate) @@ -212,20 +212,20 @@ static int 
dwc3_ti_probe(struct platform_device *pdev) return -EINVAL; } - data->rate_code = i; + am62->rate_code = i; /* Read the syscon property and set the rate code */ - ret = phy_syscon_pll_refclk(data); + ret = phy_syscon_pll_refclk(am62); if (ret) return ret; /* VBUS divider select */ - data->vbus_divider = device_property_read_bool(dev, "ti,vbus-divider"); - reg = dwc3_ti_readl(data, USBSS_PHY_CONFIG); - if (data->vbus_divider) + am62->vbus_divider = device_property_read_bool(dev, "ti,vbus-divider"); + reg = dwc3_ti_readl(am62, USBSS_PHY_CONFIG); + if (am62->vbus_divider) reg |= 1 << USBSS_PHY_VBUS_SEL_SHIFT; - dwc3_ti_writel(data, USBSS_PHY_CONFIG, reg); + dwc3_ti_writel(am62, USBSS_PHY_CONFIG, reg); pm_runtime_set_active(dev); pm_runtime_enable(dev); @@ -233,7 +233,7 @@ static int dwc3_ti_probe(struct platform_device *pdev) * Don't ignore its dependencies with its children */ pm_suspend_ignore_children(dev, false); - clk_prepare_enable(data->usb2_refclk); + clk_prepare_enable(am62->usb2_refclk); pm_runtime_get_noresume(dev); ret = of_platform_populate(node, NULL, NULL, dev); @@ -243,9 +243,9 @@ static int dwc3_ti_probe(struct platform_device *pdev) } /* Set mode valid bit to indicate role is valid */ - reg = dwc3_ti_readl(data, USBSS_MODE_CONTROL); + reg = dwc3_ti_readl(am62, USBSS_MODE_CONTROL); reg |= USBSS_MODE_VALID; - dwc3_ti_writel(data, USBSS_MODE_CONTROL, reg); + dwc3_ti_writel(am62, USBSS_MODE_CONTROL, reg); /* Device has capability to wakeup system from sleep */ device_set_wakeup_capable(dev, true); @@ -261,7 +261,7 @@ static int dwc3_ti_probe(struct platform_device *pdev) return 0; err_pm_disable: - clk_disable_unprepare(data->usb2_refclk); + clk_disable_unprepare(am62->usb2_refclk); pm_runtime_disable(dev); pm_runtime_set_suspended(dev); return ret; @@ -278,18 +278,18 @@ static int dwc3_ti_remove_core(struct device *dev, void *c) static void dwc3_ti_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct dwc3_data *data = platform_get_drvdata(pdev); + struct dwc3_am62 *am62 = platform_get_drvdata(pdev); u32 reg; device_for_each_child(dev, NULL, dwc3_ti_remove_core); /* Clear mode valid bit */ - reg = dwc3_ti_readl(data, USBSS_MODE_CONTROL); + reg = dwc3_ti_readl(am62, USBSS_MODE_CONTROL); reg &= ~USBSS_MODE_VALID; - dwc3_ti_writel(data, USBSS_MODE_CONTROL, reg); + dwc3_ti_writel(am62, USBSS_MODE_CONTROL, reg); pm_runtime_put_sync(dev); - clk_disable_unprepare(data->usb2_refclk); + clk_disable_unprepare(am62->usb2_refclk); pm_runtime_disable(dev); pm_runtime_set_suspended(dev); @@ -299,15 +299,15 @@ static void dwc3_ti_remove(struct platform_device *pdev) #ifdef CONFIG_PM static int dwc3_ti_suspend_common(struct device *dev) { - struct dwc3_data *data = dev_get_drvdata(dev); + struct dwc3_am62 *am62 = dev_get_drvdata(dev); u32 reg, current_prtcap_dir; if (device_may_wakeup(dev)) { - reg = dwc3_ti_readl(data, USBSS_CORE_STAT); + reg = dwc3_ti_readl(am62, USBSS_CORE_STAT); current_prtcap_dir = (reg & USBSS_CORE_OPERATIONAL_MODE_MASK) >> USBSS_CORE_OPERATIONAL_MODE_SHIFT; /* Set wakeup config enable bits */ - reg = dwc3_ti_readl(data, USBSS_WAKEUP_CONFIG); + reg = dwc3_ti_readl(am62, USBSS_WAKEUP_CONFIG); if (current_prtcap_dir == DWC3_GCTL_PRTCAP_HOST) { reg = USBSS_WAKEUP_CFG_LINESTATE_EN | USBSS_WAKEUP_CFG_OVERCURRENT_EN; } else { @@ -317,30 +317,30 @@ static int dwc3_ti_suspend_common(struct device *dev) * and in U2/L3 state else it causes spurious wake-up. 
*/ } - dwc3_ti_writel(data, USBSS_WAKEUP_CONFIG, reg); + dwc3_ti_writel(am62, USBSS_WAKEUP_CONFIG, reg); /* clear wakeup status so we know what caused the wake up */ - dwc3_ti_writel(data, USBSS_WAKEUP_STAT, USBSS_WAKEUP_STAT_CLR); + dwc3_ti_writel(am62, USBSS_WAKEUP_STAT, USBSS_WAKEUP_STAT_CLR); } - clk_disable_unprepare(data->usb2_refclk); + clk_disable_unprepare(am62->usb2_refclk); return 0; } static int dwc3_ti_resume_common(struct device *dev) { - struct dwc3_data *data = dev_get_drvdata(dev); + struct dwc3_am62 *am62 = dev_get_drvdata(dev); u32 reg; - clk_prepare_enable(data->usb2_refclk); + clk_prepare_enable(am62->usb2_refclk); if (device_may_wakeup(dev)) { /* Clear wakeup config enable bits */ - dwc3_ti_writel(data, USBSS_WAKEUP_CONFIG, USBSS_WAKEUP_CFG_NONE); + dwc3_ti_writel(am62, USBSS_WAKEUP_CONFIG, USBSS_WAKEUP_CFG_NONE); } - reg = dwc3_ti_readl(data, USBSS_WAKEUP_STAT); - data->wakeup_stat = reg; + reg = dwc3_ti_readl(am62, USBSS_WAKEUP_STAT); + am62->wakeup_stat = reg; return 0; } diff --git a/drivers/usb/dwc3/dwc3-imx8mp.c b/drivers/usb/dwc3/dwc3-imx8mp.c index 8b9a3bb587bf..4285bde58d2e 100644 --- a/drivers/usb/dwc3/dwc3-imx8mp.c +++ b/drivers/usb/dwc3/dwc3-imx8mp.c @@ -10,6 +10,7 @@ #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/of.h> #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> diff --git a/drivers/usb/dwc3/dwc3-keystone.c b/drivers/usb/dwc3/dwc3-keystone.c index 0a09aedc2573..4155e8d5a559 100644 --- a/drivers/usb/dwc3/dwc3-keystone.c +++ b/drivers/usb/dwc3/dwc3-keystone.c @@ -13,6 +13,7 @@ #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/io.h> +#include <linux/of.h> #include <linux/of_platform.h> #include <linux/phy/phy.h> #include <linux/pm_runtime.h> diff --git a/arch/mips/cavium-octeon/octeon-usb.c b/drivers/usb/dwc3/dwc3-octeon.c index 2add435ad038..73bdcebf465c 100644 --- a/arch/mips/cavium-octeon/octeon-usb.c +++ b/drivers/usb/dwc3/dwc3-octeon.c @@ -1,11 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 /* - * XHCI HCD glue for Cavium Octeon III SOCs. + * DWC3 glue for Cavium Octeon III SOCs. * * Copyright (C) 2010-2017 Cavium Networks - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. + * Copyright (C) 2023 RACOM s.r.o. */ #include <linux/bitfield.h> @@ -24,9 +22,9 @@ /* BIST fast-clear mode select. A BIST run with this bit set * clears all entries in USBH RAMs to 0x0. 
*/ -# define USBDRD_UCTL_CTL_CLEAR_BIST BIT(63) +# define USBDRD_UCTL_CTL_CLEAR_BIST BIT_ULL(63) /* 1 = Start BIST and cleared by hardware */ -# define USBDRD_UCTL_CTL_START_BIST BIT(62) +# define USBDRD_UCTL_CTL_START_BIST BIT_ULL(62) /* Reference clock select for SuperSpeed and HighSpeed PLLs: * 0x0 = Both PLLs use DLMC_REF_CLK0 for reference clock * 0x1 = Both PLLs use DLMC_REF_CLK1 for reference clock @@ -35,32 +33,32 @@ * 0x3 = SuperSpeed PLL uses DLMC_REF_CLK1 for reference clock & * HighSpeed PLL uses PLL_REF_CLK for reference clck */ -# define USBDRD_UCTL_CTL_REF_CLK_SEL GENMASK(61, 60) +# define USBDRD_UCTL_CTL_REF_CLK_SEL GENMASK_ULL(61, 60) /* 1 = Spread-spectrum clock enable, 0 = SS clock disable */ -# define USBDRD_UCTL_CTL_SSC_EN BIT(59) +# define USBDRD_UCTL_CTL_SSC_EN BIT_ULL(59) /* Spread-spectrum clock modulation range: * 0x0 = -4980 ppm downspread * 0x1 = -4492 ppm downspread * 0x2 = -4003 ppm downspread * 0x3 - 0x7 = Reserved */ -# define USBDRD_UCTL_CTL_SSC_RANGE GENMASK(58, 56) +# define USBDRD_UCTL_CTL_SSC_RANGE GENMASK_ULL(58, 56) /* Enable non-standard oscillator frequencies: * [55:53] = modules -1 * [52:47] = 2's complement push amount, 0 = Feature disabled */ -# define USBDRD_UCTL_CTL_SSC_REF_CLK_SEL GENMASK(55, 47) +# define USBDRD_UCTL_CTL_SSC_REF_CLK_SEL GENMASK_ULL(55, 47) /* Reference clock multiplier for non-standard frequencies: * 0x19 = 100MHz on DLMC_REF_CLK* if REF_CLK_SEL = 0x0 or 0x1 * 0x28 = 125MHz on DLMC_REF_CLK* if REF_CLK_SEL = 0x0 or 0x1 * 0x32 = 50MHz on DLMC_REF_CLK* if REF_CLK_SEL = 0x0 or 0x1 * Other Values = Reserved */ -# define USBDRD_UCTL_CTL_MPLL_MULTIPLIER GENMASK(46, 40) +# define USBDRD_UCTL_CTL_MPLL_MULTIPLIER GENMASK_ULL(46, 40) /* Enable reference clock to prescaler for SuperSpeed functionality. * Should always be set to "1" */ -# define USBDRD_UCTL_CTL_REF_SSP_EN BIT(39) +# define USBDRD_UCTL_CTL_REF_SSP_EN BIT_ULL(39) /* Divide the reference clock by 2 before entering the * REF_CLK_FSEL divider: * If REF_CLK_SEL = 0x0 or 0x1, then only 0x0 is legal @@ -68,21 +66,21 @@ * 0x1 = DLMC_REF_CLK* is 125MHz * 0x0 = DLMC_REF_CLK* is another supported frequency */ -# define USBDRD_UCTL_CTL_REF_CLK_DIV2 BIT(38) +# define USBDRD_UCTL_CTL_REF_CLK_DIV2 BIT_ULL(38) /* Select reference clock freqnuency for both PLL blocks: * 0x27 = REF_CLK_SEL is 0x0 or 0x1 * 0x07 = REF_CLK_SEL is 0x2 or 0x3 */ -# define USBDRD_UCTL_CTL_REF_CLK_FSEL GENMASK(37, 32) +# define USBDRD_UCTL_CTL_REF_CLK_FSEL GENMASK_ULL(37, 32) /* Controller clock enable. */ -# define USBDRD_UCTL_CTL_H_CLK_EN BIT(30) +# define USBDRD_UCTL_CTL_H_CLK_EN BIT_ULL(30) /* Select bypass input to controller clock divider: * 0x0 = Use divided coprocessor clock from H_CLKDIV * 0x1 = Use clock from GPIO pins */ -# define USBDRD_UCTL_CTL_H_CLK_BYP_SEL BIT(29) +# define USBDRD_UCTL_CTL_H_CLK_BYP_SEL BIT_ULL(29) /* Reset controller clock divider. 
*/ -# define USBDRD_UCTL_CTL_H_CLKDIV_RST BIT(28) +# define USBDRD_UCTL_CTL_H_CLKDIV_RST BIT_ULL(28) /* Clock divider select: * 0x0 = divide by 1 * 0x1 = divide by 2 @@ -93,29 +91,29 @@ * 0x6 = divide by 24 * 0x7 = divide by 32 */ -# define USBDRD_UCTL_CTL_H_CLKDIV_SEL GENMASK(26, 24) +# define USBDRD_UCTL_CTL_H_CLKDIV_SEL GENMASK_ULL(26, 24) /* USB3 port permanently attached: 0x0 = No, 0x1 = Yes */ -# define USBDRD_UCTL_CTL_USB3_PORT_PERM_ATTACH BIT(21) +# define USBDRD_UCTL_CTL_USB3_PORT_PERM_ATTACH BIT_ULL(21) /* USB2 port permanently attached: 0x0 = No, 0x1 = Yes */ -# define USBDRD_UCTL_CTL_USB2_PORT_PERM_ATTACH BIT(20) +# define USBDRD_UCTL_CTL_USB2_PORT_PERM_ATTACH BIT_ULL(20) /* Disable SuperSpeed PHY: 0x0 = No, 0x1 = Yes */ -# define USBDRD_UCTL_CTL_USB3_PORT_DISABLE BIT(18) +# define USBDRD_UCTL_CTL_USB3_PORT_DISABLE BIT_ULL(18) /* Disable HighSpeed PHY: 0x0 = No, 0x1 = Yes */ -# define USBDRD_UCTL_CTL_USB2_PORT_DISABLE BIT(16) +# define USBDRD_UCTL_CTL_USB2_PORT_DISABLE BIT_ULL(16) /* Enable PHY SuperSpeed block power: 0x0 = No, 0x1 = Yes */ -# define USBDRD_UCTL_CTL_SS_POWER_EN BIT(14) +# define USBDRD_UCTL_CTL_SS_POWER_EN BIT_ULL(14) /* Enable PHY HighSpeed block power: 0x0 = No, 0x1 = Yes */ -# define USBDRD_UCTL_CTL_HS_POWER_EN BIT(12) +# define USBDRD_UCTL_CTL_HS_POWER_EN BIT_ULL(12) /* Enable USB UCTL interface clock: 0xx = No, 0x1 = Yes */ -# define USBDRD_UCTL_CTL_CSCLK_EN BIT(4) +# define USBDRD_UCTL_CTL_CSCLK_EN BIT_ULL(4) /* Controller mode: 0x0 = Host, 0x1 = Device */ -# define USBDRD_UCTL_CTL_DRD_MODE BIT(3) +# define USBDRD_UCTL_CTL_DRD_MODE BIT_ULL(3) /* PHY reset */ -# define USBDRD_UCTL_CTL_UPHY_RST BIT(2) +# define USBDRD_UCTL_CTL_UPHY_RST BIT_ULL(2) /* Software reset UAHC */ -# define USBDRD_UCTL_CTL_UAHC_RST BIT(1) +# define USBDRD_UCTL_CTL_UAHC_RST BIT_ULL(1) /* Software resets UCTL */ -# define USBDRD_UCTL_CTL_UCTL_RST BIT(0) +# define USBDRD_UCTL_CTL_UCTL_RST BIT_ULL(0) #define USBDRD_UCTL_BIST_STATUS 0x08 #define USBDRD_UCTL_SPARE0 0x10 @@ -130,64 +128,69 @@ */ #define USBDRD_UCTL_HOST_CFG 0xe0 /* Indicates minimum value of all received BELT values */ -# define USBDRD_UCTL_HOST_CFG_HOST_CURRENT_BELT GENMASK(59, 48) +# define USBDRD_UCTL_HOST_CFG_HOST_CURRENT_BELT GENMASK_ULL(59, 48) /* HS jitter adjustment */ -# define USBDRD_UCTL_HOST_CFG_FLA GENMASK(37, 32) +# define USBDRD_UCTL_HOST_CFG_FLA GENMASK_ULL(37, 32) /* Bus-master enable: 0x0 = Disabled (stall DMAs), 0x1 = enabled */ -# define USBDRD_UCTL_HOST_CFG_BME BIT(28) +# define USBDRD_UCTL_HOST_CFG_BME BIT_ULL(28) /* Overcurrent protection enable: 0x0 = unavailable, 0x1 = available */ -# define USBDRD_UCTL_HOST_OCI_EN BIT(27) +# define USBDRD_UCTL_HOST_OCI_EN BIT_ULL(27) /* Overcurrent sene selection: * 0x0 = Overcurrent indication from off-chip is active-low * 0x1 = Overcurrent indication from off-chip is active-high */ -# define USBDRD_UCTL_HOST_OCI_ACTIVE_HIGH_EN BIT(26) +# define USBDRD_UCTL_HOST_OCI_ACTIVE_HIGH_EN BIT_ULL(26) /* Port power control enable: 0x0 = unavailable, 0x1 = available */ -# define USBDRD_UCTL_HOST_PPC_EN BIT(25) +# define USBDRD_UCTL_HOST_PPC_EN BIT_ULL(25) /* Port power control sense selection: * 0x0 = Port power to off-chip is active-low * 0x1 = Port power to off-chip is active-high */ -# define USBDRD_UCTL_HOST_PPC_ACTIVE_HIGH_EN BIT(24) +# define USBDRD_UCTL_HOST_PPC_ACTIVE_HIGH_EN BIT_ULL(24) /* * UCTL Shim Features Register */ #define USBDRD_UCTL_SHIM_CFG 0xe8 /* Out-of-bound UAHC register access: 0 = read, 1 = write */ -# define USBDRD_UCTL_SHIM_CFG_XS_NCB_OOB_WRN 
BIT(63) +# define USBDRD_UCTL_SHIM_CFG_XS_NCB_OOB_WRN BIT_ULL(63) /* SRCID error log for out-of-bound UAHC register access: * [59:58] = chipID * [57] = Request source: 0 = core, 1 = NCB-device * [56:51] = Core/NCB-device number, [56] always 0 for NCB devices * [50:48] = SubID */ -# define USBDRD_UCTL_SHIM_CFG_XS_NCB_OOB_OSRC GENMASK(59, 48) +# define USBDRD_UCTL_SHIM_CFG_XS_NCB_OOB_OSRC GENMASK_ULL(59, 48) /* Error log for bad UAHC DMA access: 0 = Read log, 1 = Write log */ -# define USBDRD_UCTL_SHIM_CFG_XM_BAD_DMA_WRN BIT(47) +# define USBDRD_UCTL_SHIM_CFG_XM_BAD_DMA_WRN BIT_ULL(47) /* Encoded error type for bad UAHC DMA */ -# define USBDRD_UCTL_SHIM_CFG_XM_BAD_DMA_TYPE GENMASK(43, 40) +# define USBDRD_UCTL_SHIM_CFG_XM_BAD_DMA_TYPE GENMASK_ULL(43, 40) /* Select the IOI read command used by DMA accesses */ -# define USBDRD_UCTL_SHIM_CFG_DMA_READ_CMD BIT(12) +# define USBDRD_UCTL_SHIM_CFG_DMA_READ_CMD BIT_ULL(12) /* Select endian format for DMA accesses to the L2C: * 0x0 = Little endian * 0x1 = Big endian * 0x2 = Reserved * 0x3 = Reserved */ -# define USBDRD_UCTL_SHIM_CFG_DMA_ENDIAN_MODE GENMASK(9, 8) +# define USBDRD_UCTL_SHIM_CFG_DMA_ENDIAN_MODE GENMASK_ULL(9, 8) /* Select endian format for IOI CSR access to UAHC: * 0x0 = Little endian * 0x1 = Big endian * 0x2 = Reserved * 0x3 = Reserved */ -# define USBDRD_UCTL_SHIM_CFG_CSR_ENDIAN_MODE GENMASK(1, 0) +# define USBDRD_UCTL_SHIM_CFG_CSR_ENDIAN_MODE GENMASK_ULL(1, 0) #define USBDRD_UCTL_ECC 0xf0 #define USBDRD_UCTL_SPARE1 0xf8 -static DEFINE_MUTEX(dwc3_octeon_clocks_mutex); +struct dwc3_octeon { + struct device *dev; + void __iomem *base; +}; + +#define DWC3_GPIO_POWER_NONE (-1) #ifdef CONFIG_CAVIUM_OCTEON_SOC #include <asm/octeon/octeon.h> @@ -233,6 +236,11 @@ static inline uint64_t dwc3_octeon_readq(void __iomem *addr) static inline void dwc3_octeon_writeq(void __iomem *base, uint64_t val) { } static inline void dwc3_octeon_config_gpio(int index, int gpio) { } + +static uint64_t octeon_get_io_clock_rate(void) +{ + return 150000000; +} #endif static int dwc3_octeon_get_divider(void) @@ -243,115 +251,22 @@ static int dwc3_octeon_get_divider(void) while (div < ARRAY_SIZE(clk_div)) { uint64_t rate = octeon_get_io_clock_rate() / clk_div[div]; if (rate <= 300000000 && rate >= 150000000) - break; + return div; div++; } - return div; + return -EINVAL; } -static int dwc3_octeon_config_power(struct device *dev, void __iomem *base) +static int dwc3_octeon_setup(struct dwc3_octeon *octeon, + int ref_clk_sel, int ref_clk_fsel, int mpll_mul, + int power_gpio, int power_active_low) { - uint32_t gpio_pwr[3]; - int gpio, len, power_active_low; - struct device_node *node = dev->of_node; u64 val; - void __iomem *uctl_host_cfg_reg = base + USBDRD_UCTL_HOST_CFG; - - if (of_find_property(node, "power", &len) != NULL) { - if (len == 12) { - of_property_read_u32_array(node, "power", gpio_pwr, 3); - power_active_low = gpio_pwr[2] & 0x01; - gpio = gpio_pwr[1]; - } else if (len == 8) { - of_property_read_u32_array(node, "power", gpio_pwr, 2); - power_active_low = 0; - gpio = gpio_pwr[1]; - } else { - dev_err(dev, "invalid power configuration\n"); - return -EINVAL; - } - dwc3_octeon_config_gpio(((u64)base >> 24) & 1, gpio); - - /* Enable XHCI power control and set if active high or low. 
*/ - val = dwc3_octeon_readq(uctl_host_cfg_reg); - val |= USBDRD_UCTL_HOST_PPC_EN; - if (power_active_low) - val &= ~USBDRD_UCTL_HOST_PPC_ACTIVE_HIGH_EN; - else - val |= USBDRD_UCTL_HOST_PPC_ACTIVE_HIGH_EN; - dwc3_octeon_writeq(uctl_host_cfg_reg, val); - } else { - /* Disable XHCI power control and set if active high. */ - val = dwc3_octeon_readq(uctl_host_cfg_reg); - val &= ~USBDRD_UCTL_HOST_PPC_EN; - val &= ~USBDRD_UCTL_HOST_PPC_ACTIVE_HIGH_EN; - dwc3_octeon_writeq(uctl_host_cfg_reg, val); - dev_info(dev, "power control disabled\n"); - } - return 0; -} - -static int dwc3_octeon_clocks_start(struct device *dev, void __iomem *base) -{ - int i, div, mpll_mul, ref_clk_fsel, ref_clk_sel = 2; - u32 clock_rate; - u64 val; - void __iomem *uctl_ctl_reg = base + USBDRD_UCTL_CTL; - - if (dev->of_node) { - const char *ss_clock_type; - const char *hs_clock_type; - - i = of_property_read_u32(dev->of_node, - "refclk-frequency", &clock_rate); - if (i) { - dev_err(dev, "No UCTL \"refclk-frequency\"\n"); - return -EINVAL; - } - i = of_property_read_string(dev->of_node, - "refclk-type-ss", &ss_clock_type); - if (i) { - dev_err(dev, "No UCTL \"refclk-type-ss\"\n"); - return -EINVAL; - } - i = of_property_read_string(dev->of_node, - "refclk-type-hs", &hs_clock_type); - if (i) { - dev_err(dev, "No UCTL \"refclk-type-hs\"\n"); - return -EINVAL; - } - if (strcmp("dlmc_ref_clk0", ss_clock_type) == 0) { - if (strcmp(hs_clock_type, "dlmc_ref_clk0") == 0) - ref_clk_sel = 0; - else if (strcmp(hs_clock_type, "pll_ref_clk") == 0) - ref_clk_sel = 2; - else - dev_warn(dev, "Invalid HS clock type %s, using pll_ref_clk instead\n", - hs_clock_type); - } else if (strcmp(ss_clock_type, "dlmc_ref_clk1") == 0) { - if (strcmp(hs_clock_type, "dlmc_ref_clk1") == 0) - ref_clk_sel = 1; - else if (strcmp(hs_clock_type, "pll_ref_clk") == 0) - ref_clk_sel = 3; - else { - dev_warn(dev, "Invalid HS clock type %s, using pll_ref_clk instead\n", - hs_clock_type); - ref_clk_sel = 3; - } - } else - dev_warn(dev, "Invalid SS clock type %s, using dlmc_ref_clk0 instead\n", - ss_clock_type); - - if ((ref_clk_sel == 0 || ref_clk_sel == 1) && - (clock_rate != 100000000)) - dev_warn(dev, "Invalid UCTL clock rate of %u, using 100000000 instead\n", - clock_rate); - - } else { - dev_err(dev, "No USB UCTL device node\n"); - return -EINVAL; - } + int div; + struct device *dev = octeon->dev; + void __iomem *uctl_ctl_reg = octeon->base + USBDRD_UCTL_CTL; + void __iomem *uctl_host_cfg_reg = octeon->base + USBDRD_UCTL_HOST_CFG; /* * Step 1: Wait for all voltages to be stable...that surely @@ -374,6 +289,10 @@ static int dwc3_octeon_clocks_start(struct device *dev, void __iomem *base) /* Step 4b: Select controller clock frequency. */ div = dwc3_octeon_get_divider(); + if (div < 0) { + dev_err(dev, "clock divider invalid\n"); + return div; + } val = dwc3_octeon_readq(uctl_ctl_reg); val &= ~USBDRD_UCTL_CTL_H_CLKDIV_SEL; val |= FIELD_PREP(USBDRD_UCTL_CTL_H_CLKDIV_SEL, div); @@ -382,8 +301,8 @@ static int dwc3_octeon_clocks_start(struct device *dev, void __iomem *base) val = dwc3_octeon_readq(uctl_ctl_reg); if ((div != FIELD_GET(USBDRD_UCTL_CTL_H_CLKDIV_SEL, val)) || (!(FIELD_GET(USBDRD_UCTL_CTL_H_CLK_EN, val)))) { - dev_err(dev, "dwc3 controller clock init failure.\n"); - return -EINVAL; + dev_err(dev, "clock init failure (UCTL_CTL=%016llx)\n", val); + return -EINVAL; } /* Step 4c: Deassert the controller clock divider reset. 
*/ @@ -396,24 +315,6 @@ static int dwc3_octeon_clocks_start(struct device *dev, void __iomem *base) val &= ~USBDRD_UCTL_CTL_REF_CLK_SEL; val |= FIELD_PREP(USBDRD_UCTL_CTL_REF_CLK_SEL, ref_clk_sel); - ref_clk_fsel = 0x07; - switch (clock_rate) { - default: - dev_warn(dev, "Invalid ref_clk %u, using 100000000 instead\n", - clock_rate); - fallthrough; - case 100000000: - mpll_mul = 0x19; - if (ref_clk_sel < 2) - ref_clk_fsel = 0x27; - break; - case 50000000: - mpll_mul = 0x32; - break; - case 125000000: - mpll_mul = 0x28; - break; - } val &= ~USBDRD_UCTL_CTL_REF_CLK_FSEL; val |= FIELD_PREP(USBDRD_UCTL_CTL_REF_CLK_FSEL, ref_clk_fsel); @@ -444,9 +345,22 @@ static int dwc3_octeon_clocks_start(struct device *dev, void __iomem *base) /* Step 8b: Wait 10 controller-clock cycles. */ udelay(10); - /* Steo 8c: Setup power-power control. */ - if (dwc3_octeon_config_power(dev, base)) - return -EINVAL; + /* Step 8c: Setup power control. */ + val = dwc3_octeon_readq(uctl_host_cfg_reg); + val |= USBDRD_UCTL_HOST_PPC_EN; + if (power_gpio == DWC3_GPIO_POWER_NONE) { + val &= ~USBDRD_UCTL_HOST_PPC_EN; + } else { + val |= USBDRD_UCTL_HOST_PPC_EN; + dwc3_octeon_config_gpio(((__force uintptr_t)octeon->base >> 24) & 1, + power_gpio); + dev_dbg(dev, "power control is using gpio%d\n", power_gpio); + } + if (power_active_low) + val &= ~USBDRD_UCTL_HOST_PPC_ACTIVE_HIGH_EN; + else + val |= USBDRD_UCTL_HOST_PPC_ACTIVE_HIGH_EN; + dwc3_octeon_writeq(uctl_host_cfg_reg, val); /* Step 8d: Deassert UAHC reset signal. */ val = dwc3_octeon_readq(uctl_ctl_reg); @@ -469,10 +383,10 @@ static int dwc3_octeon_clocks_start(struct device *dev, void __iomem *base) return 0; } -static void __init dwc3_octeon_set_endian_mode(void __iomem *base) +static void dwc3_octeon_set_endian_mode(struct dwc3_octeon *octeon) { u64 val; - void __iomem *uctl_shim_cfg_reg = base + USBDRD_UCTL_SHIM_CFG; + void __iomem *uctl_shim_cfg_reg = octeon->base + USBDRD_UCTL_SHIM_CFG; val = dwc3_octeon_readq(uctl_shim_cfg_reg); val &= ~USBDRD_UCTL_SHIM_CFG_DMA_ENDIAN_MODE; @@ -484,68 +398,147 @@ static void __init dwc3_octeon_set_endian_mode(void __iomem *base) dwc3_octeon_writeq(uctl_shim_cfg_reg, val); } -static void __init dwc3_octeon_phy_reset(void __iomem *base) +static void dwc3_octeon_phy_reset(struct dwc3_octeon *octeon) { u64 val; - void __iomem *uctl_ctl_reg = base + USBDRD_UCTL_CTL; + void __iomem *uctl_ctl_reg = octeon->base + USBDRD_UCTL_CTL; val = dwc3_octeon_readq(uctl_ctl_reg); val &= ~USBDRD_UCTL_CTL_UPHY_RST; dwc3_octeon_writeq(uctl_ctl_reg, val); } -static int __init dwc3_octeon_device_init(void) +static int dwc3_octeon_probe(struct platform_device *pdev) { - const char compat_node_name[] = "cavium,octeon-7130-usb-uctl"; - struct platform_device *pdev; - struct device_node *node; - struct resource *res; - void __iomem *base; + struct device *dev = &pdev->dev; + struct device_node *node = dev->of_node; + struct dwc3_octeon *octeon; + const char *hs_clock_type, *ss_clock_type; + int ref_clk_sel, ref_clk_fsel, mpll_mul; + int power_active_low, power_gpio; + int err, len; + u32 clock_rate; - /* - * There should only be three universal controllers, "uctl" - * in the device tree. Two USB and a SATA, which we ignore. - */ - node = NULL; - do { - node = of_find_node_by_name(node, "uctl"); - if (!node) - return -ENODEV; - - if (of_device_is_compatible(node, compat_node_name)) { - pdev = of_find_device_by_node(node); - if (!pdev) - return -ENODEV; - - /* - * The code below maps in the registers necessary for - * setting up the clocks and reseting PHYs. 
We must - * release the resources so the dwc3 subsystem doesn't - * know the difference. - */ - base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); - if (IS_ERR(base)) { - put_device(&pdev->dev); - return PTR_ERR(base); - } - - mutex_lock(&dwc3_octeon_clocks_mutex); - if (dwc3_octeon_clocks_start(&pdev->dev, base) == 0) - dev_info(&pdev->dev, "clocks initialized.\n"); - dwc3_octeon_set_endian_mode(base); - dwc3_octeon_phy_reset(base); - mutex_unlock(&dwc3_octeon_clocks_mutex); - devm_iounmap(&pdev->dev, base); - devm_release_mem_region(&pdev->dev, res->start, - resource_size(res)); - put_device(&pdev->dev); + if (of_property_read_u32(node, "refclk-frequency", &clock_rate)) { + dev_err(dev, "No UCTL \"refclk-frequency\"\n"); + return -EINVAL; + } + if (of_property_read_string(node, "refclk-type-ss", &ss_clock_type)) { + dev_err(dev, "No UCTL \"refclk-type-ss\"\n"); + return -EINVAL; + } + if (of_property_read_string(node, "refclk-type-hs", &hs_clock_type)) { + dev_err(dev, "No UCTL \"refclk-type-hs\"\n"); + return -EINVAL; + } + + ref_clk_sel = 2; + if (strcmp("dlmc_ref_clk0", ss_clock_type) == 0) { + if (strcmp(hs_clock_type, "dlmc_ref_clk0") == 0) + ref_clk_sel = 0; + else if (strcmp(hs_clock_type, "pll_ref_clk")) + dev_warn(dev, "Invalid HS clock type %s, using pll_ref_clk instead\n", + hs_clock_type); + } else if (strcmp(ss_clock_type, "dlmc_ref_clk1") == 0) { + if (strcmp(hs_clock_type, "dlmc_ref_clk1") == 0) { + ref_clk_sel = 1; + } else { + ref_clk_sel = 3; + if (strcmp(hs_clock_type, "pll_ref_clk")) + dev_warn(dev, "Invalid HS clock type %s, using pll_ref_clk instead\n", + hs_clock_type); } - } while (node != NULL); + } else { + dev_warn(dev, "Invalid SS clock type %s, using dlmc_ref_clk0 instead\n", + ss_clock_type); + } - return 0; + ref_clk_fsel = 0x07; + switch (clock_rate) { + default: + dev_warn(dev, "Invalid ref_clk %u, using 100000000 instead\n", + clock_rate); + fallthrough; + case 100000000: + mpll_mul = 0x19; + if (ref_clk_sel < 2) + ref_clk_fsel = 0x27; + break; + case 50000000: + mpll_mul = 0x32; + break; + case 125000000: + mpll_mul = 0x28; + break; + } + + power_gpio = DWC3_GPIO_POWER_NONE; + power_active_low = 0; + if (of_find_property(node, "power", &len)) { + u32 gpio_pwr[3]; + + switch (len) { + case 8: + of_property_read_u32_array(node, "power", gpio_pwr, 2); + break; + case 12: + of_property_read_u32_array(node, "power", gpio_pwr, 3); + power_active_low = gpio_pwr[2] & 0x01; + break; + default: + dev_err(dev, "invalid power configuration\n"); + return -EINVAL; + } + power_gpio = gpio_pwr[1]; + } + + octeon = devm_kzalloc(dev, sizeof(*octeon), GFP_KERNEL); + if (!octeon) + return -ENOMEM; + + octeon->dev = dev; + octeon->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(octeon->base)) + return PTR_ERR(octeon->base); + + err = dwc3_octeon_setup(octeon, ref_clk_sel, ref_clk_fsel, mpll_mul, + power_gpio, power_active_low); + if (err) + return err; + + dwc3_octeon_set_endian_mode(octeon); + dwc3_octeon_phy_reset(octeon); + + platform_set_drvdata(pdev, octeon); + + return of_platform_populate(node, NULL, NULL, dev); +} + +static void dwc3_octeon_remove(struct platform_device *pdev) +{ + struct dwc3_octeon *octeon = platform_get_drvdata(pdev); + + of_platform_depopulate(octeon->dev); + platform_set_drvdata(pdev, NULL); } -device_initcall(dwc3_octeon_device_init); -MODULE_AUTHOR("David Daney <[email protected]>"); +static const struct of_device_id dwc3_octeon_of_match[] = { + { .compatible = "cavium,octeon-7130-usb-uctl" }, + { }, +}; 
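
Note: the glue now binds through this OF match table; the same "cavium,octeon-7130-usb-uctl" compatible is dropped from dwc3-of-simple in a later hunk so that only one driver claims the node. The module_platform_driver() helper used just below is roughly equivalent to the following boilerplate (illustrative expansion, not code from this patch):

/* Roughly what module_platform_driver(dwc3_octeon_driver) expands to. */
static int __init dwc3_octeon_driver_init(void)
{
	return platform_driver_register(&dwc3_octeon_driver);
}
module_init(dwc3_octeon_driver_init);

static void __exit dwc3_octeon_driver_exit(void)
{
	platform_driver_unregister(&dwc3_octeon_driver);
}
module_exit(dwc3_octeon_driver_exit);
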
+MODULE_DEVICE_TABLE(of, dwc3_octeon_of_match); + +static struct platform_driver dwc3_octeon_driver = { + .probe = dwc3_octeon_probe, + .remove_new = dwc3_octeon_remove, + .driver = { + .name = "dwc3-octeon", + .of_match_table = dwc3_octeon_of_match, + }, +}; +module_platform_driver(dwc3_octeon_driver); + +MODULE_ALIAS("platform:dwc3-octeon"); +MODULE_AUTHOR("Ladislav Michl <[email protected]>"); MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("USB driver for OCTEON III SoC"); +MODULE_DESCRIPTION("DesignWare USB3 OCTEON III Glue Layer"); diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c index 7e6ad8fe61a5..d1539fc9eabd 100644 --- a/drivers/usb/dwc3/dwc3-of-simple.c +++ b/drivers/usb/dwc3/dwc3-of-simple.c @@ -170,7 +170,6 @@ static const struct dev_pm_ops dwc3_of_simple_dev_pm_ops = { static const struct of_device_id of_dwc3_simple_match[] = { { .compatible = "rockchip,rk3399-dwc3" }, - { .compatible = "cavium,octeon-7130-usb-uctl" }, { .compatible = "sprd,sc9860-dwc3" }, { .compatible = "allwinner,sun50i-h6-dwc3" }, { .compatible = "hisilicon,hi3670-dwc3" }, diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 5fd067151fbf..858fe4c299b7 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -4455,9 +4455,14 @@ static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt) u32 count; if (pm_runtime_suspended(dwc->dev)) { + dwc->pending_events = true; + /* + * Trigger runtime resume. The get() function will be balanced + * after processing the pending events in dwc3_process_pending + * events(). + */ pm_runtime_get(dwc->dev); disable_irq_nosync(dwc->irq_gadget); - dwc->pending_events = true; return IRQ_HANDLED; } @@ -4718,6 +4723,8 @@ void dwc3_gadget_process_pending_events(struct dwc3 *dwc) { if (dwc->pending_events) { dwc3_interrupt(dwc->irq_gadget, dwc->ev_buf); + dwc3_thread_interrupt(dwc->irq_gadget, dwc->ev_buf); + pm_runtime_put(dwc->dev); dwc->pending_events = false; enable_irq(dwc->irq_gadget); } diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig index 336db8f92afa..b3592bcb0f96 100644 --- a/drivers/usb/gadget/Kconfig +++ b/drivers/usb/gadget/Kconfig @@ -208,6 +208,9 @@ config USB_F_UVC config USB_F_MIDI tristate +config USB_F_MIDI2 + tristate + config USB_F_HID tristate @@ -436,6 +439,21 @@ config USB_CONFIGFS_F_MIDI connections can then be made on the gadget system, using ALSA's aconnect utility etc. +config USB_CONFIGFS_F_MIDI2 + bool "MIDI 2.0 function" + depends on USB_CONFIGFS + depends on SND + select USB_LIBCOMPOSITE + select SND_UMP + select SND_UMP_LEGACY_RAWMIDI + select USB_F_MIDI2 + help + The MIDI 2.0 function driver provides the generic emulated + USB MIDI 2.0 interface, looped back to ALSA UMP rawmidi + device on the gadget host. It supports UMP 1.1 spec and + responds UMP Stream messages for UMP Endpoint and Function + Block information / configuration. 
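
Note: the UMP data this function loops back is carried as 32-bit words; a MIDI 2.0 Note On, for instance, occupies two words under message type 0x4. A minimal sketch of packing one (hypothetical helper; field layout follows the UMP specification, not code from this patch):

#include <stdint.h>

/* Hypothetical helper: pack a UMP MIDI 2.0 Note On into two 32-bit words. */
static void ump_note_on(uint32_t ump[2], unsigned int group,
			unsigned int channel, unsigned int note,
			uint16_t velocity)
{
	ump[0] = (0x4u << 28) |			/* message type: MIDI 2.0 channel voice */
		 ((group & 0xf) << 24) |
		 (0x9u << 20) |			/* status: Note On */
		 ((channel & 0xf) << 16) |
		 ((note & 0x7f) << 8);		/* attribute type 0: none */
	ump[1] = (uint32_t)velocity << 16;	/* 16-bit velocity, no attribute data */
}
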
+ config USB_CONFIGFS_F_HID bool "HID function" depends on USB_CONFIGFS diff --git a/drivers/usb/gadget/function/Makefile b/drivers/usb/gadget/function/Makefile index 5d3a6cf02218..87917a7d4a9b 100644 --- a/drivers/usb/gadget/function/Makefile +++ b/drivers/usb/gadget/function/Makefile @@ -44,6 +44,8 @@ usb_f_uvc-y := f_uvc.o uvc_queue.o uvc_v4l2.o uvc_video.o uvc_configfs.o obj-$(CONFIG_USB_F_UVC) += usb_f_uvc.o usb_f_midi-y := f_midi.o obj-$(CONFIG_USB_F_MIDI) += usb_f_midi.o +usb_f_midi2-y := f_midi2.o +obj-$(CONFIG_USB_F_MIDI2) += usb_f_midi2.o usb_f_hid-y := f_hid.o obj-$(CONFIG_USB_F_HID) += usb_f_hid.o usb_f_printer-y := f_printer.o diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c index da07e45ae6df..722a3ab2b337 100644 --- a/drivers/usb/gadget/function/f_mass_storage.c +++ b/drivers/usb/gadget/function/f_mass_storage.c @@ -927,7 +927,7 @@ static void invalidate_sub(struct fsg_lun *curlun) { struct file *filp = curlun->filp; struct inode *inode = file_inode(filp); - unsigned long rc; + unsigned long __maybe_unused rc; rc = invalidate_mapping_pages(inode->i_mapping, 0, -1); VLDBG(curlun, "invalidate_mapping_pages -> %ld\n", rc); diff --git a/drivers/usb/gadget/function/f_midi2.c b/drivers/usb/gadget/function/f_midi2.c new file mode 100644 index 000000000000..5a971ba600fe --- /dev/null +++ b/drivers/usb/gadget/function/f_midi2.c @@ -0,0 +1,2883 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * f_midi2.c -- USB MIDI 2.0 class function driver + */ + +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> + +#include <sound/core.h> +#include <sound/control.h> +#include <sound/ump.h> +#include <sound/ump_msg.h> +#include <sound/ump_convert.h> + +#include <linux/usb/ch9.h> +#include <linux/usb/gadget.h> +#include <linux/usb/audio.h> +#include <linux/usb/midi-v2.h> + +#include "u_f.h" +#include "u_midi2.h" + +struct f_midi2; +struct f_midi2_ep; +struct f_midi2_usb_ep; + +/* Context for each USB request */ +struct f_midi2_req_ctx { + struct f_midi2_usb_ep *usb_ep; /* belonging USB EP */ + unsigned int index; /* array index: 0-31 */ + struct usb_request *req; /* assigned request */ +}; + +/* Resources for a USB Endpoint */ +struct f_midi2_usb_ep { + struct f_midi2 *card; /* belonging card */ + struct f_midi2_ep *ep; /* belonging UMP EP (optional) */ + struct usb_ep *usb_ep; /* assigned USB EP */ + void (*complete)(struct usb_ep *usb_ep, struct usb_request *req); + unsigned long free_reqs; /* bitmap for unused requests */ + unsigned int num_reqs; /* number of allocated requests */ + struct f_midi2_req_ctx *reqs; /* request context array */ +}; + +/* Resources for UMP Function Block (and USB Group Terminal Block) */ +struct f_midi2_block { + struct f_midi2_block_info info; /* FB info, copied from configfs */ + struct snd_ump_block *fb; /* assigned FB */ + unsigned int gtb_id; /* assigned GTB id */ + unsigned int string_id; /* assigned string id */ +}; + +/* Temporary buffer for altset 0 MIDI 1.0 handling */ +struct f_midi2_midi1_port { + unsigned int pending; /* pending bytes on the input buffer */ + u8 buf[32]; /* raw MIDI 1.0 byte input */ + u8 state; /* running status */ + u8 data[2]; /* rendered USB MIDI 1.0 packet data */ +}; + +/* MIDI 1.0 message states */ +enum { + STATE_INITIAL = 0, /* pseudo state */ + STATE_1PARAM, + STATE_2PARAM_1, + STATE_2PARAM_2, + STATE_SYSEX_0, + STATE_SYSEX_1, + STATE_SYSEX_2, + STATE_REAL_TIME, + STATE_FINISHED, /* pseudo state */ +}; + +/* Resources for 
UMP Endpoint */ +struct f_midi2_ep { + struct snd_ump_endpoint *ump; /* assigned UMP EP */ + struct f_midi2 *card; /* belonging MIDI 2.0 device */ + + struct f_midi2_ep_info info; /* UMP EP info, copied from configfs */ + unsigned int num_blks; /* number of FBs */ + struct f_midi2_block blks[SNDRV_UMP_MAX_BLOCKS]; /* UMP FBs */ + + struct f_midi2_usb_ep ep_in; /* USB MIDI EP-in */ + struct f_midi2_usb_ep ep_out; /* USB MIDI EP-out */ + + u8 in_group_to_cable[SNDRV_UMP_MAX_GROUPS]; /* map to cable; 1-based! */ +}; + +/* indices for USB strings */ +enum { + STR_IFACE = 0, + STR_GTB1 = 1, +}; + +/* 1-based GTB id to string id */ +#define gtb_to_str_id(id) (STR_GTB1 + (id) - 1) + +/* mapping from MIDI 1.0 cable to UMP group */ +struct midi1_cable_mapping { + struct f_midi2_ep *ep; + unsigned char block; + unsigned char group; +}; + +/* operation mode */ +enum { + MIDI_OP_MODE_UNSET, /* no altset set yet */ + MIDI_OP_MODE_MIDI1, /* MIDI 1.0 (altset 0) is used */ + MIDI_OP_MODE_MIDI2, /* MIDI 2.0 (altset 1) is used */ +}; + +/* Resources for MIDI 2.0 Device */ +struct f_midi2 { + struct usb_function func; + struct usb_gadget *gadget; + struct snd_card *card; + + /* MIDI 1.0 in/out USB EPs */ + struct f_midi2_usb_ep midi1_ep_in; + struct f_midi2_usb_ep midi1_ep_out; + + /* number of MIDI 1.0 I/O cables */ + unsigned int num_midi1_in; + unsigned int num_midi1_out; + + /* conversion for MIDI 1.0 EP-in */ + struct f_midi2_midi1_port midi1_port[MAX_CABLES]; + /* conversion for MIDI 1.0 EP-out */ + struct ump_cvt_to_ump midi1_ump_cvt; + /* mapping between cables and UMP groups */ + struct midi1_cable_mapping in_cable_mapping[MAX_CABLES]; + struct midi1_cable_mapping out_cable_mapping[MAX_CABLES]; + + int midi_if; /* USB MIDI interface number */ + int operation_mode; /* current operation mode */ + + spinlock_t queue_lock; + + struct f_midi2_card_info info; /* card info, copied from configfs */ + + unsigned int num_eps; + struct f_midi2_ep midi2_eps[MAX_UMP_EPS]; + + unsigned int total_blocks; /* total number of blocks of all EPs */ + struct usb_string *string_defs; + struct usb_string *strings; +}; + +#define func_to_midi2(f) container_of(f, struct f_midi2, func) + +/* get EP name string */ +static const char *ump_ep_name(const struct f_midi2_ep *ep) +{ + return ep->info.ep_name ? ep->info.ep_name : "MIDI 2.0 Gadget"; +} + +/* get EP product ID string */ +static const char *ump_product_id(const struct f_midi2_ep *ep) +{ + return ep->info.product_id ? ep->info.product_id : "Unique Product ID"; +} + +/* get FB name string */ +static const char *ump_fb_name(const struct f_midi2_block_info *info) +{ + return info->name ? 
info->name : "MIDI 2.0 Gadget I/O"; +} + +/* + * USB Descriptor Definitions + */ +/* GTB header descriptor */ +static struct usb_ms20_gr_trm_block_header_descriptor gtb_header_desc = { + .bLength = sizeof(gtb_header_desc), + .bDescriptorType = USB_DT_CS_GR_TRM_BLOCK, + .bDescriptorSubtype = USB_MS_GR_TRM_BLOCK_HEADER, + .wTotalLength = __cpu_to_le16(0x12), // to be filled +}; + +/* GTB descriptor template: most items are replaced dynamically */ +static struct usb_ms20_gr_trm_block_descriptor gtb_desc = { + .bLength = sizeof(gtb_desc), + .bDescriptorType = USB_DT_CS_GR_TRM_BLOCK, + .bDescriptorSubtype = USB_MS_GR_TRM_BLOCK, + .bGrpTrmBlkID = 0x01, + .bGrpTrmBlkType = USB_MS_GR_TRM_BLOCK_TYPE_BIDIRECTIONAL, + .nGroupTrm = 0x00, + .nNumGroupTrm = 1, + .iBlockItem = 0, + .bMIDIProtocol = USB_MS_MIDI_PROTO_1_0_64, + .wMaxInputBandwidth = 0, + .wMaxOutputBandwidth = 0, +}; + +DECLARE_USB_MIDI_OUT_JACK_DESCRIPTOR(1); +DECLARE_USB_MS_ENDPOINT_DESCRIPTOR(16); +DECLARE_UAC_AC_HEADER_DESCRIPTOR(1); +DECLARE_USB_MS20_ENDPOINT_DESCRIPTOR(32); + +#define EP_MAX_PACKET_INT 8 + +/* Audio Control Interface */ +static struct usb_interface_descriptor midi2_audio_if_desc = { + .bLength = USB_DT_INTERFACE_SIZE, + .bDescriptorType = USB_DT_INTERFACE, + .bInterfaceNumber = 0, // to be filled + .bNumEndpoints = 0, + .bInterfaceClass = USB_CLASS_AUDIO, + .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL, + .bInterfaceProtocol = 0, + .iInterface = 0, +}; + +static struct uac1_ac_header_descriptor_1 midi2_audio_class_desc = { + .bLength = 0x09, + .bDescriptorType = USB_DT_CS_INTERFACE, + .bDescriptorSubtype = 0x01, + .bcdADC = __cpu_to_le16(0x0100), + .wTotalLength = __cpu_to_le16(0x0009), + .bInCollection = 0x01, + .baInterfaceNr = { 0x01 }, // to be filled +}; + +/* MIDI 1.0 Streaming Interface (altset 0) */ +static struct usb_interface_descriptor midi2_midi1_if_desc = { + .bLength = USB_DT_INTERFACE_SIZE, + .bDescriptorType = USB_DT_INTERFACE, + .bInterfaceNumber = 0, // to be filled + .bAlternateSetting = 0, + .bNumEndpoints = 2, // to be filled + .bInterfaceClass = USB_CLASS_AUDIO, + .bInterfaceSubClass = USB_SUBCLASS_MIDISTREAMING, + .bInterfaceProtocol = 0, + .iInterface = 0, // to be filled +}; + +static struct usb_ms_header_descriptor midi2_midi1_class_desc = { + .bLength = 0x07, + .bDescriptorType = USB_DT_CS_INTERFACE, + .bDescriptorSubtype = USB_MS_HEADER, + .bcdMSC = __cpu_to_le16(0x0100), + .wTotalLength = __cpu_to_le16(0x41), // to be calculated +}; + +/* MIDI 1.0 EP OUT */ +static struct usb_endpoint_descriptor midi2_midi1_ep_out_desc = { + .bLength = USB_DT_ENDPOINT_AUDIO_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_OUT | 0, // set up dynamically + .bmAttributes = USB_ENDPOINT_XFER_BULK, +}; + +static struct usb_ss_ep_comp_descriptor midi2_midi1_ep_out_ss_comp_desc = { + .bLength = sizeof(midi2_midi1_ep_out_ss_comp_desc), + .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, +}; + +static struct usb_ms_endpoint_descriptor_16 midi2_midi1_ep_out_class_desc = { + .bLength = 0x05, // to be filled + .bDescriptorType = USB_DT_CS_ENDPOINT, + .bDescriptorSubtype = USB_MS_GENERAL, + .bNumEmbMIDIJack = 1, + .baAssocJackID = { 0x01 }, +}; + +/* MIDI 1.0 EP IN */ +static struct usb_endpoint_descriptor midi2_midi1_ep_in_desc = { + .bLength = USB_DT_ENDPOINT_AUDIO_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN | 0, // set up dynamically + .bmAttributes = USB_ENDPOINT_XFER_BULK, +}; + +static struct usb_ss_ep_comp_descriptor midi2_midi1_ep_in_ss_comp_desc = { + 
.bLength = sizeof(midi2_midi1_ep_in_ss_comp_desc), + .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, +}; + +static struct usb_ms_endpoint_descriptor_16 midi2_midi1_ep_in_class_desc = { + .bLength = 0x05, // to be filled + .bDescriptorType = USB_DT_CS_ENDPOINT, + .bDescriptorSubtype = USB_MS_GENERAL, + .bNumEmbMIDIJack = 1, + .baAssocJackID = { 0x03 }, +}; + +/* MIDI 2.0 Streaming Interface (altset 1) */ +static struct usb_interface_descriptor midi2_midi2_if_desc = { + .bLength = USB_DT_INTERFACE_SIZE, + .bDescriptorType = USB_DT_INTERFACE, + .bInterfaceNumber = 0, // to be filled + .bAlternateSetting = 1, + .bNumEndpoints = 2, // to be filled + .bInterfaceClass = USB_CLASS_AUDIO, + .bInterfaceSubClass = USB_SUBCLASS_MIDISTREAMING, + .bInterfaceProtocol = 0, + .iInterface = 0, // to be filled +}; + +static struct usb_ms_header_descriptor midi2_midi2_class_desc = { + .bLength = 0x07, + .bDescriptorType = USB_DT_CS_INTERFACE, + .bDescriptorSubtype = USB_MS_HEADER, + .bcdMSC = __cpu_to_le16(0x0200), + .wTotalLength = __cpu_to_le16(0x07), +}; + +/* MIDI 2.0 EP OUT */ +static struct usb_endpoint_descriptor midi2_midi2_ep_out_desc[MAX_UMP_EPS]; + +static struct usb_ss_ep_comp_descriptor midi2_midi2_ep_out_ss_comp_desc = { + .bLength = sizeof(midi2_midi1_ep_out_ss_comp_desc), + .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, +}; + +static struct usb_ms20_endpoint_descriptor_32 midi2_midi2_ep_out_class_desc[MAX_UMP_EPS]; + +/* MIDI 2.0 EP IN */ +static struct usb_endpoint_descriptor midi2_midi2_ep_in_desc[MAX_UMP_EPS]; + +static struct usb_ss_ep_comp_descriptor midi2_midi2_ep_in_ss_comp_desc = { + .bLength = sizeof(midi2_midi2_ep_in_ss_comp_desc), + .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, +}; + +static struct usb_ms20_endpoint_descriptor_32 midi2_midi2_ep_in_class_desc[MAX_UMP_EPS]; + +/* Arrays of descriptors to be created */ +static void *midi2_audio_descs[] = { + &midi2_audio_if_desc, + &midi2_audio_class_desc, + NULL +}; + +static void *midi2_midi1_descs[] = { + &midi2_midi1_if_desc, + &midi2_midi1_class_desc, + NULL +}; + +static void *midi2_midi1_ep_out_descs[] = { + &midi2_midi1_ep_out_desc, + &midi2_midi1_ep_out_class_desc, + NULL +}; + +static void *midi2_midi1_ep_in_descs[] = { + &midi2_midi1_ep_in_desc, + &midi2_midi1_ep_in_class_desc, + NULL +}; + +static void *midi2_midi1_ep_out_ss_descs[] = { + &midi2_midi1_ep_out_desc, + &midi2_midi1_ep_out_ss_comp_desc, + &midi2_midi1_ep_out_class_desc, + NULL +}; + +static void *midi2_midi1_ep_in_ss_descs[] = { + &midi2_midi1_ep_in_desc, + &midi2_midi1_ep_in_ss_comp_desc, + &midi2_midi1_ep_in_class_desc, + NULL +}; + +static void *midi2_midi2_descs[] = { + &midi2_midi2_if_desc, + &midi2_midi2_class_desc, + NULL +}; + +/* + * USB request handling + */ + +/* get an empty request for the given EP */ +static struct usb_request *get_empty_request(struct f_midi2_usb_ep *usb_ep) +{ + struct usb_request *req = NULL; + unsigned long flags; + int index; + + spin_lock_irqsave(&usb_ep->card->queue_lock, flags); + if (!usb_ep->free_reqs) + goto unlock; + index = find_first_bit(&usb_ep->free_reqs, usb_ep->num_reqs); + if (index >= usb_ep->num_reqs) + goto unlock; + req = usb_ep->reqs[index].req; + if (!req) + goto unlock; + clear_bit(index, &usb_ep->free_reqs); + req->length = 0; + unlock: + spin_unlock_irqrestore(&usb_ep->card->queue_lock, flags); + return req; +} + +/* put the empty request back */ +static void put_empty_request(struct usb_request *req) +{ + struct f_midi2_req_ctx *ctx = req->context; + unsigned long flags; + + 
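+ /* return the request to the free_reqs bitmap under the queue lock */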
spin_lock_irqsave(&ctx->usb_ep->card->queue_lock, flags); + set_bit(ctx->index, &ctx->usb_ep->free_reqs); + spin_unlock_irqrestore(&ctx->usb_ep->card->queue_lock, flags); +} + +/* + * UMP v1.1 Stream message handling + */ + +/* queue a request to UMP EP; request is either queued or freed after this */ +static int queue_request_ep_raw(struct usb_request *req) +{ + struct f_midi2_req_ctx *ctx = req->context; + int err; + + req->complete = ctx->usb_ep->complete; + err = usb_ep_queue(ctx->usb_ep->usb_ep, req, GFP_ATOMIC); + if (err) { + put_empty_request(req); + return err; + } + return 0; +} + +/* queue a request with endianness conversion */ +static int queue_request_ep_in(struct usb_request *req) +{ + /* UMP packets have to be converted to little-endian */ + cpu_to_le32_array((u32 *)req->buf, req->length >> 2); + return queue_request_ep_raw(req); +} + +/* reply a UMP packet via EP-in */ +static int reply_ep_in(struct f_midi2_ep *ep, const void *buf, int len) +{ + struct f_midi2_usb_ep *usb_ep = &ep->ep_in; + struct usb_request *req; + + req = get_empty_request(usb_ep); + if (!req) + return -ENOSPC; + + req->length = len; + memcpy(req->buf, buf, len); + return queue_request_ep_in(req); +} + +/* reply a UMP stream EP info */ +static void reply_ump_stream_ep_info(struct f_midi2_ep *ep) +{ + struct snd_ump_stream_msg_ep_info rep = { + .type = UMP_MSG_TYPE_STREAM, + .status = UMP_STREAM_MSG_STATUS_EP_INFO, + .ump_version_major = 0x01, + .ump_version_minor = 0x01, + .num_function_blocks = ep->num_blks, + .static_function_block = !!ep->card->info.static_block, + .protocol = (UMP_STREAM_MSG_EP_INFO_CAP_MIDI1 | + UMP_STREAM_MSG_EP_INFO_CAP_MIDI2) >> 8, + }; + + reply_ep_in(ep, &rep, sizeof(rep)); +} + +/* reply a UMP EP device info */ +static void reply_ump_stream_ep_device(struct f_midi2_ep *ep) +{ + struct snd_ump_stream_msg_devince_info rep = { + .type = UMP_MSG_TYPE_STREAM, + .status = UMP_STREAM_MSG_STATUS_DEVICE_INFO, + .manufacture_id = ep->info.manufacturer, + .family_lsb = ep->info.family & 0xff, + .family_msb = (ep->info.family >> 8) & 0xff, + .model_lsb = ep->info.model & 0xff, + .model_msb = (ep->info.model >> 8) & 0xff, + .sw_revision = ep->info.sw_revision, + }; + + reply_ep_in(ep, &rep, sizeof(rep)); +} + +#define UMP_STREAM_PKT_BYTES 16 /* UMP stream packet size = 16 bytes */ +#define UMP_STREAM_EP_STR_OFF 2 /* offset of name string for EP info */ +#define UMP_STREAM_FB_STR_OFF 3 /* offset of name string for FB info */ + +/* Helper to reply a string */ +static void reply_ump_stream_string(struct f_midi2_ep *ep, const u8 *name, + unsigned int type, unsigned int extra, + unsigned int start_ofs) +{ + struct f_midi2_usb_ep *usb_ep = &ep->ep_in; + struct f_midi2 *midi2 = ep->card; + struct usb_request *req; + unsigned int pos; + u32 *buf; + + if (!*name) + return; + req = get_empty_request(usb_ep); + if (!req) + return; + + buf = (u32 *)req->buf; + pos = start_ofs; + for (;;) { + if (pos == start_ofs) { + memset(buf, 0, UMP_STREAM_PKT_BYTES); + buf[0] = ump_stream_compose(type, 0) | extra; + } + buf[pos / 4] |= *name++ << ((3 - (pos % 4)) * 8); + if (!*name) { + if (req->length) + buf[0] |= UMP_STREAM_MSG_FORMAT_END << 26; + req->length += UMP_STREAM_PKT_BYTES; + break; + } + if (++pos == UMP_STREAM_PKT_BYTES) { + if (!req->length) + buf[0] |= UMP_STREAM_MSG_FORMAT_START << 26; + else + buf[0] |= UMP_STREAM_MSG_FORMAT_CONTINUE << 26; + req->length += UMP_STREAM_PKT_BYTES; + if (midi2->info.req_buf_size - req->length < UMP_STREAM_PKT_BYTES) + break; + buf += 4; + pos = start_ofs; + } + } +
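+ /* submit the reply only when at least one packet has been filled */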
+ if (req->length) + queue_request_ep_in(req); + else + put_empty_request(req); +} + +/* Reply a UMP EP name string */ +static void reply_ump_stream_ep_name(struct f_midi2_ep *ep) +{ + reply_ump_stream_string(ep, ump_ep_name(ep), + UMP_STREAM_MSG_STATUS_EP_NAME, 0, + UMP_STREAM_EP_STR_OFF); +} + +/* Reply a UMP EP product ID string */ +static void reply_ump_stream_ep_pid(struct f_midi2_ep *ep) +{ + reply_ump_stream_string(ep, ump_product_id(ep), + UMP_STREAM_MSG_STATUS_PRODUCT_ID, 0, + UMP_STREAM_EP_STR_OFF); +} + +/* Reply a UMP EP stream config */ +static void reply_ump_stream_ep_config(struct f_midi2_ep *ep) +{ + struct snd_ump_stream_msg_stream_cfg rep = { + .type = UMP_MSG_TYPE_STREAM, + .status = UMP_STREAM_MSG_STATUS_STREAM_CFG, + }; + + if ((ep->info.protocol & SNDRV_UMP_EP_INFO_PROTO_MIDI_MASK) == + SNDRV_UMP_EP_INFO_PROTO_MIDI2) + rep.protocol = UMP_STREAM_MSG_EP_INFO_CAP_MIDI2 >> 8; + else + rep.protocol = UMP_STREAM_MSG_EP_INFO_CAP_MIDI1 >> 8; + + reply_ep_in(ep, &rep, sizeof(rep)); +} + +/* Reply a UMP FB info */ +static void reply_ump_stream_fb_info(struct f_midi2_ep *ep, int blk) +{ + struct f_midi2_block_info *b = &ep->blks[blk].info; + struct snd_ump_stream_msg_fb_info rep = { + .type = UMP_MSG_TYPE_STREAM, + .status = UMP_STREAM_MSG_STATUS_FB_INFO, + .active = !!b->active, + .function_block_id = blk, + .ui_hint = b->ui_hint, + .midi_10 = b->is_midi1, + .direction = b->direction, + .first_group = b->first_group, + .num_groups = b->num_groups, + .midi_ci_version = b->midi_ci_version, + .sysex8_streams = b->sysex8_streams, + }; + + reply_ep_in(ep, &rep, sizeof(rep)); +} + +/* Reply a FB name string */ +static void reply_ump_stream_fb_name(struct f_midi2_ep *ep, unsigned int blk) +{ + reply_ump_stream_string(ep, ump_fb_name(&ep->blks[blk].info), + UMP_STREAM_MSG_STATUS_FB_NAME, blk << 8, + UMP_STREAM_FB_STR_OFF); +} + +/* Process a UMP Stream message */ +static void process_ump_stream_msg(struct f_midi2_ep *ep, const u32 *data) +{ + struct f_midi2 *midi2 = ep->card; + unsigned int format, status, blk; + + format = ump_stream_message_format(*data); + status = ump_stream_message_status(*data); + switch (status) { + case UMP_STREAM_MSG_STATUS_EP_DISCOVERY: + if (format) + return; // invalid + if (data[1] & UMP_STREAM_MSG_REQUEST_EP_INFO) + reply_ump_stream_ep_info(ep); + if (data[1] & UMP_STREAM_MSG_REQUEST_DEVICE_INFO) + reply_ump_stream_ep_device(ep); + if (data[1] & UMP_STREAM_MSG_REQUEST_EP_NAME) + reply_ump_stream_ep_name(ep); + if (data[1] & UMP_STREAM_MSG_REQUEST_PRODUCT_ID) + reply_ump_stream_ep_pid(ep); + if (data[1] & UMP_STREAM_MSG_REQUEST_STREAM_CFG) + reply_ump_stream_ep_config(ep); + return; + case UMP_STREAM_MSG_STATUS_STREAM_CFG_REQUEST: + if (*data & UMP_STREAM_MSG_EP_INFO_CAP_MIDI2) { + ep->info.protocol = SNDRV_UMP_EP_INFO_PROTO_MIDI2; + DBG(midi2, "Switching Protocol to MIDI2\n"); + } else { + ep->info.protocol = SNDRV_UMP_EP_INFO_PROTO_MIDI1; + DBG(midi2, "Switching Protocol to MIDI1\n"); + } + snd_ump_switch_protocol(ep->ump, ep->info.protocol); + reply_ump_stream_ep_config(ep); + return; + case UMP_STREAM_MSG_STATUS_FB_DISCOVERY: + if (format) + return; // invalid + blk = (*data >> 8) & 0xff; + if (blk >= ep->num_blks) + return; + if (*data & UMP_STREAM_MSG_REQUEST_FB_INFO) + reply_ump_stream_fb_info(ep, blk); + if (*data & UMP_STREAM_MSG_REQUEST_FB_NAME) + reply_ump_stream_fb_name(ep, blk); + return; + } +} + +/* Process UMP messages included in a USB request */ +static void process_ump(struct f_midi2_ep *ep, const struct usb_request *req) +{ + const u32 
*data = (u32 *)req->buf; + int len = req->actual >> 2; + const u32 *in_buf = ep->ump->input_buf; + + for (; len > 0; len--, data++) { + if (snd_ump_receive_ump_val(ep->ump, *data) <= 0) + continue; + if (ump_message_type(*in_buf) == UMP_MSG_TYPE_STREAM) + process_ump_stream_msg(ep, in_buf); + } +} + +/* + * MIDI 2.0 UMP USB request handling + */ + +/* complete handler for UMP EP-out requests */ +static void f_midi2_ep_out_complete(struct usb_ep *usb_ep, + struct usb_request *req) +{ + struct f_midi2_req_ctx *ctx = req->context; + struct f_midi2_ep *ep = ctx->usb_ep->ep; + struct f_midi2 *midi2 = ep->card; + int status = req->status; + + if (status) { + DBG(midi2, "%s complete error %d: %d/%d\n", + usb_ep->name, status, req->actual, req->length); + goto error; + } + + /* convert to UMP packet in native endianness */ + le32_to_cpu_array((u32 *)req->buf, req->actual >> 2); + + if (midi2->info.process_ump) + process_ump(ep, req); + + snd_ump_receive(ep->ump, req->buf, req->actual & ~3); + + if (midi2->operation_mode != MIDI_OP_MODE_MIDI2) + goto error; + + if (queue_request_ep_raw(req)) + goto error; + return; + + error: + put_empty_request(req); +} + +/* Transmit UMP packets received from user-space to the gadget */ +static void process_ump_transmit(struct f_midi2_ep *ep) +{ + struct f_midi2_usb_ep *usb_ep = &ep->ep_in; + struct f_midi2 *midi2 = ep->card; + struct usb_request *req; + int len; + + if (!usb_ep->usb_ep->enabled) + return; + + for (;;) { + req = get_empty_request(usb_ep); + if (!req) + break; + len = snd_ump_transmit(ep->ump, (u32 *)req->buf, + midi2->info.req_buf_size); + if (len <= 0) { + put_empty_request(req); + break; + } + + req->length = len; + if (queue_request_ep_in(req) < 0) + break; + } +} + +/* Complete handler for UMP EP-in requests */ +static void f_midi2_ep_in_complete(struct usb_ep *usb_ep, + struct usb_request *req) +{ + struct f_midi2_req_ctx *ctx = req->context; + struct f_midi2_ep *ep = ctx->usb_ep->ep; + struct f_midi2 *midi2 = ep->card; + int status = req->status; + + put_empty_request(req); + + if (status) { + DBG(midi2, "%s complete error %d: %d/%d\n", + usb_ep->name, status, req->actual, req->length); + return; + } + + process_ump_transmit(ep); +} + +/* + * MIDI1 (altset 0) USB request handling + */ + +/* process one MIDI byte -- copied from f_midi.c + * + * fill the packet or request if needed + * returns true if the request became empty (queued) + */ +static bool process_midi1_byte(struct f_midi2 *midi2, u8 cable, u8 b, + struct usb_request **req_p) +{ + struct f_midi2_midi1_port *port = &midi2->midi1_port[cable]; + u8 p[4] = { cable << 4, 0, 0, 0 }; + int next_state = STATE_INITIAL; + struct usb_request *req = *req_p; + + switch (b) { + case 0xf8 ... 0xff: + /* System Real-Time Messages */ + p[0] |= 0x0f; + p[1] = b; + next_state = port->state; + port->state = STATE_REAL_TIME; + break; + + case 0xf7: + /* End of SysEx */ + switch (port->state) { + case STATE_SYSEX_0: + p[0] |= 0x05; + p[1] = 0xf7; + next_state = STATE_FINISHED; + break; + case STATE_SYSEX_1: + p[0] |= 0x06; + p[1] = port->data[0]; + p[2] = 0xf7; + next_state = STATE_FINISHED; + break; + case STATE_SYSEX_2: + p[0] |= 0x07; + p[1] = port->data[0]; + p[2] = port->data[1]; + p[3] = 0xf7; + next_state = STATE_FINISHED; + break; + default: + /* Ignore byte */ + next_state = port->state; + port->state = STATE_INITIAL; + } + break; + + case 0xf0 ... 
0xf6: + /* System Common Messages */ + port->data[0] = port->data[1] = 0; + port->state = STATE_INITIAL; + switch (b) { + case 0xf0: + port->data[0] = b; + port->data[1] = 0; + next_state = STATE_SYSEX_1; + break; + case 0xf1: + case 0xf3: + port->data[0] = b; + next_state = STATE_1PARAM; + break; + case 0xf2: + port->data[0] = b; + next_state = STATE_2PARAM_1; + break; + case 0xf4: + case 0xf5: + next_state = STATE_INITIAL; + break; + case 0xf6: + p[0] |= 0x05; + p[1] = 0xf6; + next_state = STATE_FINISHED; + break; + } + break; + + case 0x80 ... 0xef: + /* + * Channel Voice Messages, Channel Mode Messages + * and Control Change Messages. + */ + port->data[0] = b; + port->data[1] = 0; + port->state = STATE_INITIAL; + if (b >= 0xc0 && b <= 0xdf) + next_state = STATE_1PARAM; + else + next_state = STATE_2PARAM_1; + break; + + case 0x00 ... 0x7f: + /* Message parameters */ + switch (port->state) { + case STATE_1PARAM: + if (port->data[0] < 0xf0) + p[0] |= port->data[0] >> 4; + else + p[0] |= 0x02; + + p[1] = port->data[0]; + p[2] = b; + /* This is to allow Running State Messages */ + next_state = STATE_1PARAM; + break; + case STATE_2PARAM_1: + port->data[1] = b; + next_state = STATE_2PARAM_2; + break; + case STATE_2PARAM_2: + if (port->data[0] < 0xf0) + p[0] |= port->data[0] >> 4; + else + p[0] |= 0x03; + + p[1] = port->data[0]; + p[2] = port->data[1]; + p[3] = b; + /* This is to allow Running State Messages */ + next_state = STATE_2PARAM_1; + break; + case STATE_SYSEX_0: + port->data[0] = b; + next_state = STATE_SYSEX_1; + break; + case STATE_SYSEX_1: + port->data[1] = b; + next_state = STATE_SYSEX_2; + break; + case STATE_SYSEX_2: + p[0] |= 0x04; + p[1] = port->data[0]; + p[2] = port->data[1]; + p[3] = b; + next_state = STATE_SYSEX_0; + break; + } + break; + } + + /* States where we have to write into the USB request */ + if (next_state == STATE_FINISHED || + port->state == STATE_SYSEX_2 || + port->state == STATE_1PARAM || + port->state == STATE_2PARAM_2 || + port->state == STATE_REAL_TIME) { + memcpy(req->buf + req->length, p, sizeof(p)); + req->length += sizeof(p); + + if (next_state == STATE_FINISHED) { + next_state = STATE_INITIAL; + port->data[0] = port->data[1] = 0; + } + + if (midi2->info.req_buf_size - req->length <= 4) { + queue_request_ep_raw(req); + *req_p = NULL; + return true; + } + } + + port->state = next_state; + return false; +} + +/* process all pending MIDI bytes in the internal buffer; + * returns true if the request gets empty + * returns false if all have been processed + */ +static bool process_midi1_pending_buf(struct f_midi2 *midi2, + struct usb_request **req_p) +{ + unsigned int cable, c; + + for (cable = 0; cable < midi2->num_midi1_in; cable++) { + struct f_midi2_midi1_port *port = &midi2->midi1_port[cable]; + + if (!port->pending) + continue; + for (c = 0; c < port->pending; c++) { + if (process_midi1_byte(midi2, cable, port->buf[c], + req_p)) { + port->pending -= c; + if (port->pending) + memmove(port->buf, port->buf + c, + port->pending); + return true; + } + } + port->pending = 0; + } + + return false; +} + +/* fill the MIDI bytes onto the temporary buffer + */ +static void fill_midi1_pending_buf(struct f_midi2 *midi2, u8 cable, u8 *buf, + unsigned int size) +{ + struct f_midi2_midi1_port *port = &midi2->midi1_port[cable]; + + if (port->pending + size > sizeof(port->buf)) + return; + memcpy(port->buf + port->pending, buf, size); + port->pending += size; +} + +/* try to process data given from the associated UMP stream */ +static void 
process_midi1_transmit(struct f_midi2 *midi2) +{ + struct f_midi2_usb_ep *usb_ep = &midi2->midi1_ep_in; + struct f_midi2_ep *ep = &midi2->midi2_eps[0]; + struct usb_request *req = NULL; + /* 12 is the largest outcome (4 MIDI1 cmds) for a single UMP packet */ + unsigned char outbuf[12]; + unsigned char group, cable; + int len, size; + u32 ump; + + if (!usb_ep->usb_ep || !usb_ep->usb_ep->enabled) + return; + + for (;;) { + if (!req) { + req = get_empty_request(usb_ep); + if (!req) + break; + } + + if (process_midi1_pending_buf(midi2, &req)) + continue; + + len = snd_ump_transmit(ep->ump, &ump, 4); + if (len <= 0) + break; + if (snd_ump_receive_ump_val(ep->ump, ump) <= 0) + continue; + size = snd_ump_convert_from_ump(ep->ump->input_buf, outbuf, + &group); + if (size <= 0) + continue; + cable = ep->in_group_to_cable[group]; + if (!cable) + continue; + cable--; /* to 0-base */ + fill_midi1_pending_buf(midi2, cable, outbuf, size); + } + + if (req) { + if (req->length) + queue_request_ep_raw(req); + else + put_empty_request(req); + } +} + +/* complete handler for MIDI1 EP-in requests */ +static void f_midi2_midi1_ep_in_complete(struct usb_ep *usb_ep, + struct usb_request *req) +{ + struct f_midi2_req_ctx *ctx = req->context; + struct f_midi2 *midi2 = ctx->usb_ep->card; + int status = req->status; + + put_empty_request(req); + + if (status) { + DBG(midi2, "%s complete error %d: %d/%d\n", + usb_ep->name, status, req->actual, req->length); + return; + } + + process_midi1_transmit(midi2); +} + +/* complete handler for MIDI1 EP-out requests */ +static void f_midi2_midi1_ep_out_complete(struct usb_ep *usb_ep, + struct usb_request *req) +{ + struct f_midi2_req_ctx *ctx = req->context; + struct f_midi2 *midi2 = ctx->usb_ep->card; + struct f_midi2_ep *ep; + struct ump_cvt_to_ump *cvt = &midi2->midi1_ump_cvt; + static const u8 midi1_packet_bytes[16] = { + 0, 0, 2, 3, 3, 1, 2, 3, 3, 3, 3, 3, 2, 2, 3, 1 + }; + unsigned int group, cable, bytes, c, len; + int status = req->status; + const u8 *buf = req->buf; + + if (status) { + DBG(midi2, "%s complete error %d: %d/%d\n", + usb_ep->name, status, req->actual, req->length); + goto error; + } + + len = req->actual >> 2; + for (; len; len--, buf += 4) { + cable = *buf >> 4; + ep = midi2->out_cable_mapping[cable].ep; + if (!ep) + continue; + group = midi2->out_cable_mapping[cable].group; + bytes = midi1_packet_bytes[*buf & 0x0f]; + for (c = 0; c < bytes; c++) { + snd_ump_convert_to_ump(cvt, group, ep->info.protocol, + buf[c + 1]); + if (cvt->ump_bytes) { + snd_ump_receive(ep->ump, cvt->ump, + cvt->ump_bytes); + cvt->ump_bytes = 0; + } + } + } + + if (midi2->operation_mode != MIDI_OP_MODE_MIDI1) + goto error; + + if (queue_request_ep_raw(req)) + goto error; + return; + + error: + put_empty_request(req); +} + +/* + * Common EP handling helpers + */ + +/* Start MIDI EP */ +static int f_midi2_start_ep(struct f_midi2_usb_ep *usb_ep, + struct usb_function *fn) +{ + int err; + + if (!usb_ep->usb_ep) + return 0; + + usb_ep_disable(usb_ep->usb_ep); + err = config_ep_by_speed(usb_ep->card->gadget, fn, usb_ep->usb_ep); + if (err) + return err; + return usb_ep_enable(usb_ep->usb_ep); +} + +/* Drop pending requests */ +static void f_midi2_drop_reqs(struct f_midi2_usb_ep *usb_ep) +{ + int i; + + if (!usb_ep->usb_ep || !usb_ep->num_reqs) + return; + + for (i = 0; i < usb_ep->num_reqs; i++) { + if (!test_bit(i, &usb_ep->free_reqs) && usb_ep->reqs[i].req) { + usb_ep_dequeue(usb_ep->usb_ep, usb_ep->reqs[i].req); + set_bit(i, &usb_ep->free_reqs); + } + } +} + +/* Allocate requests for 
the given EP */ +static int f_midi2_alloc_ep_reqs(struct f_midi2_usb_ep *usb_ep) +{ + struct f_midi2 *midi2 = usb_ep->card; + int i; + + if (!usb_ep->usb_ep) + return 0; + if (!usb_ep->reqs) + return -EINVAL; + + for (i = 0; i < midi2->info.num_reqs; i++) { + if (usb_ep->reqs[i].req) + continue; + usb_ep->reqs[i].req = alloc_ep_req(usb_ep->usb_ep, + midi2->info.req_buf_size); + if (!usb_ep->reqs[i].req) + return -ENOMEM; + usb_ep->reqs[i].req->context = &usb_ep->reqs[i]; + } + return 0; +} + +/* Free allocated requests */ +static void f_midi2_free_ep_reqs(struct f_midi2_usb_ep *usb_ep) +{ + struct f_midi2 *midi2 = usb_ep->card; + int i; + + for (i = 0; i < midi2->info.num_reqs; i++) { + if (!usb_ep->reqs[i].req) + continue; + free_ep_req(usb_ep->usb_ep, usb_ep->reqs[i].req); + usb_ep->reqs[i].req = NULL; + } +} + +/* Initialize EP */ +static int f_midi2_init_ep(struct f_midi2 *midi2, struct f_midi2_ep *ep, + struct f_midi2_usb_ep *usb_ep, + void *desc, + void (*complete)(struct usb_ep *usb_ep, + struct usb_request *req)) +{ + int i; + + usb_ep->card = midi2; + usb_ep->ep = ep; + usb_ep->usb_ep = usb_ep_autoconfig(midi2->gadget, desc); + if (!usb_ep->usb_ep) + return -ENODEV; + usb_ep->complete = complete; + + usb_ep->reqs = kcalloc(midi2->info.num_reqs, sizeof(*usb_ep->reqs), + GFP_KERNEL); + if (!usb_ep->reqs) + return -ENOMEM; + for (i = 0; i < midi2->info.num_reqs; i++) { + usb_ep->reqs[i].index = i; + usb_ep->reqs[i].usb_ep = usb_ep; + set_bit(i, &usb_ep->free_reqs); + usb_ep->num_reqs++; + } + + return 0; +} + +/* Free EP */ +static void f_midi2_free_ep(struct f_midi2_usb_ep *usb_ep) +{ + f_midi2_drop_reqs(usb_ep); + + f_midi2_free_ep_reqs(usb_ep); + + kfree(usb_ep->reqs); + usb_ep->num_reqs = 0; + usb_ep->free_reqs = 0; + usb_ep->reqs = NULL; +} + +/* Queue requests for EP-out at start */ +static void f_midi2_queue_out_reqs(struct f_midi2_usb_ep *usb_ep) +{ + int i, err; + + if (!usb_ep->usb_ep) + return; + + for (i = 0; i < usb_ep->num_reqs; i++) { + if (!test_bit(i, &usb_ep->free_reqs) || !usb_ep->reqs[i].req) + continue; + usb_ep->reqs[i].req->complete = usb_ep->complete; + err = usb_ep_queue(usb_ep->usb_ep, usb_ep->reqs[i].req, + GFP_ATOMIC); + if (!err) + clear_bit(i, &usb_ep->free_reqs); + } +} + +/* + * Gadget Function callbacks + */ + +/* stop both IN and OUT EPs */ +static void f_midi2_stop_eps(struct f_midi2_usb_ep *ep_in, + struct f_midi2_usb_ep *ep_out) +{ + f_midi2_drop_reqs(ep_in); + f_midi2_drop_reqs(ep_out); + f_midi2_free_ep_reqs(ep_in); + f_midi2_free_ep_reqs(ep_out); +} + +/* start/queue both IN and OUT EPs */ +static int f_midi2_start_eps(struct f_midi2_usb_ep *ep_in, + struct f_midi2_usb_ep *ep_out, + struct usb_function *fn) +{ + int err; + + err = f_midi2_start_ep(ep_in, fn); + if (err) + return err; + err = f_midi2_start_ep(ep_out, fn); + if (err) + return err; + + err = f_midi2_alloc_ep_reqs(ep_in); + if (err) + return err; + err = f_midi2_alloc_ep_reqs(ep_out); + if (err) + return err; + + f_midi2_queue_out_reqs(ep_out); + return 0; +} + +/* gadget function set_alt callback */ +static int f_midi2_set_alt(struct usb_function *fn, unsigned int intf, + unsigned int alt) +{ + struct f_midi2 *midi2 = func_to_midi2(fn); + struct f_midi2_ep *ep; + int i, op_mode, err; + + if (intf != midi2->midi_if || alt > 1) + return 0; + + if (alt == 0) + op_mode = MIDI_OP_MODE_MIDI1; + else if (alt == 1) + op_mode = MIDI_OP_MODE_MIDI2; + else + op_mode = MIDI_OP_MODE_UNSET; + + if (midi2->operation_mode == op_mode) + return 0; + + midi2->operation_mode = op_mode; + + if 
(op_mode != MIDI_OP_MODE_MIDI1) + f_midi2_stop_eps(&midi2->midi1_ep_in, &midi2->midi1_ep_out); + + if (op_mode != MIDI_OP_MODE_MIDI2) { + for (i = 0; i < midi2->num_eps; i++) { + ep = &midi2->midi2_eps[i]; + f_midi2_stop_eps(&ep->ep_in, &ep->ep_out); + } + } + + if (op_mode == MIDI_OP_MODE_MIDI1) + return f_midi2_start_eps(&midi2->midi1_ep_in, + &midi2->midi1_ep_out, fn); + + if (op_mode == MIDI_OP_MODE_MIDI2) { + for (i = 0; i < midi2->num_eps; i++) { + ep = &midi2->midi2_eps[i]; + + err = f_midi2_start_eps(&ep->ep_in, &ep->ep_out, fn); + if (err) + return err; + } + } + + return 0; +} + +/* gadget function get_alt callback */ +static int f_midi2_get_alt(struct usb_function *fn, unsigned int intf) +{ + struct f_midi2 *midi2 = func_to_midi2(fn); + + if (intf == midi2->midi_if && + midi2->operation_mode == MIDI_OP_MODE_MIDI2) + return 1; + return 0; +} + +/* convert UMP direction to USB MIDI 2.0 direction */ +static unsigned int ump_to_usb_dir(unsigned int ump_dir) +{ + switch (ump_dir) { + case SNDRV_UMP_DIR_INPUT: + return USB_MS_GR_TRM_BLOCK_TYPE_INPUT_ONLY; + case SNDRV_UMP_DIR_OUTPUT: + return USB_MS_GR_TRM_BLOCK_TYPE_OUTPUT_ONLY; + default: + return USB_MS_GR_TRM_BLOCK_TYPE_BIDIRECTIONAL; + } +} + +/* assign GTB descriptors (for the given request) */ +static void assign_block_descriptors(struct f_midi2 *midi2, + struct usb_request *req, + int max_len) +{ + struct usb_ms20_gr_trm_block_header_descriptor header; + struct usb_ms20_gr_trm_block_descriptor *desc; + struct f_midi2_block_info *b; + struct f_midi2_ep *ep; + int i, blk, len; + char *data; + + len = sizeof(gtb_header_desc) + sizeof(gtb_desc) * midi2->total_blocks; + if (WARN_ON(len > midi2->info.req_buf_size)) + return; + + header = gtb_header_desc; + header.wTotalLength = cpu_to_le16(len); + if (max_len < len) { + len = min_t(int, len, sizeof(header)); + memcpy(req->buf, &header, len); + req->length = len; + req->zero = len < max_len; + return; + } + + memcpy(req->buf, &header, sizeof(header)); + data = req->buf + sizeof(header); + for (i = 0; i < midi2->num_eps; i++) { + ep = &midi2->midi2_eps[i]; + for (blk = 0; blk < ep->num_blks; blk++) { + b = &ep->blks[blk].info; + desc = (struct usb_ms20_gr_trm_block_descriptor *)data; + + *desc = gtb_desc; + desc->bGrpTrmBlkID = ep->blks[blk].gtb_id; + desc->bGrpTrmBlkType = ump_to_usb_dir(b->direction); + desc->nGroupTrm = b->first_group; + desc->nNumGroupTrm = b->num_groups; + desc->iBlockItem = ep->blks[blk].string_id; + + if (ep->info.protocol & SNDRV_UMP_EP_INFO_PROTO_MIDI2) + desc->bMIDIProtocol = USB_MS_MIDI_PROTO_2_0; + else + desc->bMIDIProtocol = USB_MS_MIDI_PROTO_1_0_128; + + if (b->is_midi1 == 2) { + desc->wMaxInputBandwidth = cpu_to_le16(1); + desc->wMaxOutputBandwidth = cpu_to_le16(1); + } + + data += sizeof(*desc); + } + } + + req->length = len; + req->zero = len < max_len; +} + +/* gadget function setup callback: handle GTB requests */ +static int f_midi2_setup(struct usb_function *fn, + const struct usb_ctrlrequest *ctrl) +{ + struct f_midi2 *midi2 = func_to_midi2(fn); + struct usb_composite_dev *cdev = fn->config->cdev; + struct usb_request *req = cdev->req; + u16 value, length; + + if ((ctrl->bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD || + ctrl->bRequest != USB_REQ_GET_DESCRIPTOR) + return -EOPNOTSUPP; + + value = le16_to_cpu(ctrl->wValue); + length = le16_to_cpu(ctrl->wLength); + + if ((value >> 8) != USB_DT_CS_GR_TRM_BLOCK) + return -EOPNOTSUPP; + + /* handle only altset 1 */ + if ((value & 0xff) != 1) + return -EOPNOTSUPP; + + 
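+ /* fill the GTB descriptors into the ep0 request and submit it to the host */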
assign_block_descriptors(midi2, req, length); + return usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC); +} + +/* gadget function disable callback */ +static void f_midi2_disable(struct usb_function *fn) +{ + struct f_midi2 *midi2 = func_to_midi2(fn); + + midi2->operation_mode = MIDI_OP_MODE_UNSET; +} + +/* + * ALSA UMP ops: most of them are NOPs, only trigger for write is needed + */ +static int f_midi2_ump_open(struct snd_ump_endpoint *ump, int dir) +{ + return 0; +} + +static void f_midi2_ump_close(struct snd_ump_endpoint *ump, int dir) +{ +} + +static void f_midi2_ump_trigger(struct snd_ump_endpoint *ump, int dir, int up) +{ + struct f_midi2_ep *ep = ump->private_data; + struct f_midi2 *midi2 = ep->card; + + if (up && dir == SNDRV_RAWMIDI_STREAM_OUTPUT) { + switch (midi2->operation_mode) { + case MIDI_OP_MODE_MIDI1: + process_midi1_transmit(midi2); + break; + case MIDI_OP_MODE_MIDI2: + process_ump_transmit(ep); + break; + } + } +} + +static void f_midi2_ump_drain(struct snd_ump_endpoint *ump, int dir) +{ +} + +static const struct snd_ump_ops f_midi2_ump_ops = { + .open = f_midi2_ump_open, + .close = f_midi2_ump_close, + .trigger = f_midi2_ump_trigger, + .drain = f_midi2_ump_drain, +}; + +/* + * "Operation Mode" control element + */ +static int f_midi2_operation_mode_info(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_info *uinfo) +{ + uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; + uinfo->count = 1; + uinfo->value.integer.min = MIDI_OP_MODE_UNSET; + uinfo->value.integer.max = MIDI_OP_MODE_MIDI2; + return 0; +} + +static int f_midi2_operation_mode_get(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct f_midi2 *midi2 = snd_kcontrol_chip(kcontrol); + + ucontrol->value.integer.value[0] = midi2->operation_mode; + return 0; +} + +static const struct snd_kcontrol_new operation_mode_ctl = { + .iface = SNDRV_CTL_ELEM_IFACE_RAWMIDI, + .name = "Operation Mode", + .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, + .info = f_midi2_operation_mode_info, + .get = f_midi2_operation_mode_get, +}; + +/* + * ALSA UMP instance creation / deletion + */ +static void f_midi2_free_card(struct f_midi2 *midi2) +{ + if (midi2->card) { + snd_card_free_when_closed(midi2->card); + midi2->card = NULL; + } +} + +/* use a reverse direction for the gadget host */ +static int reverse_dir(int dir) +{ + if (!dir || dir == SNDRV_UMP_DIR_BIDIRECTION) + return dir; + return (dir == SNDRV_UMP_DIR_OUTPUT) ? 
+ SNDRV_UMP_DIR_INPUT : SNDRV_UMP_DIR_OUTPUT; +} + +static int f_midi2_create_card(struct f_midi2 *midi2) +{ + struct snd_card *card; + struct snd_ump_endpoint *ump; + struct f_midi2_ep *ep; + int i, id, blk, err; + __be32 sw; + + err = snd_card_new(&midi2->gadget->dev, -1, NULL, THIS_MODULE, 0, + &card); + if (err < 0) + return err; + midi2->card = card; + + strcpy(card->driver, "f_midi2"); + strcpy(card->shortname, "MIDI 2.0 Gadget"); + strcpy(card->longname, "MIDI 2.0 Gadget"); + + id = 0; + for (i = 0; i < midi2->num_eps; i++) { + ep = &midi2->midi2_eps[i]; + err = snd_ump_endpoint_new(card, "MIDI 2.0 Gadget", id, + 1, 1, &ump); + if (err < 0) + goto error; + id++; + + ep->ump = ump; + ump->no_process_stream = true; + ump->private_data = ep; + ump->ops = &f_midi2_ump_ops; + if (midi2->info.static_block) + ump->info.flags |= SNDRV_UMP_EP_INFO_STATIC_BLOCKS; + ump->info.protocol_caps = (ep->info.protocol_caps & 3) << 8; + ump->info.protocol = (ep->info.protocol & 3) << 8; + ump->info.version = 0x0101; + ump->info.family_id = ep->info.family; + ump->info.model_id = ep->info.model; + ump->info.manufacturer_id = ep->info.manufacturer & 0xffffff; + sw = cpu_to_be32(ep->info.sw_revision); + memcpy(ump->info.sw_revision, &sw, 4); + + strscpy(ump->info.name, ump_ep_name(ep), + sizeof(ump->info.name)); + strscpy(ump->info.product_id, ump_product_id(ep), + sizeof(ump->info.product_id)); + strscpy(ump->core.name, ump->info.name, sizeof(ump->core.name)); + + for (blk = 0; blk < ep->num_blks; blk++) { + const struct f_midi2_block_info *b = &ep->blks[blk].info; + struct snd_ump_block *fb; + + err = snd_ump_block_new(ump, blk, + reverse_dir(b->direction), + b->first_group, b->num_groups, + &ep->blks[blk].fb); + if (err < 0) + goto error; + fb = ep->blks[blk].fb; + fb->info.active = !!b->active; + fb->info.midi_ci_version = b->midi_ci_version; + fb->info.ui_hint = reverse_dir(b->ui_hint); + fb->info.sysex8_streams = b->sysex8_streams; + fb->info.flags |= b->is_midi1; + strscpy(fb->info.name, ump_fb_name(b), + sizeof(fb->info.name)); + } + } + + for (i = 0; i < midi2->num_eps; i++) { + err = snd_ump_attach_legacy_rawmidi(midi2->midi2_eps[i].ump, + "Legacy MIDI", id); + if (err < 0) + goto error; + id++; + } + + err = snd_ctl_add(card, snd_ctl_new1(&operation_mode_ctl, midi2)); + if (err < 0) + goto error; + + err = snd_card_register(card); + if (err < 0) + goto error; + + return 0; + + error: + f_midi2_free_card(midi2); + return err; +} + +/* + * Creation of USB descriptors + */ +struct f_midi2_usb_config { + struct usb_descriptor_header **list; + unsigned int size; + unsigned int alloc; + + /* MIDI 1.0 jacks */ + unsigned char jack_in, jack_out, jack_id; + struct usb_midi_in_jack_descriptor jack_ins[MAX_CABLES]; + struct usb_midi_out_jack_descriptor_1 jack_outs[MAX_CABLES]; +}; + +static int append_config(struct f_midi2_usb_config *config, void *d) +{ + unsigned int size; + void *buf; + + if (config->size + 2 >= config->alloc) { + size = config->size + 16; + buf = krealloc(config->list, size * sizeof(void *), GFP_KERNEL); + if (!buf) + return -ENOMEM; + config->list = buf; + config->alloc = size; + } + + config->list[config->size] = d; + config->size++; + config->list[config->size] = NULL; + return 0; +} + +static int append_configs(struct f_midi2_usb_config *config, void **d) +{ + int err; + + for (; *d; d++) { + err = append_config(config, *d); + if (err) + return err; + } + return 0; +} + +static int append_midi1_in_jack(struct f_midi2 *midi2, + struct f_midi2_usb_config *config, + struct 
midi1_cable_mapping *map, + unsigned int type) +{ + struct usb_midi_in_jack_descriptor *jack = + &config->jack_ins[config->jack_in++]; + int id = ++config->jack_id; + int err; + + jack->bLength = 0x06; + jack->bDescriptorType = USB_DT_CS_INTERFACE; + jack->bDescriptorSubtype = USB_MS_MIDI_IN_JACK; + jack->bJackType = type; + jack->bJackID = id; + /* use the corresponding block name as jack name */ + if (map->ep) + jack->iJack = map->ep->blks[map->block].string_id; + + err = append_config(config, jack); + if (err < 0) + return err; + return id; +} + +static int append_midi1_out_jack(struct f_midi2 *midi2, + struct f_midi2_usb_config *config, + struct midi1_cable_mapping *map, + unsigned int type, unsigned int source) +{ + struct usb_midi_out_jack_descriptor_1 *jack = + &config->jack_outs[config->jack_out++]; + int id = ++config->jack_id; + int err; + + jack->bLength = 0x09; + jack->bDescriptorType = USB_DT_CS_INTERFACE; + jack->bDescriptorSubtype = USB_MS_MIDI_OUT_JACK; + jack->bJackType = type; + jack->bJackID = id; + jack->bNrInputPins = 1; + jack->pins[0].baSourceID = source; + jack->pins[0].baSourcePin = 0x01; + /* use the corresponding block name as jack name */ + if (map->ep) + jack->iJack = map->ep->blks[map->block].string_id; + + err = append_config(config, jack); + if (err < 0) + return err; + return id; +} + +static int f_midi2_create_usb_configs(struct f_midi2 *midi2, + struct f_midi2_usb_config *config, + int speed) +{ + void **midi1_in_eps, **midi1_out_eps; + int i, jack, total; + int err; + + switch (speed) { + default: + case USB_SPEED_HIGH: + midi2_midi1_ep_out_desc.wMaxPacketSize = cpu_to_le16(512); + midi2_midi1_ep_in_desc.wMaxPacketSize = cpu_to_le16(512); + for (i = 0; i < midi2->num_eps; i++) + midi2_midi2_ep_out_desc[i].wMaxPacketSize = + cpu_to_le16(512); + fallthrough; + case USB_SPEED_FULL: + midi1_in_eps = midi2_midi1_ep_in_descs; + midi1_out_eps = midi2_midi1_ep_out_descs; + break; + case USB_SPEED_SUPER: + case USB_SPEED_SUPER_PLUS: + midi2_midi1_ep_out_desc.wMaxPacketSize = cpu_to_le16(1024); + midi2_midi1_ep_in_desc.wMaxPacketSize = cpu_to_le16(1024); + for (i = 0; i < midi2->num_eps; i++) + midi2_midi2_ep_out_desc[i].wMaxPacketSize = + cpu_to_le16(1024); + midi1_in_eps = midi2_midi1_ep_in_ss_descs; + midi1_out_eps = midi2_midi1_ep_out_ss_descs; + break; + } + + err = append_configs(config, midi2_audio_descs); + if (err < 0) + return err; + + if (midi2->num_midi1_in && midi2->num_midi1_out) + midi2_midi1_if_desc.bNumEndpoints = 2; + else + midi2_midi1_if_desc.bNumEndpoints = 1; + + err = append_configs(config, midi2_midi1_descs); + if (err < 0) + return err; + + total = USB_DT_MS_HEADER_SIZE; + if (midi2->num_midi1_out) { + midi2_midi1_ep_out_class_desc.bLength = + USB_DT_MS_ENDPOINT_SIZE(midi2->num_midi1_out); + total += midi2_midi1_ep_out_class_desc.bLength; + midi2_midi1_ep_out_class_desc.bNumEmbMIDIJack = + midi2->num_midi1_out; + total += midi2->num_midi1_out * + (USB_DT_MIDI_IN_SIZE + USB_DT_MIDI_OUT_SIZE(1)); + for (i = 0; i < midi2->num_midi1_out; i++) { + jack = append_midi1_in_jack(midi2, config, + &midi2->in_cable_mapping[i], + USB_MS_EMBEDDED); + if (jack < 0) + return jack; + midi2_midi1_ep_out_class_desc.baAssocJackID[i] = jack; + jack = append_midi1_out_jack(midi2, config, + &midi2->in_cable_mapping[i], + USB_MS_EXTERNAL, jack); + if (jack < 0) + return jack; + } + } + + if (midi2->num_midi1_in) { + midi2_midi1_ep_in_class_desc.bLength = + USB_DT_MS_ENDPOINT_SIZE(midi2->num_midi1_in); + total += midi2_midi1_ep_in_class_desc.bLength; + 
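+ /* one embedded MIDI jack is announced per MIDI 1.0 input cable */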
midi2_midi1_ep_in_class_desc.bNumEmbMIDIJack = + midi2->num_midi1_in; + total += midi2->num_midi1_in * + (USB_DT_MIDI_IN_SIZE + USB_DT_MIDI_OUT_SIZE(1)); + for (i = 0; i < midi2->num_midi1_in; i++) { + jack = append_midi1_in_jack(midi2, config, + &midi2->out_cable_mapping[i], + USB_MS_EXTERNAL); + if (jack < 0) + return jack; + jack = append_midi1_out_jack(midi2, config, + &midi2->out_cable_mapping[i], + USB_MS_EMBEDDED, jack); + if (jack < 0) + return jack; + midi2_midi1_ep_in_class_desc.baAssocJackID[i] = jack; + } + } + + midi2_midi1_class_desc.wTotalLength = cpu_to_le16(total); + + if (midi2->num_midi1_out) { + err = append_configs(config, midi1_out_eps); + if (err < 0) + return err; + } + if (midi2->num_midi1_in) { + err = append_configs(config, midi1_in_eps); + if (err < 0) + return err; + } + + err = append_configs(config, midi2_midi2_descs); + if (err < 0) + return err; + + for (i = 0; i < midi2->num_eps; i++) { + err = append_config(config, &midi2_midi2_ep_out_desc[i]); + if (err < 0) + return err; + if (speed == USB_SPEED_SUPER || speed == USB_SPEED_SUPER_PLUS) { + err = append_config(config, &midi2_midi2_ep_out_ss_comp_desc); + if (err < 0) + return err; + } + err = append_config(config, &midi2_midi2_ep_out_class_desc[i]); + if (err < 0) + return err; + err = append_config(config, &midi2_midi2_ep_in_desc[i]); + if (err < 0) + return err; + if (speed == USB_SPEED_SUPER || speed == USB_SPEED_SUPER_PLUS) { + err = append_config(config, &midi2_midi2_ep_in_ss_comp_desc); + if (err < 0) + return err; + } + err = append_config(config, &midi2_midi2_ep_in_class_desc[i]); + if (err < 0) + return err; + } + + return 0; +} + +static void f_midi2_free_usb_configs(struct f_midi2_usb_config *config) +{ + kfree(config->list); + memset(config, 0, sizeof(*config)); +} + +/* as we use the static descriptors for simplicity, serialize bind call */ +static DEFINE_MUTEX(f_midi2_desc_mutex); + +/* fill MIDI2 EP class-specific descriptor */ +static void fill_midi2_class_desc(struct f_midi2_ep *ep, + struct usb_ms20_endpoint_descriptor_32 *cdesc) +{ + int blk; + + cdesc->bLength = USB_DT_MS20_ENDPOINT_SIZE(ep->num_blks); + cdesc->bDescriptorType = USB_DT_CS_ENDPOINT; + cdesc->bDescriptorSubtype = USB_MS_GENERAL_2_0; + cdesc->bNumGrpTrmBlock = ep->num_blks; + for (blk = 0; blk < ep->num_blks; blk++) + cdesc->baAssoGrpTrmBlkID[blk] = ep->blks[blk].gtb_id; +} + +/* initialize MIDI2 EP-in */ +static int f_midi2_init_midi2_ep_in(struct f_midi2 *midi2, int index) +{ + struct f_midi2_ep *ep = &midi2->midi2_eps[index]; + struct usb_endpoint_descriptor *desc = &midi2_midi2_ep_in_desc[index]; + + desc->bLength = USB_DT_ENDPOINT_SIZE; + desc->bDescriptorType = USB_DT_ENDPOINT; + desc->bEndpointAddress = USB_DIR_IN; + desc->bmAttributes = USB_ENDPOINT_XFER_INT; + desc->wMaxPacketSize = cpu_to_le16(EP_MAX_PACKET_INT); + desc->bInterval = 1; + + fill_midi2_class_desc(ep, &midi2_midi2_ep_in_class_desc[index]); + + return f_midi2_init_ep(midi2, ep, &ep->ep_in, desc, + f_midi2_ep_in_complete); +} + +/* initialize MIDI2 EP-out */ +static int f_midi2_init_midi2_ep_out(struct f_midi2 *midi2, int index) +{ + struct f_midi2_ep *ep = &midi2->midi2_eps[index]; + struct usb_endpoint_descriptor *desc = &midi2_midi2_ep_out_desc[index]; + + desc->bLength = USB_DT_ENDPOINT_SIZE; + desc->bDescriptorType = USB_DT_ENDPOINT; + desc->bEndpointAddress = USB_DIR_OUT; + desc->bmAttributes = USB_ENDPOINT_XFER_BULK; + + fill_midi2_class_desc(ep, &midi2_midi2_ep_out_class_desc[index]); + + return f_midi2_init_ep(midi2, ep, &ep->ep_out, desc, + 
f_midi2_ep_out_complete); +} + +/* gadget function bind callback */ +static int f_midi2_bind(struct usb_configuration *c, struct usb_function *f) +{ + struct usb_composite_dev *cdev = c->cdev; + struct f_midi2 *midi2 = func_to_midi2(f); + struct f_midi2_ep *ep; + struct f_midi2_usb_config config = {}; + struct usb_gadget_strings string_fn = { + .language = 0x0409, /* en-us */ + .strings = midi2->string_defs, + }; + struct usb_gadget_strings *strings[] = { + &string_fn, + NULL, + }; + int i, blk, status; + + midi2->gadget = cdev->gadget; + midi2->operation_mode = MIDI_OP_MODE_UNSET; + + status = f_midi2_create_card(midi2); + if (status < 0) + goto fail_register; + + /* maybe allocate device-global string ID */ + midi2->strings = usb_gstrings_attach(c->cdev, strings, + midi2->total_blocks + 1); + if (IS_ERR(midi2->strings)) { + status = PTR_ERR(midi2->strings); + goto fail_string; + } + + mutex_lock(&f_midi2_desc_mutex); + midi2_midi1_if_desc.iInterface = midi2->strings[STR_IFACE].id; + midi2_midi2_if_desc.iInterface = midi2->strings[STR_IFACE].id; + for (i = 0; i < midi2->num_eps; i++) { + ep = &midi2->midi2_eps[i]; + for (blk = 0; blk < ep->num_blks; blk++) + ep->blks[blk].string_id = + midi2->strings[gtb_to_str_id(ep->blks[blk].gtb_id)].id; + } + + midi2_midi2_if_desc.bNumEndpoints = midi2->num_eps * 2; + + /* audio interface */ + status = usb_interface_id(c, f); + if (status < 0) + goto fail; + midi2_audio_if_desc.bInterfaceNumber = status; + + /* MIDI streaming */ + status = usb_interface_id(c, f); + if (status < 0) + goto fail; + midi2->midi_if = status; + midi2_midi1_if_desc.bInterfaceNumber = status; + midi2_midi2_if_desc.bInterfaceNumber = status; + midi2_audio_class_desc.baInterfaceNr[0] = status; + + /* allocate instance-specific endpoints */ + if (midi2->midi2_eps[0].blks[0].info.direction != SNDRV_UMP_DIR_OUTPUT) { + status = f_midi2_init_ep(midi2, NULL, &midi2->midi1_ep_in, + &midi2_midi1_ep_in_desc, + f_midi2_midi1_ep_in_complete); + if (status) + goto fail; + } + + if (midi2->midi2_eps[0].blks[0].info.direction != SNDRV_UMP_DIR_INPUT) { + status = f_midi2_init_ep(midi2, NULL, &midi2->midi1_ep_out, + &midi2_midi1_ep_out_desc, + f_midi2_midi1_ep_out_complete); + if (status) + goto fail; + } + + for (i = 0; i < midi2->num_eps; i++) { + status = f_midi2_init_midi2_ep_in(midi2, i); + if (status) + goto fail; + status = f_midi2_init_midi2_ep_out(midi2, i); + if (status) + goto fail; + } + + status = f_midi2_create_usb_configs(midi2, &config, USB_SPEED_FULL); + if (status < 0) + goto fail; + f->fs_descriptors = usb_copy_descriptors(config.list); + if (!f->fs_descriptors) { + status = -ENOMEM; + goto fail; + } + f_midi2_free_usb_configs(&config); + + if (gadget_is_dualspeed(midi2->gadget)) { + status = f_midi2_create_usb_configs(midi2, &config, USB_SPEED_HIGH); + if (status < 0) + goto fail; + f->hs_descriptors = usb_copy_descriptors(config.list); + if (!f->hs_descriptors) { + status = -ENOMEM; + goto fail; + } + f_midi2_free_usb_configs(&config); + } + + if (gadget_is_superspeed(midi2->gadget)) { + status = f_midi2_create_usb_configs(midi2, &config, USB_SPEED_SUPER); + if (status < 0) + goto fail; + f->ss_descriptors = usb_copy_descriptors(config.list); + if (!f->ss_descriptors) { + status = -ENOMEM; + goto fail; + } + if (gadget_is_superspeed_plus(midi2->gadget)) { + f->ssp_descriptors = usb_copy_descriptors(config.list); + if (!f->ssp_descriptors) { + status = -ENOMEM; + goto fail; + } + } + f_midi2_free_usb_configs(&config); + } + + mutex_unlock(&f_midi2_desc_mutex); + return 0; 
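+ /* error handling: unwind the partial setup in reverse order */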
+ +fail: + f_midi2_free_usb_configs(&config); + mutex_unlock(&f_midi2_desc_mutex); + usb_free_all_descriptors(f); +fail_string: + f_midi2_free_card(midi2); +fail_register: + ERROR(midi2, "%s: can't bind, err %d\n", f->name, status); + return status; +} + +/* gadget function unbind callback */ +static void f_midi2_unbind(struct usb_configuration *c, struct usb_function *f) +{ + struct f_midi2 *midi2 = func_to_midi2(f); + int i; + + f_midi2_free_card(midi2); + + f_midi2_free_ep(&midi2->midi1_ep_in); + f_midi2_free_ep(&midi2->midi1_ep_out); + for (i = 0; i < midi2->num_eps; i++) { + f_midi2_free_ep(&midi2->midi2_eps[i].ep_in); + f_midi2_free_ep(&midi2->midi2_eps[i].ep_out); + } + + usb_free_all_descriptors(f); +} + +/* + * ConfigFS interface + */ + +/* type conversion helpers */ +static inline struct f_midi2_opts *to_f_midi2_opts(struct config_item *item) +{ + return container_of(to_config_group(item), struct f_midi2_opts, + func_inst.group); +} + +static inline struct f_midi2_ep_opts * +to_f_midi2_ep_opts(struct config_item *item) +{ + return container_of(to_config_group(item), struct f_midi2_ep_opts, + group); +} + +static inline struct f_midi2_block_opts * +to_f_midi2_block_opts(struct config_item *item) +{ + return container_of(to_config_group(item), struct f_midi2_block_opts, + group); +} + +/* trim the string to be usable for EP and FB name strings */ +static void make_name_string(char *s) +{ + char *p; + + p = strchr(s, '\n'); + if (p) + *p = 0; + + p = s + strlen(s); + for (; p > s && isspace(*p); p--) + *p = 0; +} + +/* configfs helpers: generic show/store for unsigned int */ +static ssize_t f_midi2_opts_uint_show(struct f_midi2_opts *opts, + u32 val, const char *format, char *page) +{ + int result; + + mutex_lock(&opts->lock); + result = sprintf(page, format, val); + mutex_unlock(&opts->lock); + return result; +} + +static ssize_t f_midi2_opts_uint_store(struct f_midi2_opts *opts, + u32 *valp, u32 minval, u32 maxval, + const char *page, size_t len) +{ + int ret; + u32 val; + + mutex_lock(&opts->lock); + if (opts->refcnt) { + ret = -EBUSY; + goto end; + } + + ret = kstrtou32(page, 0, &val); + if (ret) + goto end; + if (val < minval || val > maxval) { + ret = -EINVAL; + goto end; + } + + *valp = val; + ret = len; + +end: + mutex_unlock(&opts->lock); + return ret; +} + +/* generic store for bool */ +static ssize_t f_midi2_opts_bool_store(struct f_midi2_opts *opts, + bool *valp, const char *page, size_t len) +{ + int ret; + bool val; + + mutex_lock(&opts->lock); + if (opts->refcnt) { + ret = -EBUSY; + goto end; + } + + ret = kstrtobool(page, &val); + if (ret) + goto end; + *valp = val; + ret = len; + +end: + mutex_unlock(&opts->lock); + return ret; +} + +/* generic show/store for string */ +static ssize_t f_midi2_opts_str_show(struct f_midi2_opts *opts, + const char *str, char *page) +{ + int result = 0; + + mutex_lock(&opts->lock); + if (str) + result = scnprintf(page, PAGE_SIZE, "%s\n", str); + mutex_unlock(&opts->lock); + return result; +} + +static ssize_t f_midi2_opts_str_store(struct f_midi2_opts *opts, + const char **strp, size_t maxlen, + const char *page, size_t len) +{ + char *c; + int ret; + + mutex_lock(&opts->lock); + if (opts->refcnt) { + ret = -EBUSY; + goto end; + } + + c = kstrndup(page, min(len, maxlen), GFP_KERNEL); + if (!c) { + ret = -ENOMEM; + goto end; + } + + kfree(*strp); + make_name_string(c); + *strp = c; + ret = len; + +end: + mutex_unlock(&opts->lock); + return ret; +} + +/* + * Definitions for UMP Block config + */ + +/* define an uint option for block */
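+/* Each use of the macro below expands to a configfs show/store handler pair plus the CONFIGFS_ATTR() declaration for one numeric block attribute */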
+#define F_MIDI2_BLOCK_OPT(name, format, minval, maxval) \ +static ssize_t f_midi2_block_opts_##name##_show(struct config_item *item,\ + char *page) \ +{ \ + struct f_midi2_block_opts *opts = to_f_midi2_block_opts(item); \ + return f_midi2_opts_uint_show(opts->ep->opts, opts->info.name, \ + format "\n", page); \ +} \ + \ +static ssize_t f_midi2_block_opts_##name##_store(struct config_item *item,\ + const char *page, size_t len) \ +{ \ + struct f_midi2_block_opts *opts = to_f_midi2_block_opts(item); \ + return f_midi2_opts_uint_store(opts->ep->opts, &opts->info.name,\ + minval, maxval, page, len); \ +} \ + \ +CONFIGFS_ATTR(f_midi2_block_opts_, name) + +/* define a boolean option for block */ +#define F_MIDI2_BLOCK_BOOL_OPT(name) \ +static ssize_t f_midi2_block_opts_##name##_show(struct config_item *item,\ + char *page) \ +{ \ + struct f_midi2_block_opts *opts = to_f_midi2_block_opts(item); \ + return f_midi2_opts_uint_show(opts->ep->opts, opts->info.name, \ + "%u\n", page); \ +} \ + \ +static ssize_t f_midi2_block_opts_##name##_store(struct config_item *item,\ + const char *page, size_t len) \ +{ \ + struct f_midi2_block_opts *opts = to_f_midi2_block_opts(item); \ + return f_midi2_opts_bool_store(opts->ep->opts, &opts->info.name,\ + page, len); \ +} \ + \ +CONFIGFS_ATTR(f_midi2_block_opts_, name) + +F_MIDI2_BLOCK_OPT(direction, "0x%x", 1, 3); +F_MIDI2_BLOCK_OPT(first_group, "0x%x", 0, 15); +F_MIDI2_BLOCK_OPT(num_groups, "0x%x", 1, 16); +F_MIDI2_BLOCK_OPT(midi1_first_group, "0x%x", 0, 15); +F_MIDI2_BLOCK_OPT(midi1_num_groups, "0x%x", 0, 16); +F_MIDI2_BLOCK_OPT(ui_hint, "0x%x", 0, 3); +F_MIDI2_BLOCK_OPT(midi_ci_version, "%u", 0, 1); +F_MIDI2_BLOCK_OPT(sysex8_streams, "%u", 0, 255); +F_MIDI2_BLOCK_OPT(is_midi1, "%u", 0, 2); +F_MIDI2_BLOCK_BOOL_OPT(active); + +static ssize_t f_midi2_block_opts_name_show(struct config_item *item, + char *page) +{ + struct f_midi2_block_opts *opts = to_f_midi2_block_opts(item); + + return f_midi2_opts_str_show(opts->ep->opts, opts->info.name, page); +} + +static ssize_t f_midi2_block_opts_name_store(struct config_item *item, + const char *page, size_t len) +{ + struct f_midi2_block_opts *opts = to_f_midi2_block_opts(item); + + return f_midi2_opts_str_store(opts->ep->opts, &opts->info.name, 128, + page, len); +} + +CONFIGFS_ATTR(f_midi2_block_opts_, name); + +static struct configfs_attribute *f_midi2_block_attrs[] = { + &f_midi2_block_opts_attr_direction, + &f_midi2_block_opts_attr_first_group, + &f_midi2_block_opts_attr_num_groups, + &f_midi2_block_opts_attr_midi1_first_group, + &f_midi2_block_opts_attr_midi1_num_groups, + &f_midi2_block_opts_attr_ui_hint, + &f_midi2_block_opts_attr_midi_ci_version, + &f_midi2_block_opts_attr_sysex8_streams, + &f_midi2_block_opts_attr_is_midi1, + &f_midi2_block_opts_attr_active, + &f_midi2_block_opts_attr_name, + NULL, +}; + +static void f_midi2_block_opts_release(struct config_item *item) +{ + struct f_midi2_block_opts *opts = to_f_midi2_block_opts(item); + + kfree(opts->info.name); + kfree(opts); +} + +static struct configfs_item_operations f_midi2_block_item_ops = { + .release = f_midi2_block_opts_release, +}; + +static const struct config_item_type f_midi2_block_type = { + .ct_item_ops = &f_midi2_block_item_ops, + .ct_attrs = f_midi2_block_attrs, + .ct_owner = THIS_MODULE, +}; + +/* create a f_midi2_block_opts instance for the given block number */ +static int f_midi2_block_opts_create(struct f_midi2_ep_opts *ep_opts, + unsigned int blk, + struct f_midi2_block_opts **block_p) +{ + struct f_midi2_block_opts *block_opts; + int 
ret = 0; + + mutex_lock(&ep_opts->opts->lock); + if (ep_opts->opts->refcnt || ep_opts->blks[blk]) { + ret = -EBUSY; + goto out; + } + + block_opts = kzalloc(sizeof(*block_opts), GFP_KERNEL); + if (!block_opts) { + ret = -ENOMEM; + goto out; + } + + block_opts->ep = ep_opts; + block_opts->id = blk; + + /* set up the default values */ + block_opts->info.direction = SNDRV_UMP_DIR_BIDIRECTION; + block_opts->info.first_group = 0; + block_opts->info.num_groups = 1; + block_opts->info.ui_hint = SNDRV_UMP_BLOCK_UI_HINT_BOTH; + block_opts->info.active = 1; + + ep_opts->blks[blk] = block_opts; + *block_p = block_opts; + + out: + mutex_unlock(&ep_opts->opts->lock); + return ret; +} + +/* make_group callback for a block */ +static struct config_group * +f_midi2_opts_block_make(struct config_group *group, const char *name) +{ + struct f_midi2_ep_opts *ep_opts; + struct f_midi2_block_opts *block_opts; + unsigned int blk; + int ret; + + if (strncmp(name, "block.", 6)) + return ERR_PTR(-EINVAL); + ret = kstrtouint(name + 6, 10, &blk); + if (ret) + return ERR_PTR(ret); + + ep_opts = to_f_midi2_ep_opts(&group->cg_item); + + if (blk >= SNDRV_UMP_MAX_BLOCKS) + return ERR_PTR(-EINVAL); + if (ep_opts->blks[blk]) + return ERR_PTR(-EBUSY); + ret = f_midi2_block_opts_create(ep_opts, blk, &block_opts); + if (ret) + return ERR_PTR(ret); + + config_group_init_type_name(&block_opts->group, name, + &f_midi2_block_type); + return &block_opts->group; +} + +/* drop_item callback for a block */ +static void +f_midi2_opts_block_drop(struct config_group *group, struct config_item *item) +{ + struct f_midi2_block_opts *block_opts = to_f_midi2_block_opts(item); + + mutex_lock(&block_opts->ep->opts->lock); + block_opts->ep->blks[block_opts->id] = NULL; + mutex_unlock(&block_opts->ep->opts->lock); + config_item_put(item); +} + +/* + * Definitions for UMP Endpoint config + */ + +/* define an uint option for EP */ +#define F_MIDI2_EP_OPT(name, format, minval, maxval) \ +static ssize_t f_midi2_ep_opts_##name##_show(struct config_item *item, \ + char *page) \ +{ \ + struct f_midi2_ep_opts *opts = to_f_midi2_ep_opts(item); \ + return f_midi2_opts_uint_show(opts->opts, opts->info.name, \ + format "\n", page); \ +} \ + \ +static ssize_t f_midi2_ep_opts_##name##_store(struct config_item *item, \ + const char *page, size_t len)\ +{ \ + struct f_midi2_ep_opts *opts = to_f_midi2_ep_opts(item); \ + return f_midi2_opts_uint_store(opts->opts, &opts->info.name, \ + minval, maxval, page, len); \ +} \ + \ +CONFIGFS_ATTR(f_midi2_ep_opts_, name) + +/* define a string option for EP */ +#define F_MIDI2_EP_STR_OPT(name, maxlen) \ +static ssize_t f_midi2_ep_opts_##name##_show(struct config_item *item, \ + char *page) \ +{ \ + struct f_midi2_ep_opts *opts = to_f_midi2_ep_opts(item); \ + return f_midi2_opts_str_show(opts->opts, opts->info.name, page);\ +} \ + \ +static ssize_t f_midi2_ep_opts_##name##_store(struct config_item *item, \ + const char *page, size_t len) \ +{ \ + struct f_midi2_ep_opts *opts = to_f_midi2_ep_opts(item); \ + return f_midi2_opts_str_store(opts->opts, &opts->info.name, maxlen,\ + page, len); \ +} \ + \ +CONFIGFS_ATTR(f_midi2_ep_opts_, name) + +F_MIDI2_EP_OPT(protocol, "0x%x", 1, 2); +F_MIDI2_EP_OPT(protocol_caps, "0x%x", 1, 3); +F_MIDI2_EP_OPT(manufacturer, "0x%x", 0, 0xffffff); +F_MIDI2_EP_OPT(family, "0x%x", 0, 0xffff); +F_MIDI2_EP_OPT(model, "0x%x", 0, 0xffff); +F_MIDI2_EP_OPT(sw_revision, "0x%x", 0, 0xffffffff); +F_MIDI2_EP_STR_OPT(ep_name, 128); +F_MIDI2_EP_STR_OPT(product_id, 128); + +static struct configfs_attribute 
*f_midi2_ep_attrs[] = { + &f_midi2_ep_opts_attr_protocol, + &f_midi2_ep_opts_attr_protocol_caps, + &f_midi2_ep_opts_attr_ep_name, + &f_midi2_ep_opts_attr_product_id, + &f_midi2_ep_opts_attr_manufacturer, + &f_midi2_ep_opts_attr_family, + &f_midi2_ep_opts_attr_model, + &f_midi2_ep_opts_attr_sw_revision, + NULL, +}; + +static void f_midi2_ep_opts_release(struct config_item *item) +{ + struct f_midi2_ep_opts *opts = to_f_midi2_ep_opts(item); + + kfree(opts->info.ep_name); + kfree(opts->info.product_id); + kfree(opts); +} + +static struct configfs_item_operations f_midi2_ep_item_ops = { + .release = f_midi2_ep_opts_release, +}; + +static struct configfs_group_operations f_midi2_ep_group_ops = { + .make_group = f_midi2_opts_block_make, + .drop_item = f_midi2_opts_block_drop, +}; + +static const struct config_item_type f_midi2_ep_type = { + .ct_item_ops = &f_midi2_ep_item_ops, + .ct_group_ops = &f_midi2_ep_group_ops, + .ct_attrs = f_midi2_ep_attrs, + .ct_owner = THIS_MODULE, +}; + +/* create a f_midi2_ep_opts instance */ +static int f_midi2_ep_opts_create(struct f_midi2_opts *opts, + unsigned int index, + struct f_midi2_ep_opts **ep_p) +{ + struct f_midi2_ep_opts *ep_opts; + + ep_opts = kzalloc(sizeof(*ep_opts), GFP_KERNEL); + if (!ep_opts) + return -ENOMEM; + + ep_opts->opts = opts; + ep_opts->index = index; + + /* set up the default values */ + ep_opts->info.protocol = 2; + ep_opts->info.protocol_caps = 3; + + opts->eps[index] = ep_opts; + *ep_p = ep_opts; + return 0; +} + +/* make_group callback for an EP */ +static struct config_group * +f_midi2_opts_ep_make(struct config_group *group, const char *name) +{ + struct f_midi2_opts *opts; + struct f_midi2_ep_opts *ep_opts; + unsigned int index; + int ret; + + if (strncmp(name, "ep.", 3)) + return ERR_PTR(-EINVAL); + ret = kstrtouint(name + 3, 10, &index); + if (ret) + return ERR_PTR(ret); + + opts = to_f_midi2_opts(&group->cg_item); + if (index >= MAX_UMP_EPS) + return ERR_PTR(-EINVAL); + if (opts->eps[index]) + return ERR_PTR(-EBUSY); + ret = f_midi2_ep_opts_create(opts, index, &ep_opts); + if (ret) + return ERR_PTR(ret); + + config_group_init_type_name(&ep_opts->group, name, &f_midi2_ep_type); + return &ep_opts->group; +} + +/* drop_item callback for an EP */ +static void +f_midi2_opts_ep_drop(struct config_group *group, struct config_item *item) +{ + struct f_midi2_ep_opts *ep_opts = to_f_midi2_ep_opts(item); + + mutex_lock(&ep_opts->opts->lock); + ep_opts->opts->eps[ep_opts->index] = NULL; + mutex_unlock(&ep_opts->opts->lock); + config_item_put(item); +} + +/* + * Definitions for card config + */ + +/* define a bool option for card */ +#define F_MIDI2_BOOL_OPT(name) \ +static ssize_t f_midi2_opts_##name##_show(struct config_item *item, \ + char *page) \ +{ \ + struct f_midi2_opts *opts = to_f_midi2_opts(item); \ + return f_midi2_opts_uint_show(opts, opts->info.name, \ + "%u\n", page); \ +} \ + \ +static ssize_t f_midi2_opts_##name##_store(struct config_item *item, \ + const char *page, size_t len) \ +{ \ + struct f_midi2_opts *opts = to_f_midi2_opts(item); \ + return f_midi2_opts_bool_store(opts, &opts->info.name, \ + page, len); \ +} \ + \ +CONFIGFS_ATTR(f_midi2_opts_, name) + +F_MIDI2_BOOL_OPT(process_ump); +F_MIDI2_BOOL_OPT(static_block); + +static ssize_t f_midi2_opts_iface_name_show(struct config_item *item, + char *page) +{ + struct f_midi2_opts *opts = to_f_midi2_opts(item); + + return f_midi2_opts_str_show(opts, opts->info.iface_name, page); +} + +static ssize_t f_midi2_opts_iface_name_store(struct config_item *item, + const char 
*page, size_t len) +{ + struct f_midi2_opts *opts = to_f_midi2_opts(item); + + return f_midi2_opts_str_store(opts, &opts->info.iface_name, 128, + page, len); +} + +CONFIGFS_ATTR(f_midi2_opts_, iface_name); + +static struct configfs_attribute *f_midi2_attrs[] = { + &f_midi2_opts_attr_process_ump, + &f_midi2_opts_attr_static_block, + &f_midi2_opts_attr_iface_name, + NULL +}; + +static void f_midi2_opts_release(struct config_item *item) +{ + struct f_midi2_opts *opts = to_f_midi2_opts(item); + + usb_put_function_instance(&opts->func_inst); +} + +static struct configfs_item_operations f_midi2_item_ops = { + .release = f_midi2_opts_release, +}; + +static struct configfs_group_operations f_midi2_group_ops = { + .make_group = f_midi2_opts_ep_make, + .drop_item = f_midi2_opts_ep_drop, +}; + +static const struct config_item_type f_midi2_func_type = { + .ct_item_ops = &f_midi2_item_ops, + .ct_group_ops = &f_midi2_group_ops, + .ct_attrs = f_midi2_attrs, + .ct_owner = THIS_MODULE, +}; + +static void f_midi2_free_inst(struct usb_function_instance *f) +{ + struct f_midi2_opts *opts; + + opts = container_of(f, struct f_midi2_opts, func_inst); + + kfree(opts->info.iface_name); + kfree(opts); +} + +/* gadget alloc_inst */ +static struct usb_function_instance *f_midi2_alloc_inst(void) +{ + struct f_midi2_opts *opts; + struct f_midi2_ep_opts *ep_opts; + struct f_midi2_block_opts *block_opts; + int ret; + + opts = kzalloc(sizeof(*opts), GFP_KERNEL); + if (!opts) + return ERR_PTR(-ENOMEM); + + mutex_init(&opts->lock); + opts->func_inst.free_func_inst = f_midi2_free_inst; + opts->info.process_ump = true; + opts->info.static_block = true; + opts->info.num_reqs = 32; + opts->info.req_buf_size = 512; + + /* create the default ep */ + ret = f_midi2_ep_opts_create(opts, 0, &ep_opts); + if (ret) { + kfree(opts); + return ERR_PTR(ret); + } + + /* create the default block */ + ret = f_midi2_block_opts_create(ep_opts, 0, &block_opts); + if (ret) { + kfree(ep_opts); + kfree(opts); + return ERR_PTR(ret); + } + + /* set up the default MIDI1 (that is mandatory) */ + block_opts->info.midi1_num_groups = 1; + + config_group_init_type_name(&opts->func_inst.group, "", + &f_midi2_func_type); + + config_group_init_type_name(&ep_opts->group, "ep.0", + &f_midi2_ep_type); + configfs_add_default_group(&ep_opts->group, &opts->func_inst.group); + + config_group_init_type_name(&block_opts->group, "block.0", + &f_midi2_block_type); + configfs_add_default_group(&block_opts->group, &ep_opts->group); + + return &opts->func_inst; +} + +static void do_f_midi2_free(struct f_midi2 *midi2, struct f_midi2_opts *opts) +{ + mutex_lock(&opts->lock); + --opts->refcnt; + mutex_unlock(&opts->lock); + kfree(midi2->string_defs); + kfree(midi2); +} + +static void f_midi2_free(struct usb_function *f) +{ + do_f_midi2_free(func_to_midi2(f), + container_of(f->fi, struct f_midi2_opts, func_inst)); +} + +/* verify the parameters set up via configfs; + * return the number of EPs or a negative error + */ +static int verify_parameters(struct f_midi2_opts *opts) +{ + int i, j, num_eps, num_blks; + struct f_midi2_ep_info *ep; + struct f_midi2_block_info *bp; + + for (num_eps = 0; num_eps < MAX_UMP_EPS && opts->eps[num_eps]; + num_eps++) + ; + if (!num_eps) { + pr_err("f_midi2: No EP is defined\n"); + return -EINVAL; + } + + num_blks = 0; + for (i = 0; i < num_eps; i++) { + ep = &opts->eps[i]->info; + if (!(ep->protocol_caps & ep->protocol)) { + pr_err("f_midi2: Invalid protocol 0x%x (caps 0x%x) for EP %d\n", + ep->protocol, ep->protocol_caps, i); + return -EINVAL; + 
} + + for (j = 0; j < SNDRV_UMP_MAX_BLOCKS && opts->eps[i]->blks[j]; + j++, num_blks++) { + bp = &opts->eps[i]->blks[j]->info; + if (bp->first_group + bp->num_groups > SNDRV_UMP_MAX_GROUPS) { + pr_err("f_midi2: Invalid group definitions for block %d:%d\n", + i, j); + return -EINVAL; + } + + if (bp->midi1_num_groups) { + if (bp->midi1_first_group < bp->first_group || + bp->midi1_first_group + bp->midi1_num_groups > + bp->first_group + bp->num_groups) { + pr_err("f_midi2: Invalid MIDI1 group definitions for block %d:%d\n", + i, j); + return -EINVAL; + } + } + } + } + if (!num_blks) { + pr_err("f_midi2: No block is defined\n"); + return -EINVAL; + } + + return num_eps; +} + +/* fill mapping between MIDI 1.0 cable and UMP EP/group */ +static void fill_midi1_cable_mapping(struct f_midi2 *midi2, + struct f_midi2_ep *ep, + int blk) +{ + const struct f_midi2_block_info *binfo = &ep->blks[blk].info; + struct midi1_cable_mapping *map; + int i, group; + + if (!binfo->midi1_num_groups) + return; + if (binfo->direction != SNDRV_UMP_DIR_OUTPUT) { + group = binfo->midi1_first_group; + map = midi2->in_cable_mapping + midi2->num_midi1_in; + for (i = 0; i < binfo->midi1_num_groups; i++, group++, map++) { + if (midi2->num_midi1_in >= MAX_CABLES) + break; + map->ep = ep; + map->block = blk; + map->group = group; + midi2->num_midi1_in++; + /* store 1-based cable number */ + ep->in_group_to_cable[group] = midi2->num_midi1_in; + } + } + + if (binfo->direction != SNDRV_UMP_DIR_INPUT) { + group = binfo->midi1_first_group; + map = midi2->out_cable_mapping + midi2->num_midi1_out; + for (i = 0; i < binfo->midi1_num_groups; i++, group++, map++) { + if (midi2->num_midi1_out >= MAX_CABLES) + break; + map->ep = ep; + map->block = blk; + map->group = group; + midi2->num_midi1_out++; + } + } +} + +/* gadget alloc callback */ +static struct usb_function *f_midi2_alloc(struct usb_function_instance *fi) +{ + struct f_midi2 *midi2; + struct f_midi2_opts *opts; + struct f_midi2_ep *ep; + struct f_midi2_block *bp; + int i, num_eps, blk; + + midi2 = kzalloc(sizeof(*midi2), GFP_KERNEL); + if (!midi2) + return ERR_PTR(-ENOMEM); + + opts = container_of(fi, struct f_midi2_opts, func_inst); + mutex_lock(&opts->lock); + num_eps = verify_parameters(opts); + if (num_eps < 0) { + mutex_unlock(&opts->lock); + kfree(midi2); + return ERR_PTR(num_eps); + } + ++opts->refcnt; + mutex_unlock(&opts->lock); + + spin_lock_init(&midi2->queue_lock); + + midi2->func.name = "midi2_func"; + midi2->func.bind = f_midi2_bind; + midi2->func.unbind = f_midi2_unbind; + midi2->func.get_alt = f_midi2_get_alt; + midi2->func.set_alt = f_midi2_set_alt; + midi2->func.setup = f_midi2_setup; + midi2->func.disable = f_midi2_disable; + midi2->func.free_func = f_midi2_free; + + midi2->info = opts->info; + midi2->num_eps = num_eps; + + for (i = 0; i < num_eps; i++) { + ep = &midi2->midi2_eps[i]; + ep->info = opts->eps[i]->info; + ep->card = midi2; + for (blk = 0; blk < SNDRV_UMP_MAX_BLOCKS && + opts->eps[i]->blks[blk]; blk++) { + bp = &ep->blks[blk]; + ep->num_blks++; + bp->info = opts->eps[i]->blks[blk]->info; + bp->gtb_id = ++midi2->total_blocks; + } + } + + midi2->string_defs = kcalloc(midi2->total_blocks + 1, + sizeof(*midi2->string_defs), GFP_KERNEL); + if (!midi2->string_defs) { + do_f_midi2_free(midi2, opts); + return ERR_PTR(-ENOMEM); + } + + if (opts->info.iface_name && *opts->info.iface_name) + midi2->string_defs[STR_IFACE].s = opts->info.iface_name; + else + midi2->string_defs[STR_IFACE].s = ump_ep_name(&midi2->midi2_eps[0]); + + for (i = 0; i < 
midi2->num_eps; i++) { + ep = &midi2->midi2_eps[i]; + for (blk = 0; blk < ep->num_blks; blk++) { + bp = &ep->blks[blk]; + midi2->string_defs[gtb_to_str_id(bp->gtb_id)].s = + ump_fb_name(&bp->info); + + fill_midi1_cable_mapping(midi2, ep, blk); + } + } + + if (!midi2->num_midi1_in && !midi2->num_midi1_out) { + pr_err("f_midi2: MIDI1 definition is missing\n"); + do_f_midi2_free(midi2, opts); + return ERR_PTR(-EINVAL); + } + + return &midi2->func; +} + +DECLARE_USB_FUNCTION_INIT(midi2, f_midi2_alloc_inst, f_midi2_alloc); + +MODULE_LICENSE("GPL"); diff --git a/drivers/usb/gadget/function/u_midi2.h b/drivers/usb/gadget/function/u_midi2.h new file mode 100644 index 000000000000..4e7adb41dfb7 --- /dev/null +++ b/drivers/usb/gadget/function/u_midi2.h @@ -0,0 +1,81 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Utility definitions for MIDI 2.0 function + */ + +#ifndef U_MIDI2_H +#define U_MIDI2_H + +#include <linux/usb/composite.h> +#include <sound/asound.h> + +struct f_midi2_opts; +struct f_midi2_ep_opts; +struct f_midi2_block_opts; + +/* UMP Function Block info */ +struct f_midi2_block_info { + unsigned int direction; /* FB direction: 1-3 */ + unsigned int first_group; /* first UMP group: 0-15 */ + unsigned int num_groups; /* number of UMP groups: 1-16 */ + unsigned int midi1_first_group; /* first UMP group for MIDI 1.0 */ + unsigned int midi1_num_groups; /* number of UMP groups for MIDI 1.0 */ + unsigned int ui_hint; /* UI-hint: 0-3 */ + unsigned int midi_ci_version; /* MIDI-CI version: 0-255 */ + unsigned int sysex8_streams; /* number of sysex8 streams: 0-255 */ + unsigned int is_midi1; /* MIDI 1.0 port: 0-2 */ + bool active; /* FB active flag: bool */ + const char *name; /* FB name */ +}; + +/* UMP Endpoint info */ +struct f_midi2_ep_info { + unsigned int protocol_caps; /* protocol capabilities: 1-3 */ + unsigned int protocol; /* default protocol: 1-2 */ + unsigned int manufacturer; /* manufacturer id: 0-0xffffff */ + unsigned int family; /* device family id: 0-0xffff */ + unsigned int model; /* device model id: 0x-0xffff */ + unsigned int sw_revision; /* software revision: 32bit */ + + const char *ep_name; /* Endpoint name */ + const char *product_id; /* Product ID */ +}; + +struct f_midi2_card_info { + bool process_ump; /* process UMP stream: bool */ + bool static_block; /* static FBs: bool */ + unsigned int req_buf_size; /* request buffer size */ + unsigned int num_reqs; /* number of requests */ + const char *iface_name; /* interface name */ +}; + +struct f_midi2_block_opts { + struct config_group group; + unsigned int id; + struct f_midi2_block_info info; + struct f_midi2_ep_opts *ep; +}; + +struct f_midi2_ep_opts { + struct config_group group; + unsigned int index; + struct f_midi2_ep_info info; + struct f_midi2_block_opts *blks[SNDRV_UMP_MAX_BLOCKS]; + struct f_midi2_opts *opts; +}; + +#define MAX_UMP_EPS 4 +#define MAX_CABLES 16 + +struct f_midi2_opts { + struct usb_function_instance func_inst; + struct mutex lock; + int refcnt; + + struct f_midi2_card_info info; + + unsigned int num_eps; + struct f_midi2_ep_opts *eps[MAX_UMP_EPS]; +}; + +#endif /* U_MIDI2_H */ diff --git a/drivers/usb/gadget/udc/aspeed-vhub/core.c b/drivers/usb/gadget/udc/aspeed-vhub/core.c index 16f2db8c4a2b..f60a019bb173 100644 --- a/drivers/usb/gadget/udc/aspeed-vhub/core.c +++ b/drivers/usb/gadget/udc/aspeed-vhub/core.c @@ -328,8 +328,7 @@ static int ast_vhub_probe(struct platform_device *pdev) vhub->port_irq_mask = GENMASK(VHUB_IRQ_DEV1_BIT + vhub->max_ports - 1, VHUB_IRQ_DEV1_BIT); - res = 
platform_get_resource(pdev, IORESOURCE_MEM, 0); - vhub->regs = devm_ioremap_resource(&pdev->dev, res); + vhub->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(vhub->regs)) { dev_err(&pdev->dev, "Failed to map resources\n"); return PTR_ERR(vhub->regs); diff --git a/drivers/usb/gadget/udc/aspeed_udc.c b/drivers/usb/gadget/udc/aspeed_udc.c index 01968e2167f9..2ef89a442f50 100644 --- a/drivers/usb/gadget/udc/aspeed_udc.c +++ b/drivers/usb/gadget/udc/aspeed_udc.c @@ -1468,7 +1468,6 @@ static int ast_udc_probe(struct platform_device *pdev) enum usb_device_speed max_speed; struct device *dev = &pdev->dev; struct ast_udc_dev *udc; - struct resource *res; int rc; udc = devm_kzalloc(&pdev->dev, sizeof(struct ast_udc_dev), GFP_KERNEL); @@ -1484,8 +1483,7 @@ static int ast_udc_probe(struct platform_device *pdev) udc->gadget.name = "aspeed-udc"; udc->gadget.dev.init_name = "gadget"; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - udc->reg = devm_ioremap_resource(&pdev->dev, res); + udc->reg = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(udc->reg)) { dev_err(&pdev->dev, "Failed to map resources\n"); return PTR_ERR(udc->reg); diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c index 6c0ed3fa5eb1..02b1bef5e22e 100644 --- a/drivers/usb/gadget/udc/atmel_usba_udc.c +++ b/drivers/usb/gadget/udc/atmel_usba_udc.c @@ -2285,15 +2285,13 @@ static int usba_udc_probe(struct platform_device *pdev) udc->gadget = usba_gadget_template; INIT_LIST_HEAD(&udc->gadget.ep_list); - res = platform_get_resource(pdev, IORESOURCE_MEM, CTRL_IOMEM_ID); - udc->regs = devm_ioremap_resource(&pdev->dev, res); + udc->regs = devm_platform_get_and_ioremap_resource(pdev, CTRL_IOMEM_ID, &res); if (IS_ERR(udc->regs)) return PTR_ERR(udc->regs); dev_info(&pdev->dev, "MMIO registers at %pR mapped at %p\n", res, udc->regs); - res = platform_get_resource(pdev, IORESOURCE_MEM, FIFO_IOMEM_ID); - udc->fifo = devm_ioremap_resource(&pdev->dev, res); + udc->fifo = devm_platform_get_and_ioremap_resource(pdev, FIFO_IOMEM_ID, &res); if (IS_ERR(udc->fifo)) return PTR_ERR(udc->fifo); dev_info(&pdev->dev, "FIFO at %pR mapped at %p\n", res, udc->fifo); diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c index cd58f2a4e7f3..7166d1117742 100644 --- a/drivers/usb/gadget/udc/core.c +++ b/drivers/usb/gadget/udc/core.c @@ -40,6 +40,7 @@ static const struct bus_type gadget_bus_type; * @allow_connect: Indicates whether UDC is allowed to be pulled up. * Set/cleared by gadget_(un)bind_driver() after gadget driver is bound or * unbound. + * @vbus_work: work routine to handle VBUS status change notifications. * @connect_lock: protects udc->started, gadget->connect, * gadget->allow_connect and gadget->deactivate. The routines * usb_gadget_connect_locked(), usb_gadget_disconnect_locked(), @@ -822,6 +823,9 @@ EXPORT_SYMBOL_GPL(usb_gadget_disconnect); * usb_gadget_activate() is called. For example, user mode components may * need to be activated before the system can talk to hosts. * + * This routine may sleep; it must not be called in interrupt context + * (such as from within a gadget driver's disconnect() callback). + * * Returns zero on success, else negative errno. */ int usb_gadget_deactivate(struct usb_gadget *gadget) @@ -860,6 +864,8 @@ EXPORT_SYMBOL_GPL(usb_gadget_deactivate); * This routine activates gadget which was previously deactivated with * usb_gadget_deactivate() call. It calls usb_gadget_connect() if needed. 
* + * This routine may sleep; it must not be called in interrupt context. + * * Returns zero on success, else negative errno. */ int usb_gadget_activate(struct usb_gadget *gadget) @@ -1638,7 +1644,11 @@ static void gadget_unbind_driver(struct device *dev) usb_gadget_disable_async_callbacks(udc); if (gadget->irq) synchronize_irq(gadget->irq); + mutex_unlock(&udc->connect_lock); + udc->driver->unbind(gadget); + + mutex_lock(&udc->connect_lock); usb_gadget_udc_stop_locked(udc); mutex_unlock(&udc->connect_lock); diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.c b/drivers/usb/gadget/udc/fsl_qe_udc.c index 9c5dc1c1a68e..4aae86b47edf 100644 --- a/drivers/usb/gadget/udc/fsl_qe_udc.c +++ b/drivers/usb/gadget/udc/fsl_qe_udc.c @@ -1959,6 +1959,8 @@ static void ch9getstatus(struct qe_udc *udc, u8 request_type, u16 value, } else if ((request_type & USB_RECIP_MASK) == USB_RECIP_ENDPOINT) { /* Get endpoint status */ int pipe = index & USB_ENDPOINT_NUMBER_MASK; + if (pipe >= USB_MAX_ENDPOINTS) + goto stall; struct qe_ep *target_ep = &udc->eps[pipe]; u16 usep; diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c index a67873a074b7..ee5705d336e3 100644 --- a/drivers/usb/gadget/udc/fsl_udc_core.c +++ b/drivers/usb/gadget/udc/fsl_udc_core.c @@ -36,7 +36,6 @@ #include <linux/platform_device.h> #include <linux/fsl_devices.h> #include <linux/dmapool.h> -#include <linux/of_device.h> #include <asm/byteorder.h> #include <asm/io.h> @@ -672,7 +671,7 @@ static int fsl_ep_disable(struct usb_ep *_ep) static struct usb_request * fsl_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags) { - struct fsl_req *req = NULL; + struct fsl_req *req; req = kzalloc(sizeof *req, gfp_flags); if (!req) diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c index 09762559912d..c6dfa7cccc11 100644 --- a/drivers/usb/gadget/udc/gr_udc.c +++ b/drivers/usb/gadget/udc/gr_udc.c @@ -23,6 +23,7 @@ #include <linux/kernel.h> #include <linux/module.h> +#include <linux/platform_device.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/errno.h> @@ -36,9 +37,7 @@ #include <linux/dmapool.h> #include <linux/debugfs.h> #include <linux/seq_file.h> -#include <linux/of_platform.h> -#include <linux/of_irq.h> -#include <linux/of_address.h> +#include <linux/of.h> #include <asm/byteorder.h> @@ -2137,15 +2136,15 @@ static int gr_probe(struct platform_device *pdev) return PTR_ERR(regs); dev->irq = platform_get_irq(pdev, 0); - if (dev->irq <= 0) - return -ENODEV; + if (dev->irq < 0) + return dev->irq; /* Some core configurations has separate irqs for IN and OUT events */ dev->irqi = platform_get_irq(pdev, 1); if (dev->irqi > 0) { dev->irqo = platform_get_irq(pdev, 2); - if (dev->irqo <= 0) - return -ENODEV; + if (dev->irqo < 0) + return dev->irqo; } else { dev->irqi = 0; } diff --git a/drivers/usb/gadget/udc/max3420_udc.c b/drivers/usb/gadget/udc/max3420_udc.c index 12c519f32bf7..2d57786d3db7 100644 --- a/drivers/usb/gadget/udc/max3420_udc.c +++ b/drivers/usb/gadget/udc/max3420_udc.c @@ -19,9 +19,7 @@ #include <linux/io.h> #include <linux/module.h> #include <linux/bitfield.h> -#include <linux/of_address.h> -#include <linux/of_device.h> -#include <linux/of_platform.h> +#include <linux/of.h> #include <linux/of_irq.h> #include <linux/prefetch.h> #include <linux/usb/ch9.h> diff --git a/drivers/usb/gadget/udc/mv_u3d_core.c b/drivers/usb/gadget/udc/mv_u3d_core.c index 3473048a85f5..2a421f0ff931 100644 --- a/drivers/usb/gadget/udc/mv_u3d_core.c +++ b/drivers/usb/gadget/udc/mv_u3d_core.c @@ 
-665,7 +665,7 @@ static int mv_u3d_ep_disable(struct usb_ep *_ep) static struct usb_request * mv_u3d_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags) { - struct mv_u3d_req *req = NULL; + struct mv_u3d_req *req; req = kzalloc(sizeof *req, gfp_flags); if (!req) @@ -1779,7 +1779,7 @@ static void mv_u3d_remove(struct platform_device *dev) static int mv_u3d_probe(struct platform_device *dev) { - struct mv_u3d *u3d = NULL; + struct mv_u3d *u3d; struct mv_usb_platform_data *pdata = dev_get_platdata(&dev->dev); int retval = 0; struct resource *r; diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c index 79db74e2040b..d888dcda2bc8 100644 --- a/drivers/usb/gadget/udc/mv_udc_core.c +++ b/drivers/usb/gadget/udc/mv_udc_core.c @@ -595,7 +595,7 @@ static int mv_ep_disable(struct usb_ep *_ep) static struct usb_request * mv_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags) { - struct mv_req *req = NULL; + struct mv_req *req; req = kzalloc(sizeof *req, gfp_flags); if (!req) diff --git a/drivers/usb/gadget/udc/pxa27x_udc.c b/drivers/usb/gadget/udc/pxa27x_udc.c index c4e1d957f913..61424cfd2e1c 100644 --- a/drivers/usb/gadget/udc/pxa27x_udc.c +++ b/drivers/usb/gadget/udc/pxa27x_udc.c @@ -23,7 +23,7 @@ #include <linux/prefetch.h> #include <linux/byteorder/generic.h> #include <linux/platform_data/pxa2xx_udc.h> -#include <linux/of_device.h> +#include <linux/of.h> #include <linux/of_gpio.h> #include <linux/usb.h> diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c index 59bb25de2015..3b01734ce1b7 100644 --- a/drivers/usb/gadget/udc/renesas_usb3.c +++ b/drivers/usb/gadget/udc/renesas_usb3.c @@ -14,7 +14,7 @@ #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> -#include <linux/of_device.h> +#include <linux/of.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> diff --git a/drivers/usb/gadget/udc/renesas_usbf.c b/drivers/usb/gadget/udc/renesas_usbf.c index 6cd0af83e91e..657f265ac7cc 100644 --- a/drivers/usb/gadget/udc/renesas_usbf.c +++ b/drivers/usb/gadget/udc/renesas_usbf.c @@ -12,10 +12,9 @@ #include <linux/iopoll.h> #include <linux/kernel.h> #include <linux/kfifo.h> +#include <linux/mod_devicetable.h> #include <linux/module.h> -#include <linux/of_address.h> -#include <linux/of_irq.h> -#include <linux/of_platform.h> +#include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/types.h> #include <linux/usb/composite.h> @@ -3379,7 +3378,6 @@ MODULE_DEVICE_TABLE(of, usbf_match); static struct platform_driver udc_driver = { .driver = { .name = "usbf_renesas", - .owner = THIS_MODULE, .of_match_table = usbf_match, }, .probe = usbf_probe, diff --git a/drivers/usb/gadget/udc/snps_udc_plat.c b/drivers/usb/gadget/udc/snps_udc_plat.c index 0ed685db149d..547af2ed9e5e 100644 --- a/drivers/usb/gadget/udc/snps_udc_plat.c +++ b/drivers/usb/gadget/udc/snps_udc_plat.c @@ -112,8 +112,7 @@ static int udc_plat_probe(struct platform_device *pdev) spin_lock_init(&udc->lock); udc->dev = dev; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - udc->virt_addr = devm_ioremap_resource(dev, res); + udc->virt_addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(udc->virt_addr)) return PTR_ERR(udc->virt_addr); @@ -301,7 +300,6 @@ static const struct dev_pm_ops udc_plat_pm_ops = { }; #endif -#if defined(CONFIG_OF) static const struct of_device_id of_udc_match[] = { { .compatible = "brcm,ns2-udc", }, { .compatible = "brcm,cygnus-udc", }, @@ -309,14 +307,13 
@@ static const struct of_device_id of_udc_match[] = { { } }; MODULE_DEVICE_TABLE(of, of_udc_match); -#endif static struct platform_driver udc_plat_driver = { .probe = udc_plat_probe, .remove_new = udc_plat_remove, .driver = { .name = "snps-udc-plat", - .of_match_table = of_match_ptr(of_udc_match), + .of_match_table = of_udc_match, #ifdef CONFIG_PM_SLEEP .pm = &udc_plat_pm_ops, #endif diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c index df6028f7b273..cb85168fd00c 100644 --- a/drivers/usb/gadget/udc/tegra-xudc.c +++ b/drivers/usb/gadget/udc/tegra-xudc.c @@ -16,7 +16,6 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/phy/phy.h> #include <linux/phy/tegra/xusb.h> #include <linux/pm_domain.h> diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c index a4a7b90a97e7..f301b09bf3f8 100644 --- a/drivers/usb/gadget/udc/udc-xilinx.c +++ b/drivers/usb/gadget/udc/udc-xilinx.c @@ -18,10 +18,8 @@ #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> -#include <linux/of_address.h> -#include <linux/of_device.h> -#include <linux/of_platform.h> -#include <linux/of_irq.h> +#include <linux/of.h> +#include <linux/platform_device.h> #include <linux/prefetch.h> #include <linux/usb/ch9.h> #include <linux/usb/gadget.h> @@ -2080,8 +2078,7 @@ static int xudc_probe(struct platform_device *pdev) udc->req->usb_req.buf = buff; /* Map the registers */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - udc->addr = devm_ioremap_resource(&pdev->dev, res); + udc->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(udc->addr)) return PTR_ERR(udc->addr); diff --git a/drivers/usb/host/ehci-atmel.c b/drivers/usb/host/ehci-atmel.c index 61808c51e702..6a6e1c510b28 100644 --- a/drivers/usb/host/ehci-atmel.c +++ b/drivers/usb/host/ehci-atmel.c @@ -102,8 +102,8 @@ static int ehci_atmel_drv_probe(struct platform_device *pdev) pr_debug("Initializing Atmel-SoC USB Host Controller\n"); irq = platform_get_irq(pdev, 0); - if (irq <= 0) { - retval = -ENODEV; + if (irq < 0) { + retval = irq; goto fail_create_hcd; } @@ -122,8 +122,7 @@ static int ehci_atmel_drv_probe(struct platform_device *pdev) } atmel_ehci = hcd_to_atmel_ehci_priv(hcd); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - hcd->regs = devm_ioremap_resource(&pdev->dev, res); + hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(hcd->regs)) { retval = PTR_ERR(hcd->regs); goto fail_request_resource; diff --git a/drivers/usb/host/ehci-brcm.c b/drivers/usb/host/ehci-brcm.c index 0362a082abb4..77e42c739c58 100644 --- a/drivers/usb/host/ehci-brcm.c +++ b/drivers/usb/host/ehci-brcm.c @@ -140,8 +140,8 @@ static int ehci_brcm_probe(struct platform_device *pdev) return err; irq = platform_get_irq(pdev, 0); - if (irq <= 0) - return irq ? 
irq : -EINVAL; + if (irq < 0) + return irq; /* Hook the hub control routine to work around a bug */ ehci_brcm_hc_driver.hub_control = ehci_brcm_hub_control; diff --git a/drivers/usb/host/ehci-exynos.c b/drivers/usb/host/ehci-exynos.c index 20f8c0ec6810..f644b131cc0b 100644 --- a/drivers/usb/host/ehci-exynos.c +++ b/drivers/usb/host/ehci-exynos.c @@ -173,8 +173,7 @@ static int exynos_ehci_probe(struct platform_device *pdev) if (err) goto fail_clk; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - hcd->regs = devm_ioremap_resource(&pdev->dev, res); + hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(hcd->regs)) { err = PTR_ERR(hcd->regs); goto fail_io; diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c index 81d60a695510..5b1ce394a417 100644 --- a/drivers/usb/host/ehci-fsl.c +++ b/drivers/usb/host/ehci-fsl.c @@ -22,7 +22,7 @@ #include <linux/usb/otg.h> #include <linux/platform_device.h> #include <linux/fsl_devices.h> -#include <linux/of_platform.h> +#include <linux/of.h> #include <linux/io.h> #include "ehci.h" @@ -87,8 +87,7 @@ static int fsl_ehci_drv_probe(struct platform_device *pdev) goto err1; } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - hcd->regs = devm_ioremap_resource(&pdev->dev, res); + hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(hcd->regs)) { retval = PTR_ERR(hcd->regs); goto err2; diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index a1930db0da1c..802bfafb1012 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -755,10 +755,14 @@ restart: /* normal [4.15.1.2] or error [4.15.1.1] completion */ if (likely ((status & (STS_INT|STS_ERR)) != 0)) { - if (likely ((status & STS_ERR) == 0)) + if (likely ((status & STS_ERR) == 0)) { INCR(ehci->stats.normal); - else + } else { + /* Force to check port status */ + if (ehci->has_ci_pec_bug) + status |= STS_PCD; INCR(ehci->stats.error); + } bh = 1; } diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index efe30e3be22f..1aee392e8492 100644 --- a/drivers/usb/host/ehci-hub.c +++ b/drivers/usb/host/ehci-hub.c @@ -674,7 +674,8 @@ ehci_hub_status_data (struct usb_hcd *hcd, char *buf) if ((temp & mask) != 0 || test_bit(i, &ehci->port_c_suspend) || (ehci->reset_done[i] && time_after_eq( - jiffies, ehci->reset_done[i]))) { + jiffies, ehci->reset_done[i])) + || ehci_has_ci_pec_bug(ehci, temp)) { if (i < 7) buf [0] |= 1 << (i + 1); else @@ -875,6 +876,13 @@ int ehci_hub_control( if (temp & PORT_PEC) status |= USB_PORT_STAT_C_ENABLE << 16; + if (ehci_has_ci_pec_bug(ehci, temp)) { + status |= USB_PORT_STAT_C_ENABLE << 16; + ehci_info(ehci, + "PE is cleared by HW port:%d PORTSC:%08x\n", + wIndex + 1, temp); + } + if ((temp & PORT_OCC) && (!ignore_oc && !ehci->spurious_oc)){ status |= USB_PORT_STAT_C_OVERCURRENT << 16; diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c index 9320cf0e5bc7..2f1fc7eb8b72 100644 --- a/drivers/usb/host/ehci-mv.c +++ b/drivers/usb/host/ehci-mv.c @@ -142,8 +142,7 @@ static int mv_ehci_probe(struct platform_device *pdev) goto err_put_hcd; } - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - ehci_mv->base = devm_ioremap_resource(&pdev->dev, r); + ehci_mv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r); if (IS_ERR(ehci_mv->base)) { retval = PTR_ERR(ehci_mv->base); goto err_put_hcd; diff --git a/drivers/usb/host/ehci-npcm7xx.c b/drivers/usb/host/ehci-npcm7xx.c index ad79ce63afcf..3d3317a1a0b3 100644 --- 
a/drivers/usb/host/ehci-npcm7xx.c +++ b/drivers/usb/host/ehci-npcm7xx.c @@ -53,7 +53,7 @@ static int npcm7xx_ehci_hcd_drv_probe(struct platform_device *pdev) int irq; int retval; - dev_dbg(&pdev->dev, "initializing npcm7xx ehci USB Controller\n"); + dev_dbg(&pdev->dev, "initializing npcm7xx ehci USB Controller\n"); if (usb_disabled()) return -ENODEV; @@ -79,8 +79,7 @@ static int npcm7xx_ehci_hcd_drv_probe(struct platform_device *pdev) goto fail; } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - hcd->regs = devm_ioremap_resource(&pdev->dev, res); + hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(hcd->regs)) { retval = PTR_ERR(hcd->regs); goto err_put_hcd; diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c index cb6509a735ac..b24f371a46f3 100644 --- a/drivers/usb/host/ehci-omap.c +++ b/drivers/usb/host/ehci-omap.c @@ -113,8 +113,7 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev) if (irq < 0) return irq; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - regs = devm_ioremap_resource(dev, res); + regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(regs)) return PTR_ERR(regs); diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c index 2cfb27dc943a..6c47ab0a491d 100644 --- a/drivers/usb/host/ehci-orion.c +++ b/drivers/usb/host/ehci-orion.c @@ -13,8 +13,6 @@ #include <linux/platform_data/usb-ehci-orion.h> #include <linux/of.h> #include <linux/phy/phy.h> -#include <linux/of_device.h> -#include <linux/of_irq.h> #include <linux/usb.h> #include <linux/usb/hcd.h> #include <linux/io.h> @@ -220,8 +218,8 @@ static int ehci_orion_drv_probe(struct platform_device *pdev) pr_debug("Initializing Orion-SoC USB Host Controller\n"); irq = platform_get_irq(pdev, 0); - if (irq <= 0) { - err = -ENODEV; + if (irq < 0) { + err = irq; goto err; } @@ -234,8 +232,7 @@ static int ehci_orion_drv_probe(struct platform_device *pdev) if (err) goto err; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - regs = devm_ioremap_resource(&pdev->dev, res); + regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(regs)) { err = PTR_ERR(regs); goto err; diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c index 83bf56c9424f..98b073185e1c 100644 --- a/drivers/usb/host/ehci-platform.c +++ b/drivers/usb/host/ehci-platform.c @@ -359,8 +359,7 @@ static int ehci_platform_probe(struct platform_device *dev) goto err_reset; } - res_mem = platform_get_resource(dev, IORESOURCE_MEM, 0); - hcd->regs = devm_ioremap_resource(&dev->dev, res_mem); + hcd->regs = devm_platform_get_and_ioremap_resource(dev, 0, &res_mem); if (IS_ERR(hcd->regs)) { err = PTR_ERR(hcd->regs); goto err_power; diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index bd542b6fc46b..7e834587e7de 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c @@ -490,13 +490,14 @@ static int tt_no_collision( static void enable_periodic(struct ehci_hcd *ehci) { if (ehci->periodic_count++) - return; + goto out; /* Stop waiting to turn off the periodic schedule */ ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_DISABLE_PERIODIC); /* Don't start the schedule until PSS is 0 */ ehci_poll_PSS(ehci); +out: turn_on_io_watchdog(ehci); } diff --git a/drivers/usb/host/ehci-sh.c b/drivers/usb/host/ehci-sh.c index 0520e762801d..d31d9506e41a 100644 --- a/drivers/usb/host/ehci-sh.c +++ b/drivers/usb/host/ehci-sh.c @@ -82,8 +82,8 @@ static int ehci_hcd_sh_probe(struct 
platform_device *pdev) return -ENODEV; irq = platform_get_irq(pdev, 0); - if (irq <= 0) { - ret = -ENODEV; + if (irq < 0) { + ret = irq; goto fail_create_hcd; } @@ -95,8 +95,7 @@ static int ehci_hcd_sh_probe(struct platform_device *pdev) goto fail_create_hcd; } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - hcd->regs = devm_ioremap_resource(&pdev->dev, res); + hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(hcd->regs)) { ret = PTR_ERR(hcd->regs); goto fail_request_resource; diff --git a/drivers/usb/host/ehci-spear.c b/drivers/usb/host/ehci-spear.c index 1407703649be..d0e94e4c9fe2 100644 --- a/drivers/usb/host/ehci-spear.c +++ b/drivers/usb/host/ehci-spear.c @@ -91,8 +91,7 @@ static int spear_ehci_hcd_drv_probe(struct platform_device *pdev) goto fail; } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - hcd->regs = devm_ioremap_resource(&pdev->dev, res); + hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(hcd->regs)) { retval = PTR_ERR(hcd->regs); goto err_put_hcd; diff --git a/drivers/usb/host/ehci-st.c b/drivers/usb/host/ehci-st.c index ee0976b815b4..2dbb0d86daaa 100644 --- a/drivers/usb/host/ehci-st.c +++ b/drivers/usb/host/ehci-st.c @@ -158,11 +158,6 @@ static int st_ehci_platform_probe(struct platform_device *dev) irq = platform_get_irq(dev, 0); if (irq < 0) return irq; - res_mem = platform_get_resource(dev, IORESOURCE_MEM, 0); - if (!res_mem) { - dev_err(&dev->dev, "no memory resource provided"); - return -ENXIO; - } hcd = usb_create_hcd(&ehci_platform_hc_driver, &dev->dev, dev_name(&dev->dev)); @@ -222,14 +217,13 @@ static int st_ehci_platform_probe(struct platform_device *dev) goto err_put_clks; } - hcd->rsrc_start = res_mem->start; - hcd->rsrc_len = resource_size(res_mem); - - hcd->regs = devm_ioremap_resource(&dev->dev, res_mem); + hcd->regs = devm_platform_get_and_ioremap_resource(dev, 0, &res_mem); if (IS_ERR(hcd->regs)) { err = PTR_ERR(hcd->regs); goto err_put_clks; } + hcd->rsrc_start = res_mem->start; + hcd->rsrc_len = resource_size(res_mem); err = usb_add_hcd(hcd, irq, IRQF_SHARED); if (err) diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h index c5c7f8782549..1441e3400796 100644 --- a/drivers/usb/host/ehci.h +++ b/drivers/usb/host/ehci.h @@ -207,6 +207,7 @@ struct ehci_hcd { /* one per controller */ unsigned has_fsl_port_bug:1; /* FreeScale */ unsigned has_fsl_hs_errata:1; /* Freescale HS quirk */ unsigned has_fsl_susp_errata:1; /* NXP SUSP quirk */ + unsigned has_ci_pec_bug:1; /* ChipIdea PEC bug */ unsigned big_endian_mmio:1; unsigned big_endian_desc:1; unsigned big_endian_capbase:1; @@ -708,6 +709,15 @@ ehci_port_speed(struct ehci_hcd *ehci, unsigned int portsc) #define ehci_has_fsl_susp_errata(e) ((e)->has_fsl_susp_errata) /* + * Some Freescale/NXP processors using ChipIdea IP have a bug in which + * disabling the port (PE is cleared) does not cause PEC to be asserted + * when frame babble is detected. + */ +#define ehci_has_ci_pec_bug(e, portsc) \ + ((e)->has_ci_pec_bug && ((e)->command & CMD_PSE) \ + && !(portsc & PORT_PEC) && !(portsc & PORT_PE)) + +/* * While most USB host controllers implement their registers in * little-endian format, a minority (celleb companion chip) implement * them in big endian format. 
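
Note on the hunk above: the ehci.h change only introduces the has_ci_pec_bug flag and the ehci_has_ci_pec_bug() helper used by ehci-hcd.c and ehci-hub.c earlier in this series; the platform glue that actually sets the flag is not part of this excerpt. As a minimal sketch (not part of the patch), a ChipIdea-based EHCI glue driver could opt in roughly as follows; ci_ehci_init_quirks() is a hypothetical name used only for illustration:

/* Hypothetical example -- not a symbol added by this patch. */
#include "ehci.h"

static void ci_ehci_init_quirks(struct ehci_hcd *ehci)
{
	/*
	 * On affected ChipIdea IP, frame babble disables the port
	 * (PORT_PE cleared) without asserting PORT_PEC, so the hub
	 * code is told to report a port change anyway; see the
	 * ehci_has_ci_pec_bug() check in the ehci-hub.c hunk above.
	 */
	ehci->has_ci_pec_bug = 1;
}
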
diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c index 66a045e01dad..9a1b5224f239 100644 --- a/drivers/usb/host/fhci-hcd.c +++ b/drivers/usb/host/fhci-hcd.c @@ -22,9 +22,10 @@ #include <linux/io.h> #include <linux/usb.h> #include <linux/usb/hcd.h> +#include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> -#include <linux/of_platform.h> +#include <linux/platform_device.h> #include <linux/slab.h> #include <linux/gpio/consumer.h> #include <soc/fsl/qe/qe.h> diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c index a9877f2569f4..8508d37a2aff 100644 --- a/drivers/usb/host/fsl-mph-dr-of.c +++ b/drivers/usb/host/fsl-mph-dr-of.c @@ -10,7 +10,8 @@ #include <linux/fsl_devices.h> #include <linux/err.h> #include <linux/io.h> -#include <linux/of_platform.h> +#include <linux/of.h> +#include <linux/of_device.h> #include <linux/clk.h> #include <linux/module.h> #include <linux/dma-mapping.h> diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c index 606f0a64f3b7..a52c3d858f3e 100644 --- a/drivers/usb/host/isp1362-hcd.c +++ b/drivers/usb/host/isp1362-hcd.c @@ -2651,8 +2651,7 @@ static int isp1362_probe(struct platform_device *pdev) if (IS_ERR(addr_reg)) return PTR_ERR(addr_reg); - data = platform_get_resource(pdev, IORESOURCE_MEM, 0); - data_reg = devm_ioremap_resource(&pdev->dev, data); + data_reg = devm_platform_get_and_ioremap_resource(pdev, 0, &data); if (IS_ERR(data_reg)) return PTR_ERR(data_reg); diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c index b805c4b52ac3..f691cd98a574 100644 --- a/drivers/usb/host/ohci-at91.c +++ b/drivers/usb/host/ohci-at91.c @@ -17,13 +17,13 @@ #include <linux/clk.h> #include <linux/dma-mapping.h> #include <linux/gpio/consumer.h> -#include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/platform_data/atmel.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/mfd/syscon.h> +#include <linux/of.h> #include <linux/regmap.h> #include <linux/usb.h> #include <linux/usb/hcd.h> @@ -190,18 +190,15 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver, int irq; irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_dbg(dev, "hcd probe: missing irq resource\n"); + if (irq < 0) return irq; - } hcd = usb_create_hcd(driver, dev, "at91"); if (!hcd) return -ENOMEM; ohci_at91 = hcd_to_ohci_at91_priv(hcd); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - hcd->regs = devm_ioremap_resource(dev, res); + hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(hcd->regs)) { retval = PTR_ERR(hcd->regs); goto err; diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c index e4191a868944..d9adae53466b 100644 --- a/drivers/usb/host/ohci-da8xx.c +++ b/drivers/usb/host/ohci-da8xx.c @@ -15,6 +15,7 @@ #include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/of.h> #include <linux/platform_device.h> #include <linux/phy/phy.h> #include <linux/platform_data/usb-davinci.h> @@ -435,8 +436,7 @@ static int ohci_da8xx_probe(struct platform_device *pdev) goto err; } - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - hcd->regs = devm_ioremap_resource(dev, mem); + hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &mem); if (IS_ERR(hcd->regs)) { error = PTR_ERR(hcd->regs); goto err; diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c index ab31c459b32d..20e26a474591 100644 --- 
a/drivers/usb/host/ohci-exynos.c +++ b/drivers/usb/host/ohci-exynos.c @@ -149,8 +149,7 @@ static int exynos_ohci_probe(struct platform_device *pdev) if (err) goto fail_clk; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - hcd->regs = devm_ioremap_resource(&pdev->dev, res); + hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(hcd->regs)) { err = PTR_ERR(hcd->regs); goto fail_io; diff --git a/drivers/usb/host/ohci-nxp.c b/drivers/usb/host/ohci-nxp.c index c04b2af5c766..8264c454f6bd 100644 --- a/drivers/usb/host/ohci-nxp.c +++ b/drivers/usb/host/ohci-nxp.c @@ -202,8 +202,7 @@ static int ohci_hcd_nxp_probe(struct platform_device *pdev) goto fail_hcd; } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - hcd->regs = devm_ioremap_resource(&pdev->dev, res); + hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(hcd->regs)) { ret = PTR_ERR(hcd->regs); goto fail_resource; diff --git a/drivers/usb/host/ohci-platform.c b/drivers/usb/host/ohci-platform.c index 45a2c981319e..4a75507325dd 100644 --- a/drivers/usb/host/ohci-platform.c +++ b/drivers/usb/host/ohci-platform.c @@ -200,8 +200,7 @@ static int ohci_platform_probe(struct platform_device *dev) goto err_reset; } - res_mem = platform_get_resource(dev, IORESOURCE_MEM, 0); - hcd->regs = devm_ioremap_resource(&dev->dev, res_mem); + hcd->regs = devm_platform_get_and_ioremap_resource(dev, 0, &res_mem); if (IS_ERR(hcd->regs)) { err = PTR_ERR(hcd->regs); goto err_power; diff --git a/drivers/usb/host/ohci-ppc-of.c b/drivers/usb/host/ohci-ppc-of.c index 35a7ad7e2569..f64bfe5f4d4d 100644 --- a/drivers/usb/host/ohci-ppc-of.c +++ b/drivers/usb/host/ohci-ppc-of.c @@ -15,9 +15,10 @@ */ #include <linux/signal.h> +#include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> -#include <linux/of_platform.h> +#include <linux/platform_device.h> static int ohci_ppc_of_start(struct usb_hcd *hcd) diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c index 7410442f720f..357d9aee38a3 100644 --- a/drivers/usb/host/ohci-pxa27x.c +++ b/drivers/usb/host/ohci-pxa27x.c @@ -435,8 +435,7 @@ static int ohci_hcd_pxa27x_probe(struct platform_device *pdev) if (!hcd) return -ENOMEM; - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - hcd->regs = devm_ioremap_resource(&pdev->dev, r); + hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &r); if (IS_ERR(hcd->regs)) { retval = PTR_ERR(hcd->regs); goto err; diff --git a/drivers/usb/host/ohci-sm501.c b/drivers/usb/host/ohci-sm501.c index 0468eeb4fcfd..4b39e9d6f33a 100644 --- a/drivers/usb/host/ohci-sm501.c +++ b/drivers/usb/host/ohci-sm501.c @@ -195,8 +195,7 @@ static void ohci_hcd_sm501_drv_remove(struct platform_device *pdev) release_mem_region(hcd->rsrc_start, hcd->rsrc_len); usb_put_hcd(hcd); mem = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (mem) - release_mem_region(mem->start, resource_size(mem)); + release_mem_region(mem->start, resource_size(mem)); /* mask interrupts and disable power */ diff --git a/drivers/usb/host/ohci-spear.c b/drivers/usb/host/ohci-spear.c index f4b2656407dd..993f347c5c28 100644 --- a/drivers/usb/host/ohci-spear.c +++ b/drivers/usb/host/ohci-spear.c @@ -68,8 +68,7 @@ static int spear_ohci_hcd_drv_probe(struct platform_device *pdev) goto fail; } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - hcd->regs = devm_ioremap_resource(&pdev->dev, res); + hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(hcd->regs)) { retval = PTR_ERR(hcd->regs); goto 
err_put_hcd; diff --git a/drivers/usb/host/ohci-st.c b/drivers/usb/host/ohci-st.c index 884e447a8098..214342013f7e 100644 --- a/drivers/usb/host/ohci-st.c +++ b/drivers/usb/host/ohci-st.c @@ -139,12 +139,6 @@ static int st_ohci_platform_probe(struct platform_device *dev) if (irq < 0) return irq; - res_mem = platform_get_resource(dev, IORESOURCE_MEM, 0); - if (!res_mem) { - dev_err(&dev->dev, "no memory resource provided"); - return -ENXIO; - } - hcd = usb_create_hcd(&ohci_platform_hc_driver, &dev->dev, dev_name(&dev->dev)); if (!hcd) @@ -199,14 +193,14 @@ static int st_ohci_platform_probe(struct platform_device *dev) goto err_power; } - hcd->rsrc_start = res_mem->start; - hcd->rsrc_len = resource_size(res_mem); - - hcd->regs = devm_ioremap_resource(&dev->dev, res_mem); + hcd->regs = devm_platform_get_and_ioremap_resource(dev, 0, &res_mem); if (IS_ERR(hcd->regs)) { err = PTR_ERR(hcd->regs); goto err_power; } + hcd->rsrc_start = res_mem->start; + hcd->rsrc_len = resource_size(res_mem); + err = usb_add_hcd(hcd, irq, IRQF_SHARED); if (err) goto err_power; diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c index 50c1ccabb0f5..d467472f9d3c 100644 --- a/drivers/usb/host/oxu210hp-hcd.c +++ b/drivers/usb/host/oxu210hp-hcd.c @@ -4230,8 +4230,7 @@ static int oxu_drv_probe(struct platform_device *pdev) return irq; dev_dbg(&pdev->dev, "IRQ resource %d\n", irq); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - base = devm_ioremap_resource(&pdev->dev, res); + base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(base)) { ret = PTR_ERR(base); goto error; diff --git a/drivers/usb/host/uhci-platform.c b/drivers/usb/host/uhci-platform.c index 71ca532fc086..3dec5dd3a0d5 100644 --- a/drivers/usb/host/uhci-platform.c +++ b/drivers/usb/host/uhci-platform.c @@ -91,8 +91,7 @@ static int uhci_hcd_platform_probe(struct platform_device *pdev) uhci = hcd_to_uhci(hcd); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - hcd->regs = devm_ioremap_resource(&pdev->dev, res); + hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(hcd->regs)) { ret = PTR_ERR(hcd->regs); goto err_rmr; diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 19a402123de0..8714ab5bf04d 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -1108,9 +1108,6 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS); max_packets = MAX_PACKET(8); break; - case USB_SPEED_WIRELESS: - xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n"); - return -EINVAL; default: /* Speed was set earlier, this shouldn't happen. 
*/ return -EINVAL; diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index b26ea7cb4357..28218c8f1837 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -13,7 +13,6 @@ #include <linux/module.h> #include <linux/pci.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/usb/phy.h> #include <linux/slab.h> diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c index bf5261fed32c..ab9c5969e462 100644 --- a/drivers/usb/host/xhci-rcar.c +++ b/drivers/usb/host/xhci-rcar.c @@ -10,7 +10,6 @@ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/usb/phy.h> #include "xhci.h" diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c index 4693d83351c6..6246d5ad1468 100644 --- a/drivers/usb/host/xhci-tegra.c +++ b/drivers/usb/host/xhci-tegra.c @@ -14,7 +14,7 @@ #include <linux/iopoll.h> #include <linux/kernel.h> #include <linux/module.h> -#include <linux/of_device.h> +#include <linux/of.h> #include <linux/of_irq.h> #include <linux/phy/phy.h> #include <linux/phy/tegra/xusb.h> @@ -1912,6 +1912,15 @@ put_padctl: return err; } +static void tegra_xusb_disable(struct tegra_xusb *tegra) +{ + tegra_xusb_powergate_partitions(tegra); + tegra_xusb_powerdomain_remove(tegra->dev, tegra); + tegra_xusb_phy_disable(tegra); + tegra_xusb_clk_disable(tegra); + regulator_bulk_disable(tegra->soc->num_supplies, tegra->supplies); +} + static void tegra_xusb_remove(struct platform_device *pdev) { struct tegra_xusb *tegra = platform_get_drvdata(pdev); @@ -1934,14 +1943,18 @@ static void tegra_xusb_remove(struct platform_device *pdev) pm_runtime_put(&pdev->dev); - tegra_xusb_powergate_partitions(tegra); + tegra_xusb_disable(tegra); + tegra_xusb_padctl_put(tegra->padctl); +} - tegra_xusb_powerdomain_remove(&pdev->dev, tegra); +static void tegra_xusb_shutdown(struct platform_device *pdev) +{ + struct tegra_xusb *tegra = platform_get_drvdata(pdev); - tegra_xusb_phy_disable(tegra); - tegra_xusb_clk_disable(tegra); - regulator_bulk_disable(tegra->soc->num_supplies, tegra->supplies); - tegra_xusb_padctl_put(tegra->padctl); + pm_runtime_get_sync(&pdev->dev); + disable_irq(tegra->xhci_irq); + xhci_shutdown(tegra->hcd); + tegra_xusb_disable(tegra); } static bool xhci_hub_ports_suspended(struct xhci_hub *hub) @@ -2652,6 +2665,7 @@ MODULE_DEVICE_TABLE(of, tegra_xusb_of_match); static struct platform_driver tegra_xusb_driver = { .probe = tegra_xusb_probe, .remove_new = tegra_xusb_remove, + .shutdown = tegra_xusb_shutdown, .driver = { .name = "tegra-xusb", .pm = &tegra_xusb_pm_ops, diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index fae994f679d4..e1b1b64a0723 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -2194,7 +2194,6 @@ static unsigned int xhci_get_block_size(struct usb_device *udev) case USB_SPEED_SUPER_PLUS: return SS_BLOCK; case USB_SPEED_UNKNOWN: - case USB_SPEED_WIRELESS: default: /* Should never happen */ return 1; @@ -2555,10 +2554,7 @@ static void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci, case USB_SPEED_HIGH: interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1; break; - case USB_SPEED_SUPER: - case USB_SPEED_SUPER_PLUS: - case USB_SPEED_UNKNOWN: - case USB_SPEED_WIRELESS: + default: /* Should never happen because only LS/FS/HS endpoints will get * added to the endpoint list. 
*/ @@ -2615,10 +2611,7 @@ static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci, case USB_SPEED_HIGH: interval_bw->overhead[HS_OVERHEAD_TYPE] += 1; break; - case USB_SPEED_SUPER: - case USB_SPEED_SUPER_PLUS: - case USB_SPEED_UNKNOWN: - case USB_SPEED_WIRELESS: + default: /* Should never happen because only LS/FS/HS endpoints will get * added to the endpoint list. */ diff --git a/drivers/usb/misc/cypress_cy7c63.c b/drivers/usb/misc/cypress_cy7c63.c index 14faec51d7a5..cecd7693b741 100644 --- a/drivers/usb/misc/cypress_cy7c63.c +++ b/drivers/usb/misc/cypress_cy7c63.c @@ -203,7 +203,7 @@ ATTRIBUTE_GROUPS(cypress); static int cypress_probe(struct usb_interface *interface, const struct usb_device_id *id) { - struct cypress *dev = NULL; + struct cypress *dev; int retval = -ENOMEM; /* allocate memory for our device state and initialize it */ diff --git a/drivers/usb/misc/cytherm.c b/drivers/usb/misc/cytherm.c index 3e3802aaefa3..875016dd073c 100644 --- a/drivers/usb/misc/cytherm.c +++ b/drivers/usb/misc/cytherm.c @@ -304,20 +304,20 @@ static int cytherm_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(interface); - struct usb_cytherm *dev = NULL; + struct usb_cytherm *dev; int retval = -ENOMEM; - dev = kzalloc (sizeof(struct usb_cytherm), GFP_KERNEL); + dev = kzalloc(sizeof(struct usb_cytherm), GFP_KERNEL); if (!dev) goto error_mem; dev->udev = usb_get_dev(udev); - usb_set_intfdata (interface, dev); + usb_set_intfdata(interface, dev); dev->brightness = 0xFF; - dev_info (&interface->dev, + dev_info(&interface->dev, "Cypress thermometer device now attached\n"); return 0; @@ -329,10 +329,10 @@ static void cytherm_disconnect(struct usb_interface *interface) { struct usb_cytherm *dev; - dev = usb_get_intfdata (interface); + dev = usb_get_intfdata(interface); /* first remove the files, then NULL the pointer */ - usb_set_intfdata (interface, NULL); + usb_set_intfdata(interface, NULL); usb_put_dev(dev->udev); diff --git a/drivers/usb/misc/onboard_usb_hub.c b/drivers/usb/misc/onboard_usb_hub.c index 83f14ca1d38e..3da1a4659c5f 100644 --- a/drivers/usb/misc/onboard_usb_hub.c +++ b/drivers/usb/misc/onboard_usb_hub.c @@ -27,6 +27,17 @@ #include "onboard_usb_hub.h" +/* + * Use generic names, as the actual names might differ between hubs. If a new + * hub requires more than the currently supported supplies, add a new one here. 
+ */ +static const char * const supply_names[] = { + "vdd", + "vdd2", +}; + +#define MAX_SUPPLIES ARRAY_SIZE(supply_names) + static void onboard_hub_attach_usb_driver(struct work_struct *work); static struct usb_device_driver onboard_hub_usbdev_driver; @@ -40,7 +51,7 @@ struct usbdev_node { }; struct onboard_hub { - struct regulator *vdd; + struct regulator_bulk_data supplies[MAX_SUPPLIES]; struct device *dev; const struct onboard_hub_pdata *pdata; struct gpio_desc *reset_gpio; @@ -55,9 +66,9 @@ static int onboard_hub_power_on(struct onboard_hub *hub) { int err; - err = regulator_enable(hub->vdd); + err = regulator_bulk_enable(hub->pdata->num_supplies, hub->supplies); if (err) { - dev_err(hub->dev, "failed to enable regulator: %d\n", err); + dev_err(hub->dev, "failed to enable supplies: %d\n", err); return err; } @@ -75,9 +86,9 @@ static int onboard_hub_power_off(struct onboard_hub *hub) gpiod_set_value_cansleep(hub->reset_gpio, 1); - err = regulator_disable(hub->vdd); + err = regulator_bulk_disable(hub->pdata->num_supplies, hub->supplies); if (err) { - dev_err(hub->dev, "failed to disable regulator: %d\n", err); + dev_err(hub->dev, "failed to disable supplies: %d\n", err); return err; } @@ -232,6 +243,7 @@ static int onboard_hub_probe(struct platform_device *pdev) const struct of_device_id *of_id; struct device *dev = &pdev->dev; struct onboard_hub *hub; + unsigned int i; int err; hub = devm_kzalloc(dev, sizeof(*hub), GFP_KERNEL); @@ -246,9 +258,18 @@ static int onboard_hub_probe(struct platform_device *pdev) if (!hub->pdata) return -EINVAL; - hub->vdd = devm_regulator_get(dev, "vdd"); - if (IS_ERR(hub->vdd)) - return PTR_ERR(hub->vdd); + if (hub->pdata->num_supplies > MAX_SUPPLIES) + return dev_err_probe(dev, -EINVAL, "max %zu supplies supported!\n", + MAX_SUPPLIES); + + for (i = 0; i < hub->pdata->num_supplies; i++) + hub->supplies[i].supply = supply_names[i]; + + err = devm_regulator_bulk_get(dev, hub->pdata->num_supplies, hub->supplies); + if (err) { + dev_err(dev, "Failed to get regulator supplies: %d\n", err); + return err; + } hub->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); @@ -329,6 +350,7 @@ static struct platform_driver onboard_hub_driver = { /************************** USB driver **************************/ +#define VENDOR_ID_CYPRESS 0x04b4 #define VENDOR_ID_GENESYS 0x05e3 #define VENDOR_ID_MICROCHIP 0x0424 #define VENDOR_ID_REALTEK 0x0bda @@ -407,8 +429,11 @@ static void onboard_hub_usbdev_disconnect(struct usb_device *udev) } static const struct usb_device_id onboard_hub_id_table[] = { + { USB_DEVICE(VENDOR_ID_CYPRESS, 0x6504) }, /* CYUSB33{0,1,2}x/CYUSB230x 3.0 */ + { USB_DEVICE(VENDOR_ID_CYPRESS, 0x6506) }, /* CYUSB33{0,1,2}x/CYUSB230x 2.0 */ { USB_DEVICE(VENDOR_ID_GENESYS, 0x0608) }, /* Genesys Logic GL850G USB 2.0 */ { USB_DEVICE(VENDOR_ID_GENESYS, 0x0610) }, /* Genesys Logic GL852G USB 2.0 */ + { USB_DEVICE(VENDOR_ID_GENESYS, 0x0620) }, /* Genesys Logic GL3523 USB 3.1 */ { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2514) }, /* USB2514B USB 2.0 */ { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2517) }, /* USB2517 USB 2.0 */ { USB_DEVICE(VENDOR_ID_REALTEK, 0x0411) }, /* RTS5411 USB 3.1 */ diff --git a/drivers/usb/misc/onboard_usb_hub.h b/drivers/usb/misc/onboard_usb_hub.h index aca5f50eb0da..4026ba64c592 100644 --- a/drivers/usb/misc/onboard_usb_hub.h +++ b/drivers/usb/misc/onboard_usb_hub.h @@ -8,30 +8,42 @@ struct onboard_hub_pdata { unsigned long reset_us; /* reset pulse width in us */ + unsigned int num_supplies; /* number of supplies */ }; static const struct 
onboard_hub_pdata microchip_usb424_data = { .reset_us = 1, + .num_supplies = 1, }; static const struct onboard_hub_pdata realtek_rts5411_data = { .reset_us = 0, + .num_supplies = 1, }; static const struct onboard_hub_pdata ti_tusb8041_data = { .reset_us = 3000, + .num_supplies = 1, +}; + +static const struct onboard_hub_pdata cypress_hx3_data = { + .reset_us = 10000, + .num_supplies = 2, }; static const struct onboard_hub_pdata genesys_gl850g_data = { .reset_us = 3, + .num_supplies = 1, }; static const struct onboard_hub_pdata genesys_gl852g_data = { .reset_us = 50, + .num_supplies = 1, }; static const struct onboard_hub_pdata vialab_vl817_data = { .reset_us = 10, + .num_supplies = 1, }; static const struct of_device_id onboard_hub_match[] = { @@ -39,8 +51,11 @@ static const struct of_device_id onboard_hub_match[] = { { .compatible = "usb424,2517", .data = &microchip_usb424_data, }, { .compatible = "usb451,8140", .data = &ti_tusb8041_data, }, { .compatible = "usb451,8142", .data = &ti_tusb8041_data, }, + { .compatible = "usb4b4,6504", .data = &cypress_hx3_data, }, + { .compatible = "usb4b4,6506", .data = &cypress_hx3_data, }, { .compatible = "usb5e3,608", .data = &genesys_gl850g_data, }, { .compatible = "usb5e3,610", .data = &genesys_gl852g_data, }, + { .compatible = "usb5e3,620", .data = &genesys_gl852g_data, }, { .compatible = "usbbda,411", .data = &realtek_rts5411_data, }, { .compatible = "usbbda,5411", .data = &realtek_rts5411_data, }, { .compatible = "usbbda,414", .data = &realtek_rts5411_data, }, diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c index e4edb486b69e..7da404f55a6d 100644 --- a/drivers/usb/misc/usb251xb.c +++ b/drivers/usb/misc/usb251xb.c @@ -16,7 +16,7 @@ #include <linux/i2c.h> #include <linux/module.h> #include <linux/nls.h> -#include <linux/of_device.h> +#include <linux/of.h> #include <linux/regulator/consumer.h> #include <linux/slab.h> diff --git a/drivers/usb/misc/usb_u132.h b/drivers/usb/misc/usb_u132.h deleted file mode 100644 index 1584efbbd704..000000000000 --- a/drivers/usb/misc/usb_u132.h +++ /dev/null @@ -1,97 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* -* Common Header File for the Elan Digital Systems U132 adapter -* this file should be included by both the "ftdi-u132" and -* the "u132-hcd" modules. -* -* Copyright(C) 2006 Elan Digital Systems Limited -*(http://www.elandigitalsystems.com) -* -* Author and Maintainer - Tony Olech - Elan Digital Systems -* -* The driver was written by Tony Olech([email protected]) -* based on various USB client drivers in the 2.6.15 linux kernel -* with constant reference to the 3rd Edition of Linux Device Drivers -* published by O'Reilly -* -* The U132 adapter is a USB to CardBus adapter specifically designed -* for PC cards that contain an OHCI host controller. Typical PC cards -* are the Orange Mobile 3G Option GlobeTrotter Fusion card. -* -* The U132 adapter will *NOT *work with PC cards that do not contain -* an OHCI controller.
A simple way to test whether a PC card has an -* OHCI controller as an interface is to insert the PC card directly -* into a laptop(or desktop) with a CardBus slot and if "lspci" shows -* a new USB controller and "lsusb -v" shows a new OHCI Host Controller -* then there is a good chance that the U132 adapter will support the -* PC card.(you also need the specific client driver for the PC card) -* -* Please inform the Author and Maintainer about any PC cards that -* contain OHCI Host Controller and work when directly connected to -* an embedded CardBus slot but do not work when they are connected -* via an ELAN U132 adapter. -* -* The driver consists of two modules, the "ftdi-u132" module is -* a USB client driver that interfaces to the FTDI chip within -* the U132 adapter manufactured by Elan Digital Systems, and the -* "u132-hcd" module is a USB host controller driver that talks -* to the OHCI controller within CardBus card that are inserted -* in the U132 adapter. -* -* The "ftdi-u132" module should be loaded automatically by the -* hot plug system when the U132 adapter is plugged in. The module -* initialises the adapter which mostly consists of synchronising -* the FTDI chip, before continuously polling the adapter to detect -* PC card insertions. As soon as a PC card containing a recognised -* OHCI controller is seen the "ftdi-u132" module explicitly requests -* the kernel to load the "u132-hcd" module. -* -* The "ftdi-u132" module provides the interface to the inserted -* PC card and the "u132-hcd" module uses the API to send and receive -* data. The API features call-backs, so that part of the "u132-hcd" -* module code will run in the context of one of the kernel threads -* of the "ftdi-u132" module. -* -*/ -int ftdi_elan_switch_on_diagnostics(int number); -void ftdi_elan_gone_away(struct platform_device *pdev); -void start_usb_lock_device_tracing(void); -struct u132_platform_data { - u16 vendor; - u16 device; - u8 potpg; - void (*port_power) (struct device *dev, int is_on); - void (*reset) (struct device *dev); -}; -int usb_ftdi_elan_edset_single(struct platform_device *pdev, u8 ed_number, - void *endp, struct urb *urb, u8 address, u8 ep_number, u8 toggle_bits, - void (*callback) (void *endp, struct urb *urb, u8 *buf, int len, - int toggle_bits, int error_count, int condition_code, int repeat_number, - int halted, int skipped, int actual, int non_null)); -int usb_ftdi_elan_edset_output(struct platform_device *pdev, u8 ed_number, - void *endp, struct urb *urb, u8 address, u8 ep_number, u8 toggle_bits, - void (*callback) (void *endp, struct urb *urb, u8 *buf, int len, - int toggle_bits, int error_count, int condition_code, int repeat_number, - int halted, int skipped, int actual, int non_null)); -int usb_ftdi_elan_edset_empty(struct platform_device *pdev, u8 ed_number, - void *endp, struct urb *urb, u8 address, u8 ep_number, u8 toggle_bits, - void (*callback) (void *endp, struct urb *urb, u8 *buf, int len, - int toggle_bits, int error_count, int condition_code, int repeat_number, - int halted, int skipped, int actual, int non_null)); -int usb_ftdi_elan_edset_input(struct platform_device *pdev, u8 ed_number, - void *endp, struct urb *urb, u8 address, u8 ep_number, u8 toggle_bits, - void (*callback) (void *endp, struct urb *urb, u8 *buf, int len, - int toggle_bits, int error_count, int condition_code, int repeat_number, - int halted, int skipped, int actual, int non_null)); -int usb_ftdi_elan_edset_setup(struct platform_device *pdev, u8 ed_number, - void *endp, struct urb *urb, u8 
address, u8 ep_number, u8 toggle_bits, - void (*callback) (void *endp, struct urb *urb, u8 *buf, int len, - int toggle_bits, int error_count, int condition_code, int repeat_number, - int halted, int skipped, int actual, int non_null)); -int usb_ftdi_elan_edset_flush(struct platform_device *pdev, u8 ed_number, - void *endp); -int usb_ftdi_elan_read_pcimem(struct platform_device *pdev, int mem_offset, - u8 width, u32 *data); -int usb_ftdi_elan_write_pcimem(struct platform_device *pdev, int mem_offset, - u8 width, u32 data); diff --git a/drivers/usb/misc/usbsevseg.c b/drivers/usb/misc/usbsevseg.c index c3114d9bd128..546deff754ba 100644 --- a/drivers/usb/misc/usbsevseg.c +++ b/drivers/usb/misc/usbsevseg.c @@ -305,7 +305,7 @@ static int sevseg_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(interface); - struct usb_sevsegdev *mydev = NULL; + struct usb_sevsegdev *mydev; int rc = -ENOMEM; mydev = kzalloc(sizeof(struct usb_sevsegdev), GFP_KERNEL); diff --git a/drivers/usb/mtu3/mtu3.h b/drivers/usb/mtu3/mtu3.h index b4a7662dded5..c11840b9a6f1 100644 --- a/drivers/usb/mtu3/mtu3.h +++ b/drivers/usb/mtu3/mtu3.h @@ -16,6 +16,7 @@ #include <linux/extcon.h> #include <linux/interrupt.h> #include <linux/list.h> +#include <linux/of.h> #include <linux/phy/phy.h> #include <linux/regulator/consumer.h> #include <linux/usb.h> diff --git a/drivers/usb/mtu3/mtu3_host.c b/drivers/usb/mtu3/mtu3_host.c index 177d2caf887c..9f2be22af844 100644 --- a/drivers/usb/mtu3/mtu3_host.c +++ b/drivers/usb/mtu3/mtu3_host.c @@ -11,6 +11,7 @@ #include <linux/irq.h> #include <linux/kernel.h> #include <linux/mfd/syscon.h> +#include <linux/of.h> #include <linux/of_platform.h> #include <linux/regmap.h> diff --git a/drivers/usb/musb/cppi_dma.h b/drivers/usb/musb/cppi_dma.h index 16dd1ed44bb5..3606be897cb2 100644 --- a/drivers/usb/musb/cppi_dma.h +++ b/drivers/usb/musb/cppi_dma.h @@ -121,9 +121,6 @@ struct cppi { struct list_head tx_complete; }; -/* CPPI IRQ handler */ -extern irqreturn_t cppi_interrupt(int, void *); - struct cppi41_dma_channel { struct dma_channel channel; struct cppi41_dma_controller *controller; diff --git a/drivers/usb/musb/jz4740.c b/drivers/usb/musb/jz4740.c index 5aabdd7e2511..b38df9226278 100644 --- a/drivers/usb/musb/jz4740.c +++ b/drivers/usb/musb/jz4740.c @@ -10,7 +10,7 @@ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/module.h> -#include <linux/of_device.h> +#include <linux/of.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> #include <linux/usb/role.h> diff --git a/drivers/usb/musb/mediatek.c b/drivers/usb/musb/mediatek.c index 598ee5c0bf34..0a35aab3ab81 100644 --- a/drivers/usb/musb/mediatek.c +++ b/drivers/usb/musb/mediatek.c @@ -10,6 +10,7 @@ #include <linux/clk.h> #include <linux/dma-mapping.h> #include <linux/module.h> +#include <linux/of.h> #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/usb/role.h> diff --git a/drivers/usb/musb/mpfs.c b/drivers/usb/musb/mpfs.c index 24b98716f7fc..f0f56df38835 100644 --- a/drivers/usb/musb/mpfs.c +++ b/drivers/usb/musb/mpfs.c @@ -13,6 +13,7 @@ #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/of.h> #include <linux/platform_device.h> #include <linux/usb/usb_phy_generic.h> #include "musb_core.h" diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index ecbd3784bec3..b24adb5b399f 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -2610,8 
+2610,8 @@ static int musb_probe(struct platform_device *pdev) int irq = platform_get_irq_byname(pdev, "mc"); void __iomem *base; - if (irq <= 0) - return -ENODEV; + if (irq < 0) + return irq; base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(base)) diff --git a/drivers/usb/musb/musb_dma.h b/drivers/usb/musb/musb_dma.h index e2445ca3356d..0cd7fc468de8 100644 --- a/drivers/usb/musb/musb_dma.h +++ b/drivers/usb/musb/musb_dma.h @@ -199,10 +199,6 @@ tusb_dma_controller_create(struct musb *musb, void __iomem *base); extern void tusb_dma_controller_destroy(struct dma_controller *c); extern struct dma_controller * -cppi_dma_controller_create(struct musb *musb, void __iomem *base); -extern void cppi_dma_controller_destroy(struct dma_controller *c); - -extern struct dma_controller * cppi41_dma_controller_create(struct musb *musb, void __iomem *base); extern void cppi41_dma_controller_destroy(struct dma_controller *c); diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c index 9119b1d51370..98b42dc04dee 100644 --- a/drivers/usb/musb/musb_dsps.c +++ b/drivers/usb/musb/musb_dsps.c @@ -26,9 +26,7 @@ #include <linux/sizes.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/of_address.h> -#include <linux/of_irq.h> #include <linux/usb/of.h> #include <linux/debugfs.h> diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index 31c44325e828..051c6da7cf6d 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c @@ -1130,7 +1130,7 @@ static int musb_gadget_disable(struct usb_ep *ep) struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags) { struct musb_ep *musb_ep = to_musb_ep(ep); - struct musb_request *request = NULL; + struct musb_request *request; request = kzalloc(sizeof *request, gfp_flags); if (!request) diff --git a/drivers/usb/musb/sunxi.c b/drivers/usb/musb/sunxi.c index c5c6c4e09300..d54283fd026b 100644 --- a/drivers/usb/musb/sunxi.c +++ b/drivers/usb/musb/sunxi.c @@ -15,7 +15,6 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/phy/phy-sun4i-usb.h> #include <linux/platform_device.h> #include <linux/reset.h> diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c index cbc707fe570f..461587629bf2 100644 --- a/drivers/usb/musb/tusb6010.c +++ b/drivers/usb/musb/tusb6010.c @@ -21,6 +21,7 @@ #include <linux/usb.h> #include <linux/irq.h> #include <linux/io.h> +#include <linux/iopoll.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> @@ -1029,7 +1030,7 @@ static int tusb_musb_start(struct musb *musb) void __iomem *tbase = musb->ctrl_base; unsigned long flags; u32 reg; - int i; + int ret; /* * Enable or disable power to TUSB6010. When enabling, turn on 3.3 V and @@ -1037,17 +1038,13 @@ static int tusb_musb_start(struct musb *musb) * provide then PGOOD signal to TUSB6010 which will release it from reset. 
*/ gpiod_set_value(glue->enable, 1); - msleep(1); /* Wait for 100ms until TUSB6010 pulls INT pin down */ - i = 100; - while (i && gpiod_get_value(glue->intpin)) { - msleep(1); - i--; - } - if (!i) { - pr_err("tusb: Powerup respones failed\n"); - return -ENODEV; + ret = read_poll_timeout(gpiod_get_value, reg, !reg, 5000, 100000, true, + glue->intpin); + if (ret) { + pr_err("tusb: Powerup response failed\n"); + return ret; } spin_lock_irqsave(&musb->lock, flags); diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c index e1a2b2ea098b..acd46b72899e 100644 --- a/drivers/usb/phy/phy-mxs-usb.c +++ b/drivers/usb/phy/phy-mxs-usb.c @@ -14,7 +14,7 @@ #include <linux/delay.h> #include <linux/err.h> #include <linux/io.h> -#include <linux/of_device.h> +#include <linux/of.h> #include <linux/regmap.h> #include <linux/mfd/syscon.h> #include <linux/iopoll.h> @@ -388,19 +388,14 @@ static void __mxs_phy_disconnect_line(struct mxs_phy *mxs_phy, bool disconnect) static bool mxs_phy_is_otg_host(struct mxs_phy *mxs_phy) { - void __iomem *base = mxs_phy->phy.io_priv; - u32 phyctrl = readl(base + HW_USBPHY_CTRL); - - if (IS_ENABLED(CONFIG_USB_OTG) && - !(phyctrl & BM_USBPHY_CTRL_OTG_ID_VALUE)) - return true; - - return false; + return IS_ENABLED(CONFIG_USB_OTG) && + mxs_phy->phy.last_event == USB_EVENT_ID; } static void mxs_phy_disconnect_line(struct mxs_phy *mxs_phy, bool on) { bool vbus_is_on = false; + enum usb_phy_events last_event = mxs_phy->phy.last_event; /* If the SoCs don't need to disconnect line without vbus, quit */ if (!(mxs_phy->data->flags & MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS)) @@ -412,7 +407,8 @@ static void mxs_phy_disconnect_line(struct mxs_phy *mxs_phy, bool on) vbus_is_on = mxs_phy_get_vbus_status(mxs_phy); - if (on && !vbus_is_on && !mxs_phy_is_otg_host(mxs_phy)) + if (on && ((!vbus_is_on && !mxs_phy_is_otg_host(mxs_phy)) + || (last_event == USB_EVENT_VBUS))) __mxs_phy_disconnect_line(mxs_phy, true); else __mxs_phy_disconnect_line(mxs_phy, false); diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c index 8b2ff3a8882d..4ea47e6f835b 100644 --- a/drivers/usb/phy/phy-tegra-usb.c +++ b/drivers/usb/phy/phy-tegra-usb.c @@ -16,7 +16,7 @@ #include <linux/iopoll.h> #include <linux/module.h> #include <linux/of.h> -#include <linux/of_device.h> +#include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/resource.h> #include <linux/slab.h> diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c index 111b7ee152c4..dd1c17542439 100644 --- a/drivers/usb/renesas_usbhs/common.c +++ b/drivers/usb/renesas_usbhs/common.c @@ -11,7 +11,7 @@ #include <linux/gpio/consumer.h> #include <linux/io.h> #include <linux/module.h> -#include <linux/of_device.h> +#include <linux/of.h> #include <linux/pm_runtime.h> #include <linux/reset.h> #include <linux/slab.h> diff --git a/drivers/usb/renesas_usbhs/rza.c b/drivers/usb/renesas_usbhs/rza.c index 2d77edefb4b3..97b5217c5a90 100644 --- a/drivers/usb/renesas_usbhs/rza.c +++ b/drivers/usb/renesas_usbhs/rza.c @@ -8,7 +8,7 @@ #include <linux/delay.h> #include <linux/io.h> -#include <linux/of_device.h> +#include <linux/of.h> #include "common.h" #include "rza.h" diff --git a/drivers/usb/renesas_usbhs/rza2.c b/drivers/usb/renesas_usbhs/rza2.c index 3eed3334a17f..f079817250bb 100644 --- a/drivers/usb/renesas_usbhs/rza2.c +++ b/drivers/usb/renesas_usbhs/rza2.c @@ -8,7 +8,6 @@ #include <linux/delay.h> #include <linux/io.h> -#include <linux/of_device.h> #include <linux/phy/phy.h> #include 
"common.h" #include "rza.h" diff --git a/drivers/usb/storage/alauda.c b/drivers/usb/storage/alauda.c index 5e912dd29b4c..115f05a6201a 100644 --- a/drivers/usb/storage/alauda.c +++ b/drivers/usb/storage/alauda.c @@ -318,7 +318,8 @@ static int alauda_get_media_status(struct us_data *us, unsigned char *data) rc = usb_stor_ctrl_transfer(us, us->recv_ctrl_pipe, command, 0xc0, 0, 1, data, 2); - usb_stor_dbg(us, "Media status %02X %02X\n", data[0], data[1]); + if (rc == USB_STOR_XFER_GOOD) + usb_stor_dbg(us, "Media status %02X %02X\n", data[0], data[1]); return rc; } @@ -454,9 +455,14 @@ static int alauda_init_media(struct us_data *us) static int alauda_check_media(struct us_data *us) { struct alauda_info *info = (struct alauda_info *) us->extra; - unsigned char status[2]; + unsigned char *status = us->iobuf; + int rc; - alauda_get_media_status(us, status); + rc = alauda_get_media_status(us, status); + if (rc != USB_STOR_XFER_GOOD) { + status[0] = 0xF0; /* Pretend there's no media */ + status[1] = 0; + } /* Check for no media or door open */ if ((status[0] & 0x80) || ((status[0] & 0x1F) == 0x10) diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c index 66de880b28d0..cdf8261e22db 100644 --- a/drivers/usb/typec/altmodes/displayport.c +++ b/drivers/usb/typec/altmodes/displayport.c @@ -60,6 +60,7 @@ struct dp_altmode { enum dp_state state; bool hpd; + bool pending_hpd; struct mutex lock; /* device lock */ struct work_struct work; @@ -144,8 +145,13 @@ static int dp_altmode_status_update(struct dp_altmode *dp) dp->state = DP_STATE_EXIT; } else if (!(con & DP_CONF_CURRENTLY(dp->data.conf))) { ret = dp_altmode_configure(dp, con); - if (!ret) + if (!ret) { dp->state = DP_STATE_CONFIGURE; + if (dp->hpd != hpd) { + dp->hpd = hpd; + dp->pending_hpd = true; + } + } } else { if (dp->hpd != hpd) { drm_connector_oob_hotplug_event(dp->connector_fwnode); @@ -161,6 +167,16 @@ static int dp_altmode_configured(struct dp_altmode *dp) { sysfs_notify(&dp->alt->dev.kobj, "displayport", "configuration"); sysfs_notify(&dp->alt->dev.kobj, "displayport", "pin_assignment"); + /* + * If the DFP_D/UFP_D sends a change in HPD when first notifying the + * DisplayPort driver that it is connected, then we wait until + * configuration is complete to signal HPD. + */ + if (dp->pending_hpd) { + drm_connector_oob_hotplug_event(dp->connector_fwnode); + sysfs_notify(&dp->alt->dev.kobj, "displayport", "hpd"); + dp->pending_hpd = false; + } return dp_altmode_notify(dp); } diff --git a/drivers/usb/typec/mux/Kconfig b/drivers/usb/typec/mux/Kconfig index 784b9d8107e9..65da61150ba7 100644 --- a/drivers/usb/typec/mux/Kconfig +++ b/drivers/usb/typec/mux/Kconfig @@ -29,6 +29,7 @@ config TYPEC_MUX_INTEL_PMC tristate "Intel PMC mux control" depends on ACPI depends on INTEL_SCU_IPC + select USB_COMMON select USB_ROLE_SWITCH help Driver for USB muxes controlled by Intel PMC FW. 
Intel PMC FW can diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c index 5e8edf3881c0..60ed1f809130 100644 --- a/drivers/usb/typec/mux/intel_pmc_mux.c +++ b/drivers/usb/typec/mux/intel_pmc_mux.c @@ -59,7 +59,7 @@ enum { }; /* Common Mode Data bits */ -#define PMC_USB_ALTMODE_ACTIVE_CABLE BIT(2) +#define PMC_USB_ALTMODE_RETIMER_CABLE BIT(2) #define PMC_USB_ALTMODE_ORI_SHIFT 1 #define PMC_USB_ALTMODE_UFP_SHIFT 3 @@ -71,6 +71,7 @@ enum { #define PMC_USB_ALTMODE_TBT_TYPE BIT(17) #define PMC_USB_ALTMODE_CABLE_TYPE BIT(18) #define PMC_USB_ALTMODE_ACTIVE_LINK BIT(20) +#define PMC_USB_ALTMODE_ACTIVE_CABLE BIT(22) #define PMC_USB_ALTMODE_FORCE_LSR BIT(23) #define PMC_USB_ALTMODE_CABLE_SPD(_s_) (((_s_) & GENMASK(2, 0)) << 25) #define PMC_USB_ALTMODE_CABLE_USB31 1 @@ -117,6 +118,16 @@ enum { IOM_PORT_STATUS_DHPD_HPD_STATUS_SHIFT) & \ IOM_PORT_STATUS_DHPD_HPD_STATUS_ASSERT) +/* IOM port status register */ +#define IOM_PORT_STATUS_REGS(_offset_, _size_) ((_offset_) | (_size_)) +#define IOM_PORT_STATUS_REGS_SZ_MASK BIT(0) +#define IOM_PORT_STATUS_REGS_SZ_4 0 +#define IOM_PORT_STATUS_REGS_SZ_8 1 +#define IOM_PORT_STATUS_REGS_OFFSET(_d_) \ + ((_d_) & ~IOM_PORT_STATUS_REGS_SZ_MASK) +#define IOM_PORT_STATUS_REGS_SIZE(_d_) \ + (4 << ((_d_) & IOM_PORT_STATUS_REGS_SZ_MASK)) + struct pmc_usb; struct pmc_usb_port { @@ -145,6 +156,7 @@ struct pmc_usb { struct acpi_device *iom_adev; void __iomem *iom_base; u32 iom_port_status_offset; + u8 iom_port_status_size; struct dentry *dentry; }; @@ -160,7 +172,7 @@ static void update_port_status(struct pmc_usb_port *port) port->iom_status = readl(port->pmc->iom_base + port->pmc->iom_port_status_offset + - port_num * sizeof(u32)); + port_num * port->pmc->iom_port_status_size); } static int sbu_orientation(struct pmc_usb_port *port) @@ -319,8 +331,18 @@ pmc_usb_mux_tbt(struct pmc_usb_port *port, struct typec_mux_state *state) if (data->cable_mode & TBT_CABLE_LINK_TRAINING) req.mode_data |= PMC_USB_ALTMODE_ACTIVE_LINK; - if (data->enter_vdo & TBT_ENTER_MODE_ACTIVE_CABLE) - req.mode_data |= PMC_USB_ALTMODE_ACTIVE_CABLE; + if (acpi_dev_hid_uid_match(port->pmc->iom_adev, "INTC1072", NULL) || + acpi_dev_hid_uid_match(port->pmc->iom_adev, "INTC1079", NULL)) { + if ((data->enter_vdo & TBT_ENTER_MODE_ACTIVE_CABLE) || + (data->cable_mode & TBT_CABLE_RETIMER)) + req.mode_data |= PMC_USB_ALTMODE_RETIMER_CABLE; + } else { + if (data->enter_vdo & TBT_ENTER_MODE_ACTIVE_CABLE) + req.mode_data |= PMC_USB_ALTMODE_ACTIVE_CABLE; + + if (data->cable_mode & TBT_CABLE_RETIMER) + req.mode_data |= PMC_USB_ALTMODE_RETIMER_CABLE; + } req.mode_data |= PMC_USB_ALTMODE_CABLE_SPD(cable_speed); @@ -359,8 +381,17 @@ pmc_usb_mux_usb4(struct pmc_usb_port *port, struct typec_mux_state *state) case EUDO_CABLE_TYPE_OPTICAL: req.mode_data |= PMC_USB_ALTMODE_CABLE_TYPE; fallthrough; + case EUDO_CABLE_TYPE_RE_TIMER: + if (!acpi_dev_hid_uid_match(port->pmc->iom_adev, "INTC1072", NULL) || + !acpi_dev_hid_uid_match(port->pmc->iom_adev, "INTC1079", NULL)) + req.mode_data |= PMC_USB_ALTMODE_RETIMER_CABLE; + fallthrough; default: - req.mode_data |= PMC_USB_ALTMODE_ACTIVE_CABLE; + if (acpi_dev_hid_uid_match(port->pmc->iom_adev, "INTC1072", NULL) || + acpi_dev_hid_uid_match(port->pmc->iom_adev, "INTC1079", NULL)) + req.mode_data |= PMC_USB_ALTMODE_RETIMER_CABLE; + else + req.mode_data |= PMC_USB_ALTMODE_ACTIVE_CABLE; /* Configure data rate to rounded in the case of Active TBT3 * and USB4 cables. 
@@ -589,13 +620,16 @@ err_unregister_switch: /* IOM ACPI IDs and IOM_PORT_STATUS_OFFSET */ static const struct acpi_device_id iom_acpi_ids[] = { /* TigerLake */ - { "INTC1072", 0x560, }, + { "INTC1072", IOM_PORT_STATUS_REGS(0x560, IOM_PORT_STATUS_REGS_SZ_4) }, /* AlderLake */ - { "INTC1079", 0x160, }, + { "INTC1079", IOM_PORT_STATUS_REGS(0x160, IOM_PORT_STATUS_REGS_SZ_4) }, /* Meteor Lake */ - { "INTC107A", 0x160, }, + { "INTC107A", IOM_PORT_STATUS_REGS(0x160, IOM_PORT_STATUS_REGS_SZ_4) }, + + /* Lunar Lake */ + { "INTC10EA", IOM_PORT_STATUS_REGS(0x150, IOM_PORT_STATUS_REGS_SZ_8) }, {} }; @@ -615,7 +649,8 @@ static int pmc_usb_probe_iom(struct pmc_usb *pmc) if (!adev) return -ENODEV; - pmc->iom_port_status_offset = (u32)dev_id->driver_data; + pmc->iom_port_status_offset = IOM_PORT_STATUS_REGS_OFFSET(dev_id->driver_data); + pmc->iom_port_status_size = IOM_PORT_STATUS_REGS_SIZE(dev_id->driver_data); INIT_LIST_HEAD(&resource_list); ret = acpi_dev_get_memory_resources(adev, &resource_list); diff --git a/drivers/usb/typec/mux/nb7vpq904m.c b/drivers/usb/typec/mux/nb7vpq904m.c index 80e580d50129..cda206cf0c38 100644 --- a/drivers/usb/typec/mux/nb7vpq904m.c +++ b/drivers/usb/typec/mux/nb7vpq904m.c @@ -463,16 +463,18 @@ static int nb7vpq904m_probe(struct i2c_client *client) ret = nb7vpq904m_register_bridge(nb7); if (ret) - return ret; + goto err_disable_gpio; sw_desc.drvdata = nb7; sw_desc.fwnode = dev->fwnode; sw_desc.set = nb7vpq904m_sw_set; nb7->sw = typec_switch_register(dev, &sw_desc); - if (IS_ERR(nb7->sw)) - return dev_err_probe(dev, PTR_ERR(nb7->sw), - "Error registering typec switch\n"); + if (IS_ERR(nb7->sw)) { + ret = dev_err_probe(dev, PTR_ERR(nb7->sw), + "Error registering typec switch\n"); + goto err_disable_gpio; + } retimer_desc.drvdata = nb7; retimer_desc.fwnode = dev->fwnode; @@ -480,12 +482,21 @@ static int nb7vpq904m_probe(struct i2c_client *client) nb7->retimer = typec_retimer_register(dev, &retimer_desc); if (IS_ERR(nb7->retimer)) { - typec_switch_unregister(nb7->sw); - return dev_err_probe(dev, PTR_ERR(nb7->retimer), - "Error registering typec retimer\n"); + ret = dev_err_probe(dev, PTR_ERR(nb7->retimer), + "Error registering typec retimer\n"); + goto err_switch_unregister; } return 0; + +err_switch_unregister: + typec_switch_unregister(nb7->sw); + +err_disable_gpio: + gpiod_set_value(nb7->enable_gpio, 0); + regulator_disable(nb7->vcc_supply); + + return ret; } static void nb7vpq904m_remove(struct i2c_client *client) @@ -517,7 +528,7 @@ static struct i2c_driver nb7vpq904m_driver = { .name = "nb7vpq904m", .of_match_table = nb7vpq904m_of_table, }, - .probe_new = nb7vpq904m_probe, + .probe = nb7vpq904m_probe, .remove = nb7vpq904m_remove, .id_table = nb7vpq904m_table, }; diff --git a/drivers/usb/typec/tcpm/fusb302.c b/drivers/usb/typec/tcpm/fusb302.c index 7fc1ffa14f76..bc21006e979c 100644 --- a/drivers/usb/typec/tcpm/fusb302.c +++ b/drivers/usb/typec/tcpm/fusb302.c @@ -15,7 +15,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/mutex.h> -#include <linux/of_device.h> +#include <linux/of.h> #include <linux/pinctrl/consumer.h> #include <linux/proc_fs.h> #include <linux/regulator/consumer.h> diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c index 9b467a346114..af44ee4e6e86 100644 --- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c +++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c @@ -8,7 +8,7 @@ #include <linux/kernel.h> #include <linux/mod_devicetable.h> #include <linux/module.h> -#include 
<linux/of_device.h> +#include <linux/of.h> #include <linux/of_graph.h> #include <linux/platform_device.h> #include <linux/regmap.h> diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c index 4e1b846627d2..bb0b8479d80f 100644 --- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c +++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c @@ -8,8 +8,6 @@ #include <linux/kernel.h> #include <linux/mod_devicetable.h> #include <linux/module.h> -#include <linux/of_device.h> -#include <linux/of_irq.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/regulator/consumer.h> diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c index 94285f64b67d..a8f3f4d3a450 100644 --- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c +++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c @@ -9,7 +9,6 @@ #include <linux/kernel.h> #include <linux/mod_devicetable.h> #include <linux/module.h> -#include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/regulator/consumer.h> @@ -214,6 +213,11 @@ int qcom_pmic_typec_port_get_cc(struct pmic_typec_port *pmic_typec_port, if (ret) goto done; switch (val & DETECTED_SRC_TYPE_MASK) { + case AUDIO_ACCESS_RA_RA: + val = TYPEC_CC_RA; + *cc1 = TYPEC_CC_RA; + *cc2 = TYPEC_CC_RA; + break; case SRC_RD_OPEN: val = TYPEC_CC_RD; break; diff --git a/drivers/usb/typec/tcpm/tcpci_mt6370.c b/drivers/usb/typec/tcpm/tcpci_mt6370.c index 2a079464b398..9cda1005ef01 100644 --- a/drivers/usb/typec/tcpm/tcpci_mt6370.c +++ b/drivers/usb/typec/tcpm/tcpci_mt6370.c @@ -147,7 +147,7 @@ static int mt6370_tcpc_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); if (irq < 0) - return dev_err_probe(dev, irq, "Failed to get irq\n"); + return irq; /* Assign TCPCI feature and ops */ priv->tcpci_data.auto_discharge_disconnect = 1; diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index 829d75ebab42..5639b9a1e0bf 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c @@ -3253,23 +3253,12 @@ static int tcpm_pd_select_pdo(struct tcpm_port *port, int *sink_pdo, return ret; } -#define min_pps_apdo_current(x, y) \ - min(pdo_pps_apdo_max_current(x), pdo_pps_apdo_max_current(y)) - static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port) { - unsigned int i, j, max_mw = 0, max_mv = 0; - unsigned int min_src_mv, max_src_mv, src_ma, src_mw; - unsigned int min_snk_mv, max_snk_mv; - unsigned int max_op_mv; - u32 pdo, src, snk; - unsigned int src_pdo = 0, snk_pdo = 0; + unsigned int i, src_ma, max_temp_mw = 0, max_op_ma, op_mw; + unsigned int src_pdo = 0; + u32 pdo, src; - /* - * Select the source PPS APDO providing the most power while staying - * within the board's limits. We skip the first PDO as this is always - * 5V 3A. - */ for (i = 1; i < port->nr_source_caps; ++i) { pdo = port->source_caps[i]; @@ -3280,54 +3269,17 @@ static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port) continue; } - min_src_mv = pdo_pps_apdo_min_voltage(pdo); - max_src_mv = pdo_pps_apdo_max_voltage(pdo); - src_ma = pdo_pps_apdo_max_current(pdo); - src_mw = (src_ma * max_src_mv) / 1000; - - /* - * Now search through the sink PDOs to find a matching - * PPS APDO. Again skip the first sink PDO as this will - * always be 5V 3A. 
- */ - for (j = 1; j < port->nr_snk_pdo; j++) { - pdo = port->snk_pdo[j]; - - switch (pdo_type(pdo)) { - case PDO_TYPE_APDO: - if (pdo_apdo_type(pdo) != APDO_TYPE_PPS) { - tcpm_log(port, - "Not PPS APDO (sink), ignoring"); - continue; - } - - min_snk_mv = - pdo_pps_apdo_min_voltage(pdo); - max_snk_mv = - pdo_pps_apdo_max_voltage(pdo); - break; - default: - tcpm_log(port, - "Not APDO type (sink), ignoring"); - continue; - } + if (port->pps_data.req_out_volt > pdo_pps_apdo_max_voltage(pdo) || + port->pps_data.req_out_volt < pdo_pps_apdo_min_voltage(pdo)) + continue; - if (min_src_mv <= max_snk_mv && - max_src_mv >= min_snk_mv) { - max_op_mv = min(max_src_mv, max_snk_mv); - src_mw = (max_op_mv * src_ma) / 1000; - /* Prefer higher voltages if available */ - if ((src_mw == max_mw && - max_op_mv > max_mv) || - src_mw > max_mw) { - src_pdo = i; - snk_pdo = j; - max_mw = src_mw; - max_mv = max_op_mv; - } - } + src_ma = pdo_pps_apdo_max_current(pdo); + max_op_ma = min(src_ma, port->pps_data.req_op_curr); + op_mw = max_op_ma * port->pps_data.req_out_volt / 1000; + if (op_mw > max_temp_mw) { + src_pdo = i; + max_temp_mw = op_mw; } - break; default: tcpm_log(port, "Not APDO type (source), ignoring"); @@ -3337,16 +3289,10 @@ static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port) if (src_pdo) { src = port->source_caps[src_pdo]; - snk = port->snk_pdo[snk_pdo]; - - port->pps_data.req_min_volt = max(pdo_pps_apdo_min_voltage(src), - pdo_pps_apdo_min_voltage(snk)); - port->pps_data.req_max_volt = min(pdo_pps_apdo_max_voltage(src), - pdo_pps_apdo_max_voltage(snk)); - port->pps_data.req_max_curr = min_pps_apdo_current(src, snk); - port->pps_data.req_out_volt = min(port->pps_data.req_max_volt, - max(port->pps_data.req_min_volt, - port->pps_data.req_out_volt)); + + port->pps_data.req_min_volt = pdo_pps_apdo_min_voltage(src); + port->pps_data.req_max_volt = pdo_pps_apdo_max_voltage(src); + port->pps_data.req_max_curr = pdo_pps_apdo_max_current(src); port->pps_data.req_op_curr = min(port->pps_data.req_max_curr, port->pps_data.req_op_curr); } @@ -3464,32 +3410,16 @@ static int tcpm_pd_send_request(struct tcpm_port *port) static int tcpm_pd_build_pps_request(struct tcpm_port *port, u32 *rdo) { unsigned int out_mv, op_ma, op_mw, max_mv, max_ma, flags; - enum pd_pdo_type type; unsigned int src_pdo_index; - u32 pdo; src_pdo_index = tcpm_pd_select_pps_apdo(port); if (!src_pdo_index) return -EOPNOTSUPP; - pdo = port->source_caps[src_pdo_index]; - type = pdo_type(pdo); - - switch (type) { - case PDO_TYPE_APDO: - if (pdo_apdo_type(pdo) != APDO_TYPE_PPS) { - tcpm_log(port, "Invalid APDO selected!"); - return -EINVAL; - } - max_mv = port->pps_data.req_max_volt; - max_ma = port->pps_data.req_max_curr; - out_mv = port->pps_data.req_out_volt; - op_ma = port->pps_data.req_op_curr; - break; - default: - tcpm_log(port, "Invalid PDO selected!"); - return -EINVAL; - } + max_mv = port->pps_data.req_max_volt; + max_ma = port->pps_data.req_max_curr; + out_mv = port->pps_data.req_out_volt; + op_ma = port->pps_data.req_op_curr; flags = RDO_USB_COMM | RDO_NO_SUSPEND; @@ -4301,7 +4231,9 @@ static void run_state_machine(struct tcpm_port *port) if (port->slow_charger_loop && (current_lim > PD_P_SNK_STDBY_MW / 5)) current_lim = PD_P_SNK_STDBY_MW / 5; tcpm_set_current_limit(port, current_lim, 5000); - tcpm_set_charge(port, true); + /* Not sink vbus if operational current is 0mA */ + tcpm_set_charge(port, !!pdo_max_current(port->snk_pdo[0])); + if (!port->pd_supported) tcpm_set_state(port, SNK_READY, 0); else @@ -4582,7 +4514,8 @@ 
static void run_state_machine(struct tcpm_port *port) tcpm_set_current_limit(port, tcpm_get_current_limit(port), 5000); - tcpm_set_charge(port, true); + /* Not sink vbus if operational current is 0mA */ + tcpm_set_charge(port, !!pdo_max_current(port->snk_pdo[0])); } if (port->ams == HARD_RESET) tcpm_ams_finish(port); @@ -5349,6 +5282,10 @@ static void _tcpm_pd_vbus_off(struct tcpm_port *port) /* Do nothing, vbus drop expected */ break; + case SNK_HARD_RESET_WAIT_VBUS: + /* Do nothing, it's OK to receive vbus off events */ + break; + default: if (port->pwr_role == TYPEC_SINK && port->attached) tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port)); @@ -5395,6 +5332,9 @@ static void _tcpm_pd_vbus_vsafe0v(struct tcpm_port *port) case SNK_DEBOUNCED: /*Do nothing, still waiting for VSAFE5V for connect */ break; + case SNK_HARD_RESET_WAIT_VBUS: + /* Do nothing, it's OK to receive vbus off events */ + break; default: if (port->pwr_role == TYPEC_SINK && port->auto_vbus_discharge_enabled) tcpm_set_state(port, SNK_UNATTACHED, 0); @@ -5882,12 +5822,6 @@ static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 req_out_volt) goto port_unlock; } - if (req_out_volt < port->pps_data.min_volt || - req_out_volt > port->pps_data.max_volt) { - ret = -EINVAL; - goto port_unlock; - } - target_mw = (port->current_limit * req_out_volt) / 1000; if (target_mw < port->operating_snk_mw) { ret = -EINVAL; goto port_unlock; } @@ -6440,11 +6374,7 @@ static int tcpm_psy_set_prop(struct power_supply *psy, ret = tcpm_psy_set_online(port, val); break; case POWER_SUPPLY_PROP_VOLTAGE_NOW: - if (val->intval < port->pps_data.min_volt * 1000 || - val->intval > port->pps_data.max_volt * 1000) - ret = -EINVAL; - else - ret = tcpm_pps_set_out_volt(port, val->intval / 1000); + ret = tcpm_pps_set_out_volt(port, val->intval / 1000); break; case POWER_SUPPLY_PROP_CURRENT_NOW: if (val->intval > port->pps_data.max_curr * 1000) diff --git a/drivers/usb/typec/ucsi/Kconfig b/drivers/usb/typec/ucsi/Kconfig index b3bb0191987e..bdcb1764cfae 100644 --- a/drivers/usb/typec/ucsi/Kconfig +++ b/drivers/usb/typec/ucsi/Kconfig @@ -4,6 +4,7 @@ config TYPEC_UCSI tristate "USB Type-C Connector System Software Interface driver" depends on !CPU_BIG_ENDIAN depends on USB_ROLE_SWITCH || !USB_ROLE_SWITCH + select USB_COMMON if DEBUG_FS help USB Type-C Connector System Software Interface (UCSI) is a specification for an interface that allows the operating system to diff --git a/drivers/usb/typec/ucsi/Makefile b/drivers/usb/typec/ucsi/Makefile index 77f09e136956..b4679f94696b 100644 --- a/drivers/usb/typec/ucsi/Makefile +++ b/drivers/usb/typec/ucsi/Makefile @@ -5,6 +5,8 @@ obj-$(CONFIG_TYPEC_UCSI) += typec_ucsi.o typec_ucsi-y := ucsi.o +typec_ucsi-$(CONFIG_DEBUG_FS) += debugfs.o + typec_ucsi-$(CONFIG_TRACING) += trace.o ifneq ($(CONFIG_POWER_SUPPLY),) diff --git a/drivers/usb/typec/ucsi/debugfs.c b/drivers/usb/typec/ucsi/debugfs.c new file mode 100644 index 000000000000..0c7bf88d4a7f --- /dev/null +++ b/drivers/usb/typec/ucsi/debugfs.c @@ -0,0 +1,99 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * UCSI debugfs interface + * + * Copyright (C) 2023 Intel Corporation + * + * Authors: Rajaram Regupathy <[email protected]> + * Gopal Saranya <[email protected]> + */ +#include <linux/debugfs.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/types.h> +#include <linux/usb.h> + +#include <asm/errno.h> + +#include "ucsi.h" + +static struct dentry *ucsi_debugfs_root; + +static int ucsi_cmd(void *data, u64 val) +{ + struct ucsi *ucsi = data; + int ret; +
memset(&ucsi->debugfs->response, 0, sizeof(ucsi->debugfs->response)); + ucsi->debugfs->status = 0; + + switch (UCSI_COMMAND(val)) { + case UCSI_SET_UOM: + case UCSI_SET_UOR: + case UCSI_SET_PDR: + case UCSI_CONNECTOR_RESET: + ret = ucsi_send_command(ucsi, val, NULL, 0); + break; + case UCSI_GET_CAPABILITY: + case UCSI_GET_CONNECTOR_CAPABILITY: + case UCSI_GET_ALTERNATE_MODES: + case UCSI_GET_CURRENT_CAM: + case UCSI_GET_PDOS: + case UCSI_GET_CABLE_PROPERTY: + case UCSI_GET_CONNECTOR_STATUS: + ret = ucsi_send_command(ucsi, val, + &ucsi->debugfs->response, + sizeof(ucsi->debugfs->response)); + break; + default: + ret = -EOPNOTSUPP; + } + + if (ret < 0) { + ucsi->debugfs->status = ret; + return ret; + } + + return 0; +} +DEFINE_DEBUGFS_ATTRIBUTE(ucsi_cmd_fops, NULL, ucsi_cmd, "0x%llx\n"); + +static int ucsi_resp_show(struct seq_file *s, void *not_used) +{ + struct ucsi *ucsi = s->private; + + if (ucsi->debugfs->status) + return ucsi->debugfs->status; + + seq_printf(s, "0x%016llx%016llx\n", ucsi->debugfs->response.high, + ucsi->debugfs->response.low); + return 0; +} +DEFINE_SHOW_ATTRIBUTE(ucsi_resp); + +void ucsi_debugfs_register(struct ucsi *ucsi) +{ + ucsi->debugfs = kzalloc(sizeof(*ucsi->debugfs), GFP_KERNEL); + if (!ucsi->debugfs) + return; + + ucsi->debugfs->dentry = debugfs_create_dir(dev_name(ucsi->dev), ucsi_debugfs_root); + debugfs_create_file("command", 0200, ucsi->debugfs->dentry, ucsi, &ucsi_cmd_fops); + debugfs_create_file("response", 0400, ucsi->debugfs->dentry, ucsi, &ucsi_resp_fops); +} + +void ucsi_debugfs_unregister(struct ucsi *ucsi) +{ + debugfs_remove_recursive(ucsi->debugfs->dentry); + kfree(ucsi->debugfs); +} + +void ucsi_debugfs_init(void) +{ + ucsi_debugfs_root = debugfs_create_dir("ucsi", usb_debug_root); +} + +void ucsi_debugfs_exit(void) +{ + debugfs_remove(ucsi_debugfs_root); +} diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c index f6901319639d..c6dfe3dff346 100644 --- a/drivers/usb/typec/ucsi/ucsi.c +++ b/drivers/usb/typec/ucsi/ucsi.c @@ -1530,6 +1530,7 @@ EXPORT_SYMBOL_GPL(ucsi_create); */ void ucsi_destroy(struct ucsi *ucsi) { + ucsi_debugfs_unregister(ucsi); kfree(ucsi); } EXPORT_SYMBOL_GPL(ucsi_destroy); @@ -1552,6 +1553,7 @@ int ucsi_register(struct ucsi *ucsi) queue_delayed_work(system_long_wq, &ucsi->work, 0); + ucsi_debugfs_register(ucsi); return 0; } EXPORT_SYMBOL_GPL(ucsi_register); @@ -1611,6 +1613,19 @@ void ucsi_unregister(struct ucsi *ucsi) } EXPORT_SYMBOL_GPL(ucsi_unregister); +static int __init ucsi_module_init(void) +{ + ucsi_debugfs_init(); + return 0; +} +module_init(ucsi_module_init); + +static void __exit ucsi_module_exit(void) +{ + ucsi_debugfs_exit(); +} +module_exit(ucsi_module_exit); + MODULE_AUTHOR("Heikki Krogerus <[email protected]>"); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("USB Type-C Connector System Software Interface driver"); diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h index c09af859f573..474315a72c77 100644 --- a/drivers/usb/typec/ucsi/ucsi.h +++ b/drivers/usb/typec/ucsi/ucsi.h @@ -15,6 +15,7 @@ struct ucsi; struct ucsi_altmode; +struct dentry; /* UCSI offsets (Bytes) */ #define UCSI_VERSION 0 @@ -277,6 +278,16 @@ struct ucsi_connector_status { /* -------------------------------------------------------------------------- */ +struct ucsi_debugfs_entry { + u64 command; + struct ucsi_data { + u64 low; + u64 high; + } response; + u32 status; + struct dentry *dentry; +}; + struct ucsi { u16 version; struct device *dev; @@ -286,6 +297,7 @@ struct ucsi { struct 
ucsi_capability cap; struct ucsi_connector *connector; + struct ucsi_debugfs_entry *debugfs; struct work_struct resume_work; struct delayed_work work; @@ -388,6 +400,18 @@ static inline void ucsi_displayport_remove_partner(struct typec_altmode *adev) { } #endif /* CONFIG_TYPEC_DP_ALTMODE */ +#ifdef CONFIG_DEBUG_FS +void ucsi_debugfs_init(void); +void ucsi_debugfs_exit(void); +void ucsi_debugfs_register(struct ucsi *ucsi); +void ucsi_debugfs_unregister(struct ucsi *ucsi); +#else +static inline void ucsi_debugfs_init(void) { } +static inline void ucsi_debugfs_exit(void) { } +static inline void ucsi_debugfs_register(struct ucsi *ucsi) { } +static inline void ucsi_debugfs_unregister(struct ucsi *ucsi) { } +#endif /* CONFIG_DEBUG_FS */ + /* * NVIDIA VirtualLink (svid 0x955) has two altmode. VirtualLink * DP mode with vdo=0x1 and NVIDIA test mode with vdo=0x3 diff --git a/drivers/usb/typec/ucsi/ucsi_glink.c b/drivers/usb/typec/ucsi/ucsi_glink.c index 1fe9cb5b6bd9..bb1854b3311d 100644 --- a/drivers/usb/typec/ucsi/ucsi_glink.c +++ b/drivers/usb/typec/ucsi/ucsi_glink.c @@ -5,7 +5,6 @@ */ #include <linux/auxiliary_bus.h> #include <linux/module.h> -#include <linux/of_device.h> #include <linux/mutex.h> #include <linux/property.h> #include <linux/soc/qcom/pdr.h> diff --git a/drivers/usb/usbip/vudc_dev.c b/drivers/usb/usbip/vudc_dev.c index 2bc428f2e261..44b04c54c086 100644 --- a/drivers/usb/usbip/vudc_dev.c +++ b/drivers/usb/usbip/vudc_dev.c @@ -489,11 +489,11 @@ static void vudc_device_unusable(struct usbip_device *ud) struct vudc_device *alloc_vudc_device(int devid) { - struct vudc_device *udc_dev = NULL; + struct vudc_device *udc_dev; udc_dev = kzalloc(sizeof(*udc_dev), GFP_KERNEL); if (!udc_dev) - goto out; + return NULL; INIT_LIST_HEAD(&udc_dev->dev_entry); @@ -503,7 +503,6 @@ struct vudc_device *alloc_vudc_device(int devid) udc_dev = NULL; } -out: return udc_dev; } diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index 030ab44fce18..82324c327a50 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -441,13 +441,23 @@ void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache, u64 num_bytes) { struct btrfs_caching_control *caching_ctl; + int progress; caching_ctl = btrfs_get_caching_control(cache); if (!caching_ctl) return; + /* + * We've already failed to allocate from this block group, so even if + * there's enough space in the block group it isn't contiguous enough to + * allow for an allocation, so wait for at least the next wakeup tick, + * or for the thing to be done. 
+ */ + progress = atomic_read(&caching_ctl->progress); + wait_event(caching_ctl->wait, btrfs_block_group_done(cache) || - (cache->free_space_ctl->free_space >= num_bytes)); + (progress != atomic_read(&caching_ctl->progress) && + (cache->free_space_ctl->free_space >= num_bytes))); btrfs_put_caching_control(caching_ctl); } @@ -802,8 +812,10 @@ next: if (total_found > CACHING_CTL_WAKE_UP) { total_found = 0; - if (wakeup) + if (wakeup) { + atomic_inc(&caching_ctl->progress); wake_up(&caching_ctl->wait); + } } } path->slots[0]++; @@ -910,6 +922,7 @@ int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait) init_waitqueue_head(&caching_ctl->wait); caching_ctl->block_group = cache; refcount_set(&caching_ctl->count, 2); + atomic_set(&caching_ctl->progress, 0); btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL); spin_lock(&cache->lock); diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h index aba5dff66c19..74b61e663028 100644 --- a/fs/btrfs/block-group.h +++ b/fs/btrfs/block-group.h @@ -90,6 +90,8 @@ struct btrfs_caching_control { wait_queue_head_t wait; struct btrfs_work work; struct btrfs_block_group *block_group; + /* Track progress of caching during allocation. */ + atomic_t progress; refcount_t count; }; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 9b9914e5f03d..a9a2c5446c18 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1103,7 +1103,8 @@ static int btrfs_init_fs_root(struct btrfs_root *root, dev_t anon_dev) btrfs_drew_lock_init(&root->snapshot_lock); if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID && - !btrfs_is_data_reloc_root(root)) { + !btrfs_is_data_reloc_root(root) && + is_fstree(root->root_key.objectid)) { set_bit(BTRFS_ROOT_SHAREABLE, &root->state); btrfs_check_and_init_root_item(&root->root_item); } @@ -1300,6 +1301,16 @@ static struct btrfs_root *btrfs_get_root_ref(struct btrfs_fs_info *fs_info, root = btrfs_get_global_root(fs_info, objectid); if (root) return root; + + /* + * If we're called for non-subvolume trees, and above function didn't + * find one, do not try to read it from disk. + * + * This is namely for free-space-tree and quota tree, which can change + * at runtime and should only be grabbed from fs_info. + */ + if (!is_fstree(objectid) && objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) + return ERR_PTR(-ENOENT); again: root = btrfs_lookup_fs_root(fs_info, objectid); if (root) { diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 911908ea5f6f..f396a9afa403 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -4310,8 +4310,11 @@ have_block_group: ret = 0; } - if (unlikely(block_group->cached == BTRFS_CACHE_ERROR)) + if (unlikely(block_group->cached == BTRFS_CACHE_ERROR)) { + if (!cache_block_group_error) + cache_block_group_error = -EIO; goto loop; + } if (!find_free_extent_check_size_class(ffe_ctl, block_group)) goto loop; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index a91d5ad27984..ca765d62324f 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2145,6 +2145,12 @@ retry: continue; } + if (!folio_test_dirty(folio)) { + /* Someone wrote it for us. */ + folio_unlock(folio); + continue; + } + if (wbc->sync_mode != WB_SYNC_NONE) { if (folio_test_writeback(folio)) submit_write_bio(bio_ctrl, 0); @@ -2164,11 +2170,12 @@ retry: } /* - * the filesystem may choose to bump up nr_to_write. + * The filesystem may choose to bump up nr_to_write. * We have to make sure to honor the new nr_to_write - * at any time + * at any time. 
*/ - nr_to_write_done = wbc->nr_to_write <= 0; + nr_to_write_done = (wbc->sync_mode == WB_SYNC_NONE && + wbc->nr_to_write <= 0); } folio_batch_release(&fbatch); cond_resched(); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 49cef61f6a39..9055e19b01ef 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1654,8 +1654,6 @@ out_unlock: clear_bits, page_ops); start += cur_alloc_size; - if (start >= end) - return ret; } /* @@ -1664,9 +1662,11 @@ out_unlock: * space_info's bytes_may_use counter, reserved in * btrfs_check_data_free_space(). */ - extent_clear_unlock_delalloc(inode, start, end, locked_page, - clear_bits | EXTENT_CLEAR_DATA_RESV, - page_ops); + if (start < end) { + clear_bits |= EXTENT_CLEAR_DATA_RESV; + extent_clear_unlock_delalloc(inode, start, end, locked_page, + clear_bits, page_ops); + } return ret; } diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 25a3361caedc..46c3c1d57266 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -1916,7 +1916,39 @@ again: err = PTR_ERR(root); break; } - ASSERT(root->reloc_root == reloc_root); + + if (unlikely(root->reloc_root != reloc_root)) { + if (root->reloc_root) { + btrfs_err(fs_info, +"reloc tree mismatch, root %lld has reloc root key (%lld %u %llu) gen %llu, expect reloc root key (%lld %u %llu) gen %llu", + root->root_key.objectid, + root->reloc_root->root_key.objectid, + root->reloc_root->root_key.type, + root->reloc_root->root_key.offset, + btrfs_root_generation( + &root->reloc_root->root_item), + reloc_root->root_key.objectid, + reloc_root->root_key.type, + reloc_root->root_key.offset, + btrfs_root_generation( + &reloc_root->root_item)); + } else { + btrfs_err(fs_info, +"reloc tree mismatch, root %lld has no reloc root, expect reloc root key (%lld %u %llu) gen %llu", + root->root_key.objectid, + reloc_root->root_key.objectid, + reloc_root->root_key.type, + reloc_root->root_key.offset, + btrfs_root_generation( + &reloc_root->root_item)); + } + list_add(&reloc_root->root_list, &reloc_roots); + btrfs_put_root(root); + btrfs_abort_transaction(trans, -EUCLEAN); + if (!err) + err = -EUCLEAN; + break; + } /* * set reference count to 1, so btrfs_recover_relocation @@ -1989,7 +2021,7 @@ again: root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, false); if (btrfs_root_refs(&reloc_root->root_item) > 0) { - if (IS_ERR(root)) { + if (WARN_ON(IS_ERR(root))) { /* * For recovery we read the fs roots on mount, * and if we didn't find the root then we marked @@ -1998,17 +2030,14 @@ again: * memory. However there's no reason we can't * handle the error properly here just in case. */ - ASSERT(0); ret = PTR_ERR(root); goto out; } - if (root->reloc_root != reloc_root) { + if (WARN_ON(root->reloc_root != reloc_root)) { /* - * This is actually impossible without something - * going really wrong (like weird race condition - * or cosmic rays). + * This can happen if on-disk metadata has some + * corruption, e.g. bad reloc tree key offset. */ - ASSERT(0); ret = -EINVAL; goto out; } diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c index 038dfa8f1788..ab08a0b01311 100644 --- a/fs/btrfs/tree-checker.c +++ b/fs/btrfs/tree-checker.c @@ -446,6 +446,20 @@ static int check_root_key(struct extent_buffer *leaf, struct btrfs_key *key, btrfs_item_key_to_cpu(leaf, &item_key, slot); is_root_item = (item_key.type == BTRFS_ROOT_ITEM_KEY); + /* + * Bad rootid for reloc trees. + * + * Reloc trees are only for subvolume trees, other trees only need + * to be COWed to be relocated. 
+ */ + if (unlikely(is_root_item && key->objectid == BTRFS_TREE_RELOC_OBJECTID && + !is_fstree(key->offset))) { + generic_err(leaf, slot, + "invalid reloc tree for root %lld, root id is not a subvolume tree", + key->offset); + return -EUCLEAN; + } + /* No such tree id */ if (unlikely(key->objectid == 0)) { if (is_root_item) diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 4a2b39d9a61a..bdcffb04513f 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -2019,9 +2019,10 @@ unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn) } } +WRAP_DIR_ITER(ceph_readdir) // FIXME! const struct file_operations ceph_dir_fops = { .read = ceph_read_dir, - .iterate = ceph_readdir, + .iterate_shared = shared_ceph_readdir, .llseek = ceph_dir_llseek, .open = ceph_open, .release = ceph_release, @@ -2033,7 +2034,7 @@ const struct file_operations ceph_dir_fops = { }; const struct file_operations ceph_snapdir_fops = { - .iterate = ceph_readdir, + .iterate_shared = shared_ceph_readdir, .llseek = ceph_dir_llseek, .open = ceph_open, .release = ceph_release, diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 66048a86c480..5fb367b1d4b0 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -4764,7 +4764,7 @@ static void delayed_work(struct work_struct *work) dout("mdsc delayed_work\n"); - if (mdsc->stopping) + if (mdsc->stopping >= CEPH_MDSC_STOPPING_FLUSHED) return; mutex_lock(&mdsc->mutex); @@ -4943,7 +4943,7 @@ void send_flush_mdlog(struct ceph_mds_session *s) void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc) { dout("pre_umount\n"); - mdsc->stopping = 1; + mdsc->stopping = CEPH_MDSC_STOPPING_BEGIN; ceph_mdsc_iterate_sessions(mdsc, send_flush_mdlog, true); ceph_mdsc_iterate_sessions(mdsc, lock_unlock_session, false); diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h index 724307ff89cd..86d2965e68a1 100644 --- a/fs/ceph/mds_client.h +++ b/fs/ceph/mds_client.h @@ -380,6 +380,11 @@ struct cap_wait { int want; }; +enum { + CEPH_MDSC_STOPPING_BEGIN = 1, + CEPH_MDSC_STOPPING_FLUSHED = 2, +}; + /* * mds client state */ diff --git a/fs/ceph/super.c b/fs/ceph/super.c index 3fc48b43cab0..a5f52013314d 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -1374,6 +1374,16 @@ static void ceph_kill_sb(struct super_block *s) ceph_mdsc_pre_umount(fsc->mdsc); flush_fs_workqueues(fsc); + /* + * Though the kill_anon_super() will finally trigger the + * sync_filesystem() anyway, we still need to do it here + * and then bump the stage of shutdown to stop the work + * queue as earlier as possible. 
+ */ + sync_filesystem(s); + + fsc->mdsc->stopping = CEPH_MDSC_STOPPING_FLUSHED; + kill_anon_super(s); fsc->client->extra_mon_dispatch = NULL; diff --git a/fs/coda/dir.c b/fs/coda/dir.c index 8450b1bd354b..1b960de2bf39 100644 --- a/fs/coda/dir.c +++ b/fs/coda/dir.c @@ -429,21 +429,14 @@ static int coda_readdir(struct file *coda_file, struct dir_context *ctx) cfi = coda_ftoc(coda_file); host_file = cfi->cfi_container; - if (host_file->f_op->iterate || host_file->f_op->iterate_shared) { + if (host_file->f_op->iterate_shared) { struct inode *host_inode = file_inode(host_file); ret = -ENOENT; if (!IS_DEADDIR(host_inode)) { - if (host_file->f_op->iterate_shared) { - inode_lock_shared(host_inode); - ret = host_file->f_op->iterate_shared(host_file, ctx); - file_accessed(host_file); - inode_unlock_shared(host_inode); - } else { - inode_lock(host_inode); - ret = host_file->f_op->iterate(host_file, ctx); - file_accessed(host_file); - inode_unlock(host_inode); - } + inode_lock_shared(host_inode); + ret = host_file->f_op->iterate_shared(host_file, ctx); + file_accessed(host_file); + inode_unlock_shared(host_inode); } return ret; } @@ -585,10 +578,11 @@ const struct inode_operations coda_dir_inode_operations = { .setattr = coda_setattr, }; +WRAP_DIR_ITER(coda_readdir) // FIXME! const struct file_operations coda_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .iterate = coda_readdir, + .iterate_shared = shared_coda_readdir, .open = coda_open, .release = coda_release, .fsync = coda_fsync, diff --git a/fs/erofs/super.c b/fs/erofs/super.c index 9d6a3c6158bd..566f68ddfa36 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -889,8 +889,6 @@ static void erofs_kill_sb(struct super_block *sb) { struct erofs_sb_info *sbi; - WARN_ON(sb->s_magic != EROFS_SUPER_MAGIC); - /* pseudo mount for anon inodes */ if (sb->s_flags & SB_KERNMOUNT) { kill_anon_super(sb); diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index b69d89a11dd0..de4f12152b62 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -1144,10 +1144,11 @@ static void z_erofs_do_decompressed_bvec(struct z_erofs_decompress_backend *be, struct z_erofs_bvec *bvec) { struct z_erofs_bvec_item *item; + unsigned int pgnr; - if (!((bvec->offset + be->pcl->pageofs_out) & ~PAGE_MASK)) { - unsigned int pgnr; - + if (!((bvec->offset + be->pcl->pageofs_out) & ~PAGE_MASK) && + (bvec->end == PAGE_SIZE || + bvec->offset + bvec->end == be->pcl->length)) { pgnr = (bvec->offset + be->pcl->pageofs_out) >> PAGE_SHIFT; DBG_BUGON(pgnr >= be->nr_pages); if (!be->decompressed_pages[pgnr]) { diff --git a/fs/exfat/balloc.c b/fs/exfat/balloc.c index 9f42f25fab92..e918decb3735 100644 --- a/fs/exfat/balloc.c +++ b/fs/exfat/balloc.c @@ -69,7 +69,7 @@ static int exfat_allocate_bitmap(struct super_block *sb, } sbi->map_sectors = ((need_map_size - 1) >> (sb->s_blocksize_bits)) + 1; - sbi->vol_amap = kmalloc_array(sbi->map_sectors, + sbi->vol_amap = kvmalloc_array(sbi->map_sectors, sizeof(struct buffer_head *), GFP_KERNEL); if (!sbi->vol_amap) return -ENOMEM; @@ -84,7 +84,7 @@ static int exfat_allocate_bitmap(struct super_block *sb, while (j < i) brelse(sbi->vol_amap[j++]); - kfree(sbi->vol_amap); + kvfree(sbi->vol_amap); sbi->vol_amap = NULL; return -EIO; } @@ -138,7 +138,7 @@ void exfat_free_bitmap(struct exfat_sb_info *sbi) for (i = 0; i < sbi->map_sectors; i++) __brelse(sbi->vol_amap[i]); - kfree(sbi->vol_amap); + kvfree(sbi->vol_amap); } int exfat_set_bitmap(struct inode *inode, unsigned int clu, bool sync) diff --git a/fs/exfat/dir.c b/fs/exfat/dir.c 
index 957574180a5e..e1586bba6d86 100644 --- a/fs/exfat/dir.c +++ b/fs/exfat/dir.c @@ -34,6 +34,7 @@ static int exfat_get_uniname_from_ext_entry(struct super_block *sb, { int i, err; struct exfat_entry_set_cache es; + unsigned int uni_len = 0, len; err = exfat_get_dentry_set(&es, sb, p_dir, entry, ES_ALL_ENTRIES); if (err) @@ -52,7 +53,10 @@ static int exfat_get_uniname_from_ext_entry(struct super_block *sb, if (exfat_get_entry_type(ep) != TYPE_EXTEND) break; - exfat_extract_uni_name(ep, uniname); + len = exfat_extract_uni_name(ep, uniname); + uni_len += len; + if (len != EXFAT_FILE_NAME_LEN || uni_len >= MAX_NAME_LENGTH) + break; uniname += EXFAT_FILE_NAME_LEN; } @@ -214,7 +218,10 @@ static void exfat_free_namebuf(struct exfat_dentry_namebuf *nb) exfat_init_namebuf(nb); } -/* skip iterating emit_dots when dir is empty */ +/* + * Before calling dir_emit*(), sbi->s_lock should be released + * because page fault can occur in dir_emit*(). + */ #define ITER_POS_FILLED_DOTS (2) static int exfat_iterate(struct file *file, struct dir_context *ctx) { @@ -229,11 +236,10 @@ static int exfat_iterate(struct file *file, struct dir_context *ctx) int err = 0, fake_offset = 0; exfat_init_namebuf(nb); - mutex_lock(&EXFAT_SB(sb)->s_lock); cpos = ctx->pos; if (!dir_emit_dots(file, ctx)) - goto unlock; + goto out; if (ctx->pos == ITER_POS_FILLED_DOTS) { cpos = 0; @@ -245,16 +251,18 @@ static int exfat_iterate(struct file *file, struct dir_context *ctx) /* name buffer should be allocated before use */ err = exfat_alloc_namebuf(nb); if (err) - goto unlock; + goto out; get_new: + mutex_lock(&EXFAT_SB(sb)->s_lock); + if (ei->flags == ALLOC_NO_FAT_CHAIN && cpos >= i_size_read(inode)) goto end_of_dir; err = exfat_readdir(inode, &cpos, &de); if (err) { /* - * At least we tried to read a sector. Move cpos to next sector - * position (should be aligned). + * At least we tried to read a sector. + * Move cpos to next sector position (should be aligned). */ if (err == -EIO) { cpos += 1 << (sb->s_blocksize_bits); @@ -277,16 +285,10 @@ get_new: inum = iunique(sb, EXFAT_ROOT_INO); } - /* - * Before calling dir_emit(), sb_lock should be released. - * Because page fault can occur in dir_emit() when the size - * of buffer given from user is larger than one page size. - */ mutex_unlock(&EXFAT_SB(sb)->s_lock); if (!dir_emit(ctx, nb->lfn, strlen(nb->lfn), inum, (de.attr & ATTR_SUBDIR) ? DT_DIR : DT_REG)) - goto out_unlocked; - mutex_lock(&EXFAT_SB(sb)->s_lock); + goto out; ctx->pos = cpos; goto get_new; @@ -294,9 +296,8 @@ end_of_dir: if (!cpos && fake_offset) cpos = ITER_POS_FILLED_DOTS; ctx->pos = cpos; -unlock: mutex_unlock(&EXFAT_SB(sb)->s_lock); -out_unlocked: +out: /* * To improve performance, free namebuf after unlock sb_lock. * If namebuf is not allocated, this function do nothing @@ -305,10 +306,11 @@ out_unlocked: return err; } +WRAP_DIR_ITER(exfat_iterate) // FIXME! 
const struct file_operations exfat_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .iterate = exfat_iterate, + .iterate_shared = shared_exfat_iterate, .unlocked_ioctl = exfat_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = exfat_compat_ioctl, @@ -1079,7 +1081,8 @@ rewind: if (entry_type == TYPE_EXTEND) { unsigned short entry_uniname[16], unichar; - if (step != DIRENT_STEP_NAME) { + if (step != DIRENT_STEP_NAME || + name_len >= MAX_NAME_LENGTH) { step = DIRENT_STEP_FILE; continue; } diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c index 40e624cf7e92..d1dbe47c7975 100644 --- a/fs/exportfs/expfs.c +++ b/fs/exportfs/expfs.c @@ -315,7 +315,7 @@ static int get_name(const struct path *path, char *name, struct dentry *child) goto out; error = -EINVAL; - if (!file->f_op->iterate && !file->f_op->iterate_shared) + if (!file->f_op->iterate_shared) goto out_close; buffer.sequence = 0; diff --git a/fs/file.c b/fs/file.c index 35c62b54c9d6..3fd003a8604f 100644 --- a/fs/file.c +++ b/fs/file.c @@ -1036,12 +1036,28 @@ unsigned long __fdget_raw(unsigned int fd) return __fget_light(fd, 0); } +/* + * Try to avoid f_pos locking. We only need it if the + * file is marked for FMODE_ATOMIC_POS, and it can be + * accessed multiple ways. + * + * Always do it for directories, because pidfd_getfd() + * can make a file accessible even if it otherwise would + * not be, and for directories this is a correctness + * issue, not a "POSIX requirement". + */ +static inline bool file_needs_f_pos_lock(struct file *file) +{ + return (file->f_mode & FMODE_ATOMIC_POS) && + (file_count(file) > 1 || file->f_op->iterate_shared); +} + unsigned long __fdget_pos(unsigned int fd) { unsigned long v = __fdget(fd); struct file *file = (struct file *)(v & ~3); - if (file && (file->f_mode & FMODE_ATOMIC_POS)) { + if (file && file_needs_f_pos_lock(file)) { v |= FDPUT_POS_UNLOCK; mutex_lock(&file->f_pos_lock); } diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index 1bf3c4453516..b43fa8b8fc05 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c @@ -1578,7 +1578,7 @@ const struct file_operations gfs2_file_fops = { .fsync = gfs2_fsync, .lock = gfs2_lock, .flock = gfs2_flock, - .splice_read = filemap_splice_read, + .splice_read = copy_splice_read, .splice_write = gfs2_file_splice_write, .setlease = simple_nosetlease, .fallocate = gfs2_fallocate, @@ -1609,7 +1609,7 @@ const struct file_operations gfs2_file_fops_nolock = { .open = gfs2_open, .release = gfs2_release, .fsync = gfs2_fsync, - .splice_read = filemap_splice_read, + .splice_read = copy_splice_read, .splice_write = gfs2_file_splice_write, .setlease = generic_setlease, .fallocate = gfs2_fallocate, diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c index ec1631257978..7e835be7032d 100644 --- a/fs/gfs2/trans.c +++ b/fs/gfs2/trans.c @@ -230,9 +230,11 @@ void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh) { struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; + struct super_block *sb = sdp->sd_vfs; struct gfs2_bufdata *bd; struct gfs2_meta_header *mh; struct gfs2_trans *tr = current->journal_info; + bool withdraw = false; lock_buffer(bh); if (buffer_pinned(bh)) { @@ -266,13 +268,15 @@ void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh) (unsigned long long)bd->bd_bh->b_blocknr); BUG(); } - if (unlikely(test_bit(SDF_FROZEN, &sdp->sd_flags))) { - fs_info(sdp, "GFS2:adding buf while frozen\n"); - gfs2_assert_withdraw(sdp, 0); - } if (unlikely(gfs2_withdrawn(sdp))) { fs_info(sdp, "GFS2:adding buf while withdrawn! 
0x%llx\n", (unsigned long long)bd->bd_bh->b_blocknr); + goto out_unlock; + } + if (unlikely(sb->s_writers.frozen == SB_FREEZE_COMPLETE)) { + fs_info(sdp, "GFS2:adding buf while frozen\n"); + withdraw = true; + goto out_unlock; } gfs2_pin(sdp, bd->bd_bh); mh->__pad0 = cpu_to_be64(0); @@ -281,6 +285,8 @@ void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh) tr->tr_num_buf_new++; out_unlock: gfs2_log_unlock(sdp); + if (withdraw) + gfs2_assert_withdraw(sdp, 0); out: unlock_buffer(bh); } diff --git a/fs/inode.c b/fs/inode.c index 8fefb69e1f84..67611a360031 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -16,7 +16,6 @@ #include <linux/fsnotify.h> #include <linux/mount.h> #include <linux/posix_acl.h> -#include <linux/prefetch.h> #include <linux/buffer_head.h> /* for inode_has_buffers */ #include <linux/ratelimit.h> #include <linux/list_lru.h> @@ -1041,8 +1040,6 @@ struct inode *new_inode(struct super_block *sb) { struct inode *inode; - spin_lock_prefetch(&sb->s_inode_list_lock); - inode = new_inode_pseudo(sb); if (inode) inode_sb_list_add(inode); diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c index 9b030297aa64..e98ddb2b1cf2 100644 --- a/fs/jfs/namei.c +++ b/fs/jfs/namei.c @@ -1535,9 +1535,10 @@ const struct inode_operations jfs_dir_inode_operations = { #endif }; +WRAP_DIR_ITER(jfs_readdir) // FIXME! const struct file_operations jfs_dir_operations = { .read = generic_read_dir, - .iterate = jfs_readdir, + .iterate_shared = shared_jfs_readdir, .fsync = jfs_fsync, .unlocked_ioctl = jfs_ioctl, .compat_ioctl = compat_ptr_ioctl, diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 8a2321d19194..2c9074ab2315 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -956,10 +956,13 @@ nfsd_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf, last_page = page + (offset + sd->len - 1) / PAGE_SIZE; for (page += offset / PAGE_SIZE; page <= last_page; page++) { /* - * Skip page replacement when extending the contents - * of the current page. + * Skip page replacement when extending the contents of the + * current page. But note that we may get two zero_pages in a + * row from shmem. */ - if (page == *(rqstp->rq_next_page - 1)) + if (page == *(rqstp->rq_next_page - 1) && + offset_in_page(rqstp->rq_res.page_base + + rqstp->rq_res.page_len)) continue; if (unlikely(!svc_rqst_replace_page(rqstp, page))) return -EIO; diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index a8ce522ac747..35bc79305318 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c @@ -1101,9 +1101,17 @@ int nilfs_set_file_dirty(struct inode *inode, unsigned int nr_dirty) int __nilfs_mark_inode_dirty(struct inode *inode, int flags) { + struct the_nilfs *nilfs = inode->i_sb->s_fs_info; struct buffer_head *ibh; int err; + /* + * Do not dirty inodes after the log writer has been detached + * and its nilfs_root struct has been freed. 
+ */ + if (unlikely(nilfs_purging(nilfs))) + return 0; + err = nilfs_load_inode_block(inode, &ibh); if (unlikely(err)) { nilfs_warn(inode->i_sb, diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index c2553024bd25..581691e4be49 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -2845,6 +2845,7 @@ void nilfs_detach_log_writer(struct super_block *sb) nilfs_segctor_destroy(nilfs->ns_writer); nilfs->ns_writer = NULL; } + set_nilfs_purging(nilfs); /* Force to free the list of dirty files */ spin_lock(&nilfs->ns_inode_lock); @@ -2857,4 +2858,5 @@ void nilfs_detach_log_writer(struct super_block *sb) up_write(&nilfs->ns_segctor_sem); nilfs_dispose_list(nilfs, &garbage_list, 1); + clear_nilfs_purging(nilfs); } diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h index 47c7dfbb7ea5..cd4ae1b8ae16 100644 --- a/fs/nilfs2/the_nilfs.h +++ b/fs/nilfs2/the_nilfs.h @@ -29,6 +29,7 @@ enum { THE_NILFS_DISCONTINUED, /* 'next' pointer chain has broken */ THE_NILFS_GC_RUNNING, /* gc process is running */ THE_NILFS_SB_DIRTY, /* super block is dirty */ + THE_NILFS_PURGING, /* disposing dirty files for cleanup */ }; /** @@ -208,6 +209,7 @@ THE_NILFS_FNS(INIT, init) THE_NILFS_FNS(DISCONTINUED, discontinued) THE_NILFS_FNS(GC_RUNNING, gc_running) THE_NILFS_FNS(SB_DIRTY, sb_dirty) +THE_NILFS_FNS(PURGING, purging) /* * Mount option operations diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c index 518c3a21a556..4596c90e7b7c 100644 --- a/fs/ntfs/dir.c +++ b/fs/ntfs/dir.c @@ -1525,10 +1525,11 @@ static int ntfs_dir_fsync(struct file *filp, loff_t start, loff_t end, #endif /* NTFS_RW */ +WRAP_DIR_ITER(ntfs_readdir) // FIXME! const struct file_operations ntfs_dir_ops = { .llseek = generic_file_llseek, /* Seek inside directory. */ .read = generic_read_dir, /* Return -EISDIR. */ - .iterate = ntfs_readdir, /* Read directory contents. */ + .iterate_shared = shared_ntfs_readdir, /* Read directory contents. */ #ifdef NTFS_RW .fsync = ntfs_dir_fsync, /* Sync a directory to disk. */ #endif /* NTFS_RW */ diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 91a194596552..bf2c17ea96a0 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -2793,10 +2793,11 @@ const struct file_operations ocfs2_fops = { .remap_file_range = ocfs2_remap_file_range, }; +WRAP_DIR_ITER(ocfs2_readdir) // FIXME! 
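The nilfs2 hunks above depend on three helpers, set_nilfs_purging(), clear_nilfs_purging() and nilfs_purging(), generated by the THE_NILFS_FNS(PURGING, purging) line. Assuming the macro follows the usual set/clear/test pattern over the superblock flags word (the ns_flags field name is an assumption based on the existing flag helpers, not something visible in this hunk), the generated accessors look roughly like this:

/* Sketch of the THE_NILFS_FNS(PURGING, purging) expansion. */
static inline void set_nilfs_purging(struct the_nilfs *nilfs)
{
	set_bit(THE_NILFS_PURGING, &nilfs->ns_flags);
}

static inline void clear_nilfs_purging(struct the_nilfs *nilfs)
{
	clear_bit(THE_NILFS_PURGING, &nilfs->ns_flags);
}

static inline int nilfs_purging(struct the_nilfs *nilfs)
{
	return test_bit(THE_NILFS_PURGING, &nilfs->ns_flags);
}

nilfs_detach_log_writer() sets the flag around its forced disposal of dirty files, and __nilfs_mark_inode_dirty() returns early while it is set, so inodes whose nilfs_root is about to be freed are never re-dirtied.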
const struct file_operations ocfs2_dops = { .llseek = generic_file_llseek, .read = generic_read_dir, - .iterate = ocfs2_readdir, + .iterate_shared = shared_ocfs2_readdir, .fsync = ocfs2_sync_file, .release = ocfs2_dir_release, .open = ocfs2_dir_open, @@ -2842,7 +2843,7 @@ const struct file_operations ocfs2_fops_no_plocks = { const struct file_operations ocfs2_dops_no_plocks = { .llseek = generic_file_llseek, .read = generic_read_dir, - .iterate = ocfs2_readdir, + .iterate_shared = shared_ocfs2_readdir, .fsync = ocfs2_sync_file, .release = ocfs2_dir_release, .open = ocfs2_dir_open, diff --git a/fs/open.c b/fs/open.c index 0c55c8e7f837..e6ead0f19964 100644 --- a/fs/open.c +++ b/fs/open.c @@ -1322,7 +1322,7 @@ inline int build_open_flags(const struct open_how *how, struct open_flags *op) lookup_flags |= LOOKUP_IN_ROOT; if (how->resolve & RESOLVE_CACHED) { /* Don't bother even trying for create/truncate/tmpfile open */ - if (flags & (O_TRUNC | O_CREAT | O_TMPFILE)) + if (flags & (O_TRUNC | O_CREAT | __O_TMPFILE)) return -EAGAIN; lookup_flags |= LOOKUP_CACHED; } diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c index ee5c4736480f..de39e067ae65 100644 --- a/fs/overlayfs/readdir.c +++ b/fs/overlayfs/readdir.c @@ -954,10 +954,11 @@ static int ovl_dir_open(struct inode *inode, struct file *file) return 0; } +WRAP_DIR_ITER(ovl_iterate) // FIXME! const struct file_operations ovl_dir_operations = { .read = generic_read_dir, .open = ovl_dir_open, - .iterate = ovl_iterate, + .iterate_shared = shared_ovl_iterate, .llseek = ovl_dir_llseek, .fsync = ovl_dir_fsync, .release = ovl_dir_release, diff --git a/fs/proc/base.c b/fs/proc/base.c index 05452c3b9872..9df3f4839662 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -2817,7 +2817,7 @@ static int proc_##LSM##_attr_dir_iterate(struct file *filp, \ \ static const struct file_operations proc_##LSM##_attr_dir_ops = { \ .read = generic_read_dir, \ - .iterate = proc_##LSM##_attr_dir_iterate, \ + .iterate_shared = proc_##LSM##_attr_dir_iterate, \ .llseek = default_llseek, \ }; \ \ diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c index 9cb32e1a78a0..23fc24d16b31 100644 --- a/fs/proc/kcore.c +++ b/fs/proc/kcore.c @@ -309,6 +309,8 @@ static void append_kcore_note(char *notes, size_t *i, const char *name, static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter) { + struct file *file = iocb->ki_filp; + char *buf = file->private_data; loff_t *fpos = &iocb->ki_pos; size_t phdrs_offset, notes_offset, data_offset; size_t page_offline_frozen = 1; @@ -555,10 +557,21 @@ static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter) case KCORE_VMEMMAP: case KCORE_TEXT: /* - * We use _copy_to_iter() to bypass usermode hardening - * which would otherwise prevent this operation. + * Sadly we must use a bounce buffer here to be able to + * make use of copy_from_kernel_nofault(), as these + * memory regions might not always be mapped on all + * architectures. */ - if (_copy_to_iter((char *)start, tsz, iter) != tsz) { + if (copy_from_kernel_nofault(buf, (void *)start, tsz)) { + if (iov_iter_zero(tsz, iter) != tsz) { + ret = -EFAULT; + goto out; + } + /* + * We know the bounce buffer is safe to copy from, so + * use _copy_to_iter() directly. 
+ */ + } else if (_copy_to_iter(buf, tsz, iter) != tsz) { ret = -EFAULT; goto out; } @@ -595,6 +608,10 @@ static int open_kcore(struct inode *inode, struct file *filp) if (ret) return ret; + filp->private_data = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!filp->private_data) + return -ENOMEM; + if (kcore_need_update) kcore_update_ram(); if (i_size_read(inode) != proc_root_kcore->size) { @@ -605,9 +622,16 @@ static int open_kcore(struct inode *inode, struct file *filp) return 0; } +static int release_kcore(struct inode *inode, struct file *file) +{ + kfree(file->private_data); + return 0; +} + static const struct proc_ops kcore_proc_ops = { .proc_read_iter = read_kcore_iter, .proc_open = open_kcore, + .proc_release = release_kcore, .proc_lseek = default_llseek, }; diff --git a/fs/readdir.c b/fs/readdir.c index b264ce60114d..c8c46e294431 100644 --- a/fs/readdir.c +++ b/fs/readdir.c @@ -25,6 +25,53 @@ #include <asm/unaligned.h> /* + * Some filesystems were never converted to '->iterate_shared()' + * and their directory iterators want the inode lock held for + * writing. This wrapper allows for converting from the shared + * semantics to the exclusive inode use. + */ +int wrap_directory_iterator(struct file *file, + struct dir_context *ctx, + int (*iter)(struct file *, struct dir_context *)) +{ + struct inode *inode = file_inode(file); + int ret; + + /* + * We'd love to have an 'inode_upgrade_trylock()' operation, + * see the comment in mmap_upgrade_trylock() in mm/memory.c. + * + * But considering this is for "filesystems that never got + * converted", it really doesn't matter. + * + * Also note that since we have to return with the lock held + * for reading, we can't use the "killable()" locking here, + * since we do need to get the lock even if we're dying. + * + * We could do the write part killably and then get the read + * lock unconditionally if it mattered, but see above on why + * this does the very simplistic conversion. + */ + up_read(&inode->i_rwsem); + down_write(&inode->i_rwsem); + + /* + * Since we dropped the inode lock, we should do the + * DEADDIR test again. See 'iterate_dir()' below. + * + * Note that we don't need to re-do the f_pos games, + * since the file must be locked wrt f_pos anyway. + */ + ret = -ENOENT; + if (!IS_DEADDIR(inode)) + ret = iter(file, ctx); + + downgrade_write(&inode->i_rwsem); + return ret; +} +EXPORT_SYMBOL(wrap_directory_iterator); + +/* * Note the "unsafe_put_user() semantics: we goto a * label for errors. 
*/ @@ -40,39 +87,28 @@ int iterate_dir(struct file *file, struct dir_context *ctx) { struct inode *inode = file_inode(file); - bool shared = false; int res = -ENOTDIR; - if (file->f_op->iterate_shared) - shared = true; - else if (!file->f_op->iterate) + + if (!file->f_op->iterate_shared) goto out; res = security_file_permission(file, MAY_READ); if (res) goto out; - if (shared) - res = down_read_killable(&inode->i_rwsem); - else - res = down_write_killable(&inode->i_rwsem); + res = down_read_killable(&inode->i_rwsem); if (res) goto out; res = -ENOENT; if (!IS_DEADDIR(inode)) { ctx->pos = file->f_pos; - if (shared) - res = file->f_op->iterate_shared(file, ctx); - else - res = file->f_op->iterate(file, ctx); + res = file->f_op->iterate_shared(file, ctx); file->f_pos = ctx->pos; fsnotify_access(file); file_accessed(file); } - if (shared) - inode_unlock_shared(inode); - else - inode_unlock(inode); + inode_unlock_shared(inode); out: return res; } diff --git a/fs/smb/client/dfs.c b/fs/smb/client/dfs.c index df3fd3b720da..ee772c3d9f00 100644 --- a/fs/smb/client/dfs.c +++ b/fs/smb/client/dfs.c @@ -177,8 +177,12 @@ static int __dfs_mount_share(struct cifs_mount_ctx *mnt_ctx) struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl); rc = dfs_get_referral(mnt_ctx, ref_path + 1, NULL, &tl); - if (rc) + if (rc) { + rc = cifs_mount_get_tcon(mnt_ctx); + if (!rc) + rc = cifs_is_path_remote(mnt_ctx); break; + } tit = dfs_cache_get_tgt_iterator(&tl); if (!tit) { diff --git a/fs/smb/server/smb2misc.c b/fs/smb/server/smb2misc.c index 33b7e6c4ceff..e881df1d10cb 100644 --- a/fs/smb/server/smb2misc.c +++ b/fs/smb/server/smb2misc.c @@ -380,13 +380,13 @@ int ksmbd_smb2_check_message(struct ksmbd_work *work) } if (smb2_req_struct_sizes[command] != pdu->StructureSize2) { - if (command == SMB2_OPLOCK_BREAK_HE && - le16_to_cpu(pdu->StructureSize2) != OP_BREAK_STRUCT_SIZE_20 && - le16_to_cpu(pdu->StructureSize2) != OP_BREAK_STRUCT_SIZE_21) { + if (!(command == SMB2_OPLOCK_BREAK_HE && + (le16_to_cpu(pdu->StructureSize2) == OP_BREAK_STRUCT_SIZE_20 || + le16_to_cpu(pdu->StructureSize2) == OP_BREAK_STRUCT_SIZE_21))) { /* special case for SMB2.1 lease break message */ ksmbd_debug(SMB, - "Illegal request size %d for oplock break\n", - le16_to_cpu(pdu->StructureSize2)); + "Illegal request size %u for command %d\n", + le16_to_cpu(pdu->StructureSize2), command); return 1; } } diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c index 9849d7489345..7cc1b0c47d0a 100644 --- a/fs/smb/server/smb2pdu.c +++ b/fs/smb/server/smb2pdu.c @@ -2324,9 +2324,16 @@ next: break; buf_len -= next; eabuf = (struct smb2_ea_info *)((char *)eabuf + next); - if (next < (u32)eabuf->EaNameLength + le16_to_cpu(eabuf->EaValueLength)) + if (buf_len < sizeof(struct smb2_ea_info)) { + rc = -EINVAL; break; + } + if (buf_len < sizeof(struct smb2_ea_info) + eabuf->EaNameLength + + le16_to_cpu(eabuf->EaValueLength)) { + rc = -EINVAL; + break; + } } while (next != 0); kfree(attr_name); diff --git a/fs/vboxsf/dir.c b/fs/vboxsf/dir.c index 075f15c43c78..5f1a14d5b927 100644 --- a/fs/vboxsf/dir.c +++ b/fs/vboxsf/dir.c @@ -179,9 +179,10 @@ static int vboxsf_dir_iterate(struct file *dir, struct dir_context *ctx) return 0; } +WRAP_DIR_ITER(vboxsf_dir_iterate) // FIXME! 
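The ksmbd smb2pdu.c change above hardens the SMB2 extended-attribute loop: before each record is parsed, the remaining buffer length is checked against both the fixed smb2_ea_info header and the variable name/value payload the record claims to carry. The same defensive pattern, boiled down to a self-contained sketch with invented record and field names (not the SMB2 wire format):

#include <stddef.h>
#include <stdint.h>

/* Hypothetical wire format: a fixed header followed by name_len bytes of
 * payload, with 'next' giving the offset of the following record (0 = last). */
struct rec {
	uint32_t next;
	uint8_t  name_len;
	uint8_t  name[];
};

/* Walk a buffer of such records without ever reading past buf_len. */
static int walk_records(const uint8_t *buf, size_t buf_len)
{
	uint32_t next;

	do {
		const struct rec *r = (const void *)buf;

		if (buf_len < sizeof(*r))
			return -1;	/* truncated header */
		if (buf_len < sizeof(*r) + r->name_len)
			return -1;	/* header lies about its payload */

		/* ... consume r->name[0..name_len) here ... */

		next = r->next;
		if (next) {
			if (next > buf_len)
				return -1;	/* offset jumps past the end */
			buf += next;
			buf_len -= next;
		}
	} while (next != 0);

	return 0;
}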
const struct file_operations vboxsf_dir_fops = { .open = vboxsf_dir_open, - .iterate = vboxsf_dir_iterate, + .iterate_shared = shared_vboxsf_dir_iterate, .release = vboxsf_dir_release, .read = generic_read_dir, .llseek = generic_file_llseek, diff --git a/fs/vboxsf/shfl_hostintf.h b/fs/vboxsf/shfl_hostintf.h index aca829062c12..069a019c9247 100644 --- a/fs/vboxsf/shfl_hostintf.h +++ b/fs/vboxsf/shfl_hostintf.h @@ -68,9 +68,9 @@ struct shfl_string { /** UTF-8 or UTF-16 string. Nul terminated. */ union { - u8 utf8[2]; - u16 utf16[1]; - u16 ucs2[1]; /* misnomer, use utf16. */ + u8 legacy_padding[2]; + DECLARE_FLEX_ARRAY(u8, utf8); + DECLARE_FLEX_ARRAY(u16, utf16); } string; }; VMMDEV_ASSERT_SIZE(shfl_string, 6); diff --git a/fs/zonefs/file.c b/fs/zonefs/file.c index 92c9aaae3663..789cfb74c146 100644 --- a/fs/zonefs/file.c +++ b/fs/zonefs/file.c @@ -341,77 +341,6 @@ static loff_t zonefs_file_llseek(struct file *file, loff_t offset, int whence) return generic_file_llseek_size(file, offset, whence, isize, isize); } -struct zonefs_zone_append_bio { - /* The target inode of the BIO */ - struct inode *inode; - - /* For sync writes, the target append write offset */ - u64 append_offset; - - /* - * This member must come last, bio_alloc_bioset will allocate enough - * bytes for entire zonefs_bio but relies on bio being last. - */ - struct bio bio; -}; - -static inline struct zonefs_zone_append_bio * -zonefs_zone_append_bio(struct bio *bio) -{ - return container_of(bio, struct zonefs_zone_append_bio, bio); -} - -static void zonefs_file_zone_append_dio_bio_end_io(struct bio *bio) -{ - struct zonefs_zone_append_bio *za_bio = zonefs_zone_append_bio(bio); - struct zonefs_zone *z = zonefs_inode_zone(za_bio->inode); - sector_t za_sector; - - if (bio->bi_status != BLK_STS_OK) - goto bio_end; - - /* - * If the file zone was written underneath the file system, the zone - * append operation can still succedd (if the zone is not full) but - * the write append location will not be where we expect it to be. - * Check that we wrote where we intended to, that is, at z->z_wpoffset. - */ - za_sector = z->z_sector + (za_bio->append_offset >> SECTOR_SHIFT); - if (bio->bi_iter.bi_sector != za_sector) { - zonefs_warn(za_bio->inode->i_sb, - "Invalid write sector %llu for zone at %llu\n", - bio->bi_iter.bi_sector, z->z_sector); - bio->bi_status = BLK_STS_IOERR; - } - -bio_end: - iomap_dio_bio_end_io(bio); -} - -static void zonefs_file_zone_append_dio_submit_io(const struct iomap_iter *iter, - struct bio *bio, - loff_t file_offset) -{ - struct zonefs_zone_append_bio *za_bio = zonefs_zone_append_bio(bio); - struct inode *inode = iter->inode; - struct zonefs_zone *z = zonefs_inode_zone(inode); - - /* - * Issue a zone append BIO to process sync dio writes. The append - * file offset is saved to check the zone append write location - * on completion of the BIO. 
- */ - za_bio->inode = inode; - za_bio->append_offset = file_offset; - - bio->bi_opf &= ~REQ_OP_WRITE; - bio->bi_opf |= REQ_OP_ZONE_APPEND; - bio->bi_iter.bi_sector = z->z_sector; - bio->bi_end_io = zonefs_file_zone_append_dio_bio_end_io; - - submit_bio(bio); -} - static int zonefs_file_write_dio_end_io(struct kiocb *iocb, ssize_t size, int error, unsigned int flags) { @@ -442,14 +371,6 @@ static int zonefs_file_write_dio_end_io(struct kiocb *iocb, ssize_t size, return 0; } -static struct bio_set zonefs_zone_append_bio_set; - -static const struct iomap_dio_ops zonefs_zone_append_dio_ops = { - .submit_io = zonefs_file_zone_append_dio_submit_io, - .end_io = zonefs_file_write_dio_end_io, - .bio_set = &zonefs_zone_append_bio_set, -}; - static const struct iomap_dio_ops zonefs_write_dio_ops = { .end_io = zonefs_file_write_dio_end_io, }; @@ -533,9 +454,6 @@ static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from) struct zonefs_inode_info *zi = ZONEFS_I(inode); struct zonefs_zone *z = zonefs_inode_zone(inode); struct super_block *sb = inode->i_sb; - const struct iomap_dio_ops *dio_ops; - bool sync = is_sync_kiocb(iocb); - bool append = false; ssize_t ret, count; /* @@ -543,7 +461,8 @@ static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from) * as this can cause write reordering (e.g. the first aio gets EAGAIN * on the inode lock but the second goes through but is now unaligned). */ - if (zonefs_zone_is_seq(z) && !sync && (iocb->ki_flags & IOCB_NOWAIT)) + if (zonefs_zone_is_seq(z) && !is_sync_kiocb(iocb) && + (iocb->ki_flags & IOCB_NOWAIT)) return -EOPNOTSUPP; if (iocb->ki_flags & IOCB_NOWAIT) { @@ -573,18 +492,6 @@ static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from) goto inode_unlock; } mutex_unlock(&zi->i_truncate_mutex); - append = sync; - } - - if (append) { - unsigned int max = bdev_max_zone_append_sectors(sb->s_bdev); - - max = ALIGN_DOWN(max << SECTOR_SHIFT, sb->s_blocksize); - iov_iter_truncate(from, max); - - dio_ops = &zonefs_zone_append_dio_ops; - } else { - dio_ops = &zonefs_write_dio_ops; } /* @@ -593,7 +500,7 @@ static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from) * the user can make sense of the error. 
*/ ret = iomap_dio_rw(iocb, from, &zonefs_write_iomap_ops, - dio_ops, 0, NULL, 0); + &zonefs_write_dio_ops, 0, NULL, 0); if (ret == -ENOTBLK) ret = -EBUSY; @@ -938,15 +845,3 @@ const struct file_operations zonefs_file_operations = { .splice_write = iter_file_splice_write, .iopoll = iocb_bio_iopoll, }; - -int zonefs_file_bioset_init(void) -{ - return bioset_init(&zonefs_zone_append_bio_set, BIO_POOL_SIZE, - offsetof(struct zonefs_zone_append_bio, bio), - BIOSET_NEED_BVECS); -} - -void zonefs_file_bioset_exit(void) -{ - bioset_exit(&zonefs_zone_append_bio_set); -} diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c index bbe44a26a8e5..9350221abfc5 100644 --- a/fs/zonefs/super.c +++ b/fs/zonefs/super.c @@ -1412,13 +1412,9 @@ static int __init zonefs_init(void) BUILD_BUG_ON(sizeof(struct zonefs_super) != ZONEFS_SUPER_SIZE); - ret = zonefs_file_bioset_init(); - if (ret) - return ret; - ret = zonefs_init_inodecache(); if (ret) - goto destroy_bioset; + return ret; ret = zonefs_sysfs_init(); if (ret) @@ -1434,8 +1430,6 @@ sysfs_exit: zonefs_sysfs_exit(); destroy_inodecache: zonefs_destroy_inodecache(); -destroy_bioset: - zonefs_file_bioset_exit(); return ret; } @@ -1445,7 +1439,6 @@ static void __exit zonefs_exit(void) unregister_filesystem(&zonefs_type); zonefs_sysfs_exit(); zonefs_destroy_inodecache(); - zonefs_file_bioset_exit(); } MODULE_AUTHOR("Damien Le Moal"); diff --git a/fs/zonefs/zonefs.h b/fs/zonefs/zonefs.h index f663b8ebc2cb..8175652241b5 100644 --- a/fs/zonefs/zonefs.h +++ b/fs/zonefs/zonefs.h @@ -279,8 +279,6 @@ extern const struct file_operations zonefs_dir_operations; extern const struct address_space_operations zonefs_file_aops; extern const struct file_operations zonefs_file_operations; int zonefs_file_truncate(struct inode *inode, loff_t isize); -int zonefs_file_bioset_init(void); -void zonefs_file_bioset_exit(void); /* In sysfs.c */ int zonefs_sysfs_register(struct super_block *sb); diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h index 402a8c1c202d..a8f4b653ef4e 100644 --- a/include/asm-generic/mshyperv.h +++ b/include/asm-generic/mshyperv.h @@ -190,7 +190,7 @@ int hv_common_cpu_die(unsigned int cpu); void *hv_alloc_hyperv_page(void); void *hv_alloc_hyperv_zeroed_page(void); -void hv_free_hyperv_page(unsigned long addr); +void hv_free_hyperv_page(void *addr); /** * hv_cpu_number_to_vp_number() - Map CPU to VP. diff --git a/include/asm-generic/word-at-a-time.h b/include/asm-generic/word-at-a-time.h index 20c93f08c993..95a1d214108a 100644 --- a/include/asm-generic/word-at-a-time.h +++ b/include/asm-generic/word-at-a-time.h @@ -38,7 +38,7 @@ static inline long find_zero(unsigned long mask) return (mask >> 8) ? 
byte : byte + 1; } -static inline bool has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c) +static inline unsigned long has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c) { unsigned long rhs = val | c->low_bits; *data = rhs; diff --git a/include/linux/bio.h b/include/linux/bio.h index c4f5b5228105..11984ed29cb8 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -791,7 +791,7 @@ static inline int bio_integrity_add_page(struct bio *bio, struct page *page, static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb) { bio->bi_opf |= REQ_POLLED; - if (!is_sync_kiocb(kiocb)) + if (kiocb->ki_flags & IOCB_NOWAIT) bio->bi_opf |= REQ_NOWAIT; } diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index ed44a997f629..87d94be7825a 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -969,7 +969,6 @@ struct blk_plug { bool multiple_queues; bool has_elevator; - bool nowait; struct list_head cb_list; /* md requires an unplug callback */ }; diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 6e6e57ec69e8..e006c719182b 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -70,6 +70,10 @@ extern ssize_t cpu_show_mmio_stale_data(struct device *dev, char *buf); extern ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf); +extern ssize_t cpu_show_spec_rstack_overflow(struct device *dev, + struct device_attribute *attr, char *buf); +extern ssize_t cpu_show_gds(struct device *dev, + struct device_attribute *attr, char *buf); extern __printf(4, 5) struct device *cpu_device_create(struct device *parent, void *drvdata, diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 0d2e2a38b92d..f10fb87d49db 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -175,8 +175,8 @@ static inline unsigned int cpumask_first_zero(const struct cpumask *srcp) /** * cpumask_first_and - return the first cpu from *srcp1 & *srcp2 - * @src1p: the first input - * @src2p: the second input + * @srcp1: the first input + * @srcp2: the second input * * Returns >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and(). */ @@ -1197,6 +1197,10 @@ cpumap_print_bitmask_to_buf(char *buf, const struct cpumask *mask, /** * cpumap_print_list_to_buf - copies the cpumask into the buffer as * comma-separated list of cpus + * @buf: the buffer to copy into + * @mask: the cpumask to copy + * @off: in the string from which we are copying, we copy to @buf + * @count: the maximum number of bytes to print * * Everything is same with the above cpumap_print_bitmask_to_buf() * except the print format. 
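On the word-at-a-time change above: the generic has_zero() now returns unsigned long instead of bool, bringing it in line with implementations that hand back a raw mask value (the rationale is inferred here, not stated in the diff). For context, this is roughly how the helpers are used together, modelled loosely on the kernel's own string routines; wordwise_strlen() is an invented name, and the caller must guarantee the buffer is readable up to a word boundary past the terminator.

#include <linux/kernel.h>
#include <asm/word-at-a-time.h>

/* Sketch only: find the length of a NUL-terminated string one word at a
 * time. Assumes 's' is word-aligned and padded past the terminator. */
static size_t wordwise_strlen(const char *s)
{
	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
	const unsigned long *p = (const unsigned long *)s;
	unsigned long val, data;
	size_t len = 0;

	for (;;) {
		val = *p++;
		if (has_zero(val, &data, &constants)) {
			data = prep_zero_mask(val, data, &constants);
			data = create_zero_mask(data);
			return len + find_zero(data);
		}
		len += sizeof(unsigned long);
	}
}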
diff --git a/include/linux/fs.h b/include/linux/fs.h index 6867512907d6..562f2623c9c9 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1780,7 +1780,6 @@ struct file_operations { ssize_t (*write_iter) (struct kiocb *, struct iov_iter *); int (*iopoll)(struct kiocb *kiocb, struct io_comp_batch *, unsigned int flags); - int (*iterate) (struct file *, struct dir_context *); int (*iterate_shared) (struct file *, struct dir_context *); __poll_t (*poll) (struct file *, struct poll_table_struct *); long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); @@ -1817,6 +1816,13 @@ struct file_operations { unsigned int poll_flags); } __randomize_layout; +/* Wrap a directory iterator that needs exclusive inode access */ +int wrap_directory_iterator(struct file *, struct dir_context *, + int (*) (struct file *, struct dir_context *)); +#define WRAP_DIR_ITER(x) \ + static int shared_##x(struct file *file , struct dir_context *ctx) \ + { return wrap_directory_iterator(file, ctx, x); } + struct inode_operations { struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int); const char * (*get_link) (struct dentry *, struct inode *, struct delayed_call *); diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index bfbc37ce223b..3ac3974b3c78 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -1239,9 +1239,6 @@ extern int vmbus_recvpacket_raw(struct vmbus_channel *channel, u32 *buffer_actual_len, u64 *requestid); - -extern void vmbus_ontimer(unsigned long data); - /* Base driver object */ struct hv_driver { const char *name; diff --git a/include/linux/intel_rapl.h b/include/linux/intel_rapl.h index e6936cb25047..33f21bd85dbf 100644 --- a/include/linux/intel_rapl.h +++ b/include/linux/intel_rapl.h @@ -100,10 +100,16 @@ struct rapl_package; #define RAPL_DOMAIN_NAME_LENGTH 16 +union rapl_reg { + void __iomem *mmio; + u32 msr; + u64 val; +}; + struct rapl_domain { char name[RAPL_DOMAIN_NAME_LENGTH]; enum rapl_domain_type id; - u64 regs[RAPL_DOMAIN_REG_MAX]; + union rapl_reg regs[RAPL_DOMAIN_REG_MAX]; struct powercap_zone power_zone; struct rapl_domain_data rdd; struct rapl_power_limit rpl[NR_POWER_LIMITS]; @@ -116,7 +122,7 @@ struct rapl_domain { }; struct reg_action { - u64 reg; + union rapl_reg reg; u64 mask; u64 value; int err; @@ -143,8 +149,8 @@ struct rapl_if_priv { enum rapl_if_type type; struct powercap_control_type *control_type; enum cpuhp_state pcap_rapl_online; - u64 reg_unit; - u64 regs[RAPL_DOMAIN_MAX][RAPL_DOMAIN_REG_MAX]; + union rapl_reg reg_unit; + union rapl_reg regs[RAPL_DOMAIN_MAX][RAPL_DOMAIN_REG_MAX]; int limits[RAPL_DOMAIN_MAX]; int (*read_raw)(int id, struct reg_action *ra); int (*write_raw)(int id, struct reg_action *ra); diff --git a/include/linux/prefetch.h b/include/linux/prefetch.h index b83a3f944f28..b068e2e60939 100644 --- a/include/linux/prefetch.h +++ b/include/linux/prefetch.h @@ -25,11 +25,10 @@ struct page; prefetch() should be defined by the architecture, if not, the #define below provides a no-op define. - There are 3 prefetch() macros: + There are 2 prefetch() macros: prefetch(x) - prefetches the cacheline at "x" for read prefetchw(x) - prefetches the cacheline at "x" for write - spin_lock_prefetch(x) - prefetches the spinlock *x for taking there is also PREFETCH_STRIDE which is the architecure-preferred "lookahead" size for prefetching streamed operations. 
@@ -44,10 +43,6 @@ struct page; #define prefetchw(x) __builtin_prefetch(x,1) #endif -#ifndef ARCH_HAS_SPINLOCK_PREFETCH -#define spin_lock_prefetch(x) prefetchw(x) -#endif - #ifndef PREFETCH_STRIDE #define PREFETCH_STRIDE (4*L1_CACHE_BYTES) #endif diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index 054d7911bfc9..c1637515a8a4 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -62,6 +62,7 @@ struct sk_psock_progs { enum sk_psock_state_bits { SK_PSOCK_TX_ENABLED, + SK_PSOCK_RX_STRP_ENABLED, }; struct sk_psock_link { diff --git a/include/linux/spi/corgi_lcd.h b/include/linux/spi/corgi_lcd.h index 0b857616919c..fc6c1515dc54 100644 --- a/include/linux/spi/corgi_lcd.h +++ b/include/linux/spi/corgi_lcd.h @@ -15,4 +15,6 @@ struct corgi_lcd_platform_data { void (*kick_battery)(void); }; +void corgi_lcd_limit_intensity(int limit); + #endif /* __LINUX_SPI_CORGI_LCD_H */ diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h index 8e984d75f5b6..6b0a7dc48a4b 100644 --- a/include/linux/spi/spi-mem.h +++ b/include/linux/spi/spi-mem.h @@ -101,6 +101,7 @@ struct spi_mem_op { u8 nbytes; u8 buswidth; u8 dtr : 1; + u8 __pad : 7; u16 opcode; } cmd; @@ -108,6 +109,7 @@ struct spi_mem_op { u8 nbytes; u8 buswidth; u8 dtr : 1; + u8 __pad : 7; u64 val; } addr; @@ -115,12 +117,14 @@ struct spi_mem_op { u8 nbytes; u8 buswidth; u8 dtr : 1; + u8 __pad : 7; } dummy; struct { u8 buswidth; u8 dtr : 1; u8 ecc : 1; + u8 __pad : 6; enum spi_mem_data_dir dir; unsigned int nbytes; union { diff --git a/include/linux/tpm.h b/include/linux/tpm.h index 6a1e8f157255..4ee9d13749ad 100644 --- a/include/linux/tpm.h +++ b/include/linux/tpm.h @@ -283,6 +283,7 @@ enum tpm_chip_flags { TPM_CHIP_FLAG_FIRMWARE_POWER_MANAGED = BIT(6), TPM_CHIP_FLAG_FIRMWARE_UPGRADE = BIT(7), TPM_CHIP_FLAG_SUSPENDED = BIT(8), + TPM_CHIP_FLAG_HWRNG_DISABLED = BIT(9), }; #define to_tpm_chip(d) container_of(d, struct tpm_chip, dev) diff --git a/include/linux/usb.h b/include/linux/usb.h index 25f8e62a30ec..a21074861f91 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -25,7 +25,6 @@ struct usb_device; struct usb_driver; -struct wusb_dev; /*-------------------------------------------------------------------------*/ @@ -425,7 +424,6 @@ struct usb_host_config { struct usb_host_bos { struct usb_bos_descriptor *desc; - /* wireless cap descriptor is handled by wusb */ struct usb_ext_cap_descriptor *ext_cap; struct usb_ss_cap_descriptor *ss_cap; struct usb_ssp_cap_descriptor *ssp_cap; @@ -612,7 +610,6 @@ struct usb3_lpm_parameters { * WUSB devices are not, until we authorize them from user space. * FIXME -- complete doc * @authenticated: Crypto authentication passed - * @wusb: device is Wireless USB * @lpm_capable: device supports LPM * @lpm_devinit_allow: Allow USB3 device initiated LPM, exit latency is in range * @usb2_hw_lpm_capable: device can perform USB2 hardware LPM @@ -634,8 +631,6 @@ struct usb3_lpm_parameters { * @do_remote_wakeup: remote wakeup should be enabled * @reset_resume: needs reset instead of resume * @port_is_suspended: the upstream port is suspended (L2 or U3) - * @wusb_dev: if this is a Wireless USB device, link to the WUSB - * specific data for the device. * @slot_id: Slot ID assigned by xHCI * @removable: Device can be physically removed from this port * @l1_params: best effor service latency for USB2 L1 LPM state, and L1 timeout. 
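One detail worth calling out from the spi-mem hunk above: each dtr/ecc bitfield in struct spi_mem_op gains an explicit __pad member so that every bit of the byte belongs to a named field. The motivation is not spelled out in the diff, but naming the bits means they are zero-initialized whenever an op is built with designated initializers, so whole-struct comparisons or copies cannot be perturbed by indeterminate padding bits. A standalone illustration of that C behaviour, with struct and field names invented for the example:

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* With the padding bits named, a partial designated initializer zeroes them
 * like any other omitted member, so two identically built descriptors
 * compare equal bytewise. */
struct op_cmd {
	uint8_t nbytes;
	uint8_t buswidth;
	uint8_t dtr : 1;
	uint8_t __pad : 7;	/* previously implicit, now explicit */
};

int main(void)
{
	struct op_cmd a = { .nbytes = 1, .buswidth = 4 };
	struct op_cmd b = { .nbytes = 1, .buswidth = 4 };

	assert(memcmp(&a, &b, sizeof(a)) == 0);
	return 0;
}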
@@ -696,7 +691,6 @@ struct usb_device { unsigned have_langid:1; unsigned authorized:1; unsigned authenticated:1; - unsigned wusb:1; unsigned lpm_capable:1; unsigned lpm_devinit_allow:1; unsigned usb2_hw_lpm_capable:1; @@ -727,7 +721,6 @@ struct usb_device { unsigned reset_resume:1; unsigned port_is_suspended:1; - struct wusb_dev *wusb_dev; int slot_id; struct usb2_lpm_parameters l1_params; struct usb3_lpm_parameters u1_params; @@ -1742,11 +1735,6 @@ static inline void usb_fill_bulk_urb(struct urb *urb, * encoding of the endpoint interval, and express polling intervals in * microframes (eight per millisecond) rather than in frames (one per * millisecond). - * - * Wireless USB also uses the logarithmic encoding, but specifies it in units of - * 128us instead of 125us. For Wireless USB devices, the interval is passed - * through to the host controller, rather than being translated into microframe - * units. */ static inline void usb_fill_int_urb(struct urb *urb, struct usb_device *dev, diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h index 969e7dba6358..c93b410b314a 100644 --- a/include/linux/usb/ch9.h +++ b/include/linux/usb/ch9.h @@ -3,7 +3,7 @@ * This file holds USB constants and structures that are needed for * USB device APIs. These are used by the USB device model, which is * defined in chapter 9 of the USB 2.0 specification and in the - * Wireless USB 1.0 (spread around). Linux has several APIs in C that + * Wireless USB 1.0 spec (now defunct). Linux has several APIs in C that * need these: * * - the host side Linux-USB kernel driver API; @@ -14,9 +14,6 @@ * act either as a USB host or as a USB device. That means the host and * device side APIs benefit from working well together. * - * There's also "Wireless USB", using low power short range radios for - * peripheral interconnection but otherwise building on the USB framework. - * * Note all descriptors are declared '__attribute__((packed))' so that: * * [a] they never get padded, either internally (USB spec writers diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h index ee38835ed77c..0b4f2d5faa08 100644 --- a/include/linux/usb/chipidea.h +++ b/include/linux/usb/chipidea.h @@ -63,6 +63,7 @@ struct ci_hdrc_platform_data { #define CI_HDRC_IMX_IS_HSIC BIT(14) #define CI_HDRC_PMQOS BIT(15) #define CI_HDRC_PHY_VBUS_CONTROL BIT(16) +#define CI_HDRC_HAS_PORTSC_PEC_MISSED BIT(17) enum usb_dr_mode dr_mode; #define CI_HDRC_CONTROLLER_RESET_EVENT 0 #define CI_HDRC_CONTROLLER_STOPPED_EVENT 1 diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h index 07531c4f4350..6014340ba980 100644 --- a/include/linux/usb/composite.h +++ b/include/linux/usb/composite.h @@ -450,29 +450,6 @@ static inline struct usb_composite_driver *to_cdriver( * * One of these devices is allocated and initialized before the * associated device driver's bind() is called. - * - * OPEN ISSUE: it appears that some WUSB devices will need to be - * built by combining a normal (wired) gadget with a wireless one. - * This revision of the gadget framework should probably try to make - * sure doing that won't hurt too much. 
- * - * One notion for how to handle Wireless USB devices involves: - * - * (a) a second gadget here, discovery mechanism TBD, but likely - * needing separate "register/unregister WUSB gadget" calls; - * (b) updates to usb_gadget to include flags "is it wireless", - * "is it wired", plus (presumably in a wrapper structure) - * bandgroup and PHY info; - * (c) presumably a wireless_ep wrapping a usb_ep, and reporting - * wireless-specific parameters like maxburst and maxsequence; - * (d) configurations that are specific to wireless links; - * (e) function drivers that understand wireless configs and will - * support wireless for (additional) function instances; - * (f) a function to support association setup (like CBAF), not - * necessarily requiring a wireless adapter; - * (g) composite device setup that can create one or more wireless - * configs, including appropriate association setup support; - * (h) more, TBD. */ struct usb_composite_dev { struct usb_gadget *gadget; diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index 4e9623e8492b..61d4f0b793dc 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -154,7 +154,6 @@ struct usb_hcd { /* The next flag is a stopgap, to be removed when all the HCDs * support the new root-hub polling mechanism. */ unsigned uses_new_polling:1; - unsigned wireless:1; /* Wireless USB HCD */ unsigned has_tt:1; /* Integrated TT in root hub */ unsigned amd_resume_bug:1; /* AMD remote wakeup quirk */ unsigned can_do_streams:1; /* HC supports streams */ @@ -249,7 +248,6 @@ struct hc_driver { #define HCD_SHARED 0x0004 /* Two (or more) usb_hcds share HW */ #define HCD_USB11 0x0010 /* USB 1.1 */ #define HCD_USB2 0x0020 /* USB 2.0 */ -#define HCD_USB25 0x0030 /* Wireless USB 1.0 (USB 2.5)*/ #define HCD_USB3 0x0040 /* USB 3.0 */ #define HCD_USB31 0x0050 /* USB 3.1 */ #define HCD_USB32 0x0060 /* USB 3.2 */ diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h index e4de6bc1f69b..b513749582d7 100644 --- a/include/linux/usb/phy.h +++ b/include/linux/usb/phy.h @@ -144,6 +144,10 @@ struct usb_phy { */ int (*set_wakeup)(struct usb_phy *x, bool enabled); + /* notify phy port status change */ + int (*notify_port_status)(struct usb_phy *x, int port, + u16 portstatus, u16 portchange); + /* notify phy connect status change */ int (*notify_connect)(struct usb_phy *x, enum usb_device_speed speed); @@ -317,6 +321,15 @@ usb_phy_set_wakeup(struct usb_phy *x, bool enabled) } static inline int +usb_phy_notify_port_status(struct usb_phy *x, int port, u16 portstatus, u16 portchange) +{ + if (x && x->notify_port_status) + return x->notify_port_status(x, port, portstatus, portchange); + else + return 0; +} + +static inline int usb_phy_notify_connect(struct usb_phy *x, enum usb_device_speed speed) { if (x && x->notify_connect) diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 7c7d03aa9d06..d6fa7c8767ad 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -562,6 +562,9 @@ ieee80211_get_sband_iftype_data(const struct ieee80211_supported_band *sband, if (WARN_ON(iftype >= NL80211_IFTYPE_MAX)) return NULL; + if (iftype == NL80211_IFTYPE_AP_VLAN) + iftype = NL80211_IFTYPE_AP; + for (i = 0; i < sband->n_iftype_data; i++) { const struct ieee80211_sband_iftype_data *data = &sband->iftype_data[i]; diff --git a/include/net/gro.h b/include/net/gro.h index 75efa6fb8441..88644b3ca660 100644 --- a/include/net/gro.h +++ b/include/net/gro.h @@ -452,6 +452,49 @@ static inline void gro_normal_one(struct napi_struct *napi, struct sk_buff 
*skb, gro_normal_list(napi); } +/* This function is the alternative of 'inet_iif' and 'inet_sdif' + * functions in case we can not rely on fields of IPCB. + * + * The caller must verify skb_valid_dst(skb) is false and skb->dev is initialized. + * The caller must hold the RCU read lock. + */ +static inline void inet_get_iif_sdif(const struct sk_buff *skb, int *iif, int *sdif) +{ + *iif = inet_iif(skb) ?: skb->dev->ifindex; + *sdif = 0; + +#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) + if (netif_is_l3_slave(skb->dev)) { + struct net_device *master = netdev_master_upper_dev_get_rcu(skb->dev); + + *sdif = *iif; + *iif = master ? master->ifindex : 0; + } +#endif +} + +/* This function is the alternative of 'inet6_iif' and 'inet6_sdif' + * functions in case we can not rely on fields of IP6CB. + * + * The caller must verify skb_valid_dst(skb) is false and skb->dev is initialized. + * The caller must hold the RCU read lock. + */ +static inline void inet6_get_iif_sdif(const struct sk_buff *skb, int *iif, int *sdif) +{ + /* using skb->dev->ifindex because skb_dst(skb) is not initialized */ + *iif = skb->dev->ifindex; + *sdif = 0; + +#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) + if (netif_is_l3_slave(skb->dev)) { + struct net_device *master = netdev_master_upper_dev_get_rcu(skb->dev); + + *sdif = *iif; + *iif = master ? master->ifindex : 0; + } +#endif +} + extern struct list_head offload_base; #endif /* _NET_IPV6_GRO_H */ diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index caa20a905531..0bb32bfc6183 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -107,11 +107,12 @@ static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk) static inline u32 inet_request_mark(const struct sock *sk, struct sk_buff *skb) { - if (!sk->sk_mark && - READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept)) + u32 mark = READ_ONCE(sk->sk_mark); + + if (!mark && READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept)) return skb->mark; - return sk->sk_mark; + return mark; } static inline int inet_request_bound_dev_if(const struct sock *sk, diff --git a/include/net/ip.h b/include/net/ip.h index 50d435855ae2..332521170d9b 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -93,7 +93,7 @@ static inline void ipcm_init_sk(struct ipcm_cookie *ipcm, { ipcm_init(ipcm); - ipcm->sockc.mark = inet->sk.sk_mark; + ipcm->sockc.mark = READ_ONCE(inet->sk.sk_mark); ipcm->sockc.tsflags = inet->sk.sk_tsflags; ipcm->oif = READ_ONCE(inet->sk.sk_bound_dev_if); ipcm->addr = inet->inet_saddr; diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 640441a2f926..35870858ddf2 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -512,6 +512,7 @@ struct nft_set_elem_expr { * * @list: table set list node * @bindings: list of set bindings + * @refs: internal refcounting for async set destruction * @table: table this set belongs to * @net: netnamespace this set belongs to * @name: name of the set @@ -541,6 +542,7 @@ struct nft_set_elem_expr { struct nft_set { struct list_head list; struct list_head bindings; + refcount_t refs; struct nft_table *table; possible_net_t net; char *name; @@ -562,7 +564,8 @@ struct nft_set { struct list_head pending_update; /* runtime data below here */ const struct nft_set_ops *ops ____cacheline_aligned; - u16 flags:14, + u16 flags:13, + dead:1, genmask:2; u8 klen; u8 dlen; @@ -596,7 +599,6 @@ struct nft_set *nft_set_lookup_global(const struct net *net, struct nft_set_ext *nft_set_catchall_lookup(const 
struct net *net, const struct nft_set *set); -void *nft_set_catchall_gc(const struct nft_set *set); static inline unsigned long nft_set_gc_interval(const struct nft_set *set) { @@ -813,62 +815,6 @@ void nft_set_elem_destroy(const struct nft_set *set, void *elem, void nf_tables_set_elem_destroy(const struct nft_ctx *ctx, const struct nft_set *set, void *elem); -/** - * struct nft_set_gc_batch_head - nf_tables set garbage collection batch - * - * @rcu: rcu head - * @set: set the elements belong to - * @cnt: count of elements - */ -struct nft_set_gc_batch_head { - struct rcu_head rcu; - const struct nft_set *set; - unsigned int cnt; -}; - -#define NFT_SET_GC_BATCH_SIZE ((PAGE_SIZE - \ - sizeof(struct nft_set_gc_batch_head)) / \ - sizeof(void *)) - -/** - * struct nft_set_gc_batch - nf_tables set garbage collection batch - * - * @head: GC batch head - * @elems: garbage collection elements - */ -struct nft_set_gc_batch { - struct nft_set_gc_batch_head head; - void *elems[NFT_SET_GC_BATCH_SIZE]; -}; - -struct nft_set_gc_batch *nft_set_gc_batch_alloc(const struct nft_set *set, - gfp_t gfp); -void nft_set_gc_batch_release(struct rcu_head *rcu); - -static inline void nft_set_gc_batch_complete(struct nft_set_gc_batch *gcb) -{ - if (gcb != NULL) - call_rcu(&gcb->head.rcu, nft_set_gc_batch_release); -} - -static inline struct nft_set_gc_batch * -nft_set_gc_batch_check(const struct nft_set *set, struct nft_set_gc_batch *gcb, - gfp_t gfp) -{ - if (gcb != NULL) { - if (gcb->head.cnt + 1 < ARRAY_SIZE(gcb->elems)) - return gcb; - nft_set_gc_batch_complete(gcb); - } - return nft_set_gc_batch_alloc(set, gfp); -} - -static inline void nft_set_gc_batch_add(struct nft_set_gc_batch *gcb, - void *elem) -{ - gcb->elems[gcb->head.cnt++] = elem; -} - struct nft_expr_ops; /** * struct nft_expr_type - nf_tables expression type @@ -1557,39 +1503,30 @@ static inline void nft_set_elem_change_active(const struct net *net, #endif /* IS_ENABLED(CONFIG_NF_TABLES) */ -/* - * We use a free bit in the genmask field to indicate the element - * is busy, meaning it is currently being processed either by - * the netlink API or GC. - * - * Even though the genmask is only a single byte wide, this works - * because the extension structure if fully constant once initialized, - * so there are no non-atomic write accesses unless it is already - * marked busy. 
- */ -#define NFT_SET_ELEM_BUSY_MASK (1 << 2) +#define NFT_SET_ELEM_DEAD_MASK (1 << 2) #if defined(__LITTLE_ENDIAN_BITFIELD) -#define NFT_SET_ELEM_BUSY_BIT 2 +#define NFT_SET_ELEM_DEAD_BIT 2 #elif defined(__BIG_ENDIAN_BITFIELD) -#define NFT_SET_ELEM_BUSY_BIT (BITS_PER_LONG - BITS_PER_BYTE + 2) +#define NFT_SET_ELEM_DEAD_BIT (BITS_PER_LONG - BITS_PER_BYTE + 2) #else #error #endif -static inline int nft_set_elem_mark_busy(struct nft_set_ext *ext) +static inline void nft_set_elem_dead(struct nft_set_ext *ext) { unsigned long *word = (unsigned long *)ext; BUILD_BUG_ON(offsetof(struct nft_set_ext, genmask) != 0); - return test_and_set_bit(NFT_SET_ELEM_BUSY_BIT, word); + set_bit(NFT_SET_ELEM_DEAD_BIT, word); } -static inline void nft_set_elem_clear_busy(struct nft_set_ext *ext) +static inline int nft_set_elem_is_dead(const struct nft_set_ext *ext) { unsigned long *word = (unsigned long *)ext; - clear_bit(NFT_SET_ELEM_BUSY_BIT, word); + BUILD_BUG_ON(offsetof(struct nft_set_ext, genmask) != 0); + return test_bit(NFT_SET_ELEM_DEAD_BIT, word); } /** @@ -1732,6 +1669,38 @@ struct nft_trans_flowtable { #define nft_trans_flowtable_flags(trans) \ (((struct nft_trans_flowtable *)trans->data)->flags) +#define NFT_TRANS_GC_BATCHCOUNT 256 + +struct nft_trans_gc { + struct list_head list; + struct net *net; + struct nft_set *set; + u32 seq; + u8 count; + void *priv[NFT_TRANS_GC_BATCHCOUNT]; + struct rcu_head rcu; +}; + +struct nft_trans_gc *nft_trans_gc_alloc(struct nft_set *set, + unsigned int gc_seq, gfp_t gfp); +void nft_trans_gc_destroy(struct nft_trans_gc *trans); + +struct nft_trans_gc *nft_trans_gc_queue_async(struct nft_trans_gc *gc, + unsigned int gc_seq, gfp_t gfp); +void nft_trans_gc_queue_async_done(struct nft_trans_gc *gc); + +struct nft_trans_gc *nft_trans_gc_queue_sync(struct nft_trans_gc *gc, gfp_t gfp); +void nft_trans_gc_queue_sync_done(struct nft_trans_gc *trans); + +void nft_trans_gc_elem_add(struct nft_trans_gc *gc, void *priv); + +struct nft_trans_gc *nft_trans_gc_catchall(struct nft_trans_gc *gc, + unsigned int gc_seq); + +void nft_setelem_data_deactivate(const struct net *net, + const struct nft_set *set, + struct nft_set_elem *elem); + int __init nft_chain_filter_init(void); void nft_chain_filter_fini(void); @@ -1758,6 +1727,7 @@ struct nftables_pernet { struct mutex commit_mutex; u64 table_handle; unsigned int base_seq; + unsigned int gc_seq; }; extern unsigned int nf_tables_net_id; diff --git a/include/net/route.h b/include/net/route.h index 5a5c726472bd..8c2a8e7d8f8e 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -168,7 +168,7 @@ static inline struct rtable *ip_route_output_ports(struct net *net, struct flowi __be16 dport, __be16 sport, __u8 proto, __u8 tos, int oif) { - flowi4_init_output(fl4, oif, sk ? sk->sk_mark : 0, tos, + flowi4_init_output(fl4, oif, sk ? READ_ONCE(sk->sk_mark) : 0, tos, RT_SCOPE_UNIVERSE, proto, sk ? 
inet_sk_flowi_flags(sk) : 0, daddr, saddr, dport, sport, sock_net_uid(net, sk)); @@ -301,7 +301,7 @@ static inline void ip_route_connect_init(struct flowi4 *fl4, __be32 dst, if (inet_sk(sk)->transparent) flow_flags |= FLOWI_FLAG_ANYSRC; - flowi4_init_output(fl4, oif, sk->sk_mark, ip_sock_rt_tos(sk), + flowi4_init_output(fl4, oif, READ_ONCE(sk->sk_mark), ip_sock_rt_tos(sk), ip_sock_rt_scope(sk), protocol, flow_flags, dst, src, dport, sport, sk->sk_uid); } diff --git a/include/net/vxlan.h b/include/net/vxlan.h index 1648240c9668..6a9f8a5f387c 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h @@ -556,12 +556,12 @@ static inline void vxlan_flag_attr_error(int attrtype, } static inline bool vxlan_fdb_nh_path_select(struct nexthop *nh, - int hash, + u32 hash, struct vxlan_rdst *rdst) { struct fib_nh_common *nhc; - nhc = nexthop_path_fdb_result(nh, hash); + nhc = nexthop_path_fdb_result(nh, hash >> 1); if (unlikely(!nhc)) return false; diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index 75b2235b99e2..b9230b6add04 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -194,6 +194,7 @@ struct scsi_device { unsigned no_start_on_add:1; /* do not issue start on add */ unsigned allow_restart:1; /* issue START_UNIT in error handler */ unsigned manage_start_stop:1; /* Let HLD (sd) manage start/stop */ + unsigned no_start_on_resume:1; /* Do not issue START_STOP_UNIT on resume */ unsigned start_stop_pwr_cond:1; /* Set power cond. in START_STOP_UNIT */ unsigned no_uld_attach:1; /* disable connecting to upper level drivers */ unsigned select_no_atn:1; diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h index fc3001483e62..a5ef84944a06 100644 --- a/include/soc/tegra/mc.h +++ b/include/soc/tegra/mc.h @@ -175,6 +175,9 @@ struct tegra_mc_icc_ops { int (*get_bw)(struct icc_node *node, u32 *avg, u32 *peak); }; +struct icc_node *tegra_mc_icc_xlate(struct of_phandle_args *spec, void *data); +extern const struct tegra_mc_icc_ops tegra_mc_icc_ops; + struct tegra_mc_ops { /* * @probe: Callback to set up SoC-specific bits of the memory controller. 
This is called diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h index bf06db8d2046..7b1ddffa3dfc 100644 --- a/include/trace/events/tcp.h +++ b/include/trace/events/tcp.h @@ -381,6 +381,7 @@ TRACE_EVENT(tcp_cong_state_set, __field(const void *, skaddr) __field(__u16, sport) __field(__u16, dport) + __field(__u16, family) __array(__u8, saddr, 4) __array(__u8, daddr, 4) __array(__u8, saddr_v6, 16) @@ -396,6 +397,7 @@ TRACE_EVENT(tcp_cong_state_set, __entry->sport = ntohs(inet->inet_sport); __entry->dport = ntohs(inet->inet_dport); + __entry->family = sk->sk_family; p32 = (__be32 *) __entry->saddr; *p32 = inet->inet_saddr; @@ -409,7 +411,8 @@ TRACE_EVENT(tcp_cong_state_set, __entry->cong_state = ca_state; ), - TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c cong_state=%u", + TP_printk("family=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c cong_state=%u", + show_family_name(__entry->family), __entry->sport, __entry->dport, __entry->saddr, __entry->daddr, __entry->saddr_v6, __entry->daddr_v6, diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index 7865f5a9885b..4f3932bb712d 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -710,9 +710,11 @@ enum { TCA_FLOWER_KEY_CFM_OPT_UNSPEC, TCA_FLOWER_KEY_CFM_MD_LEVEL, TCA_FLOWER_KEY_CFM_OPCODE, - TCA_FLOWER_KEY_CFM_OPT_MAX, + __TCA_FLOWER_KEY_CFM_OPT_MAX, }; +#define TCA_FLOWER_KEY_CFM_OPT_MAX (__TCA_FLOWER_KEY_CFM_OPT_MAX - 1) + #define TCA_FLOWER_MASK_FLAGS_RANGE (1 << 0) /* Range-based match */ /* Match-all classifier */ diff --git a/include/uapi/linux/usb/ch11.h b/include/uapi/linux/usb/ch11.h index fb0cd24c392c..ce4c83f2e66a 100644 --- a/include/uapi/linux/usb/ch11.h +++ b/include/uapi/linux/usb/ch11.h @@ -15,10 +15,8 @@ /* This is arbitrary. * From USB 2.0 spec Table 11-13, offset 7, a hub can * have up to 255 ports. The most yet reported is 10. - * - * Current Wireless USB host hardware (Intel i1480 for example) allows - * up to 22 devices to connect. Upcoming hardware might raise that - * limit. Because the arrays need to add a bit for hub status data, we + * Upcoming hardware might raise that limit. + * Because the arrays need to add a bit for hub status data, we * use 31, so plus one evens out to four bytes. */ #define USB_MAXCHILDREN 31 diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h index 62d318377379..8a147abfc680 100644 --- a/include/uapi/linux/usb/ch9.h +++ b/include/uapi/linux/usb/ch9.h @@ -3,7 +3,7 @@ * This file holds USB constants and structures that are needed for * USB device APIs. These are used by the USB device model, which is * defined in chapter 9 of the USB 2.0 specification and in the - * Wireless USB 1.0 (spread around). Linux has several APIs in C that + * Wireless USB 1.0 spec (now defunct). Linux has several APIs in C that * need these: * * - the master/host side Linux-USB kernel driver API; @@ -14,9 +14,6 @@ * act either as a USB master/host or as a USB slave/device. That means * the master and slave side APIs benefit from working well together. * - * There's also "Wireless USB", using low power short range radios for - * peripheral interconnection but otherwise building on the USB framework. 
- * * Note all descriptors are declared '__attribute__((packed))' so that: * * [a] they never get padded, either internally (USB spec writers diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index f4591b912ea8..93db3e4e7b68 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -3470,6 +3470,8 @@ static unsigned long io_uring_mmu_get_unmapped_area(struct file *filp, * - use the kernel virtual address of the shared io_uring context * (instead of the userspace-provided address, which has to be 0UL * anyway). + * - use the same pgoff which the get_unmapped_area() uses to + * calculate the page colouring. * For architectures without such aliasing requirements, the * architecture will return any suitable mapping because addr is 0. */ @@ -3478,6 +3480,7 @@ static unsigned long io_uring_mmu_get_unmapped_area(struct file *filp, pgoff = 0; /* has been translated to ptr above */ #ifdef SHM_COLOUR addr = (uintptr_t) ptr; + pgoff = addr >> PAGE_SHIFT; #else addr = 0UL; #endif diff --git a/io_uring/openclose.c b/io_uring/openclose.c index 10ca57f5bd24..e3fae26e025d 100644 --- a/io_uring/openclose.c +++ b/io_uring/openclose.c @@ -35,9 +35,11 @@ static bool io_openat_force_async(struct io_open *open) { /* * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open, - * it'll always -EAGAIN + * it'll always -EAGAIN. Note that we test for __O_TMPFILE because + * O_TMPFILE includes O_DIRECTORY, which isn't a flag we need to force + * async for. */ - return open->how.flags & (O_TRUNC | O_CREAT | O_TMPFILE); + return open->how.flags & (O_TRUNC | O_CREAT | __O_TMPFILE); } static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c index 6ae02be7a48e..286ab3db0fde 100644 --- a/kernel/bpf/cpumap.c +++ b/kernel/bpf/cpumap.c @@ -28,6 +28,7 @@ #include <linux/sched.h> #include <linux/workqueue.h> #include <linux/kthread.h> +#include <linux/completion.h> #include <trace/events/xdp.h> #include <linux/btf_ids.h> @@ -73,6 +74,7 @@ struct bpf_cpu_map_entry { struct rcu_head rcu; struct work_struct kthread_stop_wq; + struct completion kthread_running; }; struct bpf_cpu_map { @@ -129,11 +131,17 @@ static void __cpu_map_ring_cleanup(struct ptr_ring *ring) * invoked cpu_map_kthread_stop(). Catch any broken behaviour * gracefully and warn once. */ - struct xdp_frame *xdpf; + void *ptr; - while ((xdpf = ptr_ring_consume(ring))) - if (WARN_ON_ONCE(xdpf)) - xdp_return_frame(xdpf); + while ((ptr = ptr_ring_consume(ring))) { + WARN_ON_ONCE(1); + if (unlikely(__ptr_test_bit(0, &ptr))) { + __ptr_clear_bit(0, &ptr); + kfree_skb(ptr); + continue; + } + xdp_return_frame(ptr); + } } static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu) @@ -153,7 +161,6 @@ static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu) static void cpu_map_kthread_stop(struct work_struct *work) { struct bpf_cpu_map_entry *rcpu; - int err; rcpu = container_of(work, struct bpf_cpu_map_entry, kthread_stop_wq); @@ -163,14 +170,7 @@ static void cpu_map_kthread_stop(struct work_struct *work) rcu_barrier(); /* kthread_stop will wake_up_process and wait for it to complete */ - err = kthread_stop(rcpu->kthread); - if (err) { - /* kthread_stop may be called before cpu_map_kthread_run - * is executed, so we need to release the memory related - * to rcpu. 
- */ - put_cpu_map_entry(rcpu); - } + kthread_stop(rcpu->kthread); } static void cpu_map_bpf_prog_run_skb(struct bpf_cpu_map_entry *rcpu, @@ -298,11 +298,11 @@ static int cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu, void **frames, return nframes; } - static int cpu_map_kthread_run(void *data) { struct bpf_cpu_map_entry *rcpu = data; + complete(&rcpu->kthread_running); set_current_state(TASK_INTERRUPTIBLE); /* When kthread gives stop order, then rcpu have been disconnected @@ -467,6 +467,7 @@ __cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value, goto free_ptr_ring; /* Setup kthread */ + init_completion(&rcpu->kthread_running); rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa, "cpumap/%d/map:%d", cpu, map->id); @@ -480,6 +481,12 @@ __cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value, kthread_bind(rcpu->kthread, cpu); wake_up_process(rcpu->kthread); + /* Make sure kthread has been running, so kthread_stop() will not + * stop the kthread prematurely and all pending frames or skbs + * will be handled by the kthread before kthread_stop() returns. + */ + wait_for_completion(&rcpu->kthread_running); + return rcpu; free_prog: diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index e1b4bfa938dd..2b4a946a6ff5 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -1166,7 +1166,7 @@ static ssize_t resume_store(struct kobject *kobj, struct kobj_attribute *attr, int error; if (!hibernation_available()) - return 0; + return n; if (len && buf[len-1] == '\n') len--; diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 5f2dcabad202..bd1a42b23f3f 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -661,8 +661,7 @@ static DEFINE_PER_CPU(int, bpf_trace_nest_level); BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map, u64, flags, void *, data, u64, size) { - struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds); - int nest_level = this_cpu_inc_return(bpf_trace_nest_level); + struct bpf_trace_sample_data *sds; struct perf_raw_record raw = { .frag = { .size = size, @@ -670,7 +669,11 @@ BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map, }, }; struct perf_sample_data *sd; - int err; + int nest_level, err; + + preempt_disable(); + sds = this_cpu_ptr(&bpf_trace_sds); + nest_level = this_cpu_inc_return(bpf_trace_nest_level); if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) { err = -EBUSY; @@ -688,9 +691,9 @@ BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map, perf_sample_save_raw_data(sd, &raw); err = __bpf_perf_event_output(regs, map, flags, sd); - out: this_cpu_dec(bpf_trace_nest_level); + preempt_enable(); return err; } @@ -715,7 +718,6 @@ static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds); u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy) { - int nest_level = this_cpu_inc_return(bpf_event_output_nest_level); struct perf_raw_frag frag = { .copy = ctx_copy, .size = ctx_size, @@ -732,8 +734,12 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, }; struct perf_sample_data *sd; struct pt_regs *regs; + int nest_level; u64 ret; + preempt_disable(); + nest_level = this_cpu_inc_return(bpf_event_output_nest_level); + if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) { ret = -EBUSY; goto out; @@ -748,6 +754,7 @@ u64 bpf_event_output(struct bpf_map *map, 
u64 flags, void *meta, u64 meta_size, ret = __bpf_perf_event_output(regs, map, flags, sd); out: this_cpu_dec(bpf_event_output_nest_level); + preempt_enable(); return ret; } diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 02a8f402eeb5..800b4208dba9 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -52,6 +52,7 @@ #include <linux/sched/debug.h> #include <linux/nmi.h> #include <linux/kvm_para.h> +#include <linux/delay.h> #include "workqueue_internal.h" @@ -338,8 +339,10 @@ static cpumask_var_t *wq_numa_possible_cpumask; * Per-cpu work items which run for longer than the following threshold are * automatically considered CPU intensive and excluded from concurrency * management to prevent them from noticeably delaying other per-cpu work items. + * ULONG_MAX indicates that the user hasn't overridden it with a boot parameter. + * The actual value is initialized in wq_cpu_intensive_thresh_init(). */ -static unsigned long wq_cpu_intensive_thresh_us = 10000; +static unsigned long wq_cpu_intensive_thresh_us = ULONG_MAX; module_param_named(cpu_intensive_thresh_us, wq_cpu_intensive_thresh_us, ulong, 0644); static bool wq_disable_numa; @@ -6513,6 +6516,42 @@ void __init workqueue_init_early(void) !system_freezable_power_efficient_wq); } +static void __init wq_cpu_intensive_thresh_init(void) +{ + unsigned long thresh; + unsigned long bogo; + + /* if the user set it to a specific value, keep it */ + if (wq_cpu_intensive_thresh_us != ULONG_MAX) + return; + + /* + * The default of 10ms is derived from the fact that most modern (as of + * 2023) processors can do a lot in 10ms and that it's just below what + * most consider human-perceivable. However, the kernel also runs on a + * lot slower CPUs including microcontrollers where the threshold is way + * too low. + * + * Let's scale up the threshold upto 1 second if BogoMips is below 4000. + * This is by no means accurate but it doesn't have to be. The mechanism + * is still useful even when the threshold is fully scaled up. Also, as + * the reports would usually be applicable to everyone, some machines + * operating on longer thresholds won't significantly diminish their + * usefulness. + */ + thresh = 10 * USEC_PER_MSEC; + + /* see init/calibrate.c for lpj -> BogoMIPS calculation */ + bogo = max_t(unsigned long, loops_per_jiffy / 500000 * HZ, 1); + if (bogo < 4000) + thresh = min_t(unsigned long, thresh * 4000 / bogo, USEC_PER_SEC); + + pr_debug("wq_cpu_intensive_thresh: lpj=%lu BogoMIPS=%lu thresh_us=%lu\n", + loops_per_jiffy, bogo, thresh); + + wq_cpu_intensive_thresh_us = thresh; +} + /** * workqueue_init - bring workqueue subsystem fully online * @@ -6528,6 +6567,8 @@ void __init workqueue_init(void) struct worker_pool *pool; int cpu, bkt; + wq_cpu_intensive_thresh_init(); + /* * It'd be simpler to initialize NUMA in workqueue_init_early() but * CPU to node mapping may not be available that early on some diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index fbc89baf7de6..d6798513a8c2 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1200,7 +1200,7 @@ config WQ_CPU_INTENSIVE_REPORT help Say Y here to enable reporting of concurrency-managed per-cpu work items that hog CPUs for longer than - workqueue.cpu_intensive_threshold_us. Workqueue automatically + workqueue.cpu_intensive_thresh_us. Workqueue automatically detects and excludes them from concurrency management to prevent them from stalling other per-cpu work items. Occassional triggering may not necessarily indicate a problem. 
Repeated diff --git a/lib/Makefile b/lib/Makefile index 42d307ade225..1ffae65bb7ee 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -82,7 +82,13 @@ obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o obj-$(CONFIG_TEST_DYNAMIC_DEBUG) += test_dynamic_debug.o obj-$(CONFIG_TEST_PRINTF) += test_printf.o obj-$(CONFIG_TEST_SCANF) += test_scanf.o + obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o +ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_KASAN),yy) +# FIXME: Clang breaks test_bitmap_const_eval when KASAN and GCOV are enabled +GCOV_PROFILE_test_bitmap.o := n +endif + obj-$(CONFIG_TEST_UUID) += test_uuid.o obj-$(CONFIG_TEST_XARRAY) += test_xarray.o obj-$(CONFIG_TEST_MAPLE_TREE) += test_maple_tree.o diff --git a/lib/cpumask.c b/lib/cpumask.c index de356f16773a..a7fd02b5ae26 100644 --- a/lib/cpumask.c +++ b/lib/cpumask.c @@ -45,6 +45,7 @@ EXPORT_SYMBOL(cpumask_next_wrap); * alloc_cpumask_var_node - allocate a struct cpumask on a given node * @mask: pointer to cpumask_var_t where the cpumask is returned * @flags: GFP_ flags + * @node: memory node from which to allocate or %NUMA_NO_NODE * * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is * a nop returning a constant 1 (in <linux/cpumask.h>) @@ -157,7 +158,9 @@ EXPORT_SYMBOL(cpumask_local_spread); static DEFINE_PER_CPU(int, distribute_cpu_mask_prev); /** - * cpumask_any_and_distribute - Return an arbitrary cpu within srcp1 & srcp2. + * cpumask_any_and_distribute - Return an arbitrary cpu within src1p & src2p. + * @src1p: first &cpumask for intersection + * @src2p: second &cpumask for intersection * * Iterated calls using the same srcp1 and srcp2 will be distributed within * their intersection. diff --git a/lib/scatterlist.c b/lib/scatterlist.c index e86231a44c3d..c65566b4dc66 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c @@ -1148,7 +1148,7 @@ static ssize_t extract_user_to_sg(struct iov_iter *iter, failed: while (sgtable->nents > sgtable->orig_nents) - put_page(sg_page(&sgtable->sgl[--sgtable->nents])); + unpin_user_page(sg_page(&sgtable->sgl[--sgtable->nents])); return res; } diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c index 187f5b2db4cf..f2ea9f30c7c5 100644 --- a/lib/test_bitmap.c +++ b/lib/test_bitmap.c @@ -1161,6 +1161,10 @@ static void __init test_bitmap_print_buf(void) } } +/* + * FIXME: Clang breaks compile-time evaluations when KASAN and GCOV are enabled. + * To workaround it, GCOV is force-disabled in Makefile for this configuration. + */ static void __init test_bitmap_const_eval(void) { DECLARE_BITMAP(bitmap, BITS_PER_LONG); @@ -1186,11 +1190,7 @@ static void __init test_bitmap_const_eval(void) * the compiler is fixed. */ bitmap_clear(bitmap, 0, BITS_PER_LONG); -#if defined(__s390__) && defined(__clang__) - if (!const_test_bit(7, bitmap)) -#else if (!test_bit(7, bitmap)) -#endif bitmap_set(bitmap, 5, 2); /* Equals to `unsigned long bitopvar = BIT(20)` */ diff --git a/mm/compaction.c b/mm/compaction.c index dbc9f86b1934..eacca2794e47 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -912,11 +912,12 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, /* * Check if the pageblock has already been marked skipped. - * Only the aligned PFN is checked as the caller isolates + * Only the first PFN is checked as the caller isolates * COMPACT_CLUSTER_MAX at a time so the second call must * not falsely conclude that the block should be skipped. 
*/ - if (!valid_page && pageblock_aligned(low_pfn)) { + if (!valid_page && (pageblock_aligned(low_pfn) || + low_pfn == cc->zone->zone_start_pfn)) { if (!isolation_suitable(cc, page)) { low_pfn = end_pfn; folio = NULL; @@ -2002,7 +2003,8 @@ static isolate_migrate_t isolate_migratepages(struct compact_control *cc) * before making it "skip" so other compaction instances do * not scan the same block. */ - if (pageblock_aligned(low_pfn) && + if ((pageblock_aligned(low_pfn) || + low_pfn == cc->zone->zone_start_pfn) && !fast_find_block && !isolation_suitable(cc, page)) continue; diff --git a/mm/damon/core.c b/mm/damon/core.c index 91cff7f2997e..eb9580942a5c 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -273,6 +273,7 @@ struct damos_filter *damos_new_filter(enum damos_filter_type type, return NULL; filter->type = type; filter->matching = matching; + INIT_LIST_HEAD(&filter->list); return filter; } diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 64a3239b6407..6da626bfb52e 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1579,9 +1579,37 @@ static inline void destroy_compound_gigantic_folio(struct folio *folio, unsigned int order) { } #endif +static inline void __clear_hugetlb_destructor(struct hstate *h, + struct folio *folio) +{ + lockdep_assert_held(&hugetlb_lock); + + /* + * Very subtle + * + * For non-gigantic pages set the destructor to the normal compound + * page dtor. This is needed in case someone takes an additional + * temporary ref to the page, and freeing is delayed until they drop + * their reference. + * + * For gigantic pages set the destructor to the null dtor. This + * destructor will never be called. Before freeing the gigantic + * page destroy_compound_gigantic_folio will turn the folio into a + * simple group of pages. After this the destructor does not + * apply. + * + */ + if (hstate_is_gigantic(h)) + folio_set_compound_dtor(folio, NULL_COMPOUND_DTOR); + else + folio_set_compound_dtor(folio, COMPOUND_PAGE_DTOR); +} + /* - * Remove hugetlb folio from lists, and update dtor so that the folio appears - * as just a compound page. + * Remove hugetlb folio from lists. + * If vmemmap exists for the folio, update dtor so that the folio appears + * as just a compound page. Otherwise, wait until after allocating vmemmap + * to update dtor. * * A reference is held on the folio, except in the case of demote. * @@ -1612,31 +1640,19 @@ static void __remove_hugetlb_folio(struct hstate *h, struct folio *folio, } /* - * Very subtle - * - * For non-gigantic pages set the destructor to the normal compound - * page dtor. This is needed in case someone takes an additional - * temporary ref to the page, and freeing is delayed until they drop - * their reference. - * - * For gigantic pages set the destructor to the null dtor. This - * destructor will never be called. Before freeing the gigantic - * page destroy_compound_gigantic_folio will turn the folio into a - * simple group of pages. After this the destructor does not - * apply. - * - * This handles the case where more than one ref is held when and - * after update_and_free_hugetlb_folio is called. - * - * In the case of demote we do not ref count the page as it will soon - * be turned into a page of smaller size. + * We can only clear the hugetlb destructor after allocating vmemmap + * pages. Otherwise, someone (memory error handling) may try to write + * to tail struct pages. 
+ */ + if (!folio_test_hugetlb_vmemmap_optimized(folio)) + __clear_hugetlb_destructor(h, folio); + + /* + * In the case of demote we do not ref count the page as it will soon + * be turned into a page of smaller size. */ if (!demote) folio_ref_unfreeze(folio, 1); - if (hstate_is_gigantic(h)) - folio_set_compound_dtor(folio, NULL_COMPOUND_DTOR); - else - folio_set_compound_dtor(folio, COMPOUND_PAGE_DTOR); h->nr_huge_pages--; h->nr_huge_pages_node[nid]--; @@ -1705,6 +1721,7 @@ static void __update_and_free_hugetlb_folio(struct hstate *h, { int i; struct page *subpage; + bool clear_dtor = folio_test_hugetlb_vmemmap_optimized(folio); if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) return; @@ -1735,6 +1752,16 @@ static void __update_and_free_hugetlb_folio(struct hstate *h, if (unlikely(folio_test_hwpoison(folio))) folio_clear_hugetlb_hwpoison(folio); + /* + * If vmemmap pages were allocated above, then we need to clear the + * hugetlb destructor under the hugetlb lock. + */ + if (clear_dtor) { + spin_lock_irq(&hugetlb_lock); + __clear_hugetlb_destructor(h, folio); + spin_unlock_irq(&hugetlb_lock); + } + for (i = 0; i < pages_per_huge_page(h); i++) { subpage = folio_page(folio, i); subpage->flags &= ~(1 << PG_locked | 1 << PG_error | @@ -2784,6 +2784,8 @@ struct page *ksm_might_need_to_copy(struct page *page, anon_vma->root == vma->anon_vma->root) { return page; /* still no need to copy it */ } + if (PageHWPoison(page)) + return ERR_PTR(-EHWPOISON); if (!PageUptodate(page)) return page; /* let do_swap_page report the error */ diff --git a/mm/memory-failure.c b/mm/memory-failure.c index ece5d481b5ff..9a285038d765 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -2466,7 +2466,7 @@ int unpoison_memory(unsigned long pfn) { struct folio *folio; struct page *p; - int ret = -EBUSY; + int ret = -EBUSY, ghp; unsigned long count = 1; bool huge = false; static DEFINE_RATELIMIT_STATE(unpoison_rs, DEFAULT_RATELIMIT_INTERVAL, @@ -2499,6 +2499,13 @@ int unpoison_memory(unsigned long pfn) goto unlock_mutex; } + if (folio_test_slab(folio) || PageTable(&folio->page) || folio_test_reserved(folio)) + goto unlock_mutex; + + /* + * Note that folio->_mapcount is overloaded in SLAB, so the simple test + * in folio_mapped() has to be done after folio_test_slab() is checked. + */ if (folio_mapped(folio)) { unpoison_pr_info("Unpoison: Someone maps the hwpoison page %#lx\n", pfn, &unpoison_rs); @@ -2511,32 +2518,28 @@ int unpoison_memory(unsigned long pfn) goto unlock_mutex; } - if (folio_test_slab(folio) || PageTable(&folio->page) || folio_test_reserved(folio)) - goto unlock_mutex; - - ret = get_hwpoison_page(p, MF_UNPOISON); - if (!ret) { + ghp = get_hwpoison_page(p, MF_UNPOISON); + if (!ghp) { if (PageHuge(p)) { huge = true; count = folio_free_raw_hwp(folio, false); - if (count == 0) { - ret = -EBUSY; + if (count == 0) goto unlock_mutex; - } } ret = folio_test_clear_hwpoison(folio) ? 0 : -EBUSY; - } else if (ret < 0) { - if (ret == -EHWPOISON) { + } else if (ghp < 0) { + if (ghp == -EHWPOISON) { ret = put_page_back_buddy(p) ? 
0 : -EBUSY; - } else + } else { + ret = ghp; unpoison_pr_info("Unpoison: failed to grab page %#lx\n", pfn, &unpoison_rs); + } } else { if (PageHuge(p)) { huge = true; count = folio_free_raw_hwp(folio, false); if (count == 0) { - ret = -EBUSY; folio_put(folio); goto unlock_mutex; } diff --git a/mm/memory.c b/mm/memory.c index 603b2f419948..1ec1ef3418bf 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5705,6 +5705,9 @@ int __access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf, if (mmap_read_lock_killable(mm)) return 0; + /* Untag the address before looking up the VMA */ + addr = untagged_addr_remote(mm, addr); + /* Avoid triggering the temporary warning in __get_user_pages */ if (!vma_lookup(mm, addr) && !expand_stack(mm, addr)) return 0; diff --git a/mm/swapfile.c b/mm/swapfile.c index 8e6dde68b389..b15112b1f1a8 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1746,7 +1746,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, struct page *swapcache; spinlock_t *ptl; pte_t *pte, new_pte, old_pte; - bool hwposioned = false; + bool hwpoisoned = PageHWPoison(page); int ret = 1; swapcache = page; @@ -1754,7 +1754,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, if (unlikely(!page)) return -ENOMEM; else if (unlikely(PTR_ERR(page) == -EHWPOISON)) - hwposioned = true; + hwpoisoned = true; pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); if (unlikely(!pte || !pte_same_as_swp(ptep_get(pte), @@ -1765,11 +1765,11 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, old_pte = ptep_get(pte); - if (unlikely(hwposioned || !PageUptodate(page))) { + if (unlikely(hwpoisoned || !PageUptodate(page))) { swp_entry_t swp_entry; dec_mm_counter(vma->vm_mm, MM_SWAPENTS); - if (hwposioned) { + if (hwpoisoned) { swp_entry = make_hwpoison_entry(swapcache); page = swapcache; } else { diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 3f057970504e..32916d28d9d9 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -1798,6 +1798,7 @@ static void replace_sub_page(struct size_class *class, struct zspage *zspage, static bool zs_page_isolate(struct page *page, isolate_mode_t mode) { + struct zs_pool *pool; struct zspage *zspage; /* @@ -1807,9 +1808,10 @@ static bool zs_page_isolate(struct page *page, isolate_mode_t mode) VM_BUG_ON_PAGE(PageIsolated(page), page); zspage = get_zspage(page); - migrate_write_lock(zspage); + pool = zspage->pool; + spin_lock(&pool->lock); inc_zspage_isolation(zspage); - migrate_write_unlock(zspage); + spin_unlock(&pool->lock); return true; } @@ -1875,12 +1877,12 @@ static int zs_page_migrate(struct page *newpage, struct page *page, kunmap_atomic(s_addr); replace_sub_page(class, zspage, newpage, page); + dec_zspage_isolation(zspage); /* * Since we complete the data copy and set up new zspage structure, * it's okay to release the pool's lock. 
*/ spin_unlock(&pool->lock); - dec_zspage_isolation(zspage); migrate_write_unlock(zspage); get_page(newpage); @@ -1897,14 +1899,16 @@ static int zs_page_migrate(struct page *newpage, struct page *page, static void zs_page_putback(struct page *page) { + struct zs_pool *pool; struct zspage *zspage; VM_BUG_ON_PAGE(!PageIsolated(page), page); zspage = get_zspage(page); - migrate_write_lock(zspage); + pool = zspage->pool; + spin_lock(&pool->lock); dec_zspage_isolation(zspage); - migrate_write_unlock(zspage); + spin_unlock(&pool->lock); } static const struct movable_operations zsmalloc_mops = { diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index e40aa3e3641c..b3662119ddbc 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -384,8 +384,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event, dev->name); vlan_vid_add(dev, htons(ETH_P_8021Q), 0); } - if (event == NETDEV_DOWN && - (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) + if (event == NETDEV_DOWN) vlan_vid_del(dev, htons(ETH_P_8021Q), 0); vlan_info = rtnl_dereference(dev->vlan_info); diff --git a/net/can/raw.c b/net/can/raw.c index ba6b52b1d776..e10f59375659 100644 --- a/net/can/raw.c +++ b/net/can/raw.c @@ -865,7 +865,7 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) skb->dev = dev; skb->priority = sk->sk_priority; - skb->mark = sk->sk_mark; + skb->mark = READ_ONCE(sk->sk_mark); skb->tstamp = sockc.transmit_time; skb_setup_tx_timestamp(skb, sockc.tsflags); diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 11c04e7d928e..658a6f2320cf 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -3334,17 +3334,24 @@ static int linger_reg_commit_wait(struct ceph_osd_linger_request *lreq) int ret; dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id); - ret = wait_for_completion_interruptible(&lreq->reg_commit_wait); + ret = wait_for_completion_killable(&lreq->reg_commit_wait); return ret ?: lreq->reg_commit_error; } -static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq) +static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq, + unsigned long timeout) { - int ret; + long left; dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id); - ret = wait_for_completion_interruptible(&lreq->notify_finish_wait); - return ret ?: lreq->notify_finish_error; + left = wait_for_completion_killable_timeout(&lreq->notify_finish_wait, + ceph_timeout_jiffies(timeout)); + if (left <= 0) + left = left ?: -ETIMEDOUT; + else + left = lreq->notify_finish_error; /* completed */ + + return left; } /* @@ -4896,7 +4903,8 @@ int ceph_osdc_notify(struct ceph_osd_client *osdc, linger_submit(lreq); ret = linger_reg_commit_wait(lreq); if (!ret) - ret = linger_notify_finish_wait(lreq); + ret = linger_notify_finish_wait(lreq, + msecs_to_jiffies(2 * timeout * MSEC_PER_SEC)); else dout("lreq %p failed to initiate notify %d\n", lreq, ret); diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c index d4172534dfa8..cca7594be92e 100644 --- a/net/core/bpf_sk_storage.c +++ b/net/core/bpf_sk_storage.c @@ -496,8 +496,11 @@ bpf_sk_storage_diag_alloc(const struct nlattr *nla_stgs) return ERR_PTR(-EPERM); nla_for_each_nested(nla, nla_stgs, rem) { - if (nla_type(nla) == SK_DIAG_BPF_STORAGE_REQ_MAP_FD) + if (nla_type(nla) == SK_DIAG_BPF_STORAGE_REQ_MAP_FD) { + if (nla_len(nla) != sizeof(u32)) + return ERR_PTR(-EINVAL); nr_maps++; + } } diag = kzalloc(struct_size(diag, maps, nr_maps), GFP_KERNEL); diff --git a/net/core/filter.c 
b/net/core/filter.c index 06ba0e56e369..28a59596987a 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -4116,12 +4116,6 @@ BPF_CALL_2(bpf_xdp_adjust_tail, struct xdp_buff *, xdp, int, offset) if (unlikely(data_end > data_hard_end)) return -EINVAL; - /* ALL drivers MUST init xdp->frame_sz, chicken check below */ - if (unlikely(xdp->frame_sz > PAGE_SIZE)) { - WARN_ONCE(1, "Too BIG xdp->frame_sz = %d\n", xdp->frame_sz); - return -EINVAL; - } - if (unlikely(data_end < xdp->data + ETH_HLEN)) return -EINVAL; diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 3ad4e030846d..aef25aa5cf1d 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -5140,13 +5140,17 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); if (br_spec) { nla_for_each_nested(attr, br_spec, rem) { - if (nla_type(attr) == IFLA_BRIDGE_FLAGS) { + if (nla_type(attr) == IFLA_BRIDGE_FLAGS && !have_flags) { if (nla_len(attr) < sizeof(flags)) return -EINVAL; have_flags = true; flags = nla_get_u16(attr); - break; + } + + if (nla_type(attr) == IFLA_BRIDGE_MODE) { + if (nla_len(attr) < sizeof(u16)) + return -EINVAL; } } } diff --git a/net/core/skmsg.c b/net/core/skmsg.c index a29508e1ff35..ef1a2eb6520b 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c @@ -1120,13 +1120,19 @@ static void sk_psock_strp_data_ready(struct sock *sk) int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock) { + int ret; + static const struct strp_callbacks cb = { .rcv_msg = sk_psock_strp_read, .read_sock_done = sk_psock_strp_read_done, .parse_msg = sk_psock_strp_parse, }; - return strp_init(&psock->strp, sk, &cb); + ret = strp_init(&psock->strp, sk, &cb); + if (!ret) + sk_psock_set_state(psock, SK_PSOCK_RX_STRP_ENABLED); + + return ret; } void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock) @@ -1154,7 +1160,7 @@ void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock) static void sk_psock_done_strp(struct sk_psock *psock) { /* Parser has been stopped */ - if (psock->progs.stream_parser) + if (sk_psock_test_state(psock, SK_PSOCK_RX_STRP_ENABLED)) strp_done(&psock->strp); } #else diff --git a/net/core/sock.c b/net/core/sock.c index 9370fd50aa2c..732fc37a4771 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -429,6 +429,7 @@ static int sock_set_timeout(long *timeo_p, sockptr_t optval, int optlen, { struct __kernel_sock_timeval tv; int err = sock_copy_user_timeval(&tv, optval, optlen, old_timeval); + long val; if (err) return err; @@ -439,7 +440,7 @@ static int sock_set_timeout(long *timeo_p, sockptr_t optval, int optlen, if (tv.tv_sec < 0) { static int warned __read_mostly; - *timeo_p = 0; + WRITE_ONCE(*timeo_p, 0); if (warned < 10 && net_ratelimit()) { warned++; pr_info("%s: `%s' (pid %d) tries to set negative timeout\n", @@ -447,11 +448,12 @@ static int sock_set_timeout(long *timeo_p, sockptr_t optval, int optlen, } return 0; } - *timeo_p = MAX_SCHEDULE_TIMEOUT; - if (tv.tv_sec == 0 && tv.tv_usec == 0) - return 0; - if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1)) - *timeo_p = tv.tv_sec * HZ + DIV_ROUND_UP((unsigned long)tv.tv_usec, USEC_PER_SEC / HZ); + val = MAX_SCHEDULE_TIMEOUT; + if ((tv.tv_sec || tv.tv_usec) && + (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1))) + val = tv.tv_sec * HZ + DIV_ROUND_UP((unsigned long)tv.tv_usec, + USEC_PER_SEC / HZ); + WRITE_ONCE(*timeo_p, val); return 0; } @@ -804,7 +806,7 @@ EXPORT_SYMBOL(sock_no_linger); void sock_set_priority(struct sock *sk, u32 priority) { 
lock_sock(sk); - sk->sk_priority = priority; + WRITE_ONCE(sk->sk_priority, priority); release_sock(sk); } EXPORT_SYMBOL(sock_set_priority); @@ -813,9 +815,9 @@ void sock_set_sndtimeo(struct sock *sk, s64 secs) { lock_sock(sk); if (secs && secs < MAX_SCHEDULE_TIMEOUT / HZ - 1) - sk->sk_sndtimeo = secs * HZ; + WRITE_ONCE(sk->sk_sndtimeo, secs * HZ); else - sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT; + WRITE_ONCE(sk->sk_sndtimeo, MAX_SCHEDULE_TIMEOUT); release_sock(sk); } EXPORT_SYMBOL(sock_set_sndtimeo); @@ -988,7 +990,7 @@ EXPORT_SYMBOL(sock_set_rcvbuf); static void __sock_set_mark(struct sock *sk, u32 val) { if (val != sk->sk_mark) { - sk->sk_mark = val; + WRITE_ONCE(sk->sk_mark, val); sk_dst_reset(sk); } } @@ -1007,7 +1009,7 @@ static void sock_release_reserved_memory(struct sock *sk, int bytes) bytes = round_down(bytes, PAGE_SIZE); WARN_ON(bytes > sk->sk_reserved_mem); - sk->sk_reserved_mem -= bytes; + WRITE_ONCE(sk->sk_reserved_mem, sk->sk_reserved_mem - bytes); sk_mem_reclaim(sk); } @@ -1044,7 +1046,8 @@ static int sock_reserve_memory(struct sock *sk, int bytes) } sk->sk_forward_alloc += pages << PAGE_SHIFT; - sk->sk_reserved_mem += pages << PAGE_SHIFT; + WRITE_ONCE(sk->sk_reserved_mem, + sk->sk_reserved_mem + (pages << PAGE_SHIFT)); return 0; } @@ -1213,7 +1216,7 @@ set_sndbuf: if ((val >= 0 && val <= 6) || sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) || sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) - sk->sk_priority = val; + WRITE_ONCE(sk->sk_priority, val); else ret = -EPERM; break; @@ -1438,7 +1441,8 @@ set_sndbuf: cmpxchg(&sk->sk_pacing_status, SK_PACING_NONE, SK_PACING_NEEDED); - sk->sk_max_pacing_rate = ulval; + /* Pairs with READ_ONCE() from sk_getsockopt() */ + WRITE_ONCE(sk->sk_max_pacing_rate, ulval); sk->sk_pacing_rate = min(sk->sk_pacing_rate, ulval); break; } @@ -1533,7 +1537,9 @@ set_sndbuf: } if ((u8)val == SOCK_TXREHASH_DEFAULT) val = READ_ONCE(sock_net(sk)->core.sysctl_txrehash); - /* Paired with READ_ONCE() in tcp_rtx_synack() */ + /* Paired with READ_ONCE() in tcp_rtx_synack() + * and sk_getsockopt(). 
+ */ WRITE_ONCE(sk->sk_txrehash, (u8)val); break; @@ -1633,11 +1639,11 @@ int sk_getsockopt(struct sock *sk, int level, int optname, break; case SO_SNDBUF: - v.val = sk->sk_sndbuf; + v.val = READ_ONCE(sk->sk_sndbuf); break; case SO_RCVBUF: - v.val = sk->sk_rcvbuf; + v.val = READ_ONCE(sk->sk_rcvbuf); break; case SO_REUSEADDR: @@ -1679,7 +1685,7 @@ int sk_getsockopt(struct sock *sk, int level, int optname, break; case SO_PRIORITY: - v.val = sk->sk_priority; + v.val = READ_ONCE(sk->sk_priority); break; case SO_LINGER: @@ -1717,16 +1723,18 @@ int sk_getsockopt(struct sock *sk, int level, int optname, case SO_RCVTIMEO_OLD: case SO_RCVTIMEO_NEW: - lv = sock_get_timeout(sk->sk_rcvtimeo, &v, SO_RCVTIMEO_OLD == optname); + lv = sock_get_timeout(READ_ONCE(sk->sk_rcvtimeo), &v, + SO_RCVTIMEO_OLD == optname); break; case SO_SNDTIMEO_OLD: case SO_SNDTIMEO_NEW: - lv = sock_get_timeout(sk->sk_sndtimeo, &v, SO_SNDTIMEO_OLD == optname); + lv = sock_get_timeout(READ_ONCE(sk->sk_sndtimeo), &v, + SO_SNDTIMEO_OLD == optname); break; case SO_RCVLOWAT: - v.val = sk->sk_rcvlowat; + v.val = READ_ONCE(sk->sk_rcvlowat); break; case SO_SNDLOWAT: @@ -1770,7 +1778,7 @@ int sk_getsockopt(struct sock *sk, int level, int optname, spin_unlock(&sk->sk_peer_lock); if (!peer_pid) - return -ESRCH; + return -ENODATA; pidfd = pidfd_prepare(peer_pid, 0, &pidfd_file); put_pid(peer_pid); @@ -1843,7 +1851,7 @@ int sk_getsockopt(struct sock *sk, int level, int optname, optval, optlen, len); case SO_MARK: - v.val = sk->sk_mark; + v.val = READ_ONCE(sk->sk_mark); break; case SO_RCVMARK: @@ -1862,7 +1870,7 @@ int sk_getsockopt(struct sock *sk, int level, int optname, if (!sock->ops->set_peek_off) return -EOPNOTSUPP; - v.val = sk->sk_peek_off; + v.val = READ_ONCE(sk->sk_peek_off); break; case SO_NOFCS: v.val = sock_flag(sk, SOCK_NOFCS); @@ -1892,7 +1900,7 @@ int sk_getsockopt(struct sock *sk, int level, int optname, #ifdef CONFIG_NET_RX_BUSY_POLL case SO_BUSY_POLL: - v.val = sk->sk_ll_usec; + v.val = READ_ONCE(sk->sk_ll_usec); break; case SO_PREFER_BUSY_POLL: v.val = READ_ONCE(sk->sk_prefer_busy_poll); @@ -1900,12 +1908,14 @@ int sk_getsockopt(struct sock *sk, int level, int optname, #endif case SO_MAX_PACING_RATE: + /* The READ_ONCE() pair with the WRITE_ONCE() in sk_setsockopt() */ if (sizeof(v.ulval) != sizeof(v.val) && len >= sizeof(v.ulval)) { lv = sizeof(v.ulval); - v.ulval = sk->sk_max_pacing_rate; + v.ulval = READ_ONCE(sk->sk_max_pacing_rate); } else { /* 32bit version */ - v.val = min_t(unsigned long, sk->sk_max_pacing_rate, ~0U); + v.val = min_t(unsigned long, ~0U, + READ_ONCE(sk->sk_max_pacing_rate)); } break; @@ -1973,11 +1983,12 @@ int sk_getsockopt(struct sock *sk, int level, int optname, break; case SO_RESERVE_MEM: - v.val = sk->sk_reserved_mem; + v.val = READ_ONCE(sk->sk_reserved_mem); break; case SO_TXREHASH: - v.val = sk->sk_txrehash; + /* Paired with WRITE_ONCE() in sk_setsockopt() */ + v.val = READ_ONCE(sk->sk_txrehash); break; default: @@ -3168,7 +3179,7 @@ EXPORT_SYMBOL(__sk_mem_reclaim); int sk_set_peek_off(struct sock *sk, int val) { - sk->sk_peek_off = val; + WRITE_ONCE(sk->sk_peek_off, val); return 0; } EXPORT_SYMBOL_GPL(sk_set_peek_off); diff --git a/net/core/sock_map.c b/net/core/sock_map.c index 19538d628714..8f07fea39d9e 100644 --- a/net/core/sock_map.c +++ b/net/core/sock_map.c @@ -115,7 +115,6 @@ static void sock_map_sk_acquire(struct sock *sk) __acquires(&sk->sk_lock.slock) { lock_sock(sk); - preempt_disable(); rcu_read_lock(); } @@ -123,7 +122,6 @@ static void sock_map_sk_release(struct sock *sk) 
__releases(&sk->sk_lock.slock) { rcu_read_unlock(); - preempt_enable(); release_sock(sk); } @@ -148,13 +146,13 @@ static void sock_map_del_link(struct sock *sk, list_for_each_entry_safe(link, tmp, &psock->link, list) { if (link->link_raw == link_raw) { struct bpf_map *map = link->map; - struct bpf_stab *stab = container_of(map, struct bpf_stab, - map); - if (psock->saved_data_ready && stab->progs.stream_parser) + struct sk_psock_progs *progs = sock_map_progs(map); + + if (psock->saved_data_ready && progs->stream_parser) strp_stop = true; - if (psock->saved_data_ready && stab->progs.stream_verdict) + if (psock->saved_data_ready && progs->stream_verdict) verdict_stop = true; - if (psock->saved_data_ready && stab->progs.skb_verdict) + if (psock->saved_data_ready && progs->skb_verdict) verdict_stop = true; list_del(&link->list); sk_psock_free_link(link); diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c index c0c438128575..2e6b8c8fd2de 100644 --- a/net/dcb/dcbnl.c +++ b/net/dcb/dcbnl.c @@ -980,7 +980,7 @@ static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlmsghdr *nlh, return -EOPNOTSUPP; ret = nla_parse_nested_deprecated(data, DCB_BCN_ATTR_MAX, - tb[DCB_ATTR_BCN], dcbnl_pfc_up_nest, + tb[DCB_ATTR_BCN], dcbnl_bcn_nest, NULL); if (ret) return ret; diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 7249ef218178..d29d1163203d 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -238,8 +238,8 @@ static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req opt = ireq->ipv6_opt; if (!opt) opt = rcu_dereference(np->opt); - err = ip6_xmit(sk, skb, &fl6, sk->sk_mark, opt, np->tclass, - sk->sk_priority); + err = ip6_xmit(sk, skb, &fl6, READ_ONCE(sk->sk_mark), opt, + np->tclass, sk->sk_priority); rcu_read_unlock(); err = net_xmit_eval(err); } diff --git a/net/dccp/output.c b/net/dccp/output.c index b8a24734385e..fd2eb148d24d 100644 --- a/net/dccp/output.c +++ b/net/dccp/output.c @@ -187,7 +187,7 @@ unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu) /* And store cached results */ icsk->icsk_pmtu_cookie = pmtu; - dp->dccps_mss_cache = cur_mps; + WRITE_ONCE(dp->dccps_mss_cache, cur_mps); return cur_mps; } diff --git a/net/dccp/proto.c b/net/dccp/proto.c index f331e5977a84..4e3266e4d7c3 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -630,7 +630,7 @@ static int do_dccp_getsockopt(struct sock *sk, int level, int optname, return dccp_getsockopt_service(sk, len, (__be32 __user *)optval, optlen); case DCCP_SOCKOPT_GET_CUR_MPS: - val = dp->dccps_mss_cache; + val = READ_ONCE(dp->dccps_mss_cache); break; case DCCP_SOCKOPT_AVAILABLE_CCIDS: return ccid_getsockopt_builtin_ccids(sk, len, optval, optlen); @@ -739,7 +739,7 @@ int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) trace_dccp_probe(sk, len); - if (len > dp->dccps_mss_cache) + if (len > READ_ONCE(dp->dccps_mss_cache)) return -EMSGSIZE; lock_sock(sk); @@ -772,6 +772,12 @@ int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) goto out_discard; } + /* We need to check dccps_mss_cache after socket is locked. 
*/ + if (len > dp->dccps_mss_cache) { + rc = -EMSGSIZE; + goto out_discard; + } + skb_reserve(skb, sk->sk_prot->max_header); rc = memcpy_from_msg(skb_put(skb, len), msg, len); if (rc != 0) diff --git a/net/dsa/port.c b/net/dsa/port.c index 0ce8fd311c78..2f6195d7b741 100644 --- a/net/dsa/port.c +++ b/net/dsa/port.c @@ -1727,8 +1727,15 @@ int dsa_port_phylink_create(struct dsa_port *dp) ds->ops->phylink_mac_an_restart) dp->pl_config.legacy_pre_march2020 = true; - if (ds->ops->phylink_get_caps) + if (ds->ops->phylink_get_caps) { ds->ops->phylink_get_caps(ds, dp->index, &dp->pl_config); + } else { + /* For legacy drivers */ + __set_bit(PHY_INTERFACE_MODE_INTERNAL, + dp->pl_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_GMII, + dp->pl_config.supported_interfaces); + } pl = phylink_create(&dp->pl_config, of_fwnode_handle(dp->dn), mode, &dsa_port_phylink_mac_ops); diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index b812eb36f0e3..f7426926a104 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -150,7 +150,7 @@ int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb, } #endif - if (net_admin && nla_put_u32(skb, INET_DIAG_MARK, sk->sk_mark)) + if (net_admin && nla_put_u32(skb, INET_DIAG_MARK, READ_ONCE(sk->sk_mark))) goto errout; if (ext & (1 << (INET_DIAG_CLASS_ID - 1)) || @@ -799,7 +799,7 @@ int inet_diag_bc_sk(const struct nlattr *bc, struct sock *sk) entry.ifindex = sk->sk_bound_dev_if; entry.userlocks = sk_fullsock(sk) ? sk->sk_userlocks : 0; if (sk_fullsock(sk)) - entry.mark = sk->sk_mark; + entry.mark = READ_ONCE(sk->sk_mark); else if (sk->sk_state == TCP_NEW_SYN_RECV) entry.mark = inet_rsk(inet_reqsk(sk))->ir_mark; else if (sk->sk_state == TCP_TIME_WAIT) diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 6e70839257f7..6ba1a0fafbaa 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -184,9 +184,9 @@ int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk, ip_options_build(skb, &opt->opt, daddr, rt); } - skb->priority = sk->sk_priority; + skb->priority = READ_ONCE(sk->sk_priority); if (!skb->mark) - skb->mark = sk->sk_mark; + skb->mark = READ_ONCE(sk->sk_mark); /* Send it out. */ return ip_local_out(net, skb->sk, skb); @@ -528,8 +528,8 @@ packet_routed: skb_shinfo(skb)->gso_segs ?: 1); /* TODO : should we use skb->sk here instead of sk ? */ - skb->priority = sk->sk_priority; - skb->mark = sk->sk_mark; + skb->priority = READ_ONCE(sk->sk_priority); + skb->mark = READ_ONCE(sk->sk_mark); res = ip_local_out(net, sk, skb); rcu_read_unlock(); @@ -1158,10 +1158,15 @@ alloc_new_skb: } copy = datalen - transhdrlen - fraggap - pagedlen; + /* [!] NOTE: copy will be negative if pagedlen>0 + * because then the equation reduces to -fraggap. 
+ */ if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) { err = -EFAULT; kfree_skb(skb); goto error; + } else if (flags & MSG_SPLICE_PAGES) { + copy = 0; } offset += copy; @@ -1209,6 +1214,10 @@ alloc_new_skb: } else if (flags & MSG_SPLICE_PAGES) { struct msghdr *msg = from; + err = -EIO; + if (WARN_ON_ONCE(copy > msg->msg_iter.count)) + goto error; + err = skb_splice_from_iter(skb, &msg->msg_iter, copy, sk->sk_allocation); if (err < 0) diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 8e97d8d4cc9d..d41bce8927b2 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -592,7 +592,7 @@ void __ip_sock_set_tos(struct sock *sk, int val) } if (inet_sk(sk)->tos != val) { inet_sk(sk)->tos = val; - sk->sk_priority = rt_tos2priority(val); + WRITE_ONCE(sk->sk_priority, rt_tos2priority(val)); sk_dst_reset(sk); } } diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index 92c02c886fe7..586b1b3e35b8 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c @@ -224,7 +224,7 @@ static int iptunnel_pmtud_build_icmp(struct sk_buff *skb, int mtu) .un.frag.__unused = 0, .un.frag.mtu = htons(mtu), }; - icmph->checksum = ip_compute_csum(icmph, len); + icmph->checksum = csum_fold(skb_checksum(skb, 0, len, 0)); skb_reset_transport_header(skb); niph = skb_push(skb, sizeof(*niph)); diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c index f95142e56da0..be5498f5dd31 100644 --- a/net/ipv4/nexthop.c +++ b/net/ipv4/nexthop.c @@ -3221,13 +3221,9 @@ static int rtm_dump_nexthop(struct sk_buff *skb, struct netlink_callback *cb) &rtm_dump_nexthop_cb, &filter); if (err < 0) { if (likely(skb->len)) - goto out; - goto out_err; + err = skb->len; } -out: - err = skb->len; -out_err: cb->seq = net->nexthop.seq; nl_dump_check_consistent(cb, nlmsg_hdr(skb)); return err; @@ -3367,25 +3363,19 @@ static int rtm_dump_nexthop_bucket_nh(struct sk_buff *skb, dd->filter.res_bucket_nh_id != nhge->nh->id) continue; + dd->ctx->bucket_index = bucket_index; err = nh_fill_res_bucket(skb, nh, bucket, bucket_index, RTM_NEWNEXTHOPBUCKET, portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->extack); - if (err < 0) { - if (likely(skb->len)) - goto out; - goto out_err; - } + if (err) + return err; } dd->ctx->done_nh_idx = dd->ctx->nh.idx + 1; - bucket_index = 0; + dd->ctx->bucket_index = 0; -out: - err = skb->len; -out_err: - dd->ctx->bucket_index = bucket_index; - return err; + return 0; } static int rtm_dump_nexthop_bucket_cb(struct sk_buff *skb, @@ -3434,13 +3424,9 @@ static int rtm_dump_nexthop_bucket(struct sk_buff *skb, if (err < 0) { if (likely(skb->len)) - goto out; - goto out_err; + err = skb->len; } -out: - err = skb->len; -out_err: cb->seq = net->nexthop.seq; nl_dump_check_consistent(cb, nlmsg_hdr(skb)); return err; diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 7782ff5e6539..cb381f5aa464 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -348,7 +348,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4, goto error; skb_reserve(skb, hlen); - skb->priority = sk->sk_priority; + skb->priority = READ_ONCE(sk->sk_priority); skb->mark = sockc->mark; skb->tstamp = sockc->transmit_time; skb_dst_set(skb, &rt->dst); diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 98d7e6ba7493..92fede388d52 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -518,7 +518,7 @@ static void __build_flow_key(const struct net *net, struct flowi4 *fl4, const struct inet_sock *inet = inet_sk(sk); oif = sk->sk_bound_dev_if; - mark = sk->sk_mark; + mark = 
READ_ONCE(sk->sk_mark); tos = ip_sock_rt_tos(sk); scope = ip_sock_rt_scope(sk); prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol; @@ -552,7 +552,7 @@ static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk) inet_opt = rcu_dereference(inet->inet_opt); if (inet_opt && inet_opt->opt.srr) daddr = inet_opt->opt.faddr; - flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark, + flowi4_init_output(fl4, sk->sk_bound_dev_if, READ_ONCE(sk->sk_mark), ip_sock_rt_tos(sk) & IPTOS_RT_MASK, ip_sock_rt_scope(sk), inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol, diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 069642014636..a59cc4b83861 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -931,9 +931,9 @@ static void tcp_v4_send_ack(const struct sock *sk, ctl_sk = this_cpu_read(ipv4_tcp_sk); sock_net_set(ctl_sk, net); ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ? - inet_twsk(sk)->tw_mark : sk->sk_mark; + inet_twsk(sk)->tw_mark : READ_ONCE(sk->sk_mark); ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ? - inet_twsk(sk)->tw_priority : sk->sk_priority; + inet_twsk(sk)->tw_priority : READ_ONCE(sk->sk_priority); transmit_time = tcp_transmit_time(sk); ip_send_unicast_reply(ctl_sk, skb, &TCP_SKB_CB(skb)->header.h4.opt, diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c index 82f4575f9cd9..99ac5efe244d 100644 --- a/net/ipv4/tcp_metrics.c +++ b/net/ipv4/tcp_metrics.c @@ -40,7 +40,7 @@ struct tcp_fastopen_metrics { struct tcp_metrics_block { struct tcp_metrics_block __rcu *tcpm_next; - possible_net_t tcpm_net; + struct net *tcpm_net; struct inetpeer_addr tcpm_saddr; struct inetpeer_addr tcpm_daddr; unsigned long tcpm_stamp; @@ -51,34 +51,38 @@ struct tcp_metrics_block { struct rcu_head rcu_head; }; -static inline struct net *tm_net(struct tcp_metrics_block *tm) +static inline struct net *tm_net(const struct tcp_metrics_block *tm) { - return read_pnet(&tm->tcpm_net); + /* Paired with the WRITE_ONCE() in tcpm_new() */ + return READ_ONCE(tm->tcpm_net); } static bool tcp_metric_locked(struct tcp_metrics_block *tm, enum tcp_metric_index idx) { - return tm->tcpm_lock & (1 << idx); + /* Paired with WRITE_ONCE() in tcpm_suck_dst() */ + return READ_ONCE(tm->tcpm_lock) & (1 << idx); } -static u32 tcp_metric_get(struct tcp_metrics_block *tm, +static u32 tcp_metric_get(const struct tcp_metrics_block *tm, enum tcp_metric_index idx) { - return tm->tcpm_vals[idx]; + /* Paired with WRITE_ONCE() in tcp_metric_set() */ + return READ_ONCE(tm->tcpm_vals[idx]); } static void tcp_metric_set(struct tcp_metrics_block *tm, enum tcp_metric_index idx, u32 val) { - tm->tcpm_vals[idx] = val; + /* Paired with READ_ONCE() in tcp_metric_get() */ + WRITE_ONCE(tm->tcpm_vals[idx], val); } static bool addr_same(const struct inetpeer_addr *a, const struct inetpeer_addr *b) { - return inetpeer_addr_cmp(a, b) == 0; + return (a->family == b->family) && !inetpeer_addr_cmp(a, b); } struct tcpm_hash_bucket { @@ -89,6 +93,7 @@ static struct tcpm_hash_bucket *tcp_metrics_hash __read_mostly; static unsigned int tcp_metrics_hash_log __read_mostly; static DEFINE_SPINLOCK(tcp_metrics_lock); +static DEFINE_SEQLOCK(fastopen_seqlock); static void tcpm_suck_dst(struct tcp_metrics_block *tm, const struct dst_entry *dst, @@ -97,7 +102,7 @@ static void tcpm_suck_dst(struct tcp_metrics_block *tm, u32 msval; u32 val; - tm->tcpm_stamp = jiffies; + WRITE_ONCE(tm->tcpm_stamp, jiffies); val = 0; if (dst_metric_locked(dst, RTAX_RTT)) @@ -110,30 +115,42 @@ static void tcpm_suck_dst(struct tcp_metrics_block *tm, val |= 
1 << TCP_METRIC_CWND; if (dst_metric_locked(dst, RTAX_REORDERING)) val |= 1 << TCP_METRIC_REORDERING; - tm->tcpm_lock = val; + /* Paired with READ_ONCE() in tcp_metric_locked() */ + WRITE_ONCE(tm->tcpm_lock, val); msval = dst_metric_raw(dst, RTAX_RTT); - tm->tcpm_vals[TCP_METRIC_RTT] = msval * USEC_PER_MSEC; + tcp_metric_set(tm, TCP_METRIC_RTT, msval * USEC_PER_MSEC); msval = dst_metric_raw(dst, RTAX_RTTVAR); - tm->tcpm_vals[TCP_METRIC_RTTVAR] = msval * USEC_PER_MSEC; - tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH); - tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND); - tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING); + tcp_metric_set(tm, TCP_METRIC_RTTVAR, msval * USEC_PER_MSEC); + tcp_metric_set(tm, TCP_METRIC_SSTHRESH, + dst_metric_raw(dst, RTAX_SSTHRESH)); + tcp_metric_set(tm, TCP_METRIC_CWND, + dst_metric_raw(dst, RTAX_CWND)); + tcp_metric_set(tm, TCP_METRIC_REORDERING, + dst_metric_raw(dst, RTAX_REORDERING)); if (fastopen_clear) { + write_seqlock(&fastopen_seqlock); tm->tcpm_fastopen.mss = 0; tm->tcpm_fastopen.syn_loss = 0; tm->tcpm_fastopen.try_exp = 0; tm->tcpm_fastopen.cookie.exp = false; tm->tcpm_fastopen.cookie.len = 0; + write_sequnlock(&fastopen_seqlock); } } #define TCP_METRICS_TIMEOUT (60 * 60 * HZ) -static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst) +static void tcpm_check_stamp(struct tcp_metrics_block *tm, + const struct dst_entry *dst) { - if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT))) + unsigned long limit; + + if (!tm) + return; + limit = READ_ONCE(tm->tcpm_stamp) + TCP_METRICS_TIMEOUT; + if (unlikely(time_after(jiffies, limit))) tcpm_suck_dst(tm, dst, false); } @@ -174,20 +191,23 @@ static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst, oldest = deref_locked(tcp_metrics_hash[hash].chain); for (tm = deref_locked(oldest->tcpm_next); tm; tm = deref_locked(tm->tcpm_next)) { - if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp)) + if (time_before(READ_ONCE(tm->tcpm_stamp), + READ_ONCE(oldest->tcpm_stamp))) oldest = tm; } tm = oldest; } else { - tm = kmalloc(sizeof(*tm), GFP_ATOMIC); + tm = kzalloc(sizeof(*tm), GFP_ATOMIC); if (!tm) goto out_unlock; } - write_pnet(&tm->tcpm_net, net); + /* Paired with the READ_ONCE() in tm_net() */ + WRITE_ONCE(tm->tcpm_net, net); + tm->tcpm_saddr = *saddr; tm->tcpm_daddr = *daddr; - tcpm_suck_dst(tm, dst, true); + tcpm_suck_dst(tm, dst, reclaim); if (likely(!reclaim)) { tm->tcpm_next = tcp_metrics_hash[hash].chain; @@ -434,7 +454,7 @@ void tcp_update_metrics(struct sock *sk) tp->reordering); } } - tm->tcpm_stamp = jiffies; + WRITE_ONCE(tm->tcpm_stamp, jiffies); out_unlock: rcu_read_unlock(); } @@ -539,8 +559,6 @@ bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst) return ret; } -static DEFINE_SEQLOCK(fastopen_seqlock); - void tcp_fastopen_cache_get(struct sock *sk, u16 *mss, struct tcp_fastopen_cookie *cookie) { @@ -647,7 +665,7 @@ static int tcp_metrics_fill_info(struct sk_buff *msg, } if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE, - jiffies - tm->tcpm_stamp, + jiffies - READ_ONCE(tm->tcpm_stamp), TCP_METRICS_ATTR_PAD) < 0) goto nla_put_failure; @@ -658,7 +676,7 @@ static int tcp_metrics_fill_info(struct sk_buff *msg, if (!nest) goto nla_put_failure; for (i = 0; i < TCP_METRIC_MAX_KERNEL + 1; i++) { - u32 val = tm->tcpm_vals[i]; + u32 val = tcp_metric_get(tm, i); if (!val) continue; diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 42a96b3547c9..abfa860367aa 100644 --- 
a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -114,6 +114,7 @@ #include <net/sock_reuseport.h> #include <net/addrconf.h> #include <net/udp_tunnel.h> +#include <net/gro.h> #if IS_ENABLED(CONFIG_IPV6) #include <net/ipv6_stubs.h> #endif @@ -555,10 +556,13 @@ struct sock *udp4_lib_lookup_skb(const struct sk_buff *skb, { const struct iphdr *iph = ip_hdr(skb); struct net *net = dev_net(skb->dev); + int iif, sdif; + + inet_get_iif_sdif(skb, &iif, &sdif); return __udp4_lib_lookup(net, iph->saddr, sport, - iph->daddr, dport, inet_iif(skb), - inet_sdif(skb), net->ipv4.udp_table, NULL); + iph->daddr, dport, iif, + sdif, net->ipv4.udp_table, NULL); } /* Must be called under rcu_read_lock(). diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index f402946da344..0f46b3c2e4ac 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c @@ -609,10 +609,13 @@ static struct sock *udp4_gro_lookup_skb(struct sk_buff *skb, __be16 sport, { const struct iphdr *iph = skb_gro_network_header(skb); struct net *net = dev_net(skb->dev); + int iif, sdif; + + inet_get_iif_sdif(skb, &iif, &sdif); return __udp4_lib_lookup(net, iph->saddr, sport, - iph->daddr, dport, inet_iif(skb), - inet_sdif(skb), net->ipv4.udp_table, NULL); + iph->daddr, dport, iif, + sdif, net->ipv4.udp_table, NULL); } INDIRECT_CALLABLE_SCOPE diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index cc3d5ad17257..67a3b8f6e72b 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -1073,7 +1073,7 @@ static int ip6mr_cache_report(const struct mr_table *mrt, struct sk_buff *pkt, And all this only to mangle msg->im6_msgtype and to set msg->im6_mbz to "mbz" :-) */ - skb_push(skb, -skb_network_offset(pkt)); + __skb_pull(skb, skb_network_offset(pkt)); skb_push(skb, sizeof(*msg)); skb_reset_transport_header(skb); diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 18634ebd20a4..a42be96ae209 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -197,7 +197,8 @@ static struct nd_opt_hdr *ndisc_next_option(struct nd_opt_hdr *cur, static inline int ndisc_is_useropt(const struct net_device *dev, struct nd_opt_hdr *opt) { - return opt->nd_opt_type == ND_OPT_RDNSS || + return opt->nd_opt_type == ND_OPT_PREFIX_INFO || + opt->nd_opt_type == ND_OPT_RDNSS || opt->nd_opt_type == ND_OPT_DNSSL || opt->nd_opt_type == ND_OPT_CAPTIVE_PORTAL || opt->nd_opt_type == ND_OPT_PREF64 || diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c index f804c11e2146..c2c291827a2c 100644 --- a/net/ipv6/ping.c +++ b/net/ipv6/ping.c @@ -120,7 +120,7 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) ipcm6_init_sk(&ipc6, np); ipc6.sockc.tsflags = sk->sk_tsflags; - ipc6.sockc.mark = sk->sk_mark; + ipc6.sockc.mark = READ_ONCE(sk->sk_mark); fl6.flowi6_oif = oif; diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index ac1cef094c5f..49381f35b623 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -614,7 +614,7 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length, skb_reserve(skb, hlen); skb->protocol = htons(ETH_P_IPV6); - skb->priority = sk->sk_priority; + skb->priority = READ_ONCE(sk->sk_priority); skb->mark = sockc->mark; skb->tstamp = sockc->transmit_time; @@ -774,12 +774,12 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) */ memset(&fl6, 0, sizeof(fl6)); - fl6.flowi6_mark = sk->sk_mark; + fl6.flowi6_mark = READ_ONCE(sk->sk_mark); fl6.flowi6_uid = sk->sk_uid; ipcm6_init(&ipc6); ipc6.sockc.tsflags = sk->sk_tsflags; - ipc6.sockc.mark = sk->sk_mark; + ipc6.sockc.mark = fl6.flowi6_mark; if (sin6) { if (addr_len < 
SIN6_LEN_RFC2133) diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 64e873f5895f..56a55585eb79 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -2951,7 +2951,8 @@ void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu) if (!oif && skb->dev) oif = l3mdev_master_ifindex(skb->dev); - ip6_update_pmtu(skb, sock_net(sk), mtu, oif, sk->sk_mark, sk->sk_uid); + ip6_update_pmtu(skb, sock_net(sk), mtu, oif, READ_ONCE(sk->sk_mark), + sk->sk_uid); dst = __sk_dst_get(sk); if (!dst || !dst->obsolete || @@ -3172,8 +3173,8 @@ void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif) void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk) { - ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark, - sk->sk_uid); + ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, + READ_ONCE(sk->sk_mark), sk->sk_uid); } EXPORT_SYMBOL_GPL(ip6_sk_redirect); diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 4714eb695913..6e86721e1cdb 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -564,8 +564,8 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst, opt = ireq->ipv6_opt; if (!opt) opt = rcu_dereference(np->opt); - err = ip6_xmit(sk, skb, fl6, skb->mark ? : sk->sk_mark, opt, - tclass, sk->sk_priority); + err = ip6_xmit(sk, skb, fl6, skb->mark ? : READ_ONCE(sk->sk_mark), + opt, tclass, sk->sk_priority); rcu_read_unlock(); err = net_xmit_eval(err); } @@ -939,7 +939,7 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 if (sk->sk_state == TCP_TIME_WAIT) mark = inet_twsk(sk)->tw_mark; else - mark = sk->sk_mark; + mark = READ_ONCE(sk->sk_mark); skb_set_delivery_time(buff, tcp_transmit_time(sk), true); } if (txhash) { @@ -1128,7 +1128,8 @@ static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb, tcp_time_stamp_raw() + tcp_rsk(req)->ts_off, READ_ONCE(req->ts_recent), sk->sk_bound_dev_if, tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr, l3index), - ipv6_get_dsfield(ipv6_hdr(skb)), 0, sk->sk_priority, + ipv6_get_dsfield(ipv6_hdr(skb)), 0, + READ_ONCE(sk->sk_priority), READ_ONCE(tcp_rsk(req)->txhash)); } diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index b7c972aa09a7..f787e6b8424c 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -51,6 +51,7 @@ #include <net/inet6_hashtables.h> #include <net/busy_poll.h> #include <net/sock_reuseport.h> +#include <net/gro.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> @@ -300,10 +301,13 @@ struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb, { const struct ipv6hdr *iph = ipv6_hdr(skb); struct net *net = dev_net(skb->dev); + int iif, sdif; + + inet6_get_iif_sdif(skb, &iif, &sdif); return __udp6_lib_lookup(net, &iph->saddr, sport, - &iph->daddr, dport, inet6_iif(skb), - inet6_sdif(skb), net->ipv4.udp_table, NULL); + &iph->daddr, dport, iif, + sdif, net->ipv4.udp_table, NULL); } /* Must be called under rcu_read_lock(). 
@@ -624,7 +628,7 @@ int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt, if (type == NDISC_REDIRECT) { if (tunnel) { ip6_redirect(skb, sock_net(sk), inet6_iif(skb), - sk->sk_mark, sk->sk_uid); + READ_ONCE(sk->sk_mark), sk->sk_uid); } else { ip6_sk_redirect(skb, sk); } @@ -1356,7 +1360,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) ipcm6_init(&ipc6); ipc6.gso_size = READ_ONCE(up->gso_size); ipc6.sockc.tsflags = sk->sk_tsflags; - ipc6.sockc.mark = sk->sk_mark; + ipc6.sockc.mark = READ_ONCE(sk->sk_mark); /* destination address check */ if (sin6) { diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c index 09fa7a42cb93..6b95ba241ebe 100644 --- a/net/ipv6/udp_offload.c +++ b/net/ipv6/udp_offload.c @@ -118,10 +118,13 @@ static struct sock *udp6_gro_lookup_skb(struct sk_buff *skb, __be16 sport, { const struct ipv6hdr *iph = skb_gro_network_header(skb); struct net *net = dev_net(skb->dev); + int iif, sdif; + + inet6_get_iif_sdif(skb, &iif, &sdif); return __udp6_lib_lookup(net, &iph->saddr, sport, - &iph->daddr, dport, inet6_iif(skb), - inet6_sdif(skb), net->ipv4.udp_table, NULL); + &iph->daddr, dport, iif, + sdif, net->ipv4.udp_table, NULL); } INDIRECT_CALLABLE_SCOPE diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index b1623f9c4f92..ff78217f0cb1 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -519,7 +519,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) /* Get and verify the address */ memset(&fl6, 0, sizeof(fl6)); - fl6.flowi6_mark = sk->sk_mark; + fl6.flowi6_mark = READ_ONCE(sk->sk_mark); fl6.flowi6_uid = sk->sk_uid; ipcm6_init(&ipc6); diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 3317d1cca156..d80658547836 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -2335,7 +2335,7 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk, lock_sock_nested(ssk, SINGLE_DEPTH_NESTING); - if (flags & MPTCP_CF_FASTCLOSE) { + if ((flags & MPTCP_CF_FASTCLOSE) && !__mptcp_check_fallback(msk)) { /* be sure to force the tcp_disconnect() path, * to generate the egress reset */ @@ -3328,7 +3328,7 @@ static void mptcp_release_cb(struct sock *sk) if (__test_and_clear_bit(MPTCP_CLEAN_UNA, &msk->cb_flags)) __mptcp_clean_una_wakeup(sk); - if (unlikely(&msk->cb_flags)) { + if (unlikely(msk->cb_flags)) { /* be sure to set the current sk state before tacking actions * depending on sk_state, that is processing MPTCP_ERROR_REPORT */ diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index 37fbe22e2433..ba2a873a4d2e 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -325,7 +325,6 @@ struct mptcp_sock { u32 subflow_id; u32 setsockopt_seq; char ca_name[TCP_CA_NAME_MAX]; - struct mptcp_sock *dl_next; }; #define mptcp_data_lock(sk) spin_lock_bh(&(sk)->sk_lock.slock) diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c index 63f7a09335c5..a3f1fe810cc9 100644 --- a/net/mptcp/sockopt.c +++ b/net/mptcp/sockopt.c @@ -103,7 +103,7 @@ static void mptcp_sol_socket_sync_intval(struct mptcp_sock *msk, int optname, in break; case SO_MARK: if (READ_ONCE(ssk->sk_mark) != sk->sk_mark) { - ssk->sk_mark = sk->sk_mark; + WRITE_ONCE(ssk->sk_mark, sk->sk_mark); sk_dst_reset(ssk); } break; diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index 9ee3b7abbaf6..94ae7dd01c65 100644 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@ -1793,16 +1793,31 @@ static void subflow_state_change(struct sock *sk) void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_ssk) { 
struct request_sock_queue *queue = &inet_csk(listener_ssk)->icsk_accept_queue; - struct mptcp_sock *msk, *next, *head = NULL; - struct request_sock *req; - struct sock *sk; + struct request_sock *req, *head, *tail; + struct mptcp_subflow_context *subflow; + struct sock *sk, *ssk; - /* build a list of all unaccepted mptcp sockets */ + /* Due to lock dependencies no relevant lock can be acquired under rskq_lock. + * Splice the req list, so that accept() can not reach the pending ssk after + * the listener socket is released below. + */ spin_lock_bh(&queue->rskq_lock); - for (req = queue->rskq_accept_head; req; req = req->dl_next) { - struct mptcp_subflow_context *subflow; - struct sock *ssk = req->sk; + head = queue->rskq_accept_head; + tail = queue->rskq_accept_tail; + queue->rskq_accept_head = NULL; + queue->rskq_accept_tail = NULL; + spin_unlock_bh(&queue->rskq_lock); + + if (!head) + return; + /* can't acquire the msk socket lock under the subflow one, + * or will cause ABBA deadlock + */ + release_sock(listener_ssk); + + for (req = head; req; req = req->dl_next) { + ssk = req->sk; if (!sk_is_mptcp(ssk)) continue; @@ -1810,32 +1825,10 @@ void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_s if (!subflow || !subflow->conn) continue; - /* skip if already in list */ sk = subflow->conn; - msk = mptcp_sk(sk); - if (msk->dl_next || msk == head) - continue; - sock_hold(sk); - msk->dl_next = head; - head = msk; - } - spin_unlock_bh(&queue->rskq_lock); - if (!head) - return; - - /* can't acquire the msk socket lock under the subflow one, - * or will cause ABBA deadlock - */ - release_sock(listener_ssk); - - for (msk = head; msk; msk = next) { - sk = (struct sock *)msk; lock_sock_nested(sk, SINGLE_DEPTH_NESTING); - next = msk->dl_next; - msk->dl_next = NULL; - __mptcp_unaccepted_force_close(sk); release_sock(sk); @@ -1859,6 +1852,13 @@ void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_s /* we are still under the listener msk socket lock */ lock_sock_nested(listener_ssk, SINGLE_DEPTH_NESTING); + + /* restore the listener queue, to let the TCP code clean it up */ + spin_lock_bh(&queue->rskq_lock); + WARN_ON_ONCE(queue->rskq_accept_head); + queue->rskq_accept_head = head; + queue->rskq_accept_tail = tail; + spin_unlock_bh(&queue->rskq_lock); } static int subflow_ulp_init(struct sock *sk) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index d3c6ecd1f5a6..c62227ae7746 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -31,7 +31,9 @@ static LIST_HEAD(nf_tables_expressions); static LIST_HEAD(nf_tables_objects); static LIST_HEAD(nf_tables_flowtables); static LIST_HEAD(nf_tables_destroy_list); +static LIST_HEAD(nf_tables_gc_list); static DEFINE_SPINLOCK(nf_tables_destroy_list_lock); +static DEFINE_SPINLOCK(nf_tables_gc_list_lock); enum { NFT_VALIDATE_SKIP = 0, @@ -120,6 +122,9 @@ static void nft_validate_state_update(struct nft_table *table, u8 new_validate_s static void nf_tables_trans_destroy_work(struct work_struct *w); static DECLARE_WORK(trans_destroy_work, nf_tables_trans_destroy_work); +static void nft_trans_gc_work(struct work_struct *work); +static DECLARE_WORK(trans_gc_work, nft_trans_gc_work); + static void nft_ctx_init(struct nft_ctx *ctx, struct net *net, const struct sk_buff *skb, @@ -582,10 +587,6 @@ static int nft_trans_set_add(const struct nft_ctx *ctx, int msg_type, return __nft_trans_set_add(ctx, msg_type, set, NULL); } -static void nft_setelem_data_deactivate(const struct 
net *net, - const struct nft_set *set, - struct nft_set_elem *elem); - static int nft_mapelem_deactivate(const struct nft_ctx *ctx, struct nft_set *set, const struct nft_set_iter *iter, @@ -5055,6 +5056,7 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info, INIT_LIST_HEAD(&set->bindings); INIT_LIST_HEAD(&set->catchall_list); + refcount_set(&set->refs, 1); set->table = table; write_pnet(&set->net, net); set->ops = ops; @@ -5122,6 +5124,14 @@ static void nft_set_catchall_destroy(const struct nft_ctx *ctx, } } +static void nft_set_put(struct nft_set *set) +{ + if (refcount_dec_and_test(&set->refs)) { + kfree(set->name); + kvfree(set); + } +} + static void nft_set_destroy(const struct nft_ctx *ctx, struct nft_set *set) { int i; @@ -5134,8 +5144,7 @@ static void nft_set_destroy(const struct nft_ctx *ctx, struct nft_set *set) set->ops->destroy(ctx, set); nft_set_catchall_destroy(ctx, set); - kfree(set->name); - kvfree(set); + nft_set_put(set); } static int nf_tables_delset(struct sk_buff *skb, const struct nfnl_info *info, @@ -5602,8 +5611,12 @@ static int nf_tables_dump_setelem(const struct nft_ctx *ctx, const struct nft_set_iter *iter, struct nft_set_elem *elem) { + const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv); struct nft_set_dump_args *args; + if (nft_set_elem_expired(ext)) + return 0; + args = container_of(iter, struct nft_set_dump_args, iter); return nf_tables_fill_setelem(args->skb, set, elem, args->reset); } @@ -6274,7 +6287,8 @@ struct nft_set_ext *nft_set_catchall_lookup(const struct net *net, list_for_each_entry_rcu(catchall, &set->catchall_list, list) { ext = nft_set_elem_ext(set, catchall->elem); if (nft_set_elem_active(ext, genmask) && - !nft_set_elem_expired(ext)) + !nft_set_elem_expired(ext) && + !nft_set_elem_is_dead(ext)) return ext; } @@ -6282,29 +6296,6 @@ struct nft_set_ext *nft_set_catchall_lookup(const struct net *net, } EXPORT_SYMBOL_GPL(nft_set_catchall_lookup); -void *nft_set_catchall_gc(const struct nft_set *set) -{ - struct nft_set_elem_catchall *catchall, *next; - struct nft_set_ext *ext; - void *elem = NULL; - - list_for_each_entry_safe(catchall, next, &set->catchall_list, list) { - ext = nft_set_elem_ext(set, catchall->elem); - - if (!nft_set_elem_expired(ext) || - nft_set_elem_mark_busy(ext)) - continue; - - elem = catchall->elem; - list_del_rcu(&catchall->list); - kfree_rcu(catchall, rcu); - break; - } - - return elem; -} -EXPORT_SYMBOL_GPL(nft_set_catchall_gc); - static int nft_setelem_catchall_insert(const struct net *net, struct nft_set *set, const struct nft_set_elem *elem, @@ -6366,7 +6357,6 @@ static void nft_setelem_activate(struct net *net, struct nft_set *set, if (nft_setelem_is_catchall(set, elem)) { nft_set_elem_change_active(net, set, ext); - nft_set_elem_clear_busy(ext); } else { set->ops->activate(net, set, elem); } @@ -6381,8 +6371,7 @@ static int nft_setelem_catchall_deactivate(const struct net *net, list_for_each_entry(catchall, &set->catchall_list, list) { ext = nft_set_elem_ext(set, catchall->elem); - if (!nft_is_active(net, ext) || - nft_set_elem_mark_busy(ext)) + if (!nft_is_active(net, ext)) continue; kfree(elem->priv); @@ -6777,7 +6766,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, goto err_elem_free; } - ext->genmask = nft_genmask_cur(ctx->net) | NFT_SET_ELEM_BUSY_MASK; + ext->genmask = nft_genmask_cur(ctx->net); err = nft_setelem_insert(ctx->net, set, &elem, &ext2, flags); if (err) { @@ -6929,9 +6918,9 @@ static void nft_setelem_data_activate(const struct net 
*net, nft_use_inc_restore(&(*nft_set_ext_obj(ext))->use); } -static void nft_setelem_data_deactivate(const struct net *net, - const struct nft_set *set, - struct nft_set_elem *elem) +void nft_setelem_data_deactivate(const struct net *net, + const struct nft_set *set, + struct nft_set_elem *elem) { const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv); @@ -7095,8 +7084,7 @@ static int nft_set_catchall_flush(const struct nft_ctx *ctx, list_for_each_entry_rcu(catchall, &set->catchall_list, list) { ext = nft_set_elem_ext(set, catchall->elem); - if (!nft_set_elem_active(ext, genmask) || - nft_set_elem_mark_busy(ext)) + if (!nft_set_elem_active(ext, genmask)) continue; elem.priv = catchall->elem; @@ -7170,29 +7158,6 @@ static int nf_tables_delsetelem(struct sk_buff *skb, return err; } -void nft_set_gc_batch_release(struct rcu_head *rcu) -{ - struct nft_set_gc_batch *gcb; - unsigned int i; - - gcb = container_of(rcu, struct nft_set_gc_batch, head.rcu); - for (i = 0; i < gcb->head.cnt; i++) - nft_set_elem_destroy(gcb->head.set, gcb->elems[i], true); - kfree(gcb); -} - -struct nft_set_gc_batch *nft_set_gc_batch_alloc(const struct nft_set *set, - gfp_t gfp) -{ - struct nft_set_gc_batch *gcb; - - gcb = kzalloc(sizeof(*gcb), gfp); - if (gcb == NULL) - return gcb; - gcb->head.set = set; - return gcb; -} - /* * Stateful objects */ @@ -9414,6 +9379,207 @@ void nft_chain_del(struct nft_chain *chain) list_del_rcu(&chain->list); } +static void nft_trans_gc_setelem_remove(struct nft_ctx *ctx, + struct nft_trans_gc *trans) +{ + void **priv = trans->priv; + unsigned int i; + + for (i = 0; i < trans->count; i++) { + struct nft_set_elem elem = { + .priv = priv[i], + }; + + nft_setelem_data_deactivate(ctx->net, trans->set, &elem); + nft_setelem_remove(ctx->net, trans->set, &elem); + } +} + +void nft_trans_gc_destroy(struct nft_trans_gc *trans) +{ + nft_set_put(trans->set); + put_net(trans->net); + kfree(trans); +} + +static void nft_trans_gc_trans_free(struct rcu_head *rcu) +{ + struct nft_set_elem elem = {}; + struct nft_trans_gc *trans; + struct nft_ctx ctx = {}; + unsigned int i; + + trans = container_of(rcu, struct nft_trans_gc, rcu); + ctx.net = read_pnet(&trans->set->net); + + for (i = 0; i < trans->count; i++) { + elem.priv = trans->priv[i]; + if (!nft_setelem_is_catchall(trans->set, &elem)) + atomic_dec(&trans->set->nelems); + + nf_tables_set_elem_destroy(&ctx, trans->set, elem.priv); + } + + nft_trans_gc_destroy(trans); +} + +static bool nft_trans_gc_work_done(struct nft_trans_gc *trans) +{ + struct nftables_pernet *nft_net; + struct nft_ctx ctx = {}; + + nft_net = nft_pernet(trans->net); + + mutex_lock(&nft_net->commit_mutex); + + /* Check for race with transaction, otherwise this batch refers to + * stale objects that might not be there anymore. Skip transaction if + * set has been destroyed from control plane transaction in case gc + * worker loses race. 
+ */ + if (READ_ONCE(nft_net->gc_seq) != trans->seq || trans->set->dead) { + mutex_unlock(&nft_net->commit_mutex); + return false; + } + + ctx.net = trans->net; + ctx.table = trans->set->table; + + nft_trans_gc_setelem_remove(&ctx, trans); + mutex_unlock(&nft_net->commit_mutex); + + return true; +} + +static void nft_trans_gc_work(struct work_struct *work) +{ + struct nft_trans_gc *trans, *next; + LIST_HEAD(trans_gc_list); + + spin_lock(&nf_tables_destroy_list_lock); + list_splice_init(&nf_tables_gc_list, &trans_gc_list); + spin_unlock(&nf_tables_destroy_list_lock); + + list_for_each_entry_safe(trans, next, &trans_gc_list, list) { + list_del(&trans->list); + if (!nft_trans_gc_work_done(trans)) { + nft_trans_gc_destroy(trans); + continue; + } + call_rcu(&trans->rcu, nft_trans_gc_trans_free); + } +} + +struct nft_trans_gc *nft_trans_gc_alloc(struct nft_set *set, + unsigned int gc_seq, gfp_t gfp) +{ + struct net *net = read_pnet(&set->net); + struct nft_trans_gc *trans; + + trans = kzalloc(sizeof(*trans), gfp); + if (!trans) + return NULL; + + refcount_inc(&set->refs); + trans->set = set; + trans->net = get_net(net); + trans->seq = gc_seq; + + return trans; +} + +void nft_trans_gc_elem_add(struct nft_trans_gc *trans, void *priv) +{ + trans->priv[trans->count++] = priv; +} + +static void nft_trans_gc_queue_work(struct nft_trans_gc *trans) +{ + spin_lock(&nf_tables_gc_list_lock); + list_add_tail(&trans->list, &nf_tables_gc_list); + spin_unlock(&nf_tables_gc_list_lock); + + schedule_work(&trans_gc_work); +} + +static int nft_trans_gc_space(struct nft_trans_gc *trans) +{ + return NFT_TRANS_GC_BATCHCOUNT - trans->count; +} + +struct nft_trans_gc *nft_trans_gc_queue_async(struct nft_trans_gc *gc, + unsigned int gc_seq, gfp_t gfp) +{ + if (nft_trans_gc_space(gc)) + return gc; + + nft_trans_gc_queue_work(gc); + + return nft_trans_gc_alloc(gc->set, gc_seq, gfp); +} + +void nft_trans_gc_queue_async_done(struct nft_trans_gc *trans) +{ + if (trans->count == 0) { + nft_trans_gc_destroy(trans); + return; + } + + nft_trans_gc_queue_work(trans); +} + +struct nft_trans_gc *nft_trans_gc_queue_sync(struct nft_trans_gc *gc, gfp_t gfp) +{ + if (WARN_ON_ONCE(!lockdep_commit_lock_is_held(gc->net))) + return NULL; + + if (nft_trans_gc_space(gc)) + return gc; + + call_rcu(&gc->rcu, nft_trans_gc_trans_free); + + return nft_trans_gc_alloc(gc->set, 0, gfp); +} + +void nft_trans_gc_queue_sync_done(struct nft_trans_gc *trans) +{ + WARN_ON_ONCE(!lockdep_commit_lock_is_held(trans->net)); + + if (trans->count == 0) { + nft_trans_gc_destroy(trans); + return; + } + + call_rcu(&trans->rcu, nft_trans_gc_trans_free); +} + +struct nft_trans_gc *nft_trans_gc_catchall(struct nft_trans_gc *gc, + unsigned int gc_seq) +{ + struct nft_set_elem_catchall *catchall; + const struct nft_set *set = gc->set; + struct nft_set_ext *ext; + + list_for_each_entry_rcu(catchall, &set->catchall_list, list) { + ext = nft_set_elem_ext(set, catchall->elem); + + if (!nft_set_elem_expired(ext)) + continue; + if (nft_set_elem_is_dead(ext)) + goto dead_elem; + + nft_set_elem_dead(ext); +dead_elem: + gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC); + if (!gc) + return NULL; + + nft_trans_gc_elem_add(gc, catchall->elem); + } + + return gc; +} + static void nf_tables_module_autoload_cleanup(struct net *net) { struct nftables_pernet *nft_net = nft_pernet(net); @@ -9576,11 +9742,11 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) { struct nftables_pernet *nft_net = nft_pernet(net); struct nft_trans *trans, *next; + unsigned int base_seq, 
gc_seq; LIST_HEAD(set_update_list); struct nft_trans_elem *te; struct nft_chain *chain; struct nft_table *table; - unsigned int base_seq; LIST_HEAD(adl); int err; @@ -9657,6 +9823,10 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) WRITE_ONCE(nft_net->base_seq, base_seq); + /* Bump gc counter, it becomes odd, this is the busy mark. */ + gc_seq = READ_ONCE(nft_net->gc_seq); + WRITE_ONCE(nft_net->gc_seq, ++gc_seq); + /* step 3. Start new generation, rules_gen_X now in use. */ net->nft.gencursor = nft_gencursor_next(net); @@ -9764,6 +9934,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) break; case NFT_MSG_DELSET: case NFT_MSG_DESTROYSET: + nft_trans_set(trans)->dead = 1; list_del_rcu(&nft_trans_set(trans)->list); nf_tables_set_notify(&trans->ctx, nft_trans_set(trans), trans->msg_type, GFP_KERNEL); @@ -9866,6 +10037,8 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) nft_commit_notify(net, NETLINK_CB(skb).portid); nf_tables_gen_notify(net, skb, NFT_MSG_NEWGEN); nf_tables_commit_audit_log(&adl, nft_net->base_seq); + + WRITE_ONCE(nft_net->gc_seq, ++gc_seq); nf_tables_commit_release(net); return 0; @@ -10915,6 +11088,7 @@ static int __net_init nf_tables_init_net(struct net *net) INIT_LIST_HEAD(&nft_net->notify_list); mutex_init(&nft_net->commit_mutex); nft_net->base_seq = 1; + nft_net->gc_seq = 0; return 0; } @@ -10943,10 +11117,16 @@ static void __net_exit nf_tables_exit_net(struct net *net) WARN_ON_ONCE(!list_empty(&nft_net->notify_list)); } +static void nf_tables_exit_batch(struct list_head *net_exit_list) +{ + flush_work(&trans_gc_work); +} + static struct pernet_operations nf_tables_net_ops = { .init = nf_tables_init_net, .pre_exit = nf_tables_pre_exit_net, .exit = nf_tables_exit_net, + .exit_batch = nf_tables_exit_batch, .id = &nf_tables_net_id, .size = sizeof(struct nftables_pernet), }; @@ -11018,6 +11198,7 @@ static void __exit nf_tables_module_exit(void) nft_chain_filter_fini(); nft_chain_route_fini(); unregister_pernet_subsys(&nf_tables_net_ops); + cancel_work_sync(&trans_gc_work); cancel_work_sync(&trans_destroy_work); rcu_barrier(); rhltable_destroy(&nft_objname_ht); diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c index 0b73cb0e752f..cef5df846000 100644 --- a/net/netfilter/nft_set_hash.c +++ b/net/netfilter/nft_set_hash.c @@ -59,6 +59,8 @@ static inline int nft_rhash_cmp(struct rhashtable_compare_arg *arg, if (memcmp(nft_set_ext_key(&he->ext), x->key, x->set->klen)) return 1; + if (nft_set_elem_is_dead(&he->ext)) + return 1; if (nft_set_elem_expired(&he->ext)) return 1; if (!nft_set_elem_active(&he->ext, x->genmask)) @@ -188,7 +190,6 @@ static void nft_rhash_activate(const struct net *net, const struct nft_set *set, struct nft_rhash_elem *he = elem->priv; nft_set_elem_change_active(net, set, &he->ext); - nft_set_elem_clear_busy(&he->ext); } static bool nft_rhash_flush(const struct net *net, @@ -196,12 +197,9 @@ static bool nft_rhash_flush(const struct net *net, { struct nft_rhash_elem *he = priv; - if (!nft_set_elem_mark_busy(&he->ext) || - !nft_is_active(net, &he->ext)) { - nft_set_elem_change_active(net, set, &he->ext); - return true; - } - return false; + nft_set_elem_change_active(net, set, &he->ext); + + return true; } static void *nft_rhash_deactivate(const struct net *net, @@ -218,9 +216,8 @@ static void *nft_rhash_deactivate(const struct net *net, rcu_read_lock(); he = rhashtable_lookup(&priv->ht, &arg, nft_rhash_params); - if (he != NULL && - !nft_rhash_flush(net, set, he)) - he = NULL; + 
if (he) + nft_set_elem_change_active(net, set, &he->ext); rcu_read_unlock(); @@ -252,7 +249,9 @@ static bool nft_rhash_delete(const struct nft_set *set, if (he == NULL) return false; - return rhashtable_remove_fast(&priv->ht, &he->node, nft_rhash_params) == 0; + nft_set_elem_dead(&he->ext); + + return true; } static void nft_rhash_walk(const struct nft_ctx *ctx, struct nft_set *set, @@ -278,8 +277,6 @@ static void nft_rhash_walk(const struct nft_ctx *ctx, struct nft_set *set, if (iter->count < iter->skip) goto cont; - if (nft_set_elem_expired(&he->ext)) - goto cont; if (!nft_set_elem_active(&he->ext, iter->genmask)) goto cont; @@ -314,25 +311,48 @@ static bool nft_rhash_expr_needs_gc_run(const struct nft_set *set, static void nft_rhash_gc(struct work_struct *work) { + struct nftables_pernet *nft_net; struct nft_set *set; struct nft_rhash_elem *he; struct nft_rhash *priv; - struct nft_set_gc_batch *gcb = NULL; struct rhashtable_iter hti; + struct nft_trans_gc *gc; + struct net *net; + u32 gc_seq; priv = container_of(work, struct nft_rhash, gc_work.work); set = nft_set_container_of(priv); + net = read_pnet(&set->net); + nft_net = nft_pernet(net); + gc_seq = READ_ONCE(nft_net->gc_seq); + + gc = nft_trans_gc_alloc(set, gc_seq, GFP_KERNEL); + if (!gc) + goto done; rhashtable_walk_enter(&priv->ht, &hti); rhashtable_walk_start(&hti); while ((he = rhashtable_walk_next(&hti))) { if (IS_ERR(he)) { - if (PTR_ERR(he) != -EAGAIN) - break; + if (PTR_ERR(he) != -EAGAIN) { + nft_trans_gc_destroy(gc); + gc = NULL; + goto try_later; + } continue; } + /* Ruleset has been updated, try later. */ + if (READ_ONCE(nft_net->gc_seq) != gc_seq) { + nft_trans_gc_destroy(gc); + gc = NULL; + goto try_later; + } + + if (nft_set_elem_is_dead(&he->ext)) + goto dead_elem; + if (nft_set_ext_exists(&he->ext, NFT_SET_EXT_EXPRESSIONS) && nft_rhash_expr_needs_gc_run(set, &he->ext)) goto needs_gc_run; @@ -340,26 +360,26 @@ static void nft_rhash_gc(struct work_struct *work) if (!nft_set_elem_expired(&he->ext)) continue; needs_gc_run: - if (nft_set_elem_mark_busy(&he->ext)) - continue; + nft_set_elem_dead(&he->ext); +dead_elem: + gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC); + if (!gc) + goto try_later; - gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC); - if (gcb == NULL) - break; - rhashtable_remove_fast(&priv->ht, &he->node, nft_rhash_params); - atomic_dec(&set->nelems); - nft_set_gc_batch_add(gcb, he); + nft_trans_gc_elem_add(gc, he); } + + gc = nft_trans_gc_catchall(gc, gc_seq); + +try_later: + /* catchall list iteration requires rcu read side lock. 
*/ rhashtable_walk_stop(&hti); rhashtable_walk_exit(&hti); - he = nft_set_catchall_gc(set); - if (he) { - gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC); - if (gcb) - nft_set_gc_batch_add(gcb, he); - } - nft_set_gc_batch_complete(gcb); + if (gc) + nft_trans_gc_queue_async_done(gc); + +done: queue_delayed_work(system_power_efficient_wq, &priv->gc_work, nft_set_gc_interval(set)); } @@ -394,7 +414,7 @@ static int nft_rhash_init(const struct nft_set *set, return err; INIT_DEFERRABLE_WORK(&priv->gc_work, nft_rhash_gc); - if (set->flags & NFT_SET_TIMEOUT) + if (set->flags & (NFT_SET_TIMEOUT | NFT_SET_EVAL)) nft_rhash_gc_init(set); return 0; @@ -422,7 +442,6 @@ static void nft_rhash_destroy(const struct nft_ctx *ctx, }; cancel_delayed_work_sync(&priv->gc_work); - rcu_barrier(); rhashtable_free_and_destroy(&priv->ht, nft_rhash_elem_destroy, (void *)&rhash_ctx); } diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c index 49915a2a58eb..a5b8301afe4a 100644 --- a/net/netfilter/nft_set_pipapo.c +++ b/net/netfilter/nft_set_pipapo.c @@ -566,8 +566,7 @@ next_match: goto out; if (last) { - if (nft_set_elem_expired(&f->mt[b].e->ext) || - (genmask && + if ((genmask && !nft_set_elem_active(&f->mt[b].e->ext, genmask))) goto next_match; @@ -601,8 +600,17 @@ out: static void *nft_pipapo_get(const struct net *net, const struct nft_set *set, const struct nft_set_elem *elem, unsigned int flags) { - return pipapo_get(net, set, (const u8 *)elem->key.val.data, - nft_genmask_cur(net)); + struct nft_pipapo_elem *ret; + + ret = pipapo_get(net, set, (const u8 *)elem->key.val.data, + nft_genmask_cur(net)); + if (IS_ERR(ret)) + return ret; + + if (nft_set_elem_expired(&ret->ext)) + return ERR_PTR(-ENOENT); + + return ret; } /** @@ -1528,16 +1536,34 @@ static void pipapo_drop(struct nft_pipapo_match *m, } } +static void nft_pipapo_gc_deactivate(struct net *net, struct nft_set *set, + struct nft_pipapo_elem *e) + +{ + struct nft_set_elem elem = { + .priv = e, + }; + + nft_setelem_data_deactivate(net, set, &elem); +} + /** * pipapo_gc() - Drop expired entries from set, destroy start and end elements * @set: nftables API set representation * @m: Matching data */ -static void pipapo_gc(const struct nft_set *set, struct nft_pipapo_match *m) +static void pipapo_gc(const struct nft_set *_set, struct nft_pipapo_match *m) { + struct nft_set *set = (struct nft_set *) _set; struct nft_pipapo *priv = nft_set_priv(set); + struct net *net = read_pnet(&set->net); int rules_f0, first_rule = 0; struct nft_pipapo_elem *e; + struct nft_trans_gc *gc; + + gc = nft_trans_gc_alloc(set, 0, GFP_KERNEL); + if (!gc) + return; while ((rules_f0 = pipapo_rules_same_key(m->f, first_rule))) { union nft_pipapo_map_bucket rulemap[NFT_PIPAPO_MAX_FIELDS]; @@ -1561,13 +1587,20 @@ static void pipapo_gc(const struct nft_set *set, struct nft_pipapo_match *m) f--; i--; e = f->mt[rulemap[i].to].e; - if (nft_set_elem_expired(&e->ext) && - !nft_set_elem_mark_busy(&e->ext)) { + + /* synchronous gc never fails, there is no need to set on + * NFT_SET_ELEM_DEAD_BIT. + */ + if (nft_set_elem_expired(&e->ext)) { priv->dirty = true; - pipapo_drop(m, rulemap); - rcu_barrier(); - nft_set_elem_destroy(set, e, true); + gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC); + if (!gc) + break; + + nft_pipapo_gc_deactivate(net, set, e); + pipapo_drop(m, rulemap); + nft_trans_gc_elem_add(gc, e); /* And check again current first rule, which is now the * first we haven't checked. 
@@ -1577,11 +1610,11 @@ static void pipapo_gc(const struct nft_set *set, struct nft_pipapo_match *m) } } - e = nft_set_catchall_gc(set); - if (e) - nft_set_elem_destroy(set, e, true); - - priv->last_gc = jiffies; + gc = nft_trans_gc_catchall(gc, 0); + if (gc) { + nft_trans_gc_queue_sync_done(gc); + priv->last_gc = jiffies; + } } /** @@ -1706,7 +1739,6 @@ static void nft_pipapo_activate(const struct net *net, return; nft_set_elem_change_active(net, set, &e->ext); - nft_set_elem_clear_busy(&e->ext); } /** @@ -2005,8 +2037,6 @@ static void nft_pipapo_walk(const struct nft_ctx *ctx, struct nft_set *set, goto cont; e = f->mt[r].e; - if (nft_set_elem_expired(&e->ext)) - goto cont; elem.priv = e; diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c index 8d73fffd2d09..f9d4c8fcbbf8 100644 --- a/net/netfilter/nft_set_rbtree.c +++ b/net/netfilter/nft_set_rbtree.c @@ -46,6 +46,12 @@ static int nft_rbtree_cmp(const struct nft_set *set, set->klen); } +static bool nft_rbtree_elem_expired(const struct nft_rbtree_elem *rbe) +{ + return nft_set_elem_expired(&rbe->ext) || + nft_set_elem_is_dead(&rbe->ext); +} + static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set, const u32 *key, const struct nft_set_ext **ext, unsigned int seq) @@ -80,7 +86,7 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set continue; } - if (nft_set_elem_expired(&rbe->ext)) + if (nft_rbtree_elem_expired(rbe)) return false; if (nft_rbtree_interval_end(rbe)) { @@ -98,7 +104,7 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set if (set->flags & NFT_SET_INTERVAL && interval != NULL && nft_set_elem_active(&interval->ext, genmask) && - !nft_set_elem_expired(&interval->ext) && + !nft_rbtree_elem_expired(interval) && nft_rbtree_interval_start(interval)) { *ext = &interval->ext; return true; @@ -215,6 +221,18 @@ static void *nft_rbtree_get(const struct net *net, const struct nft_set *set, return rbe; } +static void nft_rbtree_gc_remove(struct net *net, struct nft_set *set, + struct nft_rbtree *priv, + struct nft_rbtree_elem *rbe) +{ + struct nft_set_elem elem = { + .priv = rbe, + }; + + nft_setelem_data_deactivate(net, set, &elem); + rb_erase(&rbe->node, &priv->root); +} + static int nft_rbtree_gc_elem(const struct nft_set *__set, struct nft_rbtree *priv, struct nft_rbtree_elem *rbe, @@ -222,11 +240,12 @@ static int nft_rbtree_gc_elem(const struct nft_set *__set, { struct nft_set *set = (struct nft_set *)__set; struct rb_node *prev = rb_prev(&rbe->node); + struct net *net = read_pnet(&set->net); struct nft_rbtree_elem *rbe_prev; - struct nft_set_gc_batch *gcb; + struct nft_trans_gc *gc; - gcb = nft_set_gc_batch_check(set, NULL, GFP_ATOMIC); - if (!gcb) + gc = nft_trans_gc_alloc(set, 0, GFP_ATOMIC); + if (!gc) return -ENOMEM; /* search for end interval coming before this element. @@ -244,17 +263,28 @@ static int nft_rbtree_gc_elem(const struct nft_set *__set, if (prev) { rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node); + nft_rbtree_gc_remove(net, set, priv, rbe_prev); - rb_erase(&rbe_prev->node, &priv->root); - atomic_dec(&set->nelems); - nft_set_gc_batch_add(gcb, rbe_prev); + /* There is always room in this trans gc for this element, + * memory allocation never actually happens, hence, the warning + * splat in such case. No need to set NFT_SET_ELEM_DEAD_BIT, + * this is synchronous gc which never fails. 
+ */ + gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC); + if (WARN_ON_ONCE(!gc)) + return -ENOMEM; + + nft_trans_gc_elem_add(gc, rbe_prev); } - rb_erase(&rbe->node, &priv->root); - atomic_dec(&set->nelems); + nft_rbtree_gc_remove(net, set, priv, rbe); + gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC); + if (WARN_ON_ONCE(!gc)) + return -ENOMEM; + + nft_trans_gc_elem_add(gc, rbe); - nft_set_gc_batch_add(gcb, rbe); - nft_set_gc_batch_complete(gcb); + nft_trans_gc_queue_sync_done(gc); return 0; } @@ -482,7 +512,6 @@ static void nft_rbtree_activate(const struct net *net, struct nft_rbtree_elem *rbe = elem->priv; nft_set_elem_change_active(net, set, &rbe->ext); - nft_set_elem_clear_busy(&rbe->ext); } static bool nft_rbtree_flush(const struct net *net, @@ -490,12 +519,9 @@ static bool nft_rbtree_flush(const struct net *net, { struct nft_rbtree_elem *rbe = priv; - if (!nft_set_elem_mark_busy(&rbe->ext) || - !nft_is_active(net, &rbe->ext)) { - nft_set_elem_change_active(net, set, &rbe->ext); - return true; - } - return false; + nft_set_elem_change_active(net, set, &rbe->ext); + + return true; } static void *nft_rbtree_deactivate(const struct net *net, @@ -552,8 +578,6 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx, if (iter->count < iter->skip) goto cont; - if (nft_set_elem_expired(&rbe->ext)) - goto cont; if (!nft_set_elem_active(&rbe->ext, iter->genmask)) goto cont; @@ -572,26 +596,40 @@ cont: static void nft_rbtree_gc(struct work_struct *work) { - struct nft_rbtree_elem *rbe, *rbe_end = NULL, *rbe_prev = NULL; - struct nft_set_gc_batch *gcb = NULL; + struct nft_rbtree_elem *rbe, *rbe_end = NULL; + struct nftables_pernet *nft_net; struct nft_rbtree *priv; + struct nft_trans_gc *gc; struct rb_node *node; struct nft_set *set; + unsigned int gc_seq; struct net *net; - u8 genmask; priv = container_of(work, struct nft_rbtree, gc_work.work); set = nft_set_container_of(priv); net = read_pnet(&set->net); - genmask = nft_genmask_cur(net); + nft_net = nft_pernet(net); + gc_seq = READ_ONCE(nft_net->gc_seq); + + gc = nft_trans_gc_alloc(set, gc_seq, GFP_KERNEL); + if (!gc) + goto done; write_lock_bh(&priv->lock); write_seqcount_begin(&priv->count); for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) { + + /* Ruleset has been updated, try later. 
*/ + if (READ_ONCE(nft_net->gc_seq) != gc_seq) { + nft_trans_gc_destroy(gc); + gc = NULL; + goto try_later; + } + rbe = rb_entry(node, struct nft_rbtree_elem, node); - if (!nft_set_elem_active(&rbe->ext, genmask)) - continue; + if (nft_set_elem_is_dead(&rbe->ext)) + goto dead_elem; /* elements are reversed in the rbtree for historical reasons, * from highest to lowest value, that is why end element is @@ -604,46 +642,36 @@ static void nft_rbtree_gc(struct work_struct *work) if (!nft_set_elem_expired(&rbe->ext)) continue; - if (nft_set_elem_mark_busy(&rbe->ext)) { - rbe_end = NULL; + nft_set_elem_dead(&rbe->ext); + + if (!rbe_end) continue; - } - if (rbe_prev) { - rb_erase(&rbe_prev->node, &priv->root); - rbe_prev = NULL; - } - gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC); - if (!gcb) - break; + nft_set_elem_dead(&rbe_end->ext); - atomic_dec(&set->nelems); - nft_set_gc_batch_add(gcb, rbe); - rbe_prev = rbe; + gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC); + if (!gc) + goto try_later; - if (rbe_end) { - atomic_dec(&set->nelems); - nft_set_gc_batch_add(gcb, rbe_end); - rb_erase(&rbe_end->node, &priv->root); - rbe_end = NULL; - } - node = rb_next(node); - if (!node) - break; + nft_trans_gc_elem_add(gc, rbe_end); + rbe_end = NULL; +dead_elem: + gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC); + if (!gc) + goto try_later; + + nft_trans_gc_elem_add(gc, rbe); } - if (rbe_prev) - rb_erase(&rbe_prev->node, &priv->root); + + gc = nft_trans_gc_catchall(gc, gc_seq); + +try_later: write_seqcount_end(&priv->count); write_unlock_bh(&priv->lock); - rbe = nft_set_catchall_gc(set); - if (rbe) { - gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC); - if (gcb) - nft_set_gc_batch_add(gcb, rbe); - } - nft_set_gc_batch_complete(gcb); - + if (gc) + nft_trans_gc_queue_async_done(gc); +done: queue_delayed_work(system_power_efficient_wq, &priv->gc_work, nft_set_gc_interval(set)); } diff --git a/net/netfilter/nft_socket.c b/net/netfilter/nft_socket.c index 84def74698b7..9ed85be79452 100644 --- a/net/netfilter/nft_socket.c +++ b/net/netfilter/nft_socket.c @@ -107,7 +107,7 @@ static void nft_socket_eval(const struct nft_expr *expr, break; case NFT_SOCKET_MARK: if (sk_fullsock(sk)) { - *dest = sk->sk_mark; + *dest = READ_ONCE(sk->sk_mark); } else { regs->verdict.code = NFT_BREAK; return; diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c index 7013f55f05d1..76e01f292aaf 100644 --- a/net/netfilter/xt_socket.c +++ b/net/netfilter/xt_socket.c @@ -77,7 +77,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par, if (info->flags & XT_SOCKET_RESTORESKMARK && !wildcard && transparent && sk_fullsock(sk)) - pskb->mark = sk->sk_mark; + pskb->mark = READ_ONCE(sk->sk_mark); if (sk != skb->sk) sock_gen_put(sk); @@ -138,7 +138,7 @@ socket_mt6_v1_v2_v3(const struct sk_buff *skb, struct xt_action_param *par) if (info->flags & XT_SOCKET_RESTORESKMARK && !wildcard && transparent && sk_fullsock(sk)) - pskb->mark = sk->sk_mark; + pskb->mark = READ_ONCE(sk->sk_mark); if (sk != skb->sk) sock_gen_put(sk); diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 8e3ddec4c3d5..a2935bd18ed9 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -401,18 +401,20 @@ static void __packet_set_status(struct packet_sock *po, void *frame, int status) { union tpacket_uhdr h; + /* WRITE_ONCE() are paired with READ_ONCE() in __packet_get_status */ + h.raw = frame; switch (po->tp_version) { case TPACKET_V1: - h.h1->tp_status = status; + WRITE_ONCE(h.h1->tp_status, status); 
flush_dcache_page(pgv_to_page(&h.h1->tp_status)); break; case TPACKET_V2: - h.h2->tp_status = status; + WRITE_ONCE(h.h2->tp_status, status); flush_dcache_page(pgv_to_page(&h.h2->tp_status)); break; case TPACKET_V3: - h.h3->tp_status = status; + WRITE_ONCE(h.h3->tp_status, status); flush_dcache_page(pgv_to_page(&h.h3->tp_status)); break; default: @@ -429,17 +431,19 @@ static int __packet_get_status(const struct packet_sock *po, void *frame) smp_rmb(); + /* READ_ONCE() are paired with WRITE_ONCE() in __packet_set_status */ + h.raw = frame; switch (po->tp_version) { case TPACKET_V1: flush_dcache_page(pgv_to_page(&h.h1->tp_status)); - return h.h1->tp_status; + return READ_ONCE(h.h1->tp_status); case TPACKET_V2: flush_dcache_page(pgv_to_page(&h.h2->tp_status)); - return h.h2->tp_status; + return READ_ONCE(h.h2->tp_status); case TPACKET_V3: flush_dcache_page(pgv_to_page(&h.h3->tp_status)); - return h.h3->tp_status; + return READ_ONCE(h.h3->tp_status); default: WARN(1, "TPACKET version not supported.\n"); BUG(); @@ -2050,8 +2054,8 @@ retry: skb->protocol = proto; skb->dev = dev; - skb->priority = sk->sk_priority; - skb->mark = sk->sk_mark; + skb->priority = READ_ONCE(sk->sk_priority); + skb->mark = READ_ONCE(sk->sk_mark); skb->tstamp = sockc.transmit_time; skb_setup_tx_timestamp(skb, sockc.tsflags); @@ -2585,8 +2589,8 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, skb->protocol = proto; skb->dev = dev; - skb->priority = po->sk.sk_priority; - skb->mark = po->sk.sk_mark; + skb->priority = READ_ONCE(po->sk.sk_priority); + skb->mark = READ_ONCE(po->sk.sk_mark); skb->tstamp = sockc->transmit_time; skb_setup_tx_timestamp(skb, sockc->tsflags); skb_zcopy_set_nouarg(skb, ph.raw); @@ -2988,7 +2992,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) goto out_unlock; sockcm_init(&sockc, sk); - sockc.mark = sk->sk_mark; + sockc.mark = READ_ONCE(sk->sk_mark); if (msg->msg_controllen) { err = sock_cmsg_send(sk, msg, &sockc); if (unlikely(err)) @@ -3061,7 +3065,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) skb->protocol = proto; skb->dev = dev; - skb->priority = sk->sk_priority; + skb->priority = READ_ONCE(sk->sk_priority); skb->mark = sockc.mark; skb->tstamp = sockc.transmit_time; diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 8da9d039d964..9f0711da9c95 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -776,7 +776,8 @@ mpls_stack_entry_policy[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1] = { [TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL] = { .type = NLA_U32 }, }; -static const struct nla_policy cfm_opt_policy[TCA_FLOWER_KEY_CFM_OPT_MAX] = { +static const struct nla_policy +cfm_opt_policy[TCA_FLOWER_KEY_CFM_OPT_MAX + 1] = { [TCA_FLOWER_KEY_CFM_MD_LEVEL] = NLA_POLICY_MAX(NLA_U8, FLOW_DIS_CFM_MDL_MAX), [TCA_FLOWER_KEY_CFM_OPCODE] = { .type = NLA_U8 }, @@ -1709,7 +1710,7 @@ static int fl_set_key_cfm(struct nlattr **tb, struct fl_flow_key *mask, struct netlink_ext_ack *extack) { - struct nlattr *nla_cfm_opt[TCA_FLOWER_KEY_CFM_OPT_MAX]; + struct nlattr *nla_cfm_opt[TCA_FLOWER_KEY_CFM_OPT_MAX + 1]; int err; if (!tb[TCA_FLOWER_KEY_CFM]) diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c index 8641f8059317..c49d6af0e048 100644 --- a/net/sched/cls_fw.c +++ b/net/sched/cls_fw.c @@ -267,7 +267,6 @@ static int fw_change(struct net *net, struct sk_buff *in_skb, return -ENOBUFS; fnew->id = f->id; - fnew->res = f->res; fnew->ifindex = f->ifindex; fnew->tp = f->tp; diff --git a/net/sched/cls_route.c 
b/net/sched/cls_route.c index d0c53724d3e8..1e20bbd687f1 100644 --- a/net/sched/cls_route.c +++ b/net/sched/cls_route.c @@ -513,7 +513,6 @@ static int route4_change(struct net *net, struct sk_buff *in_skb, if (fold) { f->id = fold->id; f->iif = fold->iif; - f->res = fold->res; f->handle = fold->handle; f->tp = fold->tp; diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 5abf31e432ca..da4c179a4d41 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -826,7 +826,6 @@ static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp, new->ifindex = n->ifindex; new->fshift = n->fshift; - new->res = n->res; new->flags = n->flags; RCU_INIT_POINTER(new->ht_down, ht); @@ -1024,18 +1023,62 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, return -EINVAL; } + /* At this point, we need to derive the new handle that will be used to + * uniquely map the identity of this table match entry. The + * identity of the entry that we need to construct is 32 bits made of: + * htid(12b):bucketid(8b):node/entryid(12b) + * + * At this point _we have the table(ht)_ in which we will insert this + * entry. We carry the table's id in variable "htid". + * Note that earlier code picked the ht selection either by a) the user + * providing the htid specified via TCA_U32_HASH attribute or b) when + * no such attribute is passed then the root ht, is default to at ID + * 0x[800][00][000]. Rule: the root table has a single bucket with ID 0. + * If OTOH the user passed us the htid, they may also pass a bucketid of + * choice. 0 is fine. For example a user htid is 0x[600][01][000] it is + * indicating hash bucketid of 1. Rule: the entry/node ID _cannot_ be + * passed via the htid, so even if it was non-zero it will be ignored. + * + * We may also have a handle, if the user passed one. The handle also + * carries the same addressing of htid(12b):bucketid(8b):node/entryid(12b). + * Rule: the bucketid on the handle is ignored even if one was passed; + * rather the value on "htid" is always assumed to be the bucketid. + */ if (handle) { + /* Rule: The htid from handle and tableid from htid must match */ if (TC_U32_HTID(handle) && TC_U32_HTID(handle ^ htid)) { NL_SET_ERR_MSG_MOD(extack, "Handle specified hash table address mismatch"); return -EINVAL; } - handle = htid | TC_U32_NODE(handle); - err = idr_alloc_u32(&ht->handle_idr, NULL, &handle, handle, - GFP_KERNEL); - if (err) - return err; - } else + /* Ok, so far we have a valid htid(12b):bucketid(8b) but we + * need to finalize the table entry identification with the last + * part - the node/entryid(12b)). Rule: Nodeid _cannot be 0_ for + * entries. Rule: nodeid of 0 is reserved only for tables(see + * earlier code which processes TC_U32_DIVISOR attribute). + * Rule: The nodeid can only be derived from the handle (and not + * htid). + * Rule: if the handle specified zero for the node id example + * 0x60000000, then pick a new nodeid from the pool of IDs + * this hash table has been allocating from. + * If OTOH it is specified (i.e for example the user passed a + * handle such as 0x60000123), then we use it generate our final + * handle which is used to uniquely identify the match entry. + */ + if (!TC_U32_NODE(handle)) { + handle = gen_new_kid(ht, htid); + } else { + handle = htid | TC_U32_NODE(handle); + err = idr_alloc_u32(&ht->handle_idr, NULL, &handle, + handle, GFP_KERNEL); + if (err) + return err; + } + } else { + /* The user did not give us a handle; lets just generate one + * from the table's pool of nodeids. 
+ */ handle = gen_new_kid(ht, htid); + } if (tb[TCA_U32_SEL] == NULL) { NL_SET_ERR_MSG_MOD(extack, "Selector not specified"); diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c index af85a73c4c54..6fdba069f6bf 100644 --- a/net/sched/em_meta.c +++ b/net/sched/em_meta.c @@ -568,7 +568,7 @@ META_COLLECTOR(int_sk_rcvtimeo) *err = -1; return; } - dst->value = sk->sk_rcvtimeo / HZ; + dst->value = READ_ONCE(sk->sk_rcvtimeo) / HZ; } META_COLLECTOR(int_sk_sndtimeo) @@ -579,7 +579,7 @@ META_COLLECTOR(int_sk_sndtimeo) *err = -1; return; } - dst->value = sk->sk_sndtimeo / HZ; + dst->value = READ_ONCE(sk->sk_sndtimeo) / HZ; } META_COLLECTOR(int_sk_sendmsg_off) diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c index 717ae51d94a0..8c9cfff7fd05 100644 --- a/net/sched/sch_taprio.c +++ b/net/sched/sch_taprio.c @@ -1015,6 +1015,11 @@ static const struct nla_policy taprio_tc_policy[TCA_TAPRIO_TC_ENTRY_MAX + 1] = { TC_FP_PREEMPTIBLE), }; +static struct netlink_range_validation_signed taprio_cycle_time_range = { + .min = 0, + .max = INT_MAX, +}; + static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = { [TCA_TAPRIO_ATTR_PRIOMAP] = { .len = sizeof(struct tc_mqprio_qopt) @@ -1023,7 +1028,8 @@ static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = { [TCA_TAPRIO_ATTR_SCHED_BASE_TIME] = { .type = NLA_S64 }, [TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY] = { .type = NLA_NESTED }, [TCA_TAPRIO_ATTR_SCHED_CLOCKID] = { .type = NLA_S32 }, - [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME] = { .type = NLA_S64 }, + [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME] = + NLA_POLICY_FULL_RANGE_SIGNED(NLA_S64, &taprio_cycle_time_range), [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 }, [TCA_TAPRIO_ATTR_FLAGS] = { .type = NLA_U32 }, [TCA_TAPRIO_ATTR_TXTIME_DELAY] = { .type = NLA_U32 }, @@ -1159,6 +1165,11 @@ static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb, return -EINVAL; } + if (cycle < 0 || cycle > INT_MAX) { + NL_SET_ERR_MSG(extack, "'cycle_time' is too big"); + return -EINVAL; + } + new->cycle_time = cycle; } @@ -1347,7 +1358,7 @@ static void setup_txtime(struct taprio_sched *q, struct sched_gate_list *sched, ktime_t base) { struct sched_entry *entry; - u32 interval = 0; + u64 interval = 0; list_for_each_entry(entry, &sched->entries, list) { entry->next_txtime = ktime_add_ns(base, interval); diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index a7f887d91d89..f5834af5fad5 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -378,8 +378,8 @@ static struct sock *smc_sock_alloc(struct net *net, struct socket *sock, sk->sk_state = SMC_INIT; sk->sk_destruct = smc_destruct; sk->sk_protocol = protocol; - WRITE_ONCE(sk->sk_sndbuf, READ_ONCE(net->smc.sysctl_wmem)); - WRITE_ONCE(sk->sk_rcvbuf, READ_ONCE(net->smc.sysctl_rmem)); + WRITE_ONCE(sk->sk_sndbuf, 2 * READ_ONCE(net->smc.sysctl_wmem)); + WRITE_ONCE(sk->sk_rcvbuf, 2 * READ_ONCE(net->smc.sysctl_rmem)); smc = smc_sk(sk); INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work); INIT_WORK(&smc->connect_work, smc_connect_work); @@ -436,24 +436,9 @@ out: return rc; } -static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk, - unsigned long mask) -{ - /* options we don't get control via setsockopt for */ - nsk->sk_type = osk->sk_type; - nsk->sk_sndbuf = osk->sk_sndbuf; - nsk->sk_rcvbuf = osk->sk_rcvbuf; - nsk->sk_sndtimeo = osk->sk_sndtimeo; - nsk->sk_rcvtimeo = osk->sk_rcvtimeo; - nsk->sk_mark = osk->sk_mark; - nsk->sk_priority = osk->sk_priority; - nsk->sk_rcvlowat = osk->sk_rcvlowat; - nsk->sk_bound_dev_if = 
osk->sk_bound_dev_if; - nsk->sk_err = osk->sk_err; - - nsk->sk_flags &= ~mask; - nsk->sk_flags |= osk->sk_flags & mask; -} +/* copy only relevant settings and flags of SOL_SOCKET level from smc to + * clc socket (since smc is not called for these options from net/core) + */ #define SK_FLAGS_SMC_TO_CLC ((1UL << SOCK_URGINLINE) | \ (1UL << SOCK_KEEPOPEN) | \ @@ -470,9 +455,55 @@ static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk, (1UL << SOCK_NOFCS) | \ (1UL << SOCK_FILTER_LOCKED) | \ (1UL << SOCK_TSTAMP_NEW)) -/* copy only relevant settings and flags of SOL_SOCKET level from smc to - * clc socket (since smc is not called for these options from net/core) - */ + +/* if set, use value set by setsockopt() - else use IPv4 or SMC sysctl value */ +static void smc_adjust_sock_bufsizes(struct sock *nsk, struct sock *osk, + unsigned long mask) +{ + struct net *nnet = sock_net(nsk); + + nsk->sk_userlocks = osk->sk_userlocks; + if (osk->sk_userlocks & SOCK_SNDBUF_LOCK) { + nsk->sk_sndbuf = osk->sk_sndbuf; + } else { + if (mask == SK_FLAGS_SMC_TO_CLC) + WRITE_ONCE(nsk->sk_sndbuf, + READ_ONCE(nnet->ipv4.sysctl_tcp_wmem[1])); + else + WRITE_ONCE(nsk->sk_sndbuf, + 2 * READ_ONCE(nnet->smc.sysctl_wmem)); + } + if (osk->sk_userlocks & SOCK_RCVBUF_LOCK) { + nsk->sk_rcvbuf = osk->sk_rcvbuf; + } else { + if (mask == SK_FLAGS_SMC_TO_CLC) + WRITE_ONCE(nsk->sk_rcvbuf, + READ_ONCE(nnet->ipv4.sysctl_tcp_rmem[1])); + else + WRITE_ONCE(nsk->sk_rcvbuf, + 2 * READ_ONCE(nnet->smc.sysctl_rmem)); + } +} + +static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk, + unsigned long mask) +{ + /* options we don't get control via setsockopt for */ + nsk->sk_type = osk->sk_type; + nsk->sk_sndtimeo = osk->sk_sndtimeo; + nsk->sk_rcvtimeo = osk->sk_rcvtimeo; + nsk->sk_mark = READ_ONCE(osk->sk_mark); + nsk->sk_priority = osk->sk_priority; + nsk->sk_rcvlowat = osk->sk_rcvlowat; + nsk->sk_bound_dev_if = osk->sk_bound_dev_if; + nsk->sk_err = osk->sk_err; + + nsk->sk_flags &= ~mask; + nsk->sk_flags |= osk->sk_flags & mask; + + smc_adjust_sock_bufsizes(nsk, osk, mask); +} + static void smc_copy_sock_settings_to_clc(struct smc_sock *smc) { smc_copy_sock_settings(smc->clcsock->sk, &smc->sk, SK_FLAGS_SMC_TO_CLC); @@ -2479,8 +2510,6 @@ static void smc_tcp_listen_work(struct work_struct *work) sock_hold(lsk); /* sock_put in smc_listen_work */ INIT_WORK(&new_smc->smc_listen_work, smc_listen_work); smc_copy_sock_settings_to_smc(new_smc); - new_smc->sk.sk_sndbuf = lsmc->sk.sk_sndbuf; - new_smc->sk.sk_rcvbuf = lsmc->sk.sk_rcvbuf; sock_hold(&new_smc->sk); /* sock_put in passive closing */ if (!queue_work(smc_hs_wq, &new_smc->smc_listen_work)) sock_put(&new_smc->sk); diff --git a/net/smc/smc.h b/net/smc/smc.h index 2eeea4cdc718..1f2b912c43d1 100644 --- a/net/smc/smc.h +++ b/net/smc/smc.h @@ -161,7 +161,7 @@ struct smc_connection { struct smc_buf_desc *sndbuf_desc; /* send buffer descriptor */ struct smc_buf_desc *rmb_desc; /* RMBE descriptor */ - int rmbe_size_short;/* compressed notation */ + int rmbe_size_comp; /* compressed notation */ int rmbe_update_limit; /* lower limit for consumer * cursor update diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c index b9b8b07aa702..c90d9e5dda54 100644 --- a/net/smc/smc_clc.c +++ b/net/smc/smc_clc.c @@ -1007,7 +1007,7 @@ static int smc_clc_send_confirm_accept(struct smc_sock *smc, clc->d0.gid = conn->lgr->smcd->ops->get_local_gid(conn->lgr->smcd); clc->d0.token = conn->rmb_desc->token; - clc->d0.dmbe_size = conn->rmbe_size_short; + clc->d0.dmbe_size = conn->rmbe_size_comp; 
clc->d0.dmbe_idx = 0; memcpy(&clc->d0.linkid, conn->lgr->id, SMC_LGR_ID_SIZE); if (version == SMC_V1) { @@ -1050,7 +1050,7 @@ static int smc_clc_send_confirm_accept(struct smc_sock *smc, clc->r0.qp_mtu = min(link->path_mtu, link->peer_mtu); break; } - clc->r0.rmbe_size = conn->rmbe_size_short; + clc->r0.rmbe_size = conn->rmbe_size_comp; clc->r0.rmb_dma_addr = conn->rmb_desc->is_vm ? cpu_to_be64((uintptr_t)conn->rmb_desc->cpu_addr) : cpu_to_be64((u64)sg_dma_address diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index 3f465faf2b68..6b78075404d7 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -2309,31 +2309,30 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb) struct smc_connection *conn = &smc->conn; struct smc_link_group *lgr = conn->lgr; struct list_head *buf_list; - int bufsize, bufsize_short; + int bufsize, bufsize_comp; struct rw_semaphore *lock; /* lock buffer list */ bool is_dgraded = false; - int sk_buf_size; if (is_rmb) /* use socket recv buffer size (w/o overhead) as start value */ - sk_buf_size = smc->sk.sk_rcvbuf; + bufsize = smc->sk.sk_rcvbuf / 2; else /* use socket send buffer size (w/o overhead) as start value */ - sk_buf_size = smc->sk.sk_sndbuf; + bufsize = smc->sk.sk_sndbuf / 2; - for (bufsize_short = smc_compress_bufsize(sk_buf_size, is_smcd, is_rmb); - bufsize_short >= 0; bufsize_short--) { + for (bufsize_comp = smc_compress_bufsize(bufsize, is_smcd, is_rmb); + bufsize_comp >= 0; bufsize_comp--) { if (is_rmb) { lock = &lgr->rmbs_lock; - buf_list = &lgr->rmbs[bufsize_short]; + buf_list = &lgr->rmbs[bufsize_comp]; } else { lock = &lgr->sndbufs_lock; - buf_list = &lgr->sndbufs[bufsize_short]; + buf_list = &lgr->sndbufs[bufsize_comp]; } - bufsize = smc_uncompress_bufsize(bufsize_short); + bufsize = smc_uncompress_bufsize(bufsize_comp); /* check for reusable slot in the link group */ - buf_desc = smc_buf_get_slot(bufsize_short, lock, buf_list); + buf_desc = smc_buf_get_slot(bufsize_comp, lock, buf_list); if (buf_desc) { buf_desc->is_dma_need_sync = 0; SMC_STAT_RMB_SIZE(smc, is_smcd, is_rmb, bufsize); @@ -2377,8 +2376,8 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb) if (is_rmb) { conn->rmb_desc = buf_desc; - conn->rmbe_size_short = bufsize_short; - smc->sk.sk_rcvbuf = bufsize; + conn->rmbe_size_comp = bufsize_comp; + smc->sk.sk_rcvbuf = bufsize * 2; atomic_set(&conn->bytes_to_rcv, 0); conn->rmbe_update_limit = smc_rmb_wnd_update_limit(buf_desc->len); @@ -2386,7 +2385,7 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb) smc_ism_set_conn(conn); /* map RMB/smcd_dev to conn */ } else { conn->sndbuf_desc = buf_desc; - smc->sk.sk_sndbuf = bufsize; + smc->sk.sk_sndbuf = bufsize * 2; atomic_set(&conn->sndbuf_space, bufsize); } return 0; diff --git a/net/smc/smc_sysctl.c b/net/smc/smc_sysctl.c index b6f79fabb9d3..0b2a957ca5f5 100644 --- a/net/smc/smc_sysctl.c +++ b/net/smc/smc_sysctl.c @@ -21,6 +21,10 @@ static int min_sndbuf = SMC_BUF_MIN_SIZE; static int min_rcvbuf = SMC_BUF_MIN_SIZE; +static int max_sndbuf = INT_MAX / 2; +static int max_rcvbuf = INT_MAX / 2; +static const int net_smc_wmem_init = (64 * 1024); +static const int net_smc_rmem_init = (64 * 1024); static struct ctl_table smc_table[] = { { @@ -53,6 +57,7 @@ static struct ctl_table smc_table[] = { .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &min_sndbuf, + .extra2 = &max_sndbuf, }, { .procname = "rmem", @@ -61,6 +66,7 @@ static struct ctl_table smc_table[] = { .mode = 0644, .proc_handler = 
proc_dointvec_minmax, .extra1 = &min_rcvbuf, + .extra2 = &max_rcvbuf, }, { } }; @@ -88,8 +94,8 @@ int __net_init smc_sysctl_net_init(struct net *net) net->smc.sysctl_autocorking_size = SMC_AUTOCORKING_DEFAULT_SIZE; net->smc.sysctl_smcr_buf_type = SMCR_PHYS_CONT_BUFS; net->smc.sysctl_smcr_testlink_time = SMC_LLC_TESTLINK_DEFAULT_TIME; - WRITE_ONCE(net->smc.sysctl_wmem, READ_ONCE(net->ipv4.sysctl_tcp_wmem[1])); - WRITE_ONCE(net->smc.sysctl_rmem, READ_ONCE(net->ipv4.sysctl_tcp_rmem[1])); + WRITE_ONCE(net->smc.sysctl_wmem, net_smc_wmem_init); + WRITE_ONCE(net->smc.sysctl_rmem, net_smc_rmem_init); return 0; diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index 2021fe557e50..529101eb20bd 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -52,6 +52,8 @@ static LIST_HEAD(tls_device_list); static LIST_HEAD(tls_device_down_list); static DEFINE_SPINLOCK(tls_device_lock); +static struct page *dummy_page; + static void tls_device_free_ctx(struct tls_context *ctx) { if (ctx->tx_conf == TLS_HW) { @@ -312,36 +314,33 @@ static int tls_push_record(struct sock *sk, return tls_push_sg(sk, ctx, offload_ctx->sg_tx_data, 0, flags); } -static int tls_device_record_close(struct sock *sk, - struct tls_context *ctx, - struct tls_record_info *record, - struct page_frag *pfrag, - unsigned char record_type) +static void tls_device_record_close(struct sock *sk, + struct tls_context *ctx, + struct tls_record_info *record, + struct page_frag *pfrag, + unsigned char record_type) { struct tls_prot_info *prot = &ctx->prot_info; - int ret; + struct page_frag dummy_tag_frag; /* append tag * device will fill in the tag, we just need to append a placeholder * use socket memory to improve coalescing (re-using a single buffer * increases frag count) - * if we can't allocate memory now, steal some back from data + * if we can't allocate memory now use the dummy page */ - if (likely(skb_page_frag_refill(prot->tag_size, pfrag, - sk->sk_allocation))) { - ret = 0; - tls_append_frag(record, pfrag, prot->tag_size); - } else { - ret = prot->tag_size; - if (record->len <= prot->overhead_size) - return -ENOMEM; + if (unlikely(pfrag->size - pfrag->offset < prot->tag_size) && + !skb_page_frag_refill(prot->tag_size, pfrag, sk->sk_allocation)) { + dummy_tag_frag.page = dummy_page; + dummy_tag_frag.offset = 0; + pfrag = &dummy_tag_frag; } + tls_append_frag(record, pfrag, prot->tag_size); /* fill prepend */ tls_fill_prepend(ctx, skb_frag_address(&record->frags[0]), record->len - prot->overhead_size, record_type); - return ret; } static int tls_create_new_record(struct tls_offload_context_tx *offload_ctx, @@ -541,18 +540,8 @@ last_record: if (done || record->len >= max_open_record_len || (record->num_frags >= MAX_SKB_FRAGS - 1)) { - rc = tls_device_record_close(sk, tls_ctx, record, - pfrag, record_type); - if (rc) { - if (rc > 0) { - size += rc; - } else { - size = orig_size; - destroy_record(record); - ctx->open_record = NULL; - break; - } - } + tls_device_record_close(sk, tls_ctx, record, + pfrag, record_type); rc = tls_push_record(sk, tls_ctx, @@ -1450,14 +1439,26 @@ int __init tls_device_init(void) { int err; - destruct_wq = alloc_workqueue("ktls_device_destruct", 0, 0); - if (!destruct_wq) + dummy_page = alloc_page(GFP_KERNEL); + if (!dummy_page) return -ENOMEM; + destruct_wq = alloc_workqueue("ktls_device_destruct", 0, 0); + if (!destruct_wq) { + err = -ENOMEM; + goto err_free_dummy; + } + err = register_netdevice_notifier(&tls_dev_notifier); if (err) - destroy_workqueue(destruct_wq); + goto err_destroy_wq; + return 0; 
+ +err_destroy_wq: + destroy_workqueue(destruct_wq); +err_free_dummy: + put_page(dummy_page); return err; } @@ -1466,4 +1467,5 @@ void __exit tls_device_cleanup(void) unregister_netdevice_notifier(&tls_dev_notifier); destroy_workqueue(destruct_wq); clean_acked_data_flush(); + put_page(dummy_page); } diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index b6896126bb92..4a8ee2f6badb 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@ -139,9 +139,6 @@ int tls_push_sg(struct sock *sk, ctx->splicing_pages = true; while (1) { - if (sg_is_last(sg)) - msg.msg_flags = flags; - /* is sending application-limited? */ tcp_rate_check_app_limited(sk); p = sg_page(sg); diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 78585217f61a..86930a8ed012 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -790,7 +790,7 @@ static int unix_set_peek_off(struct sock *sk, int val) if (mutex_lock_interruptible(&u->iolock)) return -EINTR; - sk->sk_peek_off = val; + WRITE_ONCE(sk->sk_peek_off, val); mutex_unlock(&u->iolock); return 0; diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 0da2e6a2a7ea..8bcf8e293308 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -5430,8 +5430,11 @@ nl80211_parse_mbssid_elems(struct wiphy *wiphy, struct nlattr *attrs) if (!wiphy->mbssid_max_interfaces) return ERR_PTR(-EINVAL); - nla_for_each_nested(nl_elems, attrs, rem_elems) + nla_for_each_nested(nl_elems, attrs, rem_elems) { + if (num_elems >= 255) + return ERR_PTR(-EINVAL); num_elems++; + } elems = kzalloc(struct_size(elems, elem, num_elems), GFP_KERNEL); if (!elems) diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 8bf00caf5d29..0cf1ce7b6934 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -657,7 +657,7 @@ static int cfg80211_parse_colocated_ap(const struct cfg80211_bss_ies *ies, ret = cfg80211_calc_short_ssid(ies, &ssid_elem, &s_ssid_tmp); if (ret) - return ret; + return 0; for_each_element_id(elem, WLAN_EID_REDUCED_NEIGHBOR_REPORT, ies->data, ies->len) { diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index 31dca4ecb2c5..10ea85c03147 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c @@ -505,7 +505,7 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs, skb->dev = dev; skb->priority = xs->sk.sk_priority; - skb->mark = xs->sk.sk_mark; + skb->mark = READ_ONCE(xs->sk.sk_mark); skb_shinfo(skb)->destructor_arg = (void *)(long)desc->addr; skb->destructor = xsk_destruct_skb; @@ -994,6 +994,7 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len) err = xp_alloc_tx_descs(xs->pool, xs); if (err) { xp_put_pool(xs->pool); + xs->pool = NULL; sockfd_put(sock); goto out_unlock; } diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index e7617c9959c3..d6b405782b63 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -2250,7 +2250,7 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir, match = xfrm_selector_match(&pol->selector, fl, family); if (match) { - if ((sk->sk_mark & pol->mark.m) != pol->mark.v || + if ((READ_ONCE(sk->sk_mark) & pol->mark.m) != pol->mark.v || pol->if_id != if_id) { pol = NULL; goto out; diff --git a/rust/Makefile b/rust/Makefile index 7c9d9f11aec5..4124bfa01798 100644 --- a/rust/Makefile +++ b/rust/Makefile @@ -257,7 +257,7 @@ bindgen_skip_c_flags := -mno-fp-ret-in-387 -mpreferred-stack-boundary=% \ -fno-partial-inlining -fplugin-arg-arm_ssp_per_task_plugin-% \ -fno-reorder-blocks -fno-allow-store-data-races -fasan-shadow-offset=% \ -fzero-call-used-regs=% 
-fno-stack-clash-protection \ - -fno-inline-functions-called-once \ + -fno-inline-functions-called-once -fsanitize=bounds-strict \ --param=% --param asan-% # Derived from `scripts/Makefile.clang`. diff --git a/rust/bindings/bindings_helper.h b/rust/bindings/bindings_helper.h index 3e601ce2548d..058954961bfc 100644 --- a/rust/bindings/bindings_helper.h +++ b/rust/bindings/bindings_helper.h @@ -13,5 +13,6 @@ #include <linux/sched.h> /* `bindgen` gets confused at certain things. */ +const size_t BINDINGS_ARCH_SLAB_MINALIGN = ARCH_SLAB_MINALIGN; const gfp_t BINDINGS_GFP_KERNEL = GFP_KERNEL; const gfp_t BINDINGS___GFP_ZERO = __GFP_ZERO; diff --git a/rust/kernel/allocator.rs b/rust/kernel/allocator.rs index 397a3dd57a9b..9363b527be66 100644 --- a/rust/kernel/allocator.rs +++ b/rust/kernel/allocator.rs @@ -9,6 +9,36 @@ use crate::bindings; struct KernelAllocator; +/// Calls `krealloc` with a proper size to alloc a new object aligned to `new_layout`'s alignment. +/// +/// # Safety +/// +/// - `ptr` can be either null or a pointer which has been allocated by this allocator. +/// - `new_layout` must have a non-zero size. +unsafe fn krealloc_aligned(ptr: *mut u8, new_layout: Layout, flags: bindings::gfp_t) -> *mut u8 { + // Customized layouts from `Layout::from_size_align()` can have size < align, so pad first. + let layout = new_layout.pad_to_align(); + + let mut size = layout.size(); + + if layout.align() > bindings::BINDINGS_ARCH_SLAB_MINALIGN { + // The alignment requirement exceeds the slab guarantee, thus try to enlarge the size + // to use the "power-of-two" size/alignment guarantee (see comments in `kmalloc()` for + // more information). + // + // Note that `layout.size()` (after padding) is guaranteed to be a multiple of + // `layout.align()`, so `next_power_of_two` gives enough alignment guarantee. + size = size.next_power_of_two(); + } + + // SAFETY: + // - `ptr` is either null or a pointer returned from a previous `k{re}alloc()` by the + // function safety requirement. + // - `size` is greater than 0 since it's either a `layout.size()` (which cannot be zero + // according to the function safety requirement) or a result from `next_power_of_two()`. + unsafe { bindings::krealloc(ptr as *const core::ffi::c_void, size, flags) as *mut u8 } +} + unsafe impl GlobalAlloc for KernelAllocator { unsafe fn alloc(&self, layout: Layout) -> *mut u8 { // `krealloc()` is used instead of `kmalloc()` because the latter is @@ -30,10 +60,20 @@ static ALLOCATOR: KernelAllocator = KernelAllocator; // to extract the object file that has them from the archive. For the moment, // let's generate them ourselves instead. // +// Note: Although these are *safe* functions, they are called by the compiler +// with parameters that obey the same `GlobalAlloc` function safety +// requirements: size and align should form a valid layout, and size is +// greater than 0. +// // Note that `#[no_mangle]` implies exported too, nowadays. #[no_mangle] -fn __rust_alloc(size: usize, _align: usize) -> *mut u8 { - unsafe { bindings::krealloc(core::ptr::null(), size, bindings::GFP_KERNEL) as *mut u8 } +fn __rust_alloc(size: usize, align: usize) -> *mut u8 { + // SAFETY: See assumption above. + let layout = unsafe { Layout::from_size_align_unchecked(size, align) }; + + // SAFETY: `ptr::null_mut()` is null, per assumption above the size of `layout` is greater + // than 0. 
+ unsafe { krealloc_aligned(ptr::null_mut(), layout, bindings::GFP_KERNEL) } } #[no_mangle] @@ -42,23 +82,27 @@ fn __rust_dealloc(ptr: *mut u8, _size: usize, _align: usize) { } #[no_mangle] -fn __rust_realloc(ptr: *mut u8, _old_size: usize, _align: usize, new_size: usize) -> *mut u8 { - unsafe { - bindings::krealloc( - ptr as *const core::ffi::c_void, - new_size, - bindings::GFP_KERNEL, - ) as *mut u8 - } +fn __rust_realloc(ptr: *mut u8, _old_size: usize, align: usize, new_size: usize) -> *mut u8 { + // SAFETY: See assumption above. + let new_layout = unsafe { Layout::from_size_align_unchecked(new_size, align) }; + + // SAFETY: Per assumption above, `ptr` is allocated by `__rust_*` before, and the size of + // `new_layout` is greater than 0. + unsafe { krealloc_aligned(ptr, new_layout, bindings::GFP_KERNEL) } } #[no_mangle] -fn __rust_alloc_zeroed(size: usize, _align: usize) -> *mut u8 { +fn __rust_alloc_zeroed(size: usize, align: usize) -> *mut u8 { + // SAFETY: See assumption above. + let layout = unsafe { Layout::from_size_align_unchecked(size, align) }; + + // SAFETY: `ptr::null_mut()` is null, per assumption above the size of `layout` is greater + // than 0. unsafe { - bindings::krealloc( - core::ptr::null(), - size, + krealloc_aligned( + ptr::null_mut(), + layout, bindings::GFP_KERNEL | bindings::__GFP_ZERO, - ) as *mut u8 + ) } } diff --git a/rust/kernel/sync/arc.rs b/rust/kernel/sync/arc.rs index a89843cacaad..172f563976a9 100644 --- a/rust/kernel/sync/arc.rs +++ b/rust/kernel/sync/arc.rs @@ -243,8 +243,7 @@ impl<T: 'static> ForeignOwnable for Arc<T> { let inner = NonNull::new(ptr as *mut ArcInner<T>).unwrap(); // SAFETY: The safety requirements of `from_foreign` ensure that the object remains alive - // for the lifetime of the returned value. Additionally, the safety requirements of - // `ForeignOwnable::borrow_mut` ensure that no new mutable references are created. + // for the lifetime of the returned value. unsafe { ArcBorrow::new(inner) } } diff --git a/rust/kernel/types.rs b/rust/kernel/types.rs index 1e5380b16ed5..d479f8da8f38 100644 --- a/rust/kernel/types.rs +++ b/rust/kernel/types.rs @@ -35,34 +35,16 @@ pub trait ForeignOwnable: Sized { /// /// `ptr` must have been returned by a previous call to [`ForeignOwnable::into_foreign`] for /// which a previous matching [`ForeignOwnable::from_foreign`] hasn't been called yet. - /// Additionally, all instances (if any) of values returned by [`ForeignOwnable::borrow_mut`] - /// for this object must have been dropped. unsafe fn borrow<'a>(ptr: *const core::ffi::c_void) -> Self::Borrowed<'a>; - /// Mutably borrows a foreign-owned object. - /// - /// # Safety - /// - /// `ptr` must have been returned by a previous call to [`ForeignOwnable::into_foreign`] for - /// which a previous matching [`ForeignOwnable::from_foreign`] hasn't been called yet. - /// Additionally, all instances (if any) of values returned by [`ForeignOwnable::borrow`] and - /// [`ForeignOwnable::borrow_mut`] for this object must have been dropped. - unsafe fn borrow_mut(ptr: *const core::ffi::c_void) -> ScopeGuard<Self, fn(Self)> { - // SAFETY: The safety requirements ensure that `ptr` came from a previous call to - // `into_foreign`. - ScopeGuard::new_with_data(unsafe { Self::from_foreign(ptr) }, |d| { - d.into_foreign(); - }) - } - /// Converts a foreign-owned object back to a Rust-owned one. 
/// /// # Safety /// /// `ptr` must have been returned by a previous call to [`ForeignOwnable::into_foreign`] for /// which a previous matching [`ForeignOwnable::from_foreign`] hasn't been called yet. - /// Additionally, all instances (if any) of values returned by [`ForeignOwnable::borrow`] and - /// [`ForeignOwnable::borrow_mut`] for this object must have been dropped. + /// Additionally, all instances (if any) of values returned by [`ForeignOwnable::borrow`] for + /// this object must have been dropped. unsafe fn from_foreign(ptr: *const core::ffi::c_void) -> Self; } diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c index 16c87938b316..653b92f6d4c8 100644 --- a/scripts/kallsyms.c +++ b/scripts/kallsyms.c @@ -129,6 +129,7 @@ static struct sym_entry *read_symbol(FILE *in, char **buf, size_t *buf_len) ssize_t readlen; struct sym_entry *sym; + errno = 0; readlen = getline(buf, buf_len, in); if (readlen < 0) { if (errno) { diff --git a/security/keys/sysctl.c b/security/keys/sysctl.c index b72b82bb20c6..b348e1679d5d 100644 --- a/security/keys/sysctl.c +++ b/security/keys/sysctl.c @@ -9,7 +9,7 @@ #include <linux/sysctl.h> #include "internal.h" -struct ctl_table key_sysctls[] = { +static struct ctl_table key_sysctls[] = { { .procname = "maxkeys", .data = &key_quota_maxkeys, diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h index cb8ca46213be..1f6d904c6481 100644 --- a/tools/arch/x86/include/asm/cpufeatures.h +++ b/tools/arch/x86/include/asm/cpufeatures.h @@ -14,7 +14,7 @@ * Defines x86 CPU feature bits */ #define NCAPINTS 21 /* N 32-bit words worth of info */ -#define NBUGINTS 1 /* N 32-bit bug flags */ +#define NBUGINTS 2 /* N 32-bit bug flags */ /* * Note: If the comment begins with a quoted string, that string is used diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h index 3aedae61af4f..a00a53e15ab7 100644 --- a/tools/arch/x86/include/asm/msr-index.h +++ b/tools/arch/x86/include/asm/msr-index.h @@ -545,6 +545,7 @@ #define MSR_AMD64_DE_CFG 0xc0011029 #define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT 1 #define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE BIT_ULL(MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT) +#define MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT 9 #define MSR_AMD64_BU_CFG2 0xc001102a #define MSR_AMD64_IBSFETCHCTL 0xc0011030 diff --git a/tools/counter/Makefile b/tools/counter/Makefile index a0f4cab71fe5..b2c2946f44c9 100644 --- a/tools/counter/Makefile +++ b/tools/counter/Makefile @@ -40,7 +40,8 @@ $(OUTPUT)counter_example: $(COUNTER_EXAMPLE) clean: rm -f $(ALL_PROGRAMS) rm -rf $(OUTPUT)include/linux/counter.h - rmdir -p $(OUTPUT)include/linux + rm -df $(OUTPUT)include/linux + rm -df $(OUTPUT)include find $(or $(OUTPUT),.) 
-name '*.o' -delete -o -name '\.*.d' -delete install: $(ALL_PROGRAMS) diff --git a/tools/hv/vmbus_testing b/tools/hv/vmbus_testing index e7212903dd1d..4467979d8f69 100755 --- a/tools/hv/vmbus_testing +++ b/tools/hv/vmbus_testing @@ -164,7 +164,7 @@ def recursive_file_lookup(path, file_map): def get_all_devices_test_status(file_map): for device in file_map: - if (get_test_state(locate_state(device, file_map)) is 1): + if (get_test_state(locate_state(device, file_map)) == 1): print("Testing = ON for: {}" .format(device.split("/")[5])) else: @@ -203,7 +203,7 @@ def write_test_files(path, value): def set_test_state(state_path, state_value, quiet): write_test_files(state_path, state_value) - if (get_test_state(state_path) is 1): + if (get_test_state(state_path) == 1): if (not quiet): print("Testing = ON for device: {}" .format(state_path.split("/")[5])) diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c index 2e1caabecb18..2d51fa8da9e8 100644 --- a/tools/objtool/arch/x86/decode.c +++ b/tools/objtool/arch/x86/decode.c @@ -824,5 +824,8 @@ bool arch_is_retpoline(struct symbol *sym) bool arch_is_rethunk(struct symbol *sym) { - return !strcmp(sym->name, "__x86_return_thunk"); + return !strcmp(sym->name, "__x86_return_thunk") || + !strcmp(sym->name, "srso_untrain_ret") || + !strcmp(sym->name, "srso_safe_ret") || + !strcmp(sym->name, "__ret"); } diff --git a/tools/perf/arch/arm64/util/pmu.c b/tools/perf/arch/arm64/util/pmu.c index 561de0cb6b95..512a8f13c4de 100644 --- a/tools/perf/arch/arm64/util/pmu.c +++ b/tools/perf/arch/arm64/util/pmu.c @@ -54,10 +54,11 @@ double perf_pmu__cpu_slots_per_cycle(void) perf_pmu__pathname_scnprintf(path, sizeof(path), pmu->name, "caps/slots"); /* - * The value of slots is not greater than 32 bits, but sysfs__read_int - * can't read value with 0x prefix, so use sysfs__read_ull instead. + * The value of slots is not greater than 32 bits, but + * filename__read_int can't read value with 0x prefix, + * so use filename__read_ull instead. */ - sysfs__read_ull(path, &slots); + filename__read_ull(path, &slots); } return slots ? 
(double)slots : NAN; diff --git a/tools/perf/arch/powerpc/util/skip-callchain-idx.c b/tools/perf/arch/powerpc/util/skip-callchain-idx.c index b7223feec770..5f3edb3004d8 100644 --- a/tools/perf/arch/powerpc/util/skip-callchain-idx.c +++ b/tools/perf/arch/powerpc/util/skip-callchain-idx.c @@ -250,6 +250,7 @@ int arch_skip_callchain_idx(struct thread *thread, struct ip_callchain *chain) if (!chain || chain->nr < 3) return skip_slot; + addr_location__init(&al); ip = chain->ips[1]; thread__find_symbol(thread, PERF_RECORD_MISC_USER, ip, &al); @@ -259,6 +260,7 @@ int arch_skip_callchain_idx(struct thread *thread, struct ip_callchain *chain) if (!dso) { pr_debug("%" PRIx64 " dso is NULL\n", ip); + addr_location__exit(&al); return skip_slot; } @@ -279,5 +281,7 @@ int arch_skip_callchain_idx(struct thread *thread, struct ip_callchain *chain) */ skip_slot = 3; } + + addr_location__exit(&al); return skip_slot; } diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c index b2f82847e4c3..658fb9599d95 100644 --- a/tools/perf/tests/parse-events.c +++ b/tools/perf/tests/parse-events.c @@ -1631,6 +1631,16 @@ static bool test__pmu_cpu_valid(void) return !!perf_pmus__find("cpu"); } +static bool test__pmu_cpu_event_valid(void) +{ + struct perf_pmu *pmu = perf_pmus__find("cpu"); + + if (!pmu) + return false; + + return perf_pmu__has_format(pmu, "event"); +} + static bool test__intel_pt_valid(void) { return !!perf_pmus__find("intel_pt"); @@ -2179,7 +2189,7 @@ static const struct evlist_test test__events_pmu[] = { }, { .name = "cpu/name='COMPLEX_CYCLES_NAME:orig=cycles,desc=chip-clock-ticks',period=0x1,event=0x2/ukp", - .valid = test__pmu_cpu_valid, + .valid = test__pmu_cpu_event_valid, .check = test__checkevent_complex_name, /* 3 */ }, diff --git a/tools/perf/tests/shell/test_uprobe_from_different_cu.sh b/tools/perf/tests/shell/test_uprobe_from_different_cu.sh index 00d2e0e2e0c2..319f36ebb9a4 100755 --- a/tools/perf/tests/shell/test_uprobe_from_different_cu.sh +++ b/tools/perf/tests/shell/test_uprobe_from_different_cu.sh @@ -4,6 +4,12 @@ set -e +# skip if there's no gcc +if ! 
[ -x "$(command -v gcc)" ]; then + echo "failed: no gcc compiler" + exit 2 +fi + temp_dir=$(mktemp -d /tmp/perf-uprobe-different-cu-sh.XXXXXXXXXX) cleanup() @@ -11,7 +17,7 @@ cleanup() trap - EXIT TERM INT if [[ "${temp_dir}" =~ ^/tmp/perf-uprobe-different-cu-sh.*$ ]]; then echo "--- Cleaning up ---" - perf probe -x ${temp_dir}/testfile -d foo + perf probe -x ${temp_dir}/testfile -d foo || true rm -f "${temp_dir}/"* rmdir "${temp_dir}" fi diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index 4e62843d51b7..f4cb41ee23cd 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c @@ -45,7 +45,6 @@ static void __machine__remove_thread(struct machine *machine, struct thread_rb_node *nd, struct thread *th, bool lock); -static int append_inlines(struct callchain_cursor *cursor, struct map_symbol *ms, u64 ip); static struct dso *machine__kernel_dso(struct machine *machine) { @@ -2385,10 +2384,6 @@ static int add_callchain_ip(struct thread *thread, ms.maps = maps__get(al.maps); ms.map = map__get(al.map); ms.sym = al.sym; - - if (!branch && append_inlines(cursor, &ms, ip) == 0) - goto out; - srcline = callchain_srcline(&ms, al.addr); err = callchain_cursor_append(cursor, ip, &ms, branch, flags, nr_loop_iter, diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index acde097e327c..c9ec0cafb69d 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -2100,16 +2100,16 @@ __weak int arch_evlist__cmp(const struct evsel *lhs, const struct evsel *rhs) return lhs->core.idx - rhs->core.idx; } -static int evlist__cmp(void *state, const struct list_head *l, const struct list_head *r) +static int evlist__cmp(void *_fg_idx, const struct list_head *l, const struct list_head *r) { const struct perf_evsel *lhs_core = container_of(l, struct perf_evsel, node); const struct evsel *lhs = container_of(lhs_core, struct evsel, core); const struct perf_evsel *rhs_core = container_of(r, struct perf_evsel, node); const struct evsel *rhs = container_of(rhs_core, struct evsel, core); - int *leader_idx = state; - int lhs_leader_idx = *leader_idx, rhs_leader_idx = *leader_idx, ret; + int *force_grouped_idx = _fg_idx; + int lhs_sort_idx, rhs_sort_idx, ret; const char *lhs_pmu_name, *rhs_pmu_name; - bool lhs_has_group = false, rhs_has_group = false; + bool lhs_has_group, rhs_has_group; /* * First sort by grouping/leader. Read the leader idx only if the evsel @@ -2121,15 +2121,25 @@ static int evlist__cmp(void *state, const struct list_head *l, const struct list */ if (lhs_core->leader != lhs_core || lhs_core->nr_members > 1) { lhs_has_group = true; - lhs_leader_idx = lhs_core->leader->idx; + lhs_sort_idx = lhs_core->leader->idx; + } else { + lhs_has_group = false; + lhs_sort_idx = *force_grouped_idx != -1 && arch_evsel__must_be_in_group(lhs) + ? *force_grouped_idx + : lhs_core->idx; } if (rhs_core->leader != rhs_core || rhs_core->nr_members > 1) { rhs_has_group = true; - rhs_leader_idx = rhs_core->leader->idx; + rhs_sort_idx = rhs_core->leader->idx; + } else { + rhs_has_group = false; + rhs_sort_idx = *force_grouped_idx != -1 && arch_evsel__must_be_in_group(rhs) + ? *force_grouped_idx + : rhs_core->idx; } - if (lhs_leader_idx != rhs_leader_idx) - return lhs_leader_idx - rhs_leader_idx; + if (lhs_sort_idx != rhs_sort_idx) + return lhs_sort_idx - rhs_sort_idx; /* Group by PMU if there is a group. Groups can't span PMUs. 
*/ if (lhs_has_group && rhs_has_group) { @@ -2146,10 +2156,10 @@ static int evlist__cmp(void *state, const struct list_head *l, const struct list static int parse_events__sort_events_and_fix_groups(struct list_head *list) { - int idx = 0, unsorted_idx = -1; + int idx = 0, force_grouped_idx = -1; struct evsel *pos, *cur_leader = NULL; struct perf_evsel *cur_leaders_grp = NULL; - bool idx_changed = false; + bool idx_changed = false, cur_leader_force_grouped = false; int orig_num_leaders = 0, num_leaders = 0; int ret; @@ -2174,12 +2184,14 @@ static int parse_events__sort_events_and_fix_groups(struct list_head *list) */ pos->core.idx = idx++; - if (unsorted_idx == -1 && pos == pos_leader && pos->core.nr_members < 2) - unsorted_idx = pos->core.idx; + /* Remember an index to sort all forced grouped events together to. */ + if (force_grouped_idx == -1 && pos == pos_leader && pos->core.nr_members < 2 && + arch_evsel__must_be_in_group(pos)) + force_grouped_idx = pos->core.idx; } /* Sort events. */ - list_sort(&unsorted_idx, list, evlist__cmp); + list_sort(&force_grouped_idx, list, evlist__cmp); /* * Recompute groups, splitting for PMUs and adding groups for events @@ -2189,8 +2201,9 @@ static int parse_events__sort_events_and_fix_groups(struct list_head *list) list_for_each_entry(pos, list, core.node) { const struct evsel *pos_leader = evsel__leader(pos); const char *pos_pmu_name = pos->group_pmu_name; - const char *cur_leader_pmu_name, *pos_leader_pmu_name; - bool force_grouped = arch_evsel__must_be_in_group(pos); + const char *cur_leader_pmu_name; + bool pos_force_grouped = force_grouped_idx != -1 && + arch_evsel__must_be_in_group(pos); /* Reset index and nr_members. */ if (pos->core.idx != idx) @@ -2206,7 +2219,8 @@ static int parse_events__sort_events_and_fix_groups(struct list_head *list) cur_leader = pos; cur_leader_pmu_name = cur_leader->group_pmu_name; - if ((cur_leaders_grp != pos->core.leader && !force_grouped) || + if ((cur_leaders_grp != pos->core.leader && + (!pos_force_grouped || !cur_leader_force_grouped)) || strcmp(cur_leader_pmu_name, pos_pmu_name)) { /* Event is for a different group/PMU than last. */ cur_leader = pos; @@ -2216,14 +2230,14 @@ static int parse_events__sort_events_and_fix_groups(struct list_head *list) * group. */ cur_leaders_grp = pos->core.leader; - } - pos_leader_pmu_name = pos_leader->group_pmu_name; - if (strcmp(pos_leader_pmu_name, pos_pmu_name) || force_grouped) { /* - * Event's PMU differs from its leader's. Groups can't - * span PMUs, so update leader from the group/PMU - * tracker. + * Avoid forcing events into groups with events that + * don't need to be in the group. */ + cur_leader_force_grouped = pos_force_grouped; + } + if (pos_leader != cur_leader) { + /* The leader changed so update it. 
*/ evsel__set_leader(pos, cur_leader); } } diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c index 7f984a7f16ca..28380e7aa8d0 100644 --- a/tools/perf/util/pmu.c +++ b/tools/perf/util/pmu.c @@ -1440,6 +1440,17 @@ void perf_pmu__del_formats(struct list_head *formats) } } +bool perf_pmu__has_format(const struct perf_pmu *pmu, const char *name) +{ + struct perf_pmu_format *format; + + list_for_each_entry(format, &pmu->format, list) { + if (!strcmp(format->name, name)) + return true; + } + return false; +} + bool is_pmu_core(const char *name) { return !strcmp(name, "cpu") || !strcmp(name, "cpum_cf") || is_sysfs_pmu_core(name); diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h index 203b92860e3c..6b414cecbad2 100644 --- a/tools/perf/util/pmu.h +++ b/tools/perf/util/pmu.h @@ -234,6 +234,7 @@ int perf_pmu__new_format(struct list_head *list, char *name, void perf_pmu__set_format(unsigned long *bits, long from, long to); int perf_pmu__format_parse(int dirfd, struct list_head *head); void perf_pmu__del_formats(struct list_head *formats); +bool perf_pmu__has_format(const struct perf_pmu *pmu, const char *name); bool is_pmu_core(const char *name); bool perf_pmu__supports_legacy_cache(const struct perf_pmu *pmu); diff --git a/tools/perf/util/pmus.c b/tools/perf/util/pmus.c index 3cd9de42139e..c58ba9fb6a36 100644 --- a/tools/perf/util/pmus.c +++ b/tools/perf/util/pmus.c @@ -152,16 +152,14 @@ static void pmu_read_sysfs(bool core_only) } closedir(dir); - if (core_only) { - if (!list_empty(&core_pmus)) - read_sysfs_core_pmus = true; - else { - if (perf_pmu__create_placeholder_core_pmu(&core_pmus)) - read_sysfs_core_pmus = true; - } - } else { + if (list_empty(&core_pmus)) { + if (!perf_pmu__create_placeholder_core_pmu(&core_pmus)) + pr_err("Failure to set up any core PMUs\n"); + } + if (!list_empty(&core_pmus)) { read_sysfs_core_pmus = true; - read_sysfs_all_pmus = true; + if (!core_only) + read_sysfs_all_pmus = true; } } diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c index 7329b3340f88..d45d5dcb0e2b 100644 --- a/tools/perf/util/stat-display.c +++ b/tools/perf/util/stat-display.c @@ -931,6 +931,11 @@ static bool should_skip_zero_counter(struct perf_stat_config *config, */ if (config->aggr_mode == AGGR_THREAD && config->system_wide) return true; + + /* Tool events have the software PMU but are only gathered on 1. */ + if (evsel__is_tool(counter)) + return true; + /* * Skip value 0 when it's an uncore event and the given aggr id * does not belong to the PMU cpumask. 
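The parse-events change a few hunks above replaces the single leader-index sort key with a per-event sort index: events in a real group sort by their leader's index, while ungrouped events that the architecture forces into a group all collapse onto one remembered index so they end up adjacent after sorting. Below is a minimal, out-of-tree sketch of that two-level key, using a plain qsort over an array instead of the kernel's list_sort and hypothetical stand-in fields (idx, leader_idx, grouped, forced) rather than perf's evsel types.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical, simplified stand-ins for perf's evsel fields. */
struct ev {
        int idx;        /* position in the original list */
        int leader_idx; /* equals idx when the event has no group */
        int grouped;    /* non-zero when part of a real group */
        int forced;     /* architecture insists this event be grouped */
};

/* Shared context, playing the role of the list_sort() priv pointer. */
static int force_grouped_idx = -1;

static int sort_idx(const struct ev *e)
{
        if (e->grouped)
                return e->leader_idx;
        if (force_grouped_idx != -1 && e->forced)
                return force_grouped_idx;
        return e->idx;
}

static int cmp_ev(const void *a, const void *b)
{
        const struct ev *l = a, *r = b;
        int d = sort_idx(l) - sort_idx(r);

        /* Simplified tie-break: keep original order. */
        return d ? d : l->idx - r->idx;
}

int main(void)
{
        struct ev evs[] = {
                { .idx = 0, .leader_idx = 0, .grouped = 1 },
                { .idx = 1, .leader_idx = 0, .grouped = 1 },
                { .idx = 2, .leader_idx = 2, .forced = 1 },
                { .idx = 3, .leader_idx = 3 },
                { .idx = 4, .leader_idx = 4, .forced = 1 },
        };
        size_t n = sizeof(evs) / sizeof(evs[0]);

        /* First forced, otherwise ungrouped event anchors the forced group. */
        for (size_t i = 0; i < n; i++)
                if (force_grouped_idx == -1 && !evs[i].grouped && evs[i].forced)
                        force_grouped_idx = evs[i].idx;

        qsort(evs, n, sizeof(evs[0]), cmp_ev);

        for (size_t i = 0; i < n; i++)
                printf("idx=%d sort_idx=%d\n", evs[i].idx, sort_idx(&evs[i]));
        return 0;
}

With this key the two forced events (idx 2 and 4 in the example) land next to each other, which is what lets a later regrouping pass fold them into one group; the real comparator additionally compares PMU names and falls back to the architecture-specific ordering rather than the simple idx tie-break used here.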
diff --git a/tools/testing/radix-tree/regression1.c b/tools/testing/radix-tree/regression1.c index a61c7bcbc72d..63f468bf8245 100644 --- a/tools/testing/radix-tree/regression1.c +++ b/tools/testing/radix-tree/regression1.c @@ -177,7 +177,7 @@ void regression1_test(void) nr_threads = 2; pthread_barrier_init(&worker_barrier, NULL, nr_threads); - threads = malloc(nr_threads * sizeof(pthread_t *)); + threads = malloc(nr_threads * sizeof(*threads)); for (i = 0; i < nr_threads; i++) { arg = i; diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c index b4f6f3a50ae5..5674a9d0cacf 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c @@ -869,6 +869,77 @@ static void test_msg_redir_to_listening(struct test_sockmap_listen *skel, xbpf_prog_detach2(verdict, sock_map, BPF_SK_MSG_VERDICT); } +static void redir_partial(int family, int sotype, int sock_map, int parser_map) +{ + int s, c0, c1, p0, p1; + int err, n, key, value; + char buf[] = "abc"; + + key = 0; + value = sizeof(buf) - 1; + err = xbpf_map_update_elem(parser_map, &key, &value, 0); + if (err) + return; + + s = socket_loopback(family, sotype | SOCK_NONBLOCK); + if (s < 0) + goto clean_parser_map; + + err = create_socket_pairs(s, family, sotype, &c0, &c1, &p0, &p1); + if (err) + goto close_srv; + + err = add_to_sockmap(sock_map, p0, p1); + if (err) + goto close; + + n = xsend(c1, buf, sizeof(buf), 0); + if (n < sizeof(buf)) + FAIL("incomplete write"); + + n = xrecv_nonblock(c0, buf, sizeof(buf), 0); + if (n != sizeof(buf) - 1) + FAIL("expect %zu, received %d", sizeof(buf) - 1, n); + +close: + xclose(c0); + xclose(p0); + xclose(c1); + xclose(p1); +close_srv: + xclose(s); + +clean_parser_map: + key = 0; + value = 0; + xbpf_map_update_elem(parser_map, &key, &value, 0); +} + +static void test_skb_redir_partial(struct test_sockmap_listen *skel, + struct bpf_map *inner_map, int family, + int sotype) +{ + int verdict = bpf_program__fd(skel->progs.prog_stream_verdict); + int parser = bpf_program__fd(skel->progs.prog_stream_parser); + int parser_map = bpf_map__fd(skel->maps.parser_map); + int sock_map = bpf_map__fd(inner_map); + int err; + + err = xbpf_prog_attach(parser, sock_map, BPF_SK_SKB_STREAM_PARSER, 0); + if (err) + return; + + err = xbpf_prog_attach(verdict, sock_map, BPF_SK_SKB_STREAM_VERDICT, 0); + if (err) + goto detach; + + redir_partial(family, sotype, sock_map, parser_map); + + xbpf_prog_detach2(verdict, sock_map, BPF_SK_SKB_STREAM_VERDICT); +detach: + xbpf_prog_detach2(parser, sock_map, BPF_SK_SKB_STREAM_PARSER); +} + static void test_reuseport_select_listening(int family, int sotype, int sock_map, int verd_map, int reuseport_prog) @@ -1243,6 +1314,7 @@ static void test_redir(struct test_sockmap_listen *skel, struct bpf_map *map, } tests[] = { TEST(test_skb_redir_to_connected), TEST(test_skb_redir_to_listening), + TEST(test_skb_redir_partial), TEST(test_msg_redir_to_connected), TEST(test_msg_redir_to_listening), }; @@ -1432,7 +1504,7 @@ static void vsock_unix_redir_connectible(int sock_mapfd, int verd_mapfd, if (n < 1) goto out; - n = recv(mode == REDIR_INGRESS ? u0 : u1, &b, sizeof(b), MSG_DONTWAIT); + n = xrecv_nonblock(mode == REDIR_INGRESS ? 
u0 : u1, &b, sizeof(b), 0); if (n < 0) FAIL("%s: recv() err, errno=%d", log_prefix, errno); if (n == 0) diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_listen.c b/tools/testing/selftests/bpf/progs/test_sockmap_listen.c index 325c9f193432..464d35bd57c7 100644 --- a/tools/testing/selftests/bpf/progs/test_sockmap_listen.c +++ b/tools/testing/selftests/bpf/progs/test_sockmap_listen.c @@ -28,12 +28,26 @@ struct { __type(value, unsigned int); } verdict_map SEC(".maps"); +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, int); + __type(value, int); +} parser_map SEC(".maps"); + bool test_sockmap = false; /* toggled by user-space */ bool test_ingress = false; /* toggled by user-space */ SEC("sk_skb/stream_parser") int prog_stream_parser(struct __sk_buff *skb) { + int *value; + __u32 key = 0; + + value = bpf_map_lookup_elem(&parser_map, &key); + if (value && *value) + return *value; + return skb->len; } diff --git a/tools/testing/selftests/cgroup/test_kmem.c b/tools/testing/selftests/cgroup/test_kmem.c index 258ddc565deb..1b2cec9d18a4 100644 --- a/tools/testing/selftests/cgroup/test_kmem.c +++ b/tools/testing/selftests/cgroup/test_kmem.c @@ -70,6 +70,10 @@ static int test_kmem_basic(const char *root) goto cleanup; cg_write(cg, "memory.high", "1M"); + + /* wait for RCU freeing */ + sleep(1); + slab1 = cg_read_key_long(cg, "memory.stat", "slab "); if (slab1 <= 0) goto cleanup; diff --git a/tools/testing/selftests/mm/ksm_tests.c b/tools/testing/selftests/mm/ksm_tests.c index 435acebdc325..380b691d3eb9 100644 --- a/tools/testing/selftests/mm/ksm_tests.c +++ b/tools/testing/selftests/mm/ksm_tests.c @@ -831,6 +831,7 @@ int main(int argc, char *argv[]) printf("Size must be greater than 0\n"); return KSFT_FAIL; } + break; case 't': { int tmp = atoi(optarg); diff --git a/tools/testing/selftests/net/fib_nexthops.sh b/tools/testing/selftests/net/fib_nexthops.sh index 0f5e88c8f4ff..df8d90b51867 100755 --- a/tools/testing/selftests/net/fib_nexthops.sh +++ b/tools/testing/selftests/net/fib_nexthops.sh @@ -1981,6 +1981,11 @@ basic() run_cmd "$IP link set dev lo up" + # Dump should not loop endlessly when maximum nexthop ID is configured. + run_cmd "$IP nexthop add id $((2**32-1)) blackhole" + run_cmd "timeout 5 $IP nexthop" + log_test $? 0 "Maximum nexthop ID dump" + # # groups # @@ -2201,6 +2206,11 @@ basic_res() run_cmd "$IP nexthop bucket list fdb" log_test $? 255 "Dump all nexthop buckets with invalid 'fdb' keyword" + # Dump should not loop endlessly when maximum nexthop ID is configured. + run_cmd "$IP nexthop add id $((2**32-1)) group 1/2 type resilient buckets 4" + run_cmd "timeout 5 $IP nexthop bucket" + log_test $? 0 "Maximum nexthop ID dump" + # # resilient nexthop buckets get requests # diff --git a/tools/testing/selftests/net/forwarding/bridge_mdb.sh b/tools/testing/selftests/net/forwarding/bridge_mdb.sh index ae3f9462a2b6..d0c6c499d5da 100755 --- a/tools/testing/selftests/net/forwarding/bridge_mdb.sh +++ b/tools/testing/selftests/net/forwarding/bridge_mdb.sh @@ -617,7 +617,7 @@ __cfg_test_port_ip_sg() grep -q "permanent" check_err $? "Entry not added as \"permanent\" when should" bridge -d -s mdb show dev br0 vid 10 | grep "$grp_key" | \ - grep -q "0.00" + grep -q " 0.00" check_err $? "\"permanent\" entry has a pending group timer" bridge mdb del dev br0 port $swp1 $grp_key vid 10 @@ -626,7 +626,7 @@ __cfg_test_port_ip_sg() grep -q "temp" check_err $? 
"Entry not added as \"temp\" when should" bridge -d -s mdb show dev br0 vid 10 | grep "$grp_key" | \ - grep -q "0.00" + grep -q " 0.00" check_fail $? "\"temp\" entry has an unpending group timer" bridge mdb del dev br0 port $swp1 $grp_key vid 10 @@ -659,7 +659,7 @@ __cfg_test_port_ip_sg() grep -q "permanent" check_err $? "Entry not marked as \"permanent\" after replace" bridge -d -s mdb show dev br0 vid 10 | grep "$grp_key" | \ - grep -q "0.00" + grep -q " 0.00" check_err $? "Entry has a pending group timer after replace" bridge mdb replace dev br0 port $swp1 $grp_key vid 10 temp @@ -667,7 +667,7 @@ __cfg_test_port_ip_sg() grep -q "temp" check_err $? "Entry not marked as \"temp\" after replace" bridge -d -s mdb show dev br0 vid 10 | grep "$grp_key" | \ - grep -q "0.00" + grep -q " 0.00" check_fail $? "Entry has an unpending group timer after replace" bridge mdb del dev br0 port $swp1 $grp_key vid 10 @@ -850,6 +850,7 @@ cfg_test() __fwd_test_host_ip() { local grp=$1; shift + local dmac=$1; shift local src=$1; shift local mode=$1; shift local name @@ -872,27 +873,27 @@ __fwd_test_host_ip() # Packet should only be flooded to multicast router ports when there is # no matching MDB entry. The bridge is not configured as a multicast # router port. - $MZ $mode $h1.10 -c 1 -p 128 -A $src -B $grp -t udp -q + $MZ $mode $h1.10 -a own -b $dmac -c 1 -p 128 -A $src -B $grp -t udp -q tc_check_packets "dev br0 ingress" 1 0 check_err $? "Packet locally received after flood" # Install a regular port group entry and expect the packet to not be # locally received. bridge mdb add dev br0 port $swp2 grp $grp temp vid 10 - $MZ $mode $h1.10 -c 1 -p 128 -A $src -B $grp -t udp -q + $MZ $mode $h1.10 -a own -b $dmac -c 1 -p 128 -A $src -B $grp -t udp -q tc_check_packets "dev br0 ingress" 1 0 check_err $? "Packet locally received after installing a regular entry" # Add a host entry and expect the packet to be locally received. bridge mdb add dev br0 port br0 grp $grp temp vid 10 - $MZ $mode $h1.10 -c 1 -p 128 -A $src -B $grp -t udp -q + $MZ $mode $h1.10 -a own -b $dmac -c 1 -p 128 -A $src -B $grp -t udp -q tc_check_packets "dev br0 ingress" 1 1 check_err $? "Packet not locally received after adding a host entry" # Remove the host entry and expect the packet to not be locally # received. bridge mdb del dev br0 port br0 grp $grp vid 10 - $MZ $mode $h1.10 -c 1 -p 128 -A $src -B $grp -t udp -q + $MZ $mode $h1.10 -a own -b $dmac -c 1 -p 128 -A $src -B $grp -t udp -q tc_check_packets "dev br0 ingress" 1 1 check_err $? "Packet locally received after removing a host entry" @@ -905,8 +906,8 @@ __fwd_test_host_ip() fwd_test_host_ip() { - __fwd_test_host_ip "239.1.1.1" "192.0.2.1" "-4" - __fwd_test_host_ip "ff0e::1" "2001:db8:1::1" "-6" + __fwd_test_host_ip "239.1.1.1" "01:00:5e:01:01:01" "192.0.2.1" "-4" + __fwd_test_host_ip "ff0e::1" "33:33:00:00:00:01" "2001:db8:1::1" "-6" } fwd_test_host_l2() @@ -966,6 +967,7 @@ fwd_test_host() __fwd_test_port_ip() { local grp=$1; shift + local dmac=$1; shift local valid_src=$1; shift local invalid_src=$1; shift local mode=$1; shift @@ -999,43 +1001,43 @@ __fwd_test_port_ip() vlan_ethtype $eth_type vlan_id 10 dst_ip $grp \ src_ip $invalid_src action drop - $MZ $mode $h1.10 -c 1 -p 128 -A $valid_src -B $grp -t udp -q + $MZ $mode $h1.10 -a own -b $dmac -c 1 -p 128 -A $valid_src -B $grp -t udp -q tc_check_packets "dev $h2 ingress" 1 0 check_err $? 
"Packet from valid source received on H2 before adding entry" - $MZ $mode $h1.10 -c 1 -p 128 -A $invalid_src -B $grp -t udp -q + $MZ $mode $h1.10 -a own -b $dmac -c 1 -p 128 -A $invalid_src -B $grp -t udp -q tc_check_packets "dev $h2 ingress" 2 0 check_err $? "Packet from invalid source received on H2 before adding entry" bridge mdb add dev br0 port $swp2 grp $grp vid 10 \ filter_mode $filter_mode source_list $src_list - $MZ $mode $h1.10 -c 1 -p 128 -A $valid_src -B $grp -t udp -q + $MZ $mode $h1.10 -a own -b $dmac -c 1 -p 128 -A $valid_src -B $grp -t udp -q tc_check_packets "dev $h2 ingress" 1 1 check_err $? "Packet from valid source not received on H2 after adding entry" - $MZ $mode $h1.10 -c 1 -p 128 -A $invalid_src -B $grp -t udp -q + $MZ $mode $h1.10 -a own -b $dmac -c 1 -p 128 -A $invalid_src -B $grp -t udp -q tc_check_packets "dev $h2 ingress" 2 0 check_err $? "Packet from invalid source received on H2 after adding entry" bridge mdb replace dev br0 port $swp2 grp $grp vid 10 \ filter_mode exclude - $MZ $mode $h1.10 -c 1 -p 128 -A $valid_src -B $grp -t udp -q + $MZ $mode $h1.10 -a own -b $dmac -c 1 -p 128 -A $valid_src -B $grp -t udp -q tc_check_packets "dev $h2 ingress" 1 2 check_err $? "Packet from valid source not received on H2 after allowing all sources" - $MZ $mode $h1.10 -c 1 -p 128 -A $invalid_src -B $grp -t udp -q + $MZ $mode $h1.10 -a own -b $dmac -c 1 -p 128 -A $invalid_src -B $grp -t udp -q tc_check_packets "dev $h2 ingress" 2 1 check_err $? "Packet from invalid source not received on H2 after allowing all sources" bridge mdb del dev br0 port $swp2 grp $grp vid 10 - $MZ $mode $h1.10 -c 1 -p 128 -A $valid_src -B $grp -t udp -q + $MZ $mode $h1.10 -a own -b $dmac -c 1 -p 128 -A $valid_src -B $grp -t udp -q tc_check_packets "dev $h2 ingress" 1 2 check_err $? "Packet from valid source received on H2 after deleting entry" - $MZ $mode $h1.10 -c 1 -p 128 -A $invalid_src -B $grp -t udp -q + $MZ $mode $h1.10 -a own -b $dmac -c 1 -p 128 -A $invalid_src -B $grp -t udp -q tc_check_packets "dev $h2 ingress" 2 1 check_err $? 
"Packet from invalid source received on H2 after deleting entry" @@ -1047,11 +1049,11 @@ __fwd_test_port_ip() fwd_test_port_ip() { - __fwd_test_port_ip "239.1.1.1" "192.0.2.1" "192.0.2.2" "-4" "exclude" - __fwd_test_port_ip "ff0e::1" "2001:db8:1::1" "2001:db8:1::2" "-6" \ + __fwd_test_port_ip "239.1.1.1" "01:00:5e:01:01:01" "192.0.2.1" "192.0.2.2" "-4" "exclude" + __fwd_test_port_ip "ff0e::1" "33:33:00:00:00:01" "2001:db8:1::1" "2001:db8:1::2" "-6" \ "exclude" - __fwd_test_port_ip "239.1.1.1" "192.0.2.1" "192.0.2.2" "-4" "include" - __fwd_test_port_ip "ff0e::1" "2001:db8:1::1" "2001:db8:1::2" "-6" \ + __fwd_test_port_ip "239.1.1.1" "01:00:5e:01:01:01" "192.0.2.1" "192.0.2.2" "-4" "include" + __fwd_test_port_ip "ff0e::1" "33:33:00:00:00:01" "2001:db8:1::1" "2001:db8:1::2" "-6" \ "include" } @@ -1127,7 +1129,7 @@ ctrl_igmpv3_is_in_test() filter_mode include source_list 192.0.2.1 # IS_IN ( 192.0.2.2 ) - $MZ $h1.10 -c 1 -A 192.0.2.1 -B 239.1.1.1 \ + $MZ $h1.10 -c 1 -a own -b 01:00:5e:01:01:01 -A 192.0.2.1 -B 239.1.1.1 \ -t ip proto=2,p=$(igmpv3_is_in_get 239.1.1.1 192.0.2.2) -q bridge -d mdb show dev br0 vid 10 | grep 239.1.1.1 | grep -q 192.0.2.2 @@ -1140,7 +1142,7 @@ ctrl_igmpv3_is_in_test() filter_mode include source_list 192.0.2.1 # IS_IN ( 192.0.2.2 ) - $MZ $h1.10 -c 1 -A 192.0.2.1 -B 239.1.1.1 \ + $MZ $h1.10 -a own -b 01:00:5e:01:01:01 -c 1 -A 192.0.2.1 -B 239.1.1.1 \ -t ip proto=2,p=$(igmpv3_is_in_get 239.1.1.1 192.0.2.2) -q bridge -d mdb show dev br0 vid 10 | grep 239.1.1.1 | grep -v "src" | \ @@ -1167,7 +1169,7 @@ ctrl_mldv2_is_in_test() # IS_IN ( 2001:db8:1::2 ) local p=$(mldv2_is_in_get fe80::1 ff0e::1 2001:db8:1::2) - $MZ -6 $h1.10 -c 1 -A fe80::1 -B ff0e::1 \ + $MZ -6 $h1.10 -a own -b 33:33:00:00:00:01 -c 1 -A fe80::1 -B ff0e::1 \ -t ip hop=1,next=0,p="$p" -q bridge -d mdb show dev br0 vid 10 | grep ff0e::1 | \ @@ -1181,7 +1183,7 @@ ctrl_mldv2_is_in_test() filter_mode include source_list 2001:db8:1::1 # IS_IN ( 2001:db8:1::2 ) - $MZ -6 $h1.10 -c 1 -A fe80::1 -B ff0e::1 \ + $MZ -6 $h1.10 -a own -b 33:33:00:00:00:01 -c 1 -A fe80::1 -B ff0e::1 \ -t ip hop=1,next=0,p="$p" -q bridge -d mdb show dev br0 vid 10 | grep ff0e::1 | grep -v "src" | \ @@ -1206,6 +1208,11 @@ ctrl_test() ctrl_mldv2_is_in_test } +if ! bridge mdb help 2>&1 | grep -q "replace"; then + echo "SKIP: iproute2 too old, missing bridge mdb replace support" + exit $ksft_skip +fi + trap cleanup EXIT setup_prepare diff --git a/tools/testing/selftests/net/forwarding/bridge_mdb_max.sh b/tools/testing/selftests/net/forwarding/bridge_mdb_max.sh index ae255b662ba3..3da9d93ab36f 100755 --- a/tools/testing/selftests/net/forwarding/bridge_mdb_max.sh +++ b/tools/testing/selftests/net/forwarding/bridge_mdb_max.sh @@ -252,7 +252,8 @@ ctl4_entries_add() local IPs=$(seq -f 192.0.2.%g 1 $((n - 1))) local peer=$(locus_dev_peer $locus) local GRP=239.1.1.${grp} - $MZ $peer -c 1 -A 192.0.2.1 -B $GRP \ + local dmac=01:00:5e:01:01:$(printf "%02x" $grp) + $MZ $peer -a own -b $dmac -c 1 -A 192.0.2.1 -B $GRP \ -t ip proto=2,p=$(igmpv3_is_in_get $GRP $IPs) -q sleep 1 @@ -272,7 +273,8 @@ ctl4_entries_del() local peer=$(locus_dev_peer $locus) local GRP=239.1.1.${grp} - $MZ $peer -c 1 -A 192.0.2.1 -B 224.0.0.2 \ + local dmac=01:00:5e:00:00:02 + $MZ $peer -a own -b $dmac -c 1 -A 192.0.2.1 -B 224.0.0.2 \ -t ip proto=2,p=$(igmpv2_leave_get $GRP) -q sleep 1 ! 
bridge mdb show dev br0 | grep -q $GRP @@ -289,8 +291,10 @@ ctl6_entries_add() local peer=$(locus_dev_peer $locus) local SIP=fe80::1 local GRP=ff0e::${grp} + local dmac=33:33:00:00:00:$(printf "%02x" $grp) local p=$(mldv2_is_in_get $SIP $GRP $IPs) - $MZ -6 $peer -c 1 -A $SIP -B $GRP -t ip hop=1,next=0,p="$p" -q + $MZ -6 $peer -a own -b $dmac -c 1 -A $SIP -B $GRP \ + -t ip hop=1,next=0,p="$p" -q sleep 1 local nn=$(bridge mdb show dev br0 | grep $GRP | wc -l) @@ -310,8 +314,10 @@ ctl6_entries_del() local peer=$(locus_dev_peer $locus) local SIP=fe80::1 local GRP=ff0e::${grp} + local dmac=33:33:00:00:00:$(printf "%02x" $grp) local p=$(mldv1_done_get $SIP $GRP) - $MZ -6 $peer -c 1 -A $SIP -B $GRP -t ip hop=1,next=0,p="$p" -q + $MZ -6 $peer -a own -b $dmac -c 1 -A $SIP -B $GRP \ + -t ip hop=1,next=0,p="$p" -q sleep 1 ! bridge mdb show dev br0 | grep -q $GRP } @@ -1328,6 +1334,11 @@ test_8021qvs() switch_destroy } +if ! bridge link help 2>&1 | grep -q "mcast_max_groups"; then + echo "SKIP: iproute2 too old, missing bridge \"mcast_max_groups\" support" + exit $ksft_skip +fi + trap cleanup EXIT setup_prepare diff --git a/tools/testing/selftests/net/forwarding/ethtool.sh b/tools/testing/selftests/net/forwarding/ethtool.sh index dbb9fcf759e0..aa2eafb7b243 100755 --- a/tools/testing/selftests/net/forwarding/ethtool.sh +++ b/tools/testing/selftests/net/forwarding/ethtool.sh @@ -286,6 +286,8 @@ different_speeds_autoneg_on() ethtool -s $h1 autoneg on } +skip_on_veth + trap cleanup EXIT setup_prepare diff --git a/tools/testing/selftests/net/forwarding/ethtool_extended_state.sh b/tools/testing/selftests/net/forwarding/ethtool_extended_state.sh index 072faa77f53b..17f89c3b7c02 100755 --- a/tools/testing/selftests/net/forwarding/ethtool_extended_state.sh +++ b/tools/testing/selftests/net/forwarding/ethtool_extended_state.sh @@ -108,6 +108,8 @@ no_cable() ip link set dev $swp3 down } +skip_on_veth + setup_prepare tests_run diff --git a/tools/testing/selftests/net/forwarding/ethtool_mm.sh b/tools/testing/selftests/net/forwarding/ethtool_mm.sh index c580ad623848..39e736f30322 100755 --- a/tools/testing/selftests/net/forwarding/ethtool_mm.sh +++ b/tools/testing/selftests/net/forwarding/ethtool_mm.sh @@ -258,11 +258,6 @@ h2_destroy() setup_prepare() { - check_ethtool_mm_support - check_tc_fp_support - require_command lldptool - bail_on_lldpad "autoconfigure the MAC Merge layer" "configure it manually" - h1=${NETIFS[p1]} h2=${NETIFS[p2]} @@ -278,6 +273,19 @@ cleanup() h1_destroy } +check_ethtool_mm_support +check_tc_fp_support +require_command lldptool +bail_on_lldpad "autoconfigure the MAC Merge layer" "configure it manually" + +for netif in ${NETIFS[@]}; do + ethtool --show-mm $netif 2>&1 &> /dev/null + if [[ $? 
-ne 0 ]]; then + echo "SKIP: $netif does not support MAC Merge" + exit $ksft_skip + fi +done + trap cleanup EXIT setup_prepare diff --git a/tools/testing/selftests/net/forwarding/hw_stats_l3_gre.sh b/tools/testing/selftests/net/forwarding/hw_stats_l3_gre.sh index eb9ec4a68f84..7594bbb49029 100755 --- a/tools/testing/selftests/net/forwarding/hw_stats_l3_gre.sh +++ b/tools/testing/selftests/net/forwarding/hw_stats_l3_gre.sh @@ -99,6 +99,8 @@ test_stats_rx() test_stats g2a rx } +skip_on_veth + trap cleanup EXIT setup_prepare diff --git a/tools/testing/selftests/net/forwarding/ip6_forward_instats_vrf.sh b/tools/testing/selftests/net/forwarding/ip6_forward_instats_vrf.sh index 9f5b3e2e5e95..49fa94b53a1c 100755 --- a/tools/testing/selftests/net/forwarding/ip6_forward_instats_vrf.sh +++ b/tools/testing/selftests/net/forwarding/ip6_forward_instats_vrf.sh @@ -14,6 +14,8 @@ ALL_TESTS=" NUM_NETIFS=4 source lib.sh +require_command $TROUTE6 + h1_create() { simple_if_init $h1 2001:1:1::2/64 diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh index 9ddb68dd6a08..f69015bf2dea 100755 --- a/tools/testing/selftests/net/forwarding/lib.sh +++ b/tools/testing/selftests/net/forwarding/lib.sh @@ -30,6 +30,7 @@ REQUIRE_MZ=${REQUIRE_MZ:=yes} REQUIRE_MTOOLS=${REQUIRE_MTOOLS:=no} STABLE_MAC_ADDRS=${STABLE_MAC_ADDRS:=no} TCPDUMP_EXTRA_FLAGS=${TCPDUMP_EXTRA_FLAGS:=} +TROUTE6=${TROUTE6:=traceroute6} relative_path="${BASH_SOURCE%/*}" if [[ "$relative_path" == "${BASH_SOURCE}" ]]; then @@ -163,6 +164,17 @@ check_port_mab_support() fi } +skip_on_veth() +{ + local kind=$(ip -j -d link show dev ${NETIFS[p1]} | + jq -r '.[].linkinfo.info_kind') + + if [[ $kind == veth ]]; then + echo "SKIP: Test cannot be run with veth pairs" + exit $ksft_skip + fi +} + if [[ "$(id -u)" -ne 0 ]]; then echo "SKIP: need root privileges" exit $ksft_skip @@ -225,6 +237,11 @@ create_netif_veth() for ((i = 1; i <= NUM_NETIFS; ++i)); do local j=$((i+1)) + if [ -z ${NETIFS[p$i]} ]; then + echo "SKIP: Cannot create interface. Name not specified" + exit $ksft_skip + fi + ip link show dev ${NETIFS[p$i]} &> /dev/null if [[ $? -ne 0 ]]; then ip link add ${NETIFS[p$i]} type veth \ diff --git a/tools/testing/selftests/net/forwarding/settings b/tools/testing/selftests/net/forwarding/settings new file mode 100644 index 000000000000..e7b9417537fb --- /dev/null +++ b/tools/testing/selftests/net/forwarding/settings @@ -0,0 +1 @@ +timeout=0 diff --git a/tools/testing/selftests/net/forwarding/tc_actions.sh b/tools/testing/selftests/net/forwarding/tc_actions.sh index a96cff8e7219..b0f5e55d2d0b 100755 --- a/tools/testing/selftests/net/forwarding/tc_actions.sh +++ b/tools/testing/selftests/net/forwarding/tc_actions.sh @@ -9,6 +9,8 @@ NUM_NETIFS=4 source tc_common.sh source lib.sh +require_command ncat + tcflags="skip_hw" h1_create() @@ -220,9 +222,9 @@ mirred_egress_to_ingress_tcp_test() ip_proto icmp \ action drop - ip vrf exec v$h1 nc --recv-only -w10 -l -p 12345 -o $mirred_e2i_tf2 & + ip vrf exec v$h1 ncat --recv-only -w10 -l -p 12345 -o $mirred_e2i_tf2 & local rpid=$! - ip vrf exec v$h1 nc -w1 --send-only 192.0.2.2 12345 <$mirred_e2i_tf1 + ip vrf exec v$h1 ncat -w1 --send-only 192.0.2.2 12345 <$mirred_e2i_tf1 wait -n $rpid cmp -s $mirred_e2i_tf1 $mirred_e2i_tf2 check_err $? 
"server output check failed" diff --git a/tools/testing/selftests/net/forwarding/tc_flower.sh b/tools/testing/selftests/net/forwarding/tc_flower.sh index 683711f41aa9..b1daad19b01e 100755 --- a/tools/testing/selftests/net/forwarding/tc_flower.sh +++ b/tools/testing/selftests/net/forwarding/tc_flower.sh @@ -52,8 +52,8 @@ match_dst_mac_test() tc_check_packets "dev $h2 ingress" 101 1 check_fail $? "Matched on a wrong filter" - tc_check_packets "dev $h2 ingress" 102 1 - check_err $? "Did not match on correct filter" + tc_check_packets "dev $h2 ingress" 102 0 + check_fail $? "Did not match on correct filter" tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower tc filter del dev $h2 ingress protocol ip pref 2 handle 102 flower @@ -78,8 +78,8 @@ match_src_mac_test() tc_check_packets "dev $h2 ingress" 101 1 check_fail $? "Matched on a wrong filter" - tc_check_packets "dev $h2 ingress" 102 1 - check_err $? "Did not match on correct filter" + tc_check_packets "dev $h2 ingress" 102 0 + check_fail $? "Did not match on correct filter" tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower tc filter del dev $h2 ingress protocol ip pref 2 handle 102 flower diff --git a/tools/testing/selftests/net/forwarding/tc_flower_l2_miss.sh b/tools/testing/selftests/net/forwarding/tc_flower_l2_miss.sh index e22c2d28b6eb..20a7cb7222b8 100755 --- a/tools/testing/selftests/net/forwarding/tc_flower_l2_miss.sh +++ b/tools/testing/selftests/net/forwarding/tc_flower_l2_miss.sh @@ -127,6 +127,7 @@ test_l2_miss_multicast_common() local proto=$1; shift local sip=$1; shift local dip=$1; shift + local dmac=$1; shift local mode=$1; shift local name=$1; shift @@ -142,7 +143,7 @@ test_l2_miss_multicast_common() action pass # Before adding MDB entry. - $MZ $mode $h1 -t ip -A $sip -B $dip -c 1 -p 100 -q + $MZ $mode $h1 -a own -b $dmac -t ip -A $sip -B $dip -c 1 -p 100 -q tc_check_packets "dev $swp2 egress" 101 1 check_err $? "Unregistered multicast filter was not hit before adding MDB entry" @@ -153,7 +154,7 @@ test_l2_miss_multicast_common() # Adding MDB entry. bridge mdb replace dev br1 port $swp2 grp $dip permanent - $MZ $mode $h1 -t ip -A $sip -B $dip -c 1 -p 100 -q + $MZ $mode $h1 -a own -b $dmac -t ip -A $sip -B $dip -c 1 -p 100 -q tc_check_packets "dev $swp2 egress" 101 1 check_err $? "Unregistered multicast filter was hit after adding MDB entry" @@ -164,7 +165,7 @@ test_l2_miss_multicast_common() # Deleting MDB entry. bridge mdb del dev br1 port $swp2 grp $dip - $MZ $mode $h1 -t ip -A $sip -B $dip -c 1 -p 100 -q + $MZ $mode $h1 -a own -b $dmac -t ip -A $sip -B $dip -c 1 -p 100 -q tc_check_packets "dev $swp2 egress" 101 2 check_err $? 
"Unregistered multicast filter was not hit after deleting MDB entry" @@ -183,10 +184,11 @@ test_l2_miss_multicast_ipv4() local proto="ipv4" local sip=192.0.2.1 local dip=239.1.1.1 + local dmac=01:00:5e:01:01:01 local mode="-4" local name="IPv4" - test_l2_miss_multicast_common $proto $sip $dip $mode $name + test_l2_miss_multicast_common $proto $sip $dip $dmac $mode $name } test_l2_miss_multicast_ipv6() @@ -194,10 +196,11 @@ test_l2_miss_multicast_ipv6() local proto="ipv6" local sip=2001:db8:1::1 local dip=ff0e::1 + local dmac=33:33:00:00:00:01 local mode="-6" local name="IPv6" - test_l2_miss_multicast_common $proto $sip $dip $mode $name + test_l2_miss_multicast_common $proto $sip $dip $dmac $mode $name } test_l2_miss_multicast() diff --git a/tools/testing/selftests/net/forwarding/tc_tunnel_key.sh b/tools/testing/selftests/net/forwarding/tc_tunnel_key.sh index 5ac184d51809..5a5dd9034819 100755 --- a/tools/testing/selftests/net/forwarding/tc_tunnel_key.sh +++ b/tools/testing/selftests/net/forwarding/tc_tunnel_key.sh @@ -104,11 +104,14 @@ tunnel_key_nofrag_test() local i tc filter add dev $swp1 ingress protocol ip pref 100 handle 100 \ - flower ip_flags nofrag action drop + flower src_ip 192.0.2.1 dst_ip 192.0.2.2 ip_proto udp \ + ip_flags nofrag action drop tc filter add dev $swp1 ingress protocol ip pref 101 handle 101 \ - flower ip_flags firstfrag action drop + flower src_ip 192.0.2.1 dst_ip 192.0.2.2 ip_proto udp \ + ip_flags firstfrag action drop tc filter add dev $swp1 ingress protocol ip pref 102 handle 102 \ - flower ip_flags nofirstfrag action drop + flower src_ip 192.0.2.1 dst_ip 192.0.2.2 ip_proto udp \ + ip_flags nofirstfrag action drop # test 'nofrag' set tc filter add dev h1-et egress protocol all pref 1 handle 1 matchall $tcflags \ diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh index 3c2096ac97ef..d01b73a8ed0f 100755 --- a/tools/testing/selftests/net/mptcp/mptcp_join.sh +++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh @@ -705,6 +705,7 @@ pm_nl_del_endpoint() local addr=$3 if [ $ip_mptcp -eq 1 ]; then + [ $id -ne 0 ] && addr='' ip -n $ns mptcp endpoint delete id $id $addr else ip netns exec $ns ./pm_nl_ctl del $id $addr @@ -795,10 +796,11 @@ pm_nl_check_endpoint() fi if [ $ip_mptcp -eq 1 ]; then + # get line and trim trailing whitespace line=$(ip -n $ns mptcp endpoint show $id) + line="${line% }" # the dump order is: address id flags port dev - expected_line="$addr" - [ -n "$addr" ] && expected_line="$expected_line $addr" + [ -n "$addr" ] && expected_line="$addr" expected_line="$expected_line $id" [ -n "$_flags" ] && expected_line="$expected_line ${_flags//","/" "}" [ -n "$dev" ] && expected_line="$expected_line $dev" diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh index dfe3d287f01d..f838dd370f6a 100755 --- a/tools/testing/selftests/net/pmtu.sh +++ b/tools/testing/selftests/net/pmtu.sh @@ -361,6 +361,7 @@ err_buf= tcpdump_pids= nettest_pids= socat_pids= +tmpoutfile= err() { err_buf="${err_buf}${1} @@ -951,6 +952,7 @@ cleanup() { ip link del veth_A-R1 2>/dev/null ovs-vsctl --if-exists del-port vxlan_a 2>/dev/null ovs-vsctl --if-exists del-br ovs_br0 2>/dev/null + rm -f "$tmpoutfile" } mtu() { @@ -1328,6 +1330,39 @@ test_pmtu_ipvX_over_bridged_vxlanY_or_geneveY_exception() { check_pmtu_value ${exp_mtu} "${pmtu}" "exceeding link layer MTU on bridged ${type} interface" pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst})" check_pmtu_value ${exp_mtu} "${pmtu}" "exceeding 
link layer MTU on locally bridged ${type} interface" + + tmpoutfile=$(mktemp) + + # Flush Exceptions, retry with TCP + run_cmd ${ns_a} ip route flush cached ${dst} + run_cmd ${ns_b} ip route flush cached ${dst} + run_cmd ${ns_c} ip route flush cached ${dst} + + for target in "${ns_a}" "${ns_c}" ; do + if [ ${family} -eq 4 ]; then + TCPDST=TCP:${dst}:50000 + else + TCPDST="TCP:[${dst}]:50000" + fi + ${ns_b} socat -T 3 -u -6 TCP-LISTEN:50000 STDOUT > $tmpoutfile & + + sleep 1 + + dd if=/dev/zero of=/dev/stdout status=none bs=1M count=1 | ${target} socat -T 3 -u STDIN $TCPDST,connect-timeout=3 + + size=$(du -sb $tmpoutfile) + size=${size%%/tmp/*} + + [ $size -ne 1048576 ] && err "File size $size mismatches exepcted value in locally bridged vxlan test" && return 1 + done + + rm -f "$tmpoutfile" + + # Check that exceptions were created + pmtu="$(route_get_dst_pmtu_from_exception "${ns_c}" ${dst})" + check_pmtu_value ${exp_mtu} "${pmtu}" "tcp: exceeding link layer MTU on bridged ${type} interface" + pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst})" + check_pmtu_value ${exp_mtu} "${pmtu}" "tcp exceeding link layer MTU on locally bridged ${type} interface" } test_pmtu_ipv4_br_vxlan4_exception() { diff --git a/tools/testing/selftests/net/so_incoming_cpu.c b/tools/testing/selftests/net/so_incoming_cpu.c index 0e04f9fef986..a14818164102 100644 --- a/tools/testing/selftests/net/so_incoming_cpu.c +++ b/tools/testing/selftests/net/so_incoming_cpu.c @@ -159,7 +159,7 @@ void create_clients(struct __test_metadata *_metadata, /* Make sure SYN will be processed on the i-th CPU * and finally distributed to the i-th listener. */ - sched_setaffinity(0, sizeof(cpu_set), &cpu_set); + ret = sched_setaffinity(0, sizeof(cpu_set), &cpu_set); ASSERT_EQ(ret, 0); for (j = 0; j < CLIENT_PER_SERVER; j++) { diff --git a/tools/testing/selftests/riscv/vector/vstate_exec_nolibc.c b/tools/testing/selftests/riscv/vector/vstate_exec_nolibc.c index 5cbc392944a6..2c0d2b1126c1 100644 --- a/tools/testing/selftests/riscv/vector/vstate_exec_nolibc.c +++ b/tools/testing/selftests/riscv/vector/vstate_exec_nolibc.c @@ -1,6 +1,4 @@ // SPDX-License-Identifier: GPL-2.0-only -#include <sys/prctl.h> - #define THIS_PROGRAM "./vstate_exec_nolibc" int main(int argc, char **argv) diff --git a/tools/testing/selftests/rseq/Makefile b/tools/testing/selftests/rseq/Makefile index b357ba24af06..7a957c7d459a 100644 --- a/tools/testing/selftests/rseq/Makefile +++ b/tools/testing/selftests/rseq/Makefile @@ -4,8 +4,10 @@ ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),) CLANG_FLAGS += -no-integrated-as endif +top_srcdir = ../../../.. 
+ CFLAGS += -O2 -Wall -g -I./ $(KHDR_INCLUDES) -L$(OUTPUT) -Wl,-rpath=./ \ - $(CLANG_FLAGS) + $(CLANG_FLAGS) -I$(top_srcdir)/tools/include LDLIBS += -lpthread -ldl # Own dependencies because we only want to build against 1st prerequisite, but diff --git a/tools/testing/selftests/rseq/rseq.c b/tools/testing/selftests/rseq/rseq.c index a723da253244..96e812bdf8a4 100644 --- a/tools/testing/selftests/rseq/rseq.c +++ b/tools/testing/selftests/rseq/rseq.c @@ -31,6 +31,8 @@ #include <sys/auxv.h> #include <linux/auxvec.h> +#include <linux/compiler.h> + #include "../kselftest.h" #include "rseq.h" diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json index a44455372646..08d4861c2e78 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json +++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json @@ -131,5 +131,30 @@ "teardown": [ "echo \"1\" > /sys/bus/netdevsim/del_device" ] + }, + { + "id": "3e1e", + "name": "Add taprio Qdisc with an invalid cycle-time", + "category": [ + "qdisc", + "taprio" + ], + "plugins": { + "requires": "nsPlugin" + }, + "setup": [ + "echo \"1 1 8\" > /sys/bus/netdevsim/new_device", + "$TC qdisc add dev $ETH root handle 1: taprio num_tc 3 map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 queues 1@0 1@0 1@0 base-time 1000000000 sched-entry S 01 300000 flags 0x1 clockid CLOCK_TAI cycle-time 4294967296 || /bin/true", + "$IP link set dev $ETH up", + "$IP addr add 10.10.10.10/24 dev $ETH" + ], + "cmdUnderTest": "/bin/true", + "expExitCode": "0", + "verifyCmd": "$TC qdisc show dev $ETH", + "matchPattern": "qdisc taprio 1: root refcnt", + "matchCount": "0", + "teardown": [ + "echo \"1\" > /sys/bus/netdevsim/del_device" + ] } ] diff --git a/tools/testing/vsock/Makefile b/tools/testing/vsock/Makefile index 43a254f0e14d..21a98ba565ab 100644 --- a/tools/testing/vsock/Makefile +++ b/tools/testing/vsock/Makefile @@ -8,5 +8,5 @@ vsock_perf: vsock_perf.o CFLAGS += -g -O2 -Werror -Wall -I. -I../../include -I../../../usr/include -Wno-pointer-sign -fno-strict-overflow -fno-strict-aliasing -fno-common -MMD -U_FORTIFY_SOURCE -D_GNU_SOURCE .PHONY: all test clean clean: - ${RM} *.o *.d vsock_test vsock_diag_test + ${RM} *.o *.d vsock_test vsock_diag_test vsock_perf -include *.d |
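The new taprio tdc case "3e1e" above feeds cycle-time 4294967296 (2^32) nanoseconds and expects the qdisc not to be created, so the attribute has to be range-checked rather than allowed to wrap. A small out-of-tree sketch of that kind of guard follows; the INT32_MAX bound and the rejection of zero are assumptions drawn from what the test exercises, not a quote of the taprio parser.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical stand-in for the qdisc-side validation: reject a cycle time
 * of zero or one that exceeds an assumed INT32_MAX-nanosecond limit, which
 * is enough to explain why the test's value of 2^32 must fail.
 */
static bool cycle_time_valid(uint64_t cycle_time_ns)
{
        return cycle_time_ns > 0 && cycle_time_ns <= (uint64_t)INT32_MAX;
}

int main(void)
{
        const uint64_t values[] = { 0, 300000, 2147483647ULL, 4294967296ULL };

        for (size_t i = 0; i < sizeof(values) / sizeof(values[0]); i++)
                printf("cycle-time %llu ns -> %s\n",
                       (unsigned long long)values[i],
                       cycle_time_valid(values[i]) ? "accepted" : "rejected");
        return 0;
}

Running the sketch prints accepted for 300000 and 2147483647 and rejected for 0 and 4294967296, mirroring the outcome the tdc case verifies (no "qdisc taprio 1: root refcnt" in the qdisc dump).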