Diffstat (limited to 'drivers/firmware')
27 files changed, 1684 insertions, 232 deletions
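Editorial orientation (not part of the patch): the bulk of this series reworks how in-flight SCMI transfers are tracked (a token-keyed pending_xfers hashtable, per-xfer refcounting, and a small SENT_OK/RESP_OK/DRESP_OK state machine) and adds a message-passing layer plus a new virtio transport on top of it. Below is a simplified sketch of the reworked RX path, using only names that appear in the hunks that follow; locking, out-of-order handling and error paths are omitted, so treat it as a reading aid rather than real code.

/* Editorial sketch only: condensed from scmi_handle_response() below */
static void sketch_handle_response(struct scmi_chan_info *cinfo,
				   u32 msg_hdr, void *priv)
{
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct scmi_xfer *xfer;

	/* Token lookup in pending_xfers, refcount bump and busy-flag acquire */
	xfer = scmi_xfer_command_acquire(cinfo, msg_hdr);
	if (IS_ERR(xfer)) {
		scmi_clear_channel(info, cinfo);
		return;
	}

	if (priv)
		xfer->priv = priv;	/* transport private data, e.g. a virtio message */

	info->desc->ops->fetch_response(cinfo, xfer);
	complete(&xfer->done);		/* or complete(xfer->async_done) for a delayed response */

	/* Clear the busy flag and drop the reference taken at acquire time */
	scmi_xfer_command_release(info, xfer);
}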
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index 5d3fd803d2bb..220a58cf0a44 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig @@ -6,39 +6,7 @@ menu "Firmware Drivers" -config ARM_SCMI_PROTOCOL - tristate "ARM System Control and Management Interface (SCMI) Message Protocol" - depends on ARM || ARM64 || COMPILE_TEST - depends on MAILBOX || HAVE_ARM_SMCCC_DISCOVERY - help - ARM System Control and Management Interface (SCMI) protocol is a - set of operating system-independent software interfaces that are - used in system management. SCMI is extensible and currently provides - interfaces for: Discovery and self-description of the interfaces - it supports, Power domain management which is the ability to place - a given device or domain into the various power-saving states that - it supports, Performance management which is the ability to control - the performance of a domain that is composed of compute engines - such as application processors and other accelerators, Clock - management which is the ability to set and inquire rates on platform - managed clocks and Sensor management which is the ability to read - sensor data, and be notified of sensor value. - - This protocol library provides interface for all the client drivers - making use of the features offered by the SCMI. - -config ARM_SCMI_POWER_DOMAIN - tristate "SCMI power domain driver" - depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF) - default y - select PM_GENERIC_DOMAINS if PM - help - This enables support for the SCMI power domains which can be - enabled or disabled via the SCP firmware - - This driver can also be built as a module. If so, the module - will be called scmi_pm_domain. Note this may needed early in boot - before rootfs may be available. +source "drivers/firmware/arm_scmi/Kconfig" config ARM_SCPI_PROTOCOL tristate "ARM System Control and Power Interface (SCPI) Message Protocol" @@ -235,7 +203,7 @@ config INTEL_STRATIX10_RSU Say Y here if you want Intel RSU support. config QCOM_SCM - bool + tristate "Qcom SCM driver" depends on ARM || ARM64 depends on HAVE_ARM_SMCCC select RESET_CONTROLLER diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile index 705fabe88156..5ced0673d94b 100644 --- a/drivers/firmware/Makefile +++ b/drivers/firmware/Makefile @@ -17,7 +17,8 @@ obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o obj-$(CONFIG_RASPBERRYPI_FIRMWARE) += raspberrypi.o obj-$(CONFIG_FW_CFG_SYSFS) += qemu_fw_cfg.o -obj-$(CONFIG_QCOM_SCM) += qcom_scm.o qcom_scm-smc.o qcom_scm-legacy.o +obj-$(CONFIG_QCOM_SCM) += qcom-scm.o +qcom-scm-objs += qcom_scm.o qcom_scm-smc.o qcom_scm-legacy.o obj-$(CONFIG_SYSFB) += sysfb.o obj-$(CONFIG_SYSFB_SIMPLEFB) += sysfb_simplefb.o obj-$(CONFIG_TI_SCI_PROTOCOL) += ti_sci.o diff --git a/drivers/firmware/arm_scmi/Kconfig b/drivers/firmware/arm_scmi/Kconfig new file mode 100644 index 000000000000..7f4d2435503b --- /dev/null +++ b/drivers/firmware/arm_scmi/Kconfig @@ -0,0 +1,95 @@ +# SPDX-License-Identifier: GPL-2.0-only +menu "ARM System Control and Management Interface Protocol" + +config ARM_SCMI_PROTOCOL + tristate "ARM System Control and Management Interface (SCMI) Message Protocol" + depends on ARM || ARM64 || COMPILE_TEST + help + ARM System Control and Management Interface (SCMI) protocol is a + set of operating system-independent software interfaces that are + used in system management. 
SCMI is extensible and currently provides + interfaces for: Discovery and self-description of the interfaces + it supports, Power domain management which is the ability to place + a given device or domain into the various power-saving states that + it supports, Performance management which is the ability to control + the performance of a domain that is composed of compute engines + such as application processors and other accelerators, Clock + management which is the ability to set and inquire rates on platform + managed clocks and Sensor management which is the ability to read + sensor data, and be notified of sensor value. + + This protocol library provides interface for all the client drivers + making use of the features offered by the SCMI. + +if ARM_SCMI_PROTOCOL + +config ARM_SCMI_HAVE_TRANSPORT + bool + help + This declares whether at least one SCMI transport has been configured. + Used to trigger a build bug when trying to build SCMI without any + configured transport. + +config ARM_SCMI_HAVE_SHMEM + bool + help + This declares whether a shared memory based transport for SCMI is + available. + +config ARM_SCMI_HAVE_MSG + bool + help + This declares whether a message passing based transport for SCMI is + available. + +config ARM_SCMI_TRANSPORT_MAILBOX + bool "SCMI transport based on Mailbox" + depends on MAILBOX + select ARM_SCMI_HAVE_TRANSPORT + select ARM_SCMI_HAVE_SHMEM + default y + help + Enable mailbox based transport for SCMI. + + If you want the ARM SCMI PROTOCOL stack to include support for a + transport based on mailboxes, answer Y. + +config ARM_SCMI_TRANSPORT_SMC + bool "SCMI transport based on SMC" + depends on HAVE_ARM_SMCCC_DISCOVERY + select ARM_SCMI_HAVE_TRANSPORT + select ARM_SCMI_HAVE_SHMEM + default y + help + Enable SMC based transport for SCMI. + + If you want the ARM SCMI PROTOCOL stack to include support for a + transport based on SMC, answer Y. + +config ARM_SCMI_TRANSPORT_VIRTIO + bool "SCMI transport based on VirtIO" + depends on VIRTIO + select ARM_SCMI_HAVE_TRANSPORT + select ARM_SCMI_HAVE_MSG + help + This enables the virtio based transport for SCMI. + + If you want the ARM SCMI PROTOCOL stack to include support for a + transport based on VirtIO, answer Y. + +endif #ARM_SCMI_PROTOCOL + +config ARM_SCMI_POWER_DOMAIN + tristate "SCMI power domain driver" + depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF) + default y + select PM_GENERIC_DOMAINS if PM + help + This enables support for the SCMI power domains which can be + enabled or disabled via the SCP firmware + + This driver can also be built as a module. If so, the module + will be called scmi_pm_domain. Note this may needed early in boot + before rootfs may be available. 
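(Editorial aside, not part of the patch.) The new hidden symbols replace the old direct dependencies on CONFIG_MAILBOX and CONFIG_HAVE_ARM_SMCCC_DISCOVERY: each transport option selects ARM_SCMI_HAVE_TRANSPORT, which the core later checks with WARN_ON(!IS_ENABLED(CONFIG_ARM_SCMI_HAVE_TRANSPORT)) so a kernel without any transport fails early, plus either ARM_SCMI_HAVE_SHMEM or ARM_SCMI_HAVE_MSG. The fragment below only restates, for orientation, how the common.h and driver.c hunks further down consume these symbols.

/* Illustration only: each transport is guarded by its own Kconfig symbol */
#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO
extern const struct scmi_desc scmi_virtio_desc;
#endif

static const struct of_device_id scmi_of_match[] = {
#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO
	{ .compatible = "arm,scmi-virtio", .data = &scmi_virtio_desc },
#endif
	{ /* Sentinel */ },
};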
+ +endmenu diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile index 6a2ef63306d6..1dcf123d64ab 100644 --- a/drivers/firmware/arm_scmi/Makefile +++ b/drivers/firmware/arm_scmi/Makefile @@ -1,9 +1,11 @@ # SPDX-License-Identifier: GPL-2.0-only scmi-bus-y = bus.o scmi-driver-y = driver.o notify.o -scmi-transport-y = shmem.o -scmi-transport-$(CONFIG_MAILBOX) += mailbox.o -scmi-transport-$(CONFIG_HAVE_ARM_SMCCC_DISCOVERY) += smc.o +scmi-transport-$(CONFIG_ARM_SCMI_HAVE_SHMEM) = shmem.o +scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_MAILBOX) += mailbox.o +scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_SMC) += smc.o +scmi-transport-$(CONFIG_ARM_SCMI_HAVE_MSG) += msg.o +scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO) += virtio.o scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o system.o voltage.o scmi-module-objs := $(scmi-bus-y) $(scmi-driver-y) $(scmi-protocols-y) \ $(scmi-transport-y) diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c index 6c7e24935eca..f6fe723ab869 100644 --- a/drivers/firmware/arm_scmi/bus.c +++ b/drivers/firmware/arm_scmi/bus.c @@ -111,15 +111,13 @@ static int scmi_dev_probe(struct device *dev) return scmi_drv->probe(scmi_dev); } -static int scmi_dev_remove(struct device *dev) +static void scmi_dev_remove(struct device *dev) { struct scmi_driver *scmi_drv = to_scmi_driver(dev->driver); struct scmi_device *scmi_dev = to_scmi_dev(dev); if (scmi_drv->remove) scmi_drv->remove(scmi_dev); - - return 0; } static struct bus_type scmi_bus_type = { diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h index 8685619d38f9..dea1bfbe1052 100644 --- a/drivers/firmware/arm_scmi/common.h +++ b/drivers/firmware/arm_scmi/common.h @@ -14,8 +14,12 @@ #include <linux/device.h> #include <linux/errno.h> #include <linux/kernel.h> +#include <linux/hashtable.h> +#include <linux/list.h> #include <linux/module.h> +#include <linux/refcount.h> #include <linux/scmi_protocol.h> +#include <linux/spinlock.h> #include <linux/types.h> #include <asm/unaligned.h> @@ -65,11 +69,22 @@ struct scmi_msg_resp_prot_version { #define MSG_XTRACT_TOKEN(hdr) FIELD_GET(MSG_TOKEN_ID_MASK, (hdr)) #define MSG_TOKEN_MAX (MSG_XTRACT_TOKEN(MSG_TOKEN_ID_MASK) + 1) +/* + * Size of @pending_xfers hashtable included in @scmi_xfers_info; ideally, in + * order to minimize space and collisions, this should equal max_msg, i.e. the + * maximum number of in-flight messages on a specific platform, but such value + * is only available at runtime while kernel hashtables are statically sized: + * pick instead as a fixed static size the maximum number of entries that can + * fit the whole table into one 4k page. + */ +#define SCMI_PENDING_XFERS_HT_ORDER_SZ 9 + /** * struct scmi_msg_hdr - Message(Tx/Rx) header * * @id: The identifier of the message being sent * @protocol_id: The identifier of the protocol used to send @id message + * @type: The SCMI type for this message * @seq: The token to identify the message. When a message returns, the * platform returns the whole message header unmodified including the * token @@ -80,6 +95,7 @@ struct scmi_msg_resp_prot_version { struct scmi_msg_hdr { u8 id; u8 protocol_id; + u8 type; u16 seq; u32 status; bool poll_completion; @@ -89,13 +105,14 @@ struct scmi_msg_hdr { * pack_scmi_header() - packs and returns 32-bit header * * @hdr: pointer to header containing all the information on message id, - * protocol id and sequence id. + * protocol id, sequence id and type. 
* * Return: 32-bit packed message header to be sent to the platform. */ static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr) { return FIELD_PREP(MSG_ID_MASK, hdr->id) | + FIELD_PREP(MSG_TYPE_MASK, hdr->type) | FIELD_PREP(MSG_TOKEN_ID_MASK, hdr->seq) | FIELD_PREP(MSG_PROTOCOL_ID_MASK, hdr->protocol_id); } @@ -110,6 +127,7 @@ static inline void unpack_scmi_header(u32 msg_hdr, struct scmi_msg_hdr *hdr) { hdr->id = MSG_XTRACT_ID(msg_hdr); hdr->protocol_id = MSG_XTRACT_PROT_ID(msg_hdr); + hdr->type = MSG_XTRACT_TYPE(msg_hdr); } /** @@ -134,6 +152,27 @@ struct scmi_msg { * buffer for the rx path as we use for the tx path. * @done: command message transmit completion event * @async_done: pointer to delayed response message received event completion + * @pending: True for xfers added to @pending_xfers hashtable + * @node: An hlist_node reference used to store this xfer, alternatively, on + * the free list @free_xfers or in the @pending_xfers hashtable + * @users: A refcount to track the active users for this xfer. + * This is meant to protect against the possibility that, when a command + * transaction times out concurrently with the reception of a valid + * response message, the xfer could be finally put on the TX path, and + * so vanish, while on the RX path scmi_rx_callback() is still + * processing it: in such a case this refcounting will ensure that, even + * though the timed-out transaction will anyway cause the command + * request to be reported as failed by time-out, the underlying xfer + * cannot be discarded and possibly reused until the last one user on + * the RX path has released it. + * @busy: An atomic flag to ensure exclusive write access to this xfer + * @state: The current state of this transfer, with states transitions deemed + * valid being: + * - SCMI_XFER_SENT_OK -> SCMI_XFER_RESP_OK [ -> SCMI_XFER_DRESP_OK ] + * - SCMI_XFER_SENT_OK -> SCMI_XFER_DRESP_OK + * (Missing synchronous response is assumed OK and ignored) + * @lock: A spinlock to protect state and busy fields. + * @priv: A pointer for transport private usage. */ struct scmi_xfer { int transfer_id; @@ -142,8 +181,36 @@ struct scmi_xfer { struct scmi_msg rx; struct completion done; struct completion *async_done; + bool pending; + struct hlist_node node; + refcount_t users; +#define SCMI_XFER_FREE 0 +#define SCMI_XFER_BUSY 1 + atomic_t busy; +#define SCMI_XFER_SENT_OK 0 +#define SCMI_XFER_RESP_OK 1 +#define SCMI_XFER_DRESP_OK 2 + int state; + /* A lock to protect state and busy fields */ + spinlock_t lock; + void *priv; }; +/* + * An helper macro to lookup an xfer from the @pending_xfers hashtable + * using the message sequence number token as a key. 
+ */ +#define XFER_FIND(__ht, __k) \ +({ \ + typeof(__k) k_ = __k; \ + struct scmi_xfer *xfer_ = NULL; \ + \ + hash_for_each_possible((__ht), xfer_, node, k_) \ + if (xfer_->hdr.seq == k_) \ + break; \ + xfer_; \ +}) + struct scmi_xfer_ops; /** @@ -283,9 +350,13 @@ struct scmi_chan_info { /** * struct scmi_transport_ops - Structure representing a SCMI transport ops * + * @link_supplier: Optional callback to add link to a supplier device * @chan_available: Callback to check if channel is available or not * @chan_setup: Callback to allocate and setup a channel * @chan_free: Callback to free a channel + * @get_max_msg: Optional callback to provide max_msg dynamically + * Returns the maximum number of messages for the channel type + * (tx or rx) that can be pending simultaneously in the system * @send_message: Callback to send a message * @mark_txdone: Callback to mark tx as done * @fetch_response: Callback to fetch response @@ -294,10 +365,12 @@ struct scmi_chan_info { * @poll_done: Callback to poll transfer status */ struct scmi_transport_ops { + int (*link_supplier)(struct device *dev); bool (*chan_available)(struct device *dev, int idx); int (*chan_setup)(struct scmi_chan_info *cinfo, struct device *dev, bool tx); int (*chan_free)(int id, void *p, void *data); + unsigned int (*get_max_msg)(struct scmi_chan_info *base_cinfo); int (*send_message)(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer); void (*mark_txdone)(struct scmi_chan_info *cinfo, int ret); @@ -317,25 +390,39 @@ struct scmi_device *scmi_child_dev_find(struct device *parent, /** * struct scmi_desc - Description of SoC integration * + * @transport_init: An optional function that a transport can provide to + * initialize some transport-specific setup during SCMI core + * initialization, so ahead of SCMI core probing. + * @transport_exit: An optional function that a transport can provide to + * de-initialize some transport-specific setup during SCMI core + * de-initialization, so after SCMI core removal. * @ops: Pointer to the transport specific ops structure * @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds) - * @max_msg: Maximum number of messages that can be pending - * simultaneously in the system + * @max_msg: Maximum number of messages for a channel type (tx or rx) that can + * be pending simultaneously in the system. May be overridden by the + * get_max_msg op. * @max_msg_size: Maximum size of data per message that can be handled. */ struct scmi_desc { + int (*transport_init)(void); + void (*transport_exit)(void); const struct scmi_transport_ops *ops; int max_rx_timeout_ms; int max_msg; int max_msg_size; }; +#ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX extern const struct scmi_desc scmi_mailbox_desc; -#ifdef CONFIG_HAVE_ARM_SMCCC_DISCOVERY +#endif +#ifdef CONFIG_ARM_SCMI_TRANSPORT_SMC extern const struct scmi_desc scmi_smc_desc; #endif +#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO +extern const struct scmi_desc scmi_virtio_desc; +#endif -void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr); +void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv); void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id); /* shmem related declarations */ @@ -352,8 +439,22 @@ void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem); bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem, struct scmi_xfer *xfer); +/* declarations for message passing transports */ +struct scmi_msg_payld; + +/* Maximum overhead of message w.r.t. 
struct scmi_desc.max_msg_size */ +#define SCMI_MSG_MAX_PROT_OVERHEAD (2 * sizeof(__le32)) + +size_t msg_response_size(struct scmi_xfer *xfer); +size_t msg_command_size(struct scmi_xfer *xfer); +void msg_tx_prepare(struct scmi_msg_payld *msg, struct scmi_xfer *xfer); +u32 msg_read_header(struct scmi_msg_payld *msg); +void msg_fetch_response(struct scmi_msg_payld *msg, size_t len, + struct scmi_xfer *xfer); +void msg_fetch_notification(struct scmi_msg_payld *msg, size_t len, + size_t max_len, struct scmi_xfer *xfer); + void scmi_notification_instance_data_set(const struct scmi_handle *handle, void *priv); void *scmi_notification_instance_data_get(const struct scmi_handle *handle); - #endif /* _SCMI_COMMON_H */ diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 9b2e8d42a992..b406b3f78f46 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -21,6 +21,7 @@ #include <linux/io.h> #include <linux/kernel.h> #include <linux/ktime.h> +#include <linux/hashtable.h> #include <linux/list.h> #include <linux/module.h> #include <linux/of_address.h> @@ -67,16 +68,23 @@ struct scmi_requested_dev { /** * struct scmi_xfers_info - Structure to manage transfer information * - * @xfer_block: Preallocated Message array * @xfer_alloc_table: Bitmap table for allocated messages. * Index of this bitmap table is also used for message * sequence identifier. * @xfer_lock: Protection for message allocation + * @max_msg: Maximum number of messages that can be pending + * @free_xfers: A free list for available to use xfers. It is initialized with + * a number of xfers equal to the maximum allowed in-flight + * messages. + * @pending_xfers: An hashtable, indexed by msg_hdr.seq, used to keep all the + * currently in-flight messages. */ struct scmi_xfers_info { - struct scmi_xfer *xfer_block; unsigned long *xfer_alloc_table; spinlock_t xfer_lock; + int max_msg; + struct hlist_head free_xfers; + DECLARE_HASHTABLE(pending_xfers, SCMI_PENDING_XFERS_HT_ORDER_SZ); }; /** @@ -172,19 +180,6 @@ static inline int scmi_to_linux_errno(int errno) return -EIO; } -/** - * scmi_dump_header_dbg() - Helper to dump a message header. - * - * @dev: Device pointer corresponding to the SCMI entity - * @hdr: pointer to header. - */ -static inline void scmi_dump_header_dbg(struct device *dev, - struct scmi_msg_hdr *hdr) -{ - dev_dbg(dev, "Message ID: %x Sequence ID: %x Protocol: %x\n", - hdr->id, hdr->seq, hdr->protocol_id); -} - void scmi_notification_instance_data_set(const struct scmi_handle *handle, void *priv) { @@ -205,45 +200,189 @@ void *scmi_notification_instance_data_get(const struct scmi_handle *handle) } /** + * scmi_xfer_token_set - Reserve and set new token for the xfer at hand + * + * @minfo: Pointer to Tx/Rx Message management info based on channel type + * @xfer: The xfer to act upon + * + * Pick the next unused monotonically increasing token and set it into + * xfer->hdr.seq: picking a monotonically increasing value avoids immediate + * reuse of freshly completed or timed-out xfers, thus mitigating the risk + * of incorrect association of a late and expired xfer with a live in-flight + * transaction, both happening to re-use the same token identifier. 
+ * + * Since platform is NOT required to answer our request in-order we should + * account for a few rare but possible scenarios: + * + * - exactly 'next_token' may be NOT available so pick xfer_id >= next_token + * using find_next_zero_bit() starting from candidate next_token bit + * + * - all tokens ahead upto (MSG_TOKEN_ID_MASK - 1) are used in-flight but we + * are plenty of free tokens at start, so try a second pass using + * find_next_zero_bit() and starting from 0. + * + * X = used in-flight + * + * Normal + * ------ + * + * |- xfer_id picked + * -----------+---------------------------------------------------------- + * | | |X|X|X| | | | | | ... ... ... ... ... ... ... ... ... ... ...|X|X| + * ---------------------------------------------------------------------- + * ^ + * |- next_token + * + * Out-of-order pending at start + * ----------------------------- + * + * |- xfer_id picked, last_token fixed + * -----+---------------------------------------------------------------- + * |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... ... ...|X| | + * ---------------------------------------------------------------------- + * ^ + * |- next_token + * + * + * Out-of-order pending at end + * --------------------------- + * + * |- xfer_id picked, last_token fixed + * -----+---------------------------------------------------------------- + * |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... |X|X|X||X|X| + * ---------------------------------------------------------------------- + * ^ + * |- next_token + * + * Context: Assumes to be called with @xfer_lock already acquired. + * + * Return: 0 on Success or error + */ +static int scmi_xfer_token_set(struct scmi_xfers_info *minfo, + struct scmi_xfer *xfer) +{ + unsigned long xfer_id, next_token; + + /* + * Pick a candidate monotonic token in range [0, MSG_TOKEN_MAX - 1] + * using the pre-allocated transfer_id as a base. + * Note that the global transfer_id is shared across all message types + * so there could be holes in the allocated set of monotonic sequence + * numbers, but that is going to limit the effectiveness of the + * mitigation only in very rare limit conditions. + */ + next_token = (xfer->transfer_id & (MSG_TOKEN_MAX - 1)); + + /* Pick the next available xfer_id >= next_token */ + xfer_id = find_next_zero_bit(minfo->xfer_alloc_table, + MSG_TOKEN_MAX, next_token); + if (xfer_id == MSG_TOKEN_MAX) { + /* + * After heavily out-of-order responses, there are no free + * tokens ahead, but only at start of xfer_alloc_table so + * try again from the beginning. + */ + xfer_id = find_next_zero_bit(minfo->xfer_alloc_table, + MSG_TOKEN_MAX, 0); + /* + * Something is wrong if we got here since there can be a + * maximum number of (MSG_TOKEN_MAX - 1) in-flight messages + * but we have not found any free token [0, MSG_TOKEN_MAX - 1]. 
+ */ + if (WARN_ON_ONCE(xfer_id == MSG_TOKEN_MAX)) + return -ENOMEM; + } + + /* Update +/- last_token accordingly if we skipped some hole */ + if (xfer_id != next_token) + atomic_add((int)(xfer_id - next_token), &transfer_last_id); + + /* Set in-flight */ + set_bit(xfer_id, minfo->xfer_alloc_table); + xfer->hdr.seq = (u16)xfer_id; + + return 0; +} + +/** + * scmi_xfer_token_clear - Release the token + * + * @minfo: Pointer to Tx/Rx Message management info based on channel type + * @xfer: The xfer to act upon + */ +static inline void scmi_xfer_token_clear(struct scmi_xfers_info *minfo, + struct scmi_xfer *xfer) +{ + clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table); +} + +/** * scmi_xfer_get() - Allocate one message * * @handle: Pointer to SCMI entity handle * @minfo: Pointer to Tx/Rx Message management info based on channel type + * @set_pending: If true a monotonic token is picked and the xfer is added to + * the pending hash table. * * Helper function which is used by various message functions that are * exposed to clients of this driver for allocating a message traffic event. * - * This function can sleep depending on pending requests already in the system - * for the SCMI entity. Further, this also holds a spinlock to maintain - * integrity of internal data structures. + * Picks an xfer from the free list @free_xfers (if any available) and, if + * required, sets a monotonically increasing token and stores the inflight xfer + * into the @pending_xfers hashtable for later retrieval. + * + * The successfully initialized xfer is refcounted. + * + * Context: Holds @xfer_lock while manipulating @xfer_alloc_table and + * @free_xfers. * * Return: 0 if all went fine, else corresponding error. */ static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle, - struct scmi_xfers_info *minfo) + struct scmi_xfers_info *minfo, + bool set_pending) { - u16 xfer_id; + int ret; + unsigned long flags; struct scmi_xfer *xfer; - unsigned long flags, bit_pos; - struct scmi_info *info = handle_to_scmi_info(handle); - /* Keep the locked section as small as possible */ spin_lock_irqsave(&minfo->xfer_lock, flags); - bit_pos = find_first_zero_bit(minfo->xfer_alloc_table, - info->desc->max_msg); - if (bit_pos == info->desc->max_msg) { + if (hlist_empty(&minfo->free_xfers)) { spin_unlock_irqrestore(&minfo->xfer_lock, flags); return ERR_PTR(-ENOMEM); } - set_bit(bit_pos, minfo->xfer_alloc_table); - spin_unlock_irqrestore(&minfo->xfer_lock, flags); - xfer_id = bit_pos; + /* grab an xfer from the free_list */ + xfer = hlist_entry(minfo->free_xfers.first, struct scmi_xfer, node); + hlist_del_init(&xfer->node); - xfer = &minfo->xfer_block[xfer_id]; - xfer->hdr.seq = xfer_id; + /* + * Allocate transfer_id early so that can be used also as base for + * monotonic sequence number generation if needed. 
+ */ xfer->transfer_id = atomic_inc_return(&transfer_last_id); + if (set_pending) { + /* Pick and set monotonic token */ + ret = scmi_xfer_token_set(minfo, xfer); + if (!ret) { + hash_add(minfo->pending_xfers, &xfer->node, + xfer->hdr.seq); + xfer->pending = true; + } else { + dev_err(handle->dev, + "Failed to get monotonic token %d\n", ret); + hlist_add_head(&xfer->node, &minfo->free_xfers); + xfer = ERR_PTR(ret); + } + } + + if (!IS_ERR(xfer)) { + refcount_set(&xfer->users, 1); + atomic_set(&xfer->busy, SCMI_XFER_FREE); + } + spin_unlock_irqrestore(&minfo->xfer_lock, flags); + return xfer; } @@ -253,6 +392,9 @@ static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle, * @minfo: Pointer to Tx/Rx Message management info based on channel type * @xfer: message that was reserved by scmi_xfer_get * + * After refcount check, possibly release an xfer, clearing the token slot, + * removing xfer from @pending_xfers and putting it back into free_xfers. + * * This holds a spinlock to maintain integrity of internal data structures. */ static void @@ -260,17 +402,215 @@ __scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer) { unsigned long flags; + spin_lock_irqsave(&minfo->xfer_lock, flags); + if (refcount_dec_and_test(&xfer->users)) { + if (xfer->pending) { + scmi_xfer_token_clear(minfo, xfer); + hash_del(&xfer->node); + xfer->pending = false; + } + hlist_add_head(&xfer->node, &minfo->free_xfers); + } + spin_unlock_irqrestore(&minfo->xfer_lock, flags); +} + +/** + * scmi_xfer_lookup_unlocked - Helper to lookup an xfer_id + * + * @minfo: Pointer to Tx/Rx Message management info based on channel type + * @xfer_id: Token ID to lookup in @pending_xfers + * + * Refcounting is untouched. + * + * Context: Assumes to be called with @xfer_lock already acquired. + * + * Return: A valid xfer on Success or error otherwise + */ +static struct scmi_xfer * +scmi_xfer_lookup_unlocked(struct scmi_xfers_info *minfo, u16 xfer_id) +{ + struct scmi_xfer *xfer = NULL; + + if (test_bit(xfer_id, minfo->xfer_alloc_table)) + xfer = XFER_FIND(minfo->pending_xfers, xfer_id); + + return xfer ?: ERR_PTR(-EINVAL); +} + +/** + * scmi_msg_response_validate - Validate message type against state of related + * xfer + * + * @cinfo: A reference to the channel descriptor. + * @msg_type: Message type to check + * @xfer: A reference to the xfer to validate against @msg_type + * + * This function checks if @msg_type is congruent with the current state of + * a pending @xfer; if an asynchronous delayed response is received before the + * related synchronous response (Out-of-Order Delayed Response) the missing + * synchronous response is assumed to be OK and completed, carrying on with the + * Delayed Response: this is done to address the case in which the underlying + * SCMI transport can deliver such out-of-order responses. + * + * Context: Assumes to be called with xfer->lock already acquired. + * + * Return: 0 on Success, error otherwise + */ +static inline int scmi_msg_response_validate(struct scmi_chan_info *cinfo, + u8 msg_type, + struct scmi_xfer *xfer) +{ /* - * Keep the locked section as small as possible - * NOTE: we might escape with smp_mb and no lock here.. - * but just be conservative and symmetric. + * Even if a response was indeed expected on this slot at this point, + * a buggy platform could wrongly reply feeding us an unexpected + * delayed response we're not prepared to handle: bail-out safely + * blaming firmware. 
*/ + if (msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done) { + dev_err(cinfo->dev, + "Delayed Response for %d not expected! Buggy F/W ?\n", + xfer->hdr.seq); + return -EINVAL; + } + + switch (xfer->state) { + case SCMI_XFER_SENT_OK: + if (msg_type == MSG_TYPE_DELAYED_RESP) { + /* + * Delayed Response expected but delivered earlier. + * Assume message RESPONSE was OK and skip state. + */ + xfer->hdr.status = SCMI_SUCCESS; + xfer->state = SCMI_XFER_RESP_OK; + complete(&xfer->done); + dev_warn(cinfo->dev, + "Received valid OoO Delayed Response for %d\n", + xfer->hdr.seq); + } + break; + case SCMI_XFER_RESP_OK: + if (msg_type != MSG_TYPE_DELAYED_RESP) + return -EINVAL; + break; + case SCMI_XFER_DRESP_OK: + /* No further message expected once in SCMI_XFER_DRESP_OK */ + return -EINVAL; + } + + return 0; +} + +/** + * scmi_xfer_state_update - Update xfer state + * + * @xfer: A reference to the xfer to update + * @msg_type: Type of message being processed. + * + * Note that this message is assumed to have been already successfully validated + * by @scmi_msg_response_validate(), so here we just update the state. + * + * Context: Assumes to be called on an xfer exclusively acquired using the + * busy flag. + */ +static inline void scmi_xfer_state_update(struct scmi_xfer *xfer, u8 msg_type) +{ + xfer->hdr.type = msg_type; + + /* Unknown command types were already discarded earlier */ + if (xfer->hdr.type == MSG_TYPE_COMMAND) + xfer->state = SCMI_XFER_RESP_OK; + else + xfer->state = SCMI_XFER_DRESP_OK; +} + +static bool scmi_xfer_acquired(struct scmi_xfer *xfer) +{ + int ret; + + ret = atomic_cmpxchg(&xfer->busy, SCMI_XFER_FREE, SCMI_XFER_BUSY); + + return ret == SCMI_XFER_FREE; +} + +/** + * scmi_xfer_command_acquire - Helper to lookup and acquire a command xfer + * + * @cinfo: A reference to the channel descriptor. + * @msg_hdr: A message header to use as lookup key + * + * When a valid xfer is found for the sequence number embedded in the provided + * msg_hdr, reference counting is properly updated and exclusive access to this + * xfer is granted till released with @scmi_xfer_command_release. + * + * Return: A valid @xfer on Success or error otherwise. + */ +static inline struct scmi_xfer * +scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr) +{ + int ret; + unsigned long flags; + struct scmi_xfer *xfer; + struct scmi_info *info = handle_to_scmi_info(cinfo->handle); + struct scmi_xfers_info *minfo = &info->tx_minfo; + u8 msg_type = MSG_XTRACT_TYPE(msg_hdr); + u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr); + + /* Are we even expecting this? */ spin_lock_irqsave(&minfo->xfer_lock, flags); - clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table); + xfer = scmi_xfer_lookup_unlocked(minfo, xfer_id); + if (IS_ERR(xfer)) { + dev_err(cinfo->dev, + "Message for %d type %d is not expected!\n", + xfer_id, msg_type); + spin_unlock_irqrestore(&minfo->xfer_lock, flags); + return xfer; + } + refcount_inc(&xfer->users); spin_unlock_irqrestore(&minfo->xfer_lock, flags); + + spin_lock_irqsave(&xfer->lock, flags); + ret = scmi_msg_response_validate(cinfo, msg_type, xfer); + /* + * If a pending xfer was found which was also in a congruent state with + * the received message, acquire exclusive access to it setting the busy + * flag. + * Spins only on the rare limit condition of concurrent reception of + * RESP and DRESP for the same xfer. 
+ */ + if (!ret) { + spin_until_cond(scmi_xfer_acquired(xfer)); + scmi_xfer_state_update(xfer, msg_type); + } + spin_unlock_irqrestore(&xfer->lock, flags); + + if (ret) { + dev_err(cinfo->dev, + "Invalid message type:%d for %d - HDR:0x%X state:%d\n", + msg_type, xfer_id, msg_hdr, xfer->state); + /* On error the refcount incremented above has to be dropped */ + __scmi_xfer_put(minfo, xfer); + xfer = ERR_PTR(-EINVAL); + } + + return xfer; +} + +static inline void scmi_xfer_command_release(struct scmi_info *info, + struct scmi_xfer *xfer) +{ + atomic_set(&xfer->busy, SCMI_XFER_FREE); + __scmi_xfer_put(&info->tx_minfo, xfer); +} + +static inline void scmi_clear_channel(struct scmi_info *info, + struct scmi_chan_info *cinfo) +{ + if (info->desc->ops->clear_channel) + info->desc->ops->clear_channel(cinfo); } -static void scmi_handle_notification(struct scmi_chan_info *cinfo, u32 msg_hdr) +static void scmi_handle_notification(struct scmi_chan_info *cinfo, + u32 msg_hdr, void *priv) { struct scmi_xfer *xfer; struct device *dev = cinfo->dev; @@ -279,16 +619,17 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo, u32 msg_hdr) ktime_t ts; ts = ktime_get_boottime(); - xfer = scmi_xfer_get(cinfo->handle, minfo); + xfer = scmi_xfer_get(cinfo->handle, minfo, false); if (IS_ERR(xfer)) { dev_err(dev, "failed to get free message slot (%ld)\n", PTR_ERR(xfer)); - info->desc->ops->clear_channel(cinfo); + scmi_clear_channel(info, cinfo); return; } unpack_scmi_header(msg_hdr, &xfer->hdr); - scmi_dump_header_dbg(dev, &xfer->hdr); + if (priv) + xfer->priv = priv; info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size, xfer); scmi_notify(cinfo->handle, xfer->hdr.protocol_id, @@ -300,59 +641,41 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo, u32 msg_hdr) __scmi_xfer_put(minfo, xfer); - info->desc->ops->clear_channel(cinfo); + scmi_clear_channel(info, cinfo); } static void scmi_handle_response(struct scmi_chan_info *cinfo, - u16 xfer_id, u8 msg_type) + u32 msg_hdr, void *priv) { struct scmi_xfer *xfer; - struct device *dev = cinfo->dev; struct scmi_info *info = handle_to_scmi_info(cinfo->handle); - struct scmi_xfers_info *minfo = &info->tx_minfo; - /* Are we even expecting this? */ - if (!test_bit(xfer_id, minfo->xfer_alloc_table)) { - dev_err(dev, "message for %d is not expected!\n", xfer_id); - info->desc->ops->clear_channel(cinfo); - return; - } - - xfer = &minfo->xfer_block[xfer_id]; - /* - * Even if a response was indeed expected on this slot at this point, - * a buggy platform could wrongly reply feeding us an unexpected - * delayed response we're not prepared to handle: bail-out safely - * blaming firmware. - */ - if (unlikely(msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done)) { - dev_err(dev, - "Delayed Response for %d not expected! 
Buggy F/W ?\n", - xfer_id); - info->desc->ops->clear_channel(cinfo); - /* It was unexpected, so nobody will clear the xfer if not us */ - __scmi_xfer_put(minfo, xfer); + xfer = scmi_xfer_command_acquire(cinfo, msg_hdr); + if (IS_ERR(xfer)) { + scmi_clear_channel(info, cinfo); return; } /* rx.len could be shrunk in the sync do_xfer, so reset to maxsz */ - if (msg_type == MSG_TYPE_DELAYED_RESP) + if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) xfer->rx.len = info->desc->max_msg_size; - scmi_dump_header_dbg(dev, &xfer->hdr); - + if (priv) + xfer->priv = priv; info->desc->ops->fetch_response(cinfo, xfer); trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id, xfer->hdr.protocol_id, xfer->hdr.seq, - msg_type); + xfer->hdr.type); - if (msg_type == MSG_TYPE_DELAYED_RESP) { - info->desc->ops->clear_channel(cinfo); + if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) { + scmi_clear_channel(info, cinfo); complete(xfer->async_done); } else { complete(&xfer->done); } + + scmi_xfer_command_release(info, xfer); } /** @@ -360,6 +683,7 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo, * * @cinfo: SCMI channel info * @msg_hdr: Message header + * @priv: Transport specific private data. * * Processes one received message to appropriate transfer information and * signals completion of the transfer. @@ -367,18 +691,17 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo, * NOTE: This function will be invoked in IRQ context, hence should be * as optimal as possible. */ -void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr) +void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv) { - u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr); u8 msg_type = MSG_XTRACT_TYPE(msg_hdr); switch (msg_type) { case MSG_TYPE_NOTIFICATION: - scmi_handle_notification(cinfo, msg_hdr); + scmi_handle_notification(cinfo, msg_hdr, priv); break; case MSG_TYPE_COMMAND: case MSG_TYPE_DELAYED_RESP: - scmi_handle_response(cinfo, xfer_id, msg_type); + scmi_handle_response(cinfo, msg_hdr, priv); break; default: WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type); @@ -390,7 +713,7 @@ void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr) * xfer_put() - Release a transmit message * * @ph: Pointer to SCMI protocol handle - * @xfer: message that was reserved by scmi_xfer_get + * @xfer: message that was reserved by xfer_get_init */ static void xfer_put(const struct scmi_protocol_handle *ph, struct scmi_xfer *xfer) @@ -408,7 +731,12 @@ static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo, { struct scmi_info *info = handle_to_scmi_info(cinfo->handle); + /* + * Poll also on xfer->done so that polling can be forcibly terminated + * in case of out-of-order receptions of delayed responses + */ return info->desc->ops->poll_done(cinfo, xfer) || + try_wait_for_completion(&xfer->done) || ktime_after(ktime_get(), stop); } @@ -432,6 +760,12 @@ static int do_xfer(const struct scmi_protocol_handle *ph, struct device *dev = info->dev; struct scmi_chan_info *cinfo; + if (xfer->hdr.poll_completion && !info->desc->ops->poll_done) { + dev_warn_once(dev, + "Polling mode is not supported by transport.\n"); + return -EINVAL; + } + /* * Initialise protocol id now from protocol handle to avoid it being * overridden by mistake (or malice) by the protocol code mangling with @@ -448,6 +782,16 @@ static int do_xfer(const struct scmi_protocol_handle *ph, xfer->hdr.protocol_id, xfer->hdr.seq, xfer->hdr.poll_completion); + xfer->state = SCMI_XFER_SENT_OK; + /* + * Even though spinlocking is not needed here since 
no race is possible + * on xfer->state due to the monotonically increasing tokens allocation, + * we must anyway ensure xfer->state initialization is not re-ordered + * after the .send_message() to be sure that on the RX path an early + * ISR calling scmi_rx_callback() cannot see an old stale xfer->state. + */ + smp_mb(); + ret = info->desc->ops->send_message(cinfo, xfer); if (ret < 0) { dev_dbg(dev, "Failed to send message %d\n", ret); @@ -458,11 +802,22 @@ static int do_xfer(const struct scmi_protocol_handle *ph, ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS); spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop)); - - if (ktime_before(ktime_get(), stop)) - info->desc->ops->fetch_response(cinfo, xfer); - else + if (ktime_before(ktime_get(), stop)) { + unsigned long flags; + + /* + * Do not fetch_response if an out-of-order delayed + * response is being processed. + */ + spin_lock_irqsave(&xfer->lock, flags); + if (xfer->state == SCMI_XFER_SENT_OK) { + info->desc->ops->fetch_response(cinfo, xfer); + xfer->state = SCMI_XFER_RESP_OK; + } + spin_unlock_irqrestore(&xfer->lock, flags); + } else { ret = -ETIMEDOUT; + } } else { /* And we wait for the response. */ timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms); @@ -557,7 +912,7 @@ static int xfer_get_init(const struct scmi_protocol_handle *ph, tx_size > info->desc->max_msg_size) return -ERANGE; - xfer = scmi_xfer_get(pi->handle, minfo); + xfer = scmi_xfer_get(pi->handle, minfo, true); if (IS_ERR(xfer)) { ret = PTR_ERR(xfer); dev_err(dev, "failed to get free message slot(%d)\n", ret); @@ -566,6 +921,7 @@ static int xfer_get_init(const struct scmi_protocol_handle *ph, xfer->tx.len = tx_size; xfer->rx.len = rx_size ? : info->desc->max_msg_size; + xfer->hdr.type = MSG_TYPE_COMMAND; xfer->hdr.id = msg_id; xfer->hdr.poll_completion = false; @@ -1026,25 +1382,32 @@ static int __scmi_xfer_info_init(struct scmi_info *sinfo, const struct scmi_desc *desc = sinfo->desc; /* Pre-allocated messages, no more than what hdr.seq can support */ - if (WARN_ON(!desc->max_msg || desc->max_msg > MSG_TOKEN_MAX)) { + if (WARN_ON(!info->max_msg || info->max_msg > MSG_TOKEN_MAX)) { dev_err(dev, "Invalid maximum messages %d, not in range [1 - %lu]\n", - desc->max_msg, MSG_TOKEN_MAX); + info->max_msg, MSG_TOKEN_MAX); return -EINVAL; } - info->xfer_block = devm_kcalloc(dev, desc->max_msg, - sizeof(*info->xfer_block), GFP_KERNEL); - if (!info->xfer_block) - return -ENOMEM; + hash_init(info->pending_xfers); - info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(desc->max_msg), + /* Allocate a bitmask sized to hold MSG_TOKEN_MAX tokens */ + info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(MSG_TOKEN_MAX), sizeof(long), GFP_KERNEL); if (!info->xfer_alloc_table) return -ENOMEM; - /* Pre-initialize the buffer pointer to pre-allocated buffers */ - for (i = 0, xfer = info->xfer_block; i < desc->max_msg; i++, xfer++) { + /* + * Preallocate a number of xfers equal to max inflight messages, + * pre-initialize the buffer pointer to pre-allocated buffers and + * attach all of them to the free list + */ + INIT_HLIST_HEAD(&info->free_xfers); + for (i = 0; i < info->max_msg; i++) { + xfer = devm_kzalloc(dev, sizeof(*xfer), GFP_KERNEL); + if (!xfer) + return -ENOMEM; + xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size, GFP_KERNEL); if (!xfer->rx.buf) @@ -1052,6 +1415,10 @@ static int __scmi_xfer_info_init(struct scmi_info *sinfo, xfer->tx.buf = xfer->rx.buf; init_completion(&xfer->done); + spin_lock_init(&xfer->lock); + + /* Add 
initialized xfer to the free list */ + hlist_add_head(&xfer->node, &info->free_xfers); } spin_lock_init(&info->xfer_lock); @@ -1059,10 +1426,40 @@ static int __scmi_xfer_info_init(struct scmi_info *sinfo, return 0; } +static int scmi_channels_max_msg_configure(struct scmi_info *sinfo) +{ + const struct scmi_desc *desc = sinfo->desc; + + if (!desc->ops->get_max_msg) { + sinfo->tx_minfo.max_msg = desc->max_msg; + sinfo->rx_minfo.max_msg = desc->max_msg; + } else { + struct scmi_chan_info *base_cinfo; + + base_cinfo = idr_find(&sinfo->tx_idr, SCMI_PROTOCOL_BASE); + if (!base_cinfo) + return -EINVAL; + sinfo->tx_minfo.max_msg = desc->ops->get_max_msg(base_cinfo); + + /* RX channel is optional so can be skipped */ + base_cinfo = idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE); + if (base_cinfo) + sinfo->rx_minfo.max_msg = + desc->ops->get_max_msg(base_cinfo); + } + + return 0; +} + static int scmi_xfer_info_init(struct scmi_info *sinfo) { - int ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo); + int ret; + + ret = scmi_channels_max_msg_configure(sinfo); + if (ret) + return ret; + ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo); if (!ret && idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE)) ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo); @@ -1390,6 +1787,21 @@ void scmi_protocol_device_unrequest(const struct scmi_device_id *id_table) mutex_unlock(&scmi_requested_devices_mtx); } +static int scmi_cleanup_txrx_channels(struct scmi_info *info) +{ + int ret; + struct idr *idr = &info->tx_idr; + + ret = idr_for_each(idr, info->desc->ops->chan_free, idr); + idr_destroy(&info->tx_idr); + + idr = &info->rx_idr; + ret = idr_for_each(idr, info->desc->ops->chan_free, idr); + idr_destroy(&info->rx_idr); + + return ret; +} + static int scmi_probe(struct platform_device *pdev) { int ret; @@ -1424,13 +1836,19 @@ static int scmi_probe(struct platform_device *pdev) handle->devm_protocol_get = scmi_devm_protocol_get; handle->devm_protocol_put = scmi_devm_protocol_put; + if (desc->ops->link_supplier) { + ret = desc->ops->link_supplier(dev); + if (ret) + return ret; + } + ret = scmi_txrx_setup(info, dev, SCMI_PROTOCOL_BASE); if (ret) return ret; ret = scmi_xfer_info_init(info); if (ret) - return ret; + goto clear_txrx_setup; if (scmi_notification_init(handle)) dev_err(dev, "SCMI Notifications NOT available.\n"); @@ -1443,7 +1861,7 @@ static int scmi_probe(struct platform_device *pdev) ret = scmi_protocol_acquire(handle, SCMI_PROTOCOL_BASE); if (ret) { dev_err(dev, "unable to communicate with SCMI\n"); - return ret; + goto notification_exit; } mutex_lock(&scmi_list_mutex); @@ -1482,6 +1900,12 @@ static int scmi_probe(struct platform_device *pdev) } return 0; + +notification_exit: + scmi_notification_exit(&info->handle); +clear_txrx_setup: + scmi_cleanup_txrx_channels(info); + return ret; } void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id) @@ -1493,7 +1917,6 @@ static int scmi_remove(struct platform_device *pdev) { int ret = 0, id; struct scmi_info *info = platform_get_drvdata(pdev); - struct idr *idr = &info->tx_idr; struct device_node *child; mutex_lock(&scmi_list_mutex); @@ -1517,14 +1940,7 @@ static int scmi_remove(struct platform_device *pdev) idr_destroy(&info->active_protocols); /* Safe to free channels since no more users */ - ret = idr_for_each(idr, info->desc->ops->chan_free, idr); - idr_destroy(&info->tx_idr); - - idr = &info->rx_idr; - ret = idr_for_each(idr, info->desc->ops->chan_free, idr); - idr_destroy(&info->rx_idr); - - return ret; + return 
scmi_cleanup_txrx_channels(info); } static ssize_t protocol_version_show(struct device *dev, @@ -1575,12 +1991,15 @@ ATTRIBUTE_GROUPS(versions); /* Each compatible listed below must have descriptor associated with it */ static const struct of_device_id scmi_of_match[] = { -#ifdef CONFIG_MAILBOX +#ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX { .compatible = "arm,scmi", .data = &scmi_mailbox_desc }, #endif -#ifdef CONFIG_HAVE_ARM_SMCCC_DISCOVERY +#ifdef CONFIG_ARM_SCMI_TRANSPORT_SMC { .compatible = "arm,scmi-smc", .data = &scmi_smc_desc}, #endif +#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO + { .compatible = "arm,scmi-virtio", .data = &scmi_virtio_desc}, +#endif { /* Sentinel */ }, }; @@ -1596,10 +2015,69 @@ static struct platform_driver scmi_driver = { .remove = scmi_remove, }; +/** + * __scmi_transports_setup - Common helper to call transport-specific + * .init/.exit code if provided. + * + * @init: A flag to distinguish between init and exit. + * + * Note that, if provided, we invoke .init/.exit functions for all the + * transports currently compiled in. + * + * Return: 0 on Success. + */ +static inline int __scmi_transports_setup(bool init) +{ + int ret = 0; + const struct of_device_id *trans; + + for (trans = scmi_of_match; trans->data; trans++) { + const struct scmi_desc *tdesc = trans->data; + + if ((init && !tdesc->transport_init) || + (!init && !tdesc->transport_exit)) + continue; + + if (init) + ret = tdesc->transport_init(); + else + tdesc->transport_exit(); + + if (ret) { + pr_err("SCMI transport %s FAILED initialization!\n", + trans->compatible); + break; + } + } + + return ret; +} + +static int __init scmi_transports_init(void) +{ + return __scmi_transports_setup(true); +} + +static void __exit scmi_transports_exit(void) +{ + __scmi_transports_setup(false); +} + static int __init scmi_driver_init(void) { + int ret; + + /* Bail out if no SCMI transport was configured */ + if (WARN_ON(!IS_ENABLED(CONFIG_ARM_SCMI_HAVE_TRANSPORT))) + return -EINVAL; + scmi_bus_init(); + /* Initialize any compiled-in transport which provided an init/exit */ + ret = scmi_transports_init(); + if (ret) + return ret; + scmi_base_register(); scmi_clock_register(); @@ -1628,6 +2106,8 @@ static void __exit scmi_driver_exit(void) scmi_bus_exit(); + scmi_transports_exit(); + platform_driver_unregister(&scmi_driver); } module_exit(scmi_driver_exit); diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c index e3dcb58314ae..e09eb12bf421 100644 --- a/drivers/firmware/arm_scmi/mailbox.c +++ b/drivers/firmware/arm_scmi/mailbox.c @@ -43,7 +43,7 @@ static void rx_callback(struct mbox_client *cl, void *m) { struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl); - scmi_rx_callback(smbox->cinfo, shmem_read_header(smbox->shmem)); + scmi_rx_callback(smbox->cinfo, shmem_read_header(smbox->shmem), NULL); } static bool mailbox_chan_available(struct device *dev, int idx) diff --git a/drivers/firmware/arm_scmi/msg.c b/drivers/firmware/arm_scmi/msg.c new file mode 100644 index 000000000000..d33a704e5814 --- /dev/null +++ b/drivers/firmware/arm_scmi/msg.c @@ -0,0 +1,111 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * For transports using message passing. + * + * Derived from shm.c. + * + * Copyright (C) 2019-2021 ARM Ltd. 
+ * Copyright (C) 2020-2021 OpenSynergy GmbH + */ + +#include <linux/types.h> + +#include "common.h" + +/* + * struct scmi_msg_payld - Transport SDU layout + * + * The SCMI specification requires all parameters, message headers, return + * arguments or any protocol data to be expressed in little endian format only. + */ +struct scmi_msg_payld { + __le32 msg_header; + __le32 msg_payload[]; +}; + +/** + * msg_command_size() - Actual size of transport SDU for command. + * + * @xfer: message which core has prepared for sending + * + * Return: transport SDU size. + */ +size_t msg_command_size(struct scmi_xfer *xfer) +{ + return sizeof(struct scmi_msg_payld) + xfer->tx.len; +} + +/** + * msg_response_size() - Maximum size of transport SDU for response. + * + * @xfer: message which core has prepared for sending + * + * Return: transport SDU size. + */ +size_t msg_response_size(struct scmi_xfer *xfer) +{ + return sizeof(struct scmi_msg_payld) + sizeof(__le32) + xfer->rx.len; +} + +/** + * msg_tx_prepare() - Set up transport SDU for command. + * + * @msg: transport SDU for command + * @xfer: message which is being sent + */ +void msg_tx_prepare(struct scmi_msg_payld *msg, struct scmi_xfer *xfer) +{ + msg->msg_header = cpu_to_le32(pack_scmi_header(&xfer->hdr)); + if (xfer->tx.buf) + memcpy(msg->msg_payload, xfer->tx.buf, xfer->tx.len); +} + +/** + * msg_read_header() - Read SCMI header from transport SDU. + * + * @msg: transport SDU + * + * Return: SCMI header + */ +u32 msg_read_header(struct scmi_msg_payld *msg) +{ + return le32_to_cpu(msg->msg_header); +} + +/** + * msg_fetch_response() - Fetch response SCMI payload from transport SDU. + * + * @msg: transport SDU with response + * @len: transport SDU size + * @xfer: message being responded to + */ +void msg_fetch_response(struct scmi_msg_payld *msg, size_t len, + struct scmi_xfer *xfer) +{ + size_t prefix_len = sizeof(*msg) + sizeof(msg->msg_payload[0]); + + xfer->hdr.status = le32_to_cpu(msg->msg_payload[0]); + xfer->rx.len = min_t(size_t, xfer->rx.len, + len >= prefix_len ? len - prefix_len : 0); + + /* Take a copy to the rx buffer.. */ + memcpy(xfer->rx.buf, &msg->msg_payload[1], xfer->rx.len); +} + +/** + * msg_fetch_notification() - Fetch notification payload from transport SDU. + * + * @msg: transport SDU with notification + * @len: transport SDU size + * @max_len: maximum SCMI payload size to fetch + * @xfer: notification message + */ +void msg_fetch_notification(struct scmi_msg_payld *msg, size_t len, + size_t max_len, struct scmi_xfer *xfer) +{ + xfer->rx.len = min_t(size_t, max_len, + len >= sizeof(*msg) ? len - sizeof(*msg) : 0); + + /* Take a copy to the rx buffer.. 
*/ + memcpy(xfer->rx.buf, msg->msg_payload, xfer->rx.len); +} diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c index bed5596c7209..4effecc3bb46 100644 --- a/drivers/firmware/arm_scmi/smc.c +++ b/drivers/firmware/arm_scmi/smc.c @@ -154,7 +154,8 @@ static int smc_send_message(struct scmi_chan_info *cinfo, if (scmi_info->irq) wait_for_completion(&scmi_info->tx_complete); - scmi_rx_callback(scmi_info->cinfo, shmem_read_header(scmi_info->shmem)); + scmi_rx_callback(scmi_info->cinfo, + shmem_read_header(scmi_info->shmem), NULL); mutex_unlock(&scmi_info->shmem_lock); diff --git a/drivers/firmware/arm_scmi/virtio.c b/drivers/firmware/arm_scmi/virtio.c new file mode 100644 index 000000000000..224577f86928 --- /dev/null +++ b/drivers/firmware/arm_scmi/virtio.c @@ -0,0 +1,491 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Virtio Transport driver for Arm System Control and Management Interface + * (SCMI). + * + * Copyright (C) 2020-2021 OpenSynergy. + * Copyright (C) 2021 ARM Ltd. + */ + +/** + * DOC: Theory of Operation + * + * The scmi-virtio transport implements a driver for the virtio SCMI device. + * + * There is one Tx channel (virtio cmdq, A2P channel) and at most one Rx + * channel (virtio eventq, P2A channel). Each channel is implemented through a + * virtqueue. Access to each virtqueue is protected by spinlocks. + */ + +#include <linux/errno.h> +#include <linux/slab.h> +#include <linux/virtio.h> +#include <linux/virtio_config.h> + +#include <uapi/linux/virtio_ids.h> +#include <uapi/linux/virtio_scmi.h> + +#include "common.h" + +#define VIRTIO_SCMI_MAX_MSG_SIZE 128 /* Value may be increased. */ +#define VIRTIO_SCMI_MAX_PDU_SIZE \ + (VIRTIO_SCMI_MAX_MSG_SIZE + SCMI_MSG_MAX_PROT_OVERHEAD) +#define DESCRIPTORS_PER_TX_MSG 2 + +/** + * struct scmi_vio_channel - Transport channel information + * + * @vqueue: Associated virtqueue + * @cinfo: SCMI Tx or Rx channel + * @free_list: List of unused scmi_vio_msg, maintained for Tx channels only + * @is_rx: Whether channel is an Rx channel + * @ready: Whether transport user is ready to hear about channel + * @max_msg: Maximum number of pending messages for this channel. + * @lock: Protects access to all members except ready. + * @ready_lock: Protects access to ready. If required, it must be taken before + * lock. + */ +struct scmi_vio_channel { + struct virtqueue *vqueue; + struct scmi_chan_info *cinfo; + struct list_head free_list; + bool is_rx; + bool ready; + unsigned int max_msg; + /* lock to protect access to all members except ready. */ + spinlock_t lock; + /* lock to rotects access to ready flag. 
*/ + spinlock_t ready_lock; +}; + +/** + * struct scmi_vio_msg - Transport PDU information + * + * @request: SDU used for commands + * @input: SDU used for (delayed) responses and notifications + * @list: List which scmi_vio_msg may be part of + * @rx_len: Input SDU size in bytes, once input has been received + */ +struct scmi_vio_msg { + struct scmi_msg_payld *request; + struct scmi_msg_payld *input; + struct list_head list; + unsigned int rx_len; +}; + +/* Only one SCMI VirtIO device can possibly exist */ +static struct virtio_device *scmi_vdev; + +static bool scmi_vio_have_vq_rx(struct virtio_device *vdev) +{ + return virtio_has_feature(vdev, VIRTIO_SCMI_F_P2A_CHANNELS); +} + +static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch, + struct scmi_vio_msg *msg) +{ + struct scatterlist sg_in; + int rc; + unsigned long flags; + + sg_init_one(&sg_in, msg->input, VIRTIO_SCMI_MAX_PDU_SIZE); + + spin_lock_irqsave(&vioch->lock, flags); + + rc = virtqueue_add_inbuf(vioch->vqueue, &sg_in, 1, msg, GFP_ATOMIC); + if (rc) + dev_err_once(vioch->cinfo->dev, + "failed to add to virtqueue (%d)\n", rc); + else + virtqueue_kick(vioch->vqueue); + + spin_unlock_irqrestore(&vioch->lock, flags); + + return rc; +} + +static void scmi_finalize_message(struct scmi_vio_channel *vioch, + struct scmi_vio_msg *msg) +{ + if (vioch->is_rx) { + scmi_vio_feed_vq_rx(vioch, msg); + } else { + unsigned long flags; + + spin_lock_irqsave(&vioch->lock, flags); + list_add(&msg->list, &vioch->free_list); + spin_unlock_irqrestore(&vioch->lock, flags); + } +} + +static void scmi_vio_complete_cb(struct virtqueue *vqueue) +{ + unsigned long ready_flags; + unsigned long flags; + unsigned int length; + struct scmi_vio_channel *vioch; + struct scmi_vio_msg *msg; + bool cb_enabled = true; + + if (WARN_ON_ONCE(!vqueue->vdev->priv)) + return; + vioch = &((struct scmi_vio_channel *)vqueue->vdev->priv)[vqueue->index]; + + for (;;) { + spin_lock_irqsave(&vioch->ready_lock, ready_flags); + + if (!vioch->ready) { + if (!cb_enabled) + (void)virtqueue_enable_cb(vqueue); + goto unlock_ready_out; + } + + spin_lock_irqsave(&vioch->lock, flags); + if (cb_enabled) { + virtqueue_disable_cb(vqueue); + cb_enabled = false; + } + msg = virtqueue_get_buf(vqueue, &length); + if (!msg) { + if (virtqueue_enable_cb(vqueue)) + goto unlock_out; + cb_enabled = true; + } + spin_unlock_irqrestore(&vioch->lock, flags); + + if (msg) { + msg->rx_len = length; + scmi_rx_callback(vioch->cinfo, + msg_read_header(msg->input), msg); + + scmi_finalize_message(vioch, msg); + } + + spin_unlock_irqrestore(&vioch->ready_lock, ready_flags); + } + +unlock_out: + spin_unlock_irqrestore(&vioch->lock, flags); +unlock_ready_out: + spin_unlock_irqrestore(&vioch->ready_lock, ready_flags); +} + +static const char *const scmi_vio_vqueue_names[] = { "tx", "rx" }; + +static vq_callback_t *scmi_vio_complete_callbacks[] = { + scmi_vio_complete_cb, + scmi_vio_complete_cb +}; + +static unsigned int virtio_get_max_msg(struct scmi_chan_info *base_cinfo) +{ + struct scmi_vio_channel *vioch = base_cinfo->transport_info; + + return vioch->max_msg; +} + +static int virtio_link_supplier(struct device *dev) +{ + if (!scmi_vdev) { + dev_notice_once(dev, + "Deferring probe after not finding a bound scmi-virtio device\n"); + return -EPROBE_DEFER; + } + + if (!device_link_add(dev, &scmi_vdev->dev, + DL_FLAG_AUTOREMOVE_CONSUMER)) { + dev_err(dev, "Adding link to supplier virtio device failed\n"); + return -ECANCELED; + } + + return 0; +} + +static bool virtio_chan_available(struct device *dev, int 
idx) +{ + struct scmi_vio_channel *channels, *vioch = NULL; + + if (WARN_ON_ONCE(!scmi_vdev)) + return false; + + channels = (struct scmi_vio_channel *)scmi_vdev->priv; + + switch (idx) { + case VIRTIO_SCMI_VQ_TX: + vioch = &channels[VIRTIO_SCMI_VQ_TX]; + break; + case VIRTIO_SCMI_VQ_RX: + if (scmi_vio_have_vq_rx(scmi_vdev)) + vioch = &channels[VIRTIO_SCMI_VQ_RX]; + break; + default: + return false; + } + + return vioch && !vioch->cinfo; +} + +static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, + bool tx) +{ + unsigned long flags; + struct scmi_vio_channel *vioch; + int index = tx ? VIRTIO_SCMI_VQ_TX : VIRTIO_SCMI_VQ_RX; + int i; + + if (!scmi_vdev) + return -EPROBE_DEFER; + + vioch = &((struct scmi_vio_channel *)scmi_vdev->priv)[index]; + + for (i = 0; i < vioch->max_msg; i++) { + struct scmi_vio_msg *msg; + + msg = devm_kzalloc(cinfo->dev, sizeof(*msg), GFP_KERNEL); + if (!msg) + return -ENOMEM; + + if (tx) { + msg->request = devm_kzalloc(cinfo->dev, + VIRTIO_SCMI_MAX_PDU_SIZE, + GFP_KERNEL); + if (!msg->request) + return -ENOMEM; + } + + msg->input = devm_kzalloc(cinfo->dev, VIRTIO_SCMI_MAX_PDU_SIZE, + GFP_KERNEL); + if (!msg->input) + return -ENOMEM; + + if (tx) { + spin_lock_irqsave(&vioch->lock, flags); + list_add_tail(&msg->list, &vioch->free_list); + spin_unlock_irqrestore(&vioch->lock, flags); + } else { + scmi_vio_feed_vq_rx(vioch, msg); + } + } + + spin_lock_irqsave(&vioch->lock, flags); + cinfo->transport_info = vioch; + /* Indirectly setting channel not available any more */ + vioch->cinfo = cinfo; + spin_unlock_irqrestore(&vioch->lock, flags); + + spin_lock_irqsave(&vioch->ready_lock, flags); + vioch->ready = true; + spin_unlock_irqrestore(&vioch->ready_lock, flags); + + return 0; +} + +static int virtio_chan_free(int id, void *p, void *data) +{ + unsigned long flags; + struct scmi_chan_info *cinfo = p; + struct scmi_vio_channel *vioch = cinfo->transport_info; + + spin_lock_irqsave(&vioch->ready_lock, flags); + vioch->ready = false; + spin_unlock_irqrestore(&vioch->ready_lock, flags); + + scmi_free_channel(cinfo, data, id); + + spin_lock_irqsave(&vioch->lock, flags); + vioch->cinfo = NULL; + spin_unlock_irqrestore(&vioch->lock, flags); + + return 0; +} + +static int virtio_send_message(struct scmi_chan_info *cinfo, + struct scmi_xfer *xfer) +{ + struct scmi_vio_channel *vioch = cinfo->transport_info; + struct scatterlist sg_out; + struct scatterlist sg_in; + struct scatterlist *sgs[DESCRIPTORS_PER_TX_MSG] = { &sg_out, &sg_in }; + unsigned long flags; + int rc; + struct scmi_vio_msg *msg; + + spin_lock_irqsave(&vioch->lock, flags); + + if (list_empty(&vioch->free_list)) { + spin_unlock_irqrestore(&vioch->lock, flags); + return -EBUSY; + } + + msg = list_first_entry(&vioch->free_list, typeof(*msg), list); + list_del(&msg->list); + + msg_tx_prepare(msg->request, xfer); + + sg_init_one(&sg_out, msg->request, msg_command_size(xfer)); + sg_init_one(&sg_in, msg->input, msg_response_size(xfer)); + + rc = virtqueue_add_sgs(vioch->vqueue, sgs, 1, 1, msg, GFP_ATOMIC); + if (rc) { + list_add(&msg->list, &vioch->free_list); + dev_err_once(vioch->cinfo->dev, + "%s() failed to add to virtqueue (%d)\n", __func__, + rc); + } else { + virtqueue_kick(vioch->vqueue); + } + + spin_unlock_irqrestore(&vioch->lock, flags); + + return rc; +} + +static void virtio_fetch_response(struct scmi_chan_info *cinfo, + struct scmi_xfer *xfer) +{ + struct scmi_vio_msg *msg = xfer->priv; + + if (msg) { + msg_fetch_response(msg->input, msg->rx_len, xfer); + xfer->priv = NULL; + } +} + 
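/*
 * Illustrative sketch, not part of the patch: the life of one SCMI command
 * on the Tx virtqueue (cmdq), collapsed into a single function for
 * readability. In the driver itself the completion half runs from
 * scmi_vio_complete_cb() and the SCMI core drives the waiting, but the data
 * flow through scmi_vio_msg and the xfer->priv cookie is the same; the
 * function name below is hypothetical.
 */
static void example_cmdq_round_trip(struct scmi_vio_channel *vioch,
				    struct scmi_xfer *xfer)
{
	struct scmi_vio_msg *msg;
	unsigned int length;

	/* Queue one OUT descriptor (request) and one IN descriptor (input). */
	virtio_send_message(vioch->cinfo, xfer);

	/*
	 * The device fills msg->input and returns the buffer as "used".
	 * Polled here for brevity; the real driver is interrupt driven.
	 */
	do {
		msg = virtqueue_get_buf(vioch->vqueue, &length);
	} while (!msg);
	msg->rx_len = length;

	/*
	 * The core matches the message header to the pending xfer and keeps
	 * the msg pointer as the priv cookie (it ends up in xfer->priv).
	 */
	scmi_rx_callback(vioch->cinfo, msg_read_header(msg->input), msg);

	/*
	 * The core then asks the transport for the payload; the transport
	 * copies it out of msg->input and drops the priv reference. The msg
	 * is afterwards recycled via scmi_finalize_message().
	 */
	virtio_fetch_response(vioch->cinfo, xfer);
}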
+static void virtio_fetch_notification(struct scmi_chan_info *cinfo, + size_t max_len, struct scmi_xfer *xfer) +{ + struct scmi_vio_msg *msg = xfer->priv; + + if (msg) { + msg_fetch_notification(msg->input, msg->rx_len, max_len, xfer); + xfer->priv = NULL; + } +} + +static const struct scmi_transport_ops scmi_virtio_ops = { + .link_supplier = virtio_link_supplier, + .chan_available = virtio_chan_available, + .chan_setup = virtio_chan_setup, + .chan_free = virtio_chan_free, + .get_max_msg = virtio_get_max_msg, + .send_message = virtio_send_message, + .fetch_response = virtio_fetch_response, + .fetch_notification = virtio_fetch_notification, +}; + +static int scmi_vio_probe(struct virtio_device *vdev) +{ + struct device *dev = &vdev->dev; + struct scmi_vio_channel *channels; + bool have_vq_rx; + int vq_cnt; + int i; + int ret; + struct virtqueue *vqs[VIRTIO_SCMI_VQ_MAX_CNT]; + + /* Only one SCMI VirtiO device allowed */ + if (scmi_vdev) + return -EINVAL; + + have_vq_rx = scmi_vio_have_vq_rx(vdev); + vq_cnt = have_vq_rx ? VIRTIO_SCMI_VQ_MAX_CNT : 1; + + channels = devm_kcalloc(dev, vq_cnt, sizeof(*channels), GFP_KERNEL); + if (!channels) + return -ENOMEM; + + if (have_vq_rx) + channels[VIRTIO_SCMI_VQ_RX].is_rx = true; + + ret = virtio_find_vqs(vdev, vq_cnt, vqs, scmi_vio_complete_callbacks, + scmi_vio_vqueue_names, NULL); + if (ret) { + dev_err(dev, "Failed to get %d virtqueue(s)\n", vq_cnt); + return ret; + } + + for (i = 0; i < vq_cnt; i++) { + unsigned int sz; + + spin_lock_init(&channels[i].lock); + spin_lock_init(&channels[i].ready_lock); + INIT_LIST_HEAD(&channels[i].free_list); + channels[i].vqueue = vqs[i]; + + sz = virtqueue_get_vring_size(channels[i].vqueue); + /* Tx messages need multiple descriptors. */ + if (!channels[i].is_rx) + sz /= DESCRIPTORS_PER_TX_MSG; + + if (sz > MSG_TOKEN_MAX) { + dev_info_once(dev, + "%s virtqueue could hold %d messages. Only %ld allowed to be pending.\n", + channels[i].is_rx ? 
"rx" : "tx", + sz, MSG_TOKEN_MAX); + sz = MSG_TOKEN_MAX; + } + channels[i].max_msg = sz; + } + + vdev->priv = channels; + scmi_vdev = vdev; + + return 0; +} + +static void scmi_vio_remove(struct virtio_device *vdev) +{ + vdev->config->reset(vdev); + vdev->config->del_vqs(vdev); + scmi_vdev = NULL; +} + +static int scmi_vio_validate(struct virtio_device *vdev) +{ + if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) { + dev_err(&vdev->dev, + "device does not comply with spec version 1.x\n"); + return -EINVAL; + } + + return 0; +} + +static unsigned int features[] = { + VIRTIO_SCMI_F_P2A_CHANNELS, +}; + +static const struct virtio_device_id id_table[] = { + { VIRTIO_ID_SCMI, VIRTIO_DEV_ANY_ID }, + { 0 } +}; + +static struct virtio_driver virtio_scmi_driver = { + .driver.name = "scmi-virtio", + .driver.owner = THIS_MODULE, + .feature_table = features, + .feature_table_size = ARRAY_SIZE(features), + .id_table = id_table, + .probe = scmi_vio_probe, + .remove = scmi_vio_remove, + .validate = scmi_vio_validate, +}; + +static int __init virtio_scmi_init(void) +{ + return register_virtio_driver(&virtio_scmi_driver); +} + +static void __exit virtio_scmi_exit(void) +{ + unregister_virtio_driver(&virtio_scmi_driver); +} + +const struct scmi_desc scmi_virtio_desc = { + .transport_init = virtio_scmi_init, + .transport_exit = virtio_scmi_exit, + .ops = &scmi_virtio_ops, + .max_rx_timeout_ms = 60000, /* for non-realtime virtio devices */ + .max_msg = 0, /* overridden by virtio_get_max_msg() */ + .max_msg_size = VIRTIO_SCMI_MAX_MSG_SIZE, +}; diff --git a/drivers/firmware/broadcom/tee_bnxt_fw.c b/drivers/firmware/broadcom/tee_bnxt_fw.c index ed10da5313e8..a5bf4c3f6dc7 100644 --- a/drivers/firmware/broadcom/tee_bnxt_fw.c +++ b/drivers/firmware/broadcom/tee_bnxt_fw.c @@ -212,10 +212,9 @@ static int tee_bnxt_fw_probe(struct device *dev) pvt_data.dev = dev; - fw_shm_pool = tee_shm_alloc(pvt_data.ctx, MAX_SHM_MEM_SZ, - TEE_SHM_MAPPED | TEE_SHM_DMA_BUF); + fw_shm_pool = tee_shm_alloc_kernel_buf(pvt_data.ctx, MAX_SHM_MEM_SZ); if (IS_ERR(fw_shm_pool)) { - dev_err(pvt_data.dev, "tee_shm_alloc failed\n"); + dev_err(pvt_data.dev, "tee_shm_alloc_kernel_buf failed\n"); err = PTR_ERR(fw_shm_pool); goto out_sess; } @@ -242,6 +241,14 @@ static int tee_bnxt_fw_remove(struct device *dev) return 0; } +static void tee_bnxt_fw_shutdown(struct device *dev) +{ + tee_shm_free(pvt_data.fw_shm_pool); + tee_client_close_session(pvt_data.ctx, pvt_data.session_id); + tee_client_close_context(pvt_data.ctx); + pvt_data.ctx = NULL; +} + static const struct tee_client_device_id tee_bnxt_fw_id_table[] = { {UUID_INIT(0x6272636D, 0x2019, 0x0716, 0x42, 0x43, 0x4D, 0x5F, 0x53, 0x43, 0x48, 0x49)}, @@ -257,6 +264,7 @@ static struct tee_client_driver tee_bnxt_fw_driver = { .bus = &tee_bus_type, .probe = tee_bnxt_fw_probe, .remove = tee_bnxt_fw_remove, + .shutdown = tee_bnxt_fw_shutdown, }, }; diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c index 4d5421d14a41..940ddf916202 100644 --- a/drivers/firmware/dmi-id.c +++ b/drivers/firmware/dmi-id.c @@ -73,6 +73,10 @@ static void ascii_filter(char *d, const char *s) static ssize_t get_modalias(char *buffer, size_t buffer_size) { + /* + * Note new fields need to be added at the end to keep compatibility + * with udev's hwdb which does matches on "`cat dmi/id/modalias`*". 
+ */ static const struct mafield { const char *prefix; int field; @@ -85,13 +89,13 @@ static ssize_t get_modalias(char *buffer, size_t buffer_size) { "svn", DMI_SYS_VENDOR }, { "pn", DMI_PRODUCT_NAME }, { "pvr", DMI_PRODUCT_VERSION }, - { "sku", DMI_PRODUCT_SKU }, { "rvn", DMI_BOARD_VENDOR }, { "rn", DMI_BOARD_NAME }, { "rvr", DMI_BOARD_VERSION }, { "cvn", DMI_CHASSIS_VENDOR }, { "ct", DMI_CHASSIS_TYPE }, { "cvr", DMI_CHASSIS_VERSION }, + { "sku", DMI_PRODUCT_SKU }, { NULL, DMI_NONE } }; diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c index ea7ca74fc173..73bdbd207e7a 100644 --- a/drivers/firmware/efi/cper.c +++ b/drivers/firmware/efi/cper.c @@ -221,7 +221,7 @@ static int cper_mem_err_location(struct cper_mem_err_compact *mem, char *msg) return 0; n = 0; - len = CPER_REC_LEN - 1; + len = CPER_REC_LEN; if (mem->validation_bits & CPER_MEM_VALID_NODE) n += scnprintf(msg + n, len - n, "node: %d ", mem->node); if (mem->validation_bits & CPER_MEM_VALID_CARD) @@ -258,13 +258,12 @@ static int cper_mem_err_location(struct cper_mem_err_compact *mem, char *msg) n += scnprintf(msg + n, len - n, "responder_id: 0x%016llx ", mem->responder_id); if (mem->validation_bits & CPER_MEM_VALID_TARGET_ID) - scnprintf(msg + n, len - n, "target_id: 0x%016llx ", - mem->target_id); + n += scnprintf(msg + n, len - n, "target_id: 0x%016llx ", + mem->target_id); if (mem->validation_bits & CPER_MEM_VALID_CHIP_ID) - scnprintf(msg + n, len - n, "chip_id: %d ", - mem->extended >> CPER_MEM_CHIP_ID_SHIFT); + n += scnprintf(msg + n, len - n, "chip_id: %d ", + mem->extended >> CPER_MEM_CHIP_ID_SHIFT); - msg[n] = '\0'; return n; } @@ -633,7 +632,7 @@ int cper_estatus_check(const struct acpi_hest_generic_status *estatus) data_len = estatus->data_length; apei_estatus_for_each_section(estatus, gdata) { - if (sizeof(struct acpi_hest_generic_data) > data_len) + if (acpi_hest_get_size(gdata) > data_len) return -EINVAL; record_size = acpi_hest_get_record_size(gdata); diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c index 7bf0a7acae5e..2363fee9211c 100644 --- a/drivers/firmware/efi/libstub/arm64-stub.c +++ b/drivers/firmware/efi/libstub/arm64-stub.c @@ -35,15 +35,48 @@ efi_status_t check_platform_features(void) } /* - * Although relocatable kernels can fix up the misalignment with respect to - * MIN_KIMG_ALIGN, the resulting virtual text addresses are subtly out of - * sync with those recorded in the vmlinux when kaslr is disabled but the - * image required relocation anyway. Therefore retain 2M alignment unless - * KASLR is in use. + * Distro versions of GRUB may ignore the BSS allocation entirely (i.e., fail + * to provide space, and fail to zero it). Check for this condition by double + * checking that the first and the last byte of the image are covered by the + * same EFI memory map entry. */ -static u64 min_kimg_align(void) +static bool check_image_region(u64 base, u64 size) { - return efi_nokaslr ? 
MIN_KIMG_ALIGN : EFI_KIMG_ALIGN; + unsigned long map_size, desc_size, buff_size; + efi_memory_desc_t *memory_map; + struct efi_boot_memmap map; + efi_status_t status; + bool ret = false; + int map_offset; + + map.map = &memory_map; + map.map_size = &map_size; + map.desc_size = &desc_size; + map.desc_ver = NULL; + map.key_ptr = NULL; + map.buff_size = &buff_size; + + status = efi_get_memory_map(&map); + if (status != EFI_SUCCESS) + return false; + + for (map_offset = 0; map_offset < map_size; map_offset += desc_size) { + efi_memory_desc_t *md = (void *)memory_map + map_offset; + u64 end = md->phys_addr + md->num_pages * EFI_PAGE_SIZE; + + /* + * Find the region that covers base, and return whether + * it covers base+size bytes. + */ + if (base >= md->phys_addr && base < end) { + ret = (base + size) <= end; + break; + } + } + + efi_bs_call(free_pool, memory_map); + + return ret; } efi_status_t handle_kernel_image(unsigned long *image_addr, @@ -56,6 +89,16 @@ efi_status_t handle_kernel_image(unsigned long *image_addr, unsigned long kernel_size, kernel_memsize = 0; u32 phys_seed = 0; + /* + * Although relocatable kernels can fix up the misalignment with + * respect to MIN_KIMG_ALIGN, the resulting virtual text addresses are + * subtly out of sync with those recorded in the vmlinux when kaslr is + * disabled but the image required relocation anyway. Therefore retain + * 2M alignment if KASLR was explicitly disabled, even if it was not + * going to be activated to begin with. + */ + u64 min_kimg_align = efi_nokaslr ? MIN_KIMG_ALIGN : EFI_KIMG_ALIGN; + if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) { if (!efi_nokaslr) { status = efi_get_random_bytes(sizeof(phys_seed), @@ -76,6 +119,10 @@ efi_status_t handle_kernel_image(unsigned long *image_addr, if (image->image_base != _text) efi_err("FIRMWARE BUG: efi_loaded_image_t::image_base has bogus value\n"); + if (!IS_ALIGNED((u64)_text, EFI_KIMG_ALIGN)) + efi_err("FIRMWARE BUG: kernel image not aligned on %ldk boundary\n", + EFI_KIMG_ALIGN >> 10); + kernel_size = _edata - _text; kernel_memsize = kernel_size + (_end - _edata); *reserve_size = kernel_memsize; @@ -85,14 +132,18 @@ efi_status_t handle_kernel_image(unsigned long *image_addr, * If KASLR is enabled, and we have some randomness available, * locate the kernel at a randomized offset in physical memory. */ - status = efi_random_alloc(*reserve_size, min_kimg_align(), + status = efi_random_alloc(*reserve_size, min_kimg_align, reserve_addr, phys_seed); + if (status != EFI_SUCCESS) + efi_warn("efi_random_alloc() failed: 0x%lx\n", status); } else { status = EFI_OUT_OF_RESOURCES; } if (status != EFI_SUCCESS) { - if (IS_ALIGNED((u64)_text, min_kimg_align())) { + if (!check_image_region((u64)_text, kernel_memsize)) { + efi_err("FIRMWARE BUG: Image BSS overlaps adjacent EFI memory region\n"); + } else if (IS_ALIGNED((u64)_text, min_kimg_align)) { /* * Just execute from wherever we were loaded by the * UEFI PE/COFF loader if the alignment is suitable. 
@@ -103,7 +154,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr, } status = efi_allocate_pages_aligned(*reserve_size, reserve_addr, - ULONG_MAX, min_kimg_align()); + ULONG_MAX, min_kimg_align); if (status != EFI_SUCCESS) { efi_err("Failed to relocate kernel\n"); diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c index ae87dded989d..d489bdc645fe 100644 --- a/drivers/firmware/efi/libstub/efi-stub-helper.c +++ b/drivers/firmware/efi/libstub/efi-stub-helper.c @@ -7,7 +7,7 @@ * Copyright 2011 Intel Corporation; author Matt Fleming */ -#include <stdarg.h> +#include <linux/stdarg.h> #include <linux/ctype.h> #include <linux/efi.h> diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c index a408df474d83..724155b9e10d 100644 --- a/drivers/firmware/efi/libstub/randomalloc.c +++ b/drivers/firmware/efi/libstub/randomalloc.c @@ -30,6 +30,8 @@ static unsigned long get_entry_num_slots(efi_memory_desc_t *md, region_end = min(md->phys_addr + md->num_pages * EFI_PAGE_SIZE - 1, (u64)ULONG_MAX); + if (region_end < size) + return 0; first_slot = round_up(md->phys_addr, align); last_slot = round_down(region_end - size + 1, align); diff --git a/drivers/firmware/efi/libstub/vsprintf.c b/drivers/firmware/efi/libstub/vsprintf.c index 1088e288c04d..71c71c222346 100644 --- a/drivers/firmware/efi/libstub/vsprintf.c +++ b/drivers/firmware/efi/libstub/vsprintf.c @@ -10,7 +10,7 @@ * Oh, it's a waste of space, but oh-so-yummy for debugging. */ -#include <stdarg.h> +#include <linux/stdarg.h> #include <linux/compiler.h> #include <linux/ctype.h> diff --git a/drivers/firmware/google/coreboot_table.c b/drivers/firmware/google/coreboot_table.c index dc83ea118c67..c52bcaa9def6 100644 --- a/drivers/firmware/google/coreboot_table.c +++ b/drivers/firmware/google/coreboot_table.c @@ -44,15 +44,13 @@ static int coreboot_bus_probe(struct device *dev) return ret; } -static int coreboot_bus_remove(struct device *dev) +static void coreboot_bus_remove(struct device *dev) { struct coreboot_device *device = CB_DEV(dev); struct coreboot_driver *driver = CB_DRV(dev->driver); if (driver->remove) driver->remove(device); - - return 0; } static struct bus_type coreboot_bus_type = { diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c index 7127a04bca19..6e9788324fea 100644 --- a/drivers/firmware/iscsi_ibft.c +++ b/drivers/firmware/iscsi_ibft.c @@ -84,9 +84,7 @@ MODULE_DESCRIPTION("sysfs interface to BIOS iBFT information"); MODULE_LICENSE("GPL"); MODULE_VERSION(IBFT_ISCSI_VERSION); -#ifndef CONFIG_ISCSI_IBFT_FIND -struct acpi_table_ibft *ibft_addr; -#endif +static struct acpi_table_ibft *ibft_addr; struct ibft_hdr { u8 id; @@ -849,7 +847,21 @@ static void __init acpi_find_ibft_region(void) { } #endif - +#ifdef CONFIG_ISCSI_IBFT_FIND +static int __init acpi_find_isa_region(void) +{ + if (ibft_phys_addr) { + ibft_addr = isa_bus_to_virt(ibft_phys_addr); + return 0; + } + return -ENODEV; +} +#else +static int __init acpi_find_isa_region(void) +{ + return -ENODEV; +} +#endif /* * ibft_init() - creates sysfs tree entries for the iBFT data. */ @@ -858,11 +870,11 @@ static int __init ibft_init(void) int rc = 0; /* - As on UEFI systems the setup_arch()/find_ibft_region() + As on UEFI systems the setup_arch()/reserve_ibft_region() is called before ACPI tables are parsed and it only does legacy finding. 
*/ - if (!ibft_addr) + if (acpi_find_isa_region()) acpi_find_ibft_region(); if (ibft_addr) { diff --git a/drivers/firmware/iscsi_ibft_find.c b/drivers/firmware/iscsi_ibft_find.c index 64bb94523281..94b49ccd23ac 100644 --- a/drivers/firmware/iscsi_ibft_find.c +++ b/drivers/firmware/iscsi_ibft_find.c @@ -31,8 +31,8 @@ /* * Physical location of iSCSI Boot Format Table. */ -struct acpi_table_ibft *ibft_addr; -EXPORT_SYMBOL_GPL(ibft_addr); +phys_addr_t ibft_phys_addr; +EXPORT_SYMBOL_GPL(ibft_phys_addr); static const struct { char *sign; @@ -47,13 +47,24 @@ static const struct { #define VGA_MEM 0xA0000 /* VGA buffer */ #define VGA_SIZE 0x20000 /* 128kB */ -static int __init find_ibft_in_mem(void) +/* + * Routine used to find and reserve the iSCSI Boot Format Table + */ +void __init reserve_ibft_region(void) { unsigned long pos; unsigned int len = 0; void *virt; int i; + ibft_phys_addr = 0; + + /* iBFT 1.03 section 1.4.3.1 mandates that UEFI machines will + * only use ACPI for this + */ + if (efi_enabled(EFI_BOOT)) + return; + for (pos = IBFT_START; pos < IBFT_END; pos += 16) { /* The table can't be inside the VGA BIOS reserved space, * so skip that area */ @@ -70,35 +81,12 @@ static int __init find_ibft_in_mem(void) /* if the length of the table extends past 1M, * the table cannot be valid. */ if (pos + len <= (IBFT_END-1)) { - ibft_addr = (struct acpi_table_ibft *)virt; - pr_info("iBFT found at 0x%lx.\n", pos); - goto done; + ibft_phys_addr = pos; + memblock_reserve(ibft_phys_addr, PAGE_ALIGN(len)); + pr_info("iBFT found at %pa.\n", &ibft_phys_addr); + return; } } } } -done: - return len; -} -/* - * Routine used to find the iSCSI Boot Format Table. The logical - * kernel address is set in the ibft_addr global variable. - */ -unsigned long __init find_ibft_region(unsigned long *sizep) -{ - ibft_addr = NULL; - - /* iBFT 1.03 section 1.4.3.1 mandates that UEFI machines will - * only use ACPI for this */ - - if (!efi_enabled(EFI_BOOT)) - find_ibft_in_mem(); - - if (ibft_addr) { - *sizep = PAGE_ALIGN(ibft_addr->header.length); - return (u64)virt_to_phys(ibft_addr); - } - - *sizep = 0; - return 0; } diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c index 47ea2bd42b10..2ee97bab7440 100644 --- a/drivers/firmware/qcom_scm.c +++ b/drivers/firmware/qcom_scm.c @@ -71,7 +71,7 @@ static struct qcom_scm_wb_entry qcom_scm_wb[] = { { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU3 }, }; -static const char *qcom_scm_convention_names[] = { +static const char * const qcom_scm_convention_names[] = { [SMC_CONVENTION_UNKNOWN] = "unknown", [SMC_CONVENTION_ARM_32] = "smc arm 32", [SMC_CONVENTION_ARM_64] = "smc arm 64", @@ -331,7 +331,7 @@ int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus) .owner = ARM_SMCCC_OWNER_SIP, }; - if (!cpus || (cpus && cpumask_empty(cpus))) + if (!cpus || cpumask_empty(cpus)) return -EINVAL; for_each_cpu(cpu, cpus) { @@ -1147,6 +1147,64 @@ int qcom_scm_qsmmu500_wait_safe_toggle(bool en) } EXPORT_SYMBOL(qcom_scm_qsmmu500_wait_safe_toggle); +bool qcom_scm_lmh_dcvsh_available(void) +{ + return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_LMH, QCOM_SCM_LMH_LIMIT_DCVSH); +} +EXPORT_SYMBOL(qcom_scm_lmh_dcvsh_available); + +int qcom_scm_lmh_profile_change(u32 profile_id) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_LMH, + .cmd = QCOM_SCM_LMH_LIMIT_PROFILE_CHANGE, + .arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL), + .args[0] = profile_id, + .owner = ARM_SMCCC_OWNER_SIP, + }; + + return qcom_scm_call(__scm->dev, &desc, NULL); +} 
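/*
 * Illustrative consumer sketch, not part of the patch: how a client driver
 * might use the new LMH SCM calls. The profile id, node id and payload
 * values below are placeholders chosen for the example; only the
 * qcom_scm_lmh_* prototypes come from this series.
 */
static int example_lmh_setup(u32 temp_threshold)
{
	int ret;

	if (!qcom_scm_lmh_dcvsh_available())
		return -ENODEV;

	ret = qcom_scm_lmh_profile_change(0x1);	/* placeholder profile id */
	if (ret)
		return ret;

	/* payload_fn, payload_reg, payload_val, limit_node, node_id, version */
	return qcom_scm_lmh_dcvsh(0x1, 0x0, temp_threshold, 0x6, 0, 0x1);
}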
+EXPORT_SYMBOL(qcom_scm_lmh_profile_change); + +int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val, + u64 limit_node, u32 node_id, u64 version) +{ + dma_addr_t payload_phys; + u32 *payload_buf; + int ret, payload_size = 5 * sizeof(u32); + + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_LMH, + .cmd = QCOM_SCM_LMH_LIMIT_DCVSH, + .arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_VAL, + QCOM_SCM_VAL, QCOM_SCM_VAL), + .args[1] = payload_size, + .args[2] = limit_node, + .args[3] = node_id, + .args[4] = version, + .owner = ARM_SMCCC_OWNER_SIP, + }; + + payload_buf = dma_alloc_coherent(__scm->dev, payload_size, &payload_phys, GFP_KERNEL); + if (!payload_buf) + return -ENOMEM; + + payload_buf[0] = payload_fn; + payload_buf[1] = 0; + payload_buf[2] = payload_reg; + payload_buf[3] = 1; + payload_buf[4] = payload_val; + + desc.args[0] = payload_phys; + + ret = qcom_scm_call(__scm->dev, &desc, NULL); + + dma_free_coherent(__scm->dev, payload_size, payload_buf, payload_phys); + return ret; +} +EXPORT_SYMBOL(qcom_scm_lmh_dcvsh); + static int qcom_scm_find_dload_address(struct device *dev, u64 *addr) { struct device_node *tcsr; @@ -1299,6 +1357,7 @@ static const struct of_device_id qcom_scm_dt_match[] = { { .compatible = "qcom,scm" }, {} }; +MODULE_DEVICE_TABLE(of, qcom_scm_dt_match); static struct platform_driver qcom_scm_driver = { .driver = { @@ -1315,3 +1374,6 @@ static int __init qcom_scm_init(void) return platform_driver_register(&qcom_scm_driver); } subsys_initcall(qcom_scm_init); + +MODULE_DESCRIPTION("Qualcomm Technologies, Inc. SCM driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h index 632fe3142462..d92156ceb3ac 100644 --- a/drivers/firmware/qcom_scm.h +++ b/drivers/firmware/qcom_scm.h @@ -114,6 +114,10 @@ extern int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc, #define QCOM_SCM_SVC_HDCP 0x11 #define QCOM_SCM_HDCP_INVOKE 0x01 +#define QCOM_SCM_SVC_LMH 0x13 +#define QCOM_SCM_LMH_LIMIT_PROFILE_CHANGE 0x01 +#define QCOM_SCM_LMH_LIMIT_DCVSH 0x10 + #define QCOM_SCM_SVC_SMMU_PROGRAM 0x15 #define QCOM_SCM_SMMU_CONFIG_ERRATA1 0x03 #define QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL 0x02 diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c index 250e01680742..4b8978b254f9 100644 --- a/drivers/firmware/raspberrypi.c +++ b/drivers/firmware/raspberrypi.c @@ -329,12 +329,18 @@ struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node) fw = platform_get_drvdata(pdev); if (!fw) - return NULL; + goto err_put_device; if (!kref_get_unless_zero(&fw->consumers)) - return NULL; + goto err_put_device; + + put_device(&pdev->dev); return fw; + +err_put_device: + put_device(&pdev->dev); + return NULL; } EXPORT_SYMBOL_GPL(rpi_firmware_get); diff --git a/drivers/firmware/smccc/smccc.c b/drivers/firmware/smccc/smccc.c index 9f937b125ab0..60ccf3e90d7d 100644 --- a/drivers/firmware/smccc/smccc.c +++ b/drivers/firmware/smccc/smccc.c @@ -9,6 +9,7 @@ #include <linux/init.h> #include <linux/arm-smccc.h> #include <linux/kernel.h> +#include <linux/platform_device.h> #include <asm/archrandom.h> static u32 smccc_version = ARM_SMCCC_VERSION_1_0; @@ -42,3 +43,19 @@ u32 arm_smccc_get_version(void) return smccc_version; } EXPORT_SYMBOL_GPL(arm_smccc_get_version); + +static int __init smccc_devices_init(void) +{ + struct platform_device *pdev; + + if (smccc_trng_available) { + pdev = platform_device_register_simple("smccc_trng", -1, + NULL, 0); + if (IS_ERR(pdev)) + 
pr_err("smccc_trng: could not register device: %ld\n", + PTR_ERR(pdev)); + } + + return 0; +} +device_initcall(smccc_devices_init); diff --git a/drivers/firmware/tegra/bpmp-debugfs.c b/drivers/firmware/tegra/bpmp-debugfs.c index 440d99c63638..3e9fa4b54358 100644 --- a/drivers/firmware/tegra/bpmp-debugfs.c +++ b/drivers/firmware/tegra/bpmp-debugfs.c @@ -296,25 +296,61 @@ static int bpmp_debug_show(struct seq_file *m, void *p) struct file *file = m->private; struct inode *inode = file_inode(file); struct tegra_bpmp *bpmp = inode->i_private; - char *databuf = NULL; char fnamebuf[256]; const char *filename; - uint32_t nbytes = 0; - size_t len; - int err; - - len = seq_get_buf(m, &databuf); - if (!databuf) - return -ENOMEM; + struct mrq_debug_request req = { + .cmd = cpu_to_le32(CMD_DEBUG_READ), + }; + struct mrq_debug_response resp; + struct tegra_bpmp_message msg = { + .mrq = MRQ_DEBUG, + .tx = { + .data = &req, + .size = sizeof(req), + }, + .rx = { + .data = &resp, + .size = sizeof(resp), + }, + }; + uint32_t fd = 0, len = 0; + int remaining, err; filename = get_filename(bpmp, file, fnamebuf, sizeof(fnamebuf)); if (!filename) return -ENOENT; - err = mrq_debug_read(bpmp, filename, databuf, len, &nbytes); - if (!err) - seq_commit(m, nbytes); + mutex_lock(&bpmp_debug_lock); + err = mrq_debug_open(bpmp, filename, &fd, &len, 0); + if (err) + goto out; + + req.frd.fd = fd; + remaining = len; + + while (remaining > 0) { + err = tegra_bpmp_transfer(bpmp, &msg); + if (err < 0) { + goto close; + } else if (msg.rx.ret < 0) { + err = -EINVAL; + goto close; + } + if (resp.frd.readlen > remaining) { + pr_err("%s: read data length invalid\n", __func__); + err = -EINVAL; + goto close; + } + + seq_write(m, resp.frd.data, resp.frd.readlen); + remaining -= resp.frd.readlen; + } + +close: + err = mrq_debug_close(bpmp, fd); +out: + mutex_unlock(&bpmp_debug_lock); return err; } diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c index 15b138326ecc..a3cadbaf3cba 100644 --- a/drivers/firmware/xilinx/zynqmp.c +++ b/drivers/firmware/xilinx/zynqmp.c @@ -664,7 +664,7 @@ int zynqmp_pm_write_ggs(u32 index, u32 value) EXPORT_SYMBOL_GPL(zynqmp_pm_write_ggs); /** - * zynqmp_pm_write_ggs() - PM API for reading global general storage (ggs) + * zynqmp_pm_read_ggs() - PM API for reading global general storage (ggs) * @index: GGS register index * @value: Register value to be written * @@ -697,7 +697,7 @@ int zynqmp_pm_write_pggs(u32 index, u32 value) EXPORT_SYMBOL_GPL(zynqmp_pm_write_pggs); /** - * zynqmp_pm_write_pggs() - PM API for reading persistent global general + * zynqmp_pm_read_pggs() - PM API for reading persistent global general * storage (pggs) * @index: PGGS register index * @value: Register value to be written @@ -1012,7 +1012,24 @@ int zynqmp_pm_set_requirement(const u32 node, const u32 capabilities, EXPORT_SYMBOL_GPL(zynqmp_pm_set_requirement); /** - * zynqmp_pm_aes - Access AES hardware to encrypt/decrypt the data using + * zynqmp_pm_load_pdi - Load and process PDI + * @src: Source device where PDI is located + * @address: PDI src address + * + * This function provides support to load PDI from linux + * + * Return: Returns status, either success or error+reason + */ +int zynqmp_pm_load_pdi(const u32 src, const u64 address) +{ + return zynqmp_pm_invoke_fn(PM_LOAD_PDI, src, + lower_32_bits(address), + upper_32_bits(address), 0, NULL); +} +EXPORT_SYMBOL_GPL(zynqmp_pm_load_pdi); + +/** + * zynqmp_pm_aes_engine - Access AES hardware to encrypt/decrypt the data using * AES-GCM core. 
 * @address: Address of the AesParams structure.
 * @out: Returned output value