Diffstat (limited to 'drivers/firmware')
-rw-r--r--  drivers/firmware/arm_ffa/bus.c                    |  15
-rw-r--r--  drivers/firmware/arm_ffa/driver.c                 |   7
-rw-r--r--  drivers/firmware/arm_scmi/common.h                |  45
-rw-r--r--  drivers/firmware/arm_scmi/driver.c                |  44
-rw-r--r--  drivers/firmware/arm_scmi/perf.c                  |  44
-rw-r--r--  drivers/firmware/arm_scmi/shmem.c                 |  85
-rw-r--r--  drivers/firmware/arm_scmi/transports/mailbox.c    |  15
-rw-r--r--  drivers/firmware/arm_scmi/transports/optee.c      |  19
-rw-r--r--  drivers/firmware/arm_scmi/transports/smc.c        |  13
-rw-r--r--  drivers/firmware/arm_scmi/transports/virtio.c     |  15
-rw-r--r--  drivers/firmware/arm_scmi/vendors/imx/Kconfig     |   1
-rw-r--r--  drivers/firmware/arm_scpi.c                       |   5
-rw-r--r--  drivers/firmware/cirrus/cs_dsp.c                  |  68
-rw-r--r--  drivers/firmware/efi/Kconfig                      |  14
-rw-r--r--  drivers/firmware/efi/efi-pstore.c                 |   2
-rw-r--r--  drivers/firmware/efi/efi.c                        |  41
-rw-r--r--  drivers/firmware/efi/embedded-firmware.c          |   4
-rw-r--r--  drivers/firmware/efi/esrt.c                       |   2
-rw-r--r--  drivers/firmware/efi/libstub/Makefile.zboot       |  18
-rw-r--r--  drivers/firmware/efi/libstub/efi-stub-helper.c    |  12
-rw-r--r--  drivers/firmware/efi/libstub/efi-stub.c           |  26
-rw-r--r--  drivers/firmware/efi/libstub/efistub.h            |   2
-rw-r--r--  drivers/firmware/efi/libstub/file.c               |  22
-rw-r--r--  drivers/firmware/efi/libstub/tpm.c                |   9
-rw-r--r--  drivers/firmware/efi/libstub/x86-stub.c           |   3
-rw-r--r--  drivers/firmware/efi/memattr.c                    |  18
-rw-r--r--  drivers/firmware/efi/tpm.c                        |  26
-rw-r--r--  drivers/firmware/efi/vars.c                       |  16
-rw-r--r--  drivers/firmware/google/coreboot_table.c          |   2
-rw-r--r--  drivers/firmware/google/framebuffer-coreboot.c    |  14
-rw-r--r--  drivers/firmware/google/gsmi.c                    |   6
-rw-r--r--  drivers/firmware/imx/Kconfig                      |   1
-rw-r--r--  drivers/firmware/imx/imx-dsp.c                    |   2
-rw-r--r--  drivers/firmware/memmap.c                         |   2
-rw-r--r--  drivers/firmware/microchip/mpfs-auto-update.c     |   6
-rw-r--r--  drivers/firmware/mtk-adsp-ipc.c                   |   9
-rw-r--r--  drivers/firmware/psci/psci.c                      |  45
-rw-r--r--  drivers/firmware/qcom/qcom_scm.c                  |  30
-rw-r--r--  drivers/firmware/qcom/qcom_scm.h                  |   1
-rw-r--r--  drivers/firmware/qemu_fw_cfg.c                    |   2
-rw-r--r--  drivers/firmware/raspberrypi.c                    |   2
-rw-r--r--  drivers/firmware/stratix10-rsu.c                  |   2
-rw-r--r--  drivers/firmware/stratix10-svc.c                  |   2
-rw-r--r--  drivers/firmware/sysfb.c                          |  19
-rw-r--r--  drivers/firmware/tegra/bpmp.c                     |  14
-rw-r--r--  drivers/firmware/ti_sci.c                         | 489
-rw-r--r--  drivers/firmware/ti_sci.h                         | 143
-rw-r--r--  drivers/firmware/turris-mox-rwtm.c                |  23
-rw-r--r--  drivers/firmware/xilinx/zynqmp-debug.c            | 162
-rw-r--r--  drivers/firmware/xilinx/zynqmp.c                  | 155
 50 files changed, 1485 insertions(+), 237 deletions(-)
diff --git a/drivers/firmware/arm_ffa/bus.c b/drivers/firmware/arm_ffa/bus.c
index eb17d03b66fe..dfda5ffc14db 100644
--- a/drivers/firmware/arm_ffa/bus.c
+++ b/drivers/firmware/arm_ffa/bus.c
@@ -187,13 +187,18 @@ bool ffa_device_is_valid(struct ffa_device *ffa_dev)
return valid;
}
-struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id,
- const struct ffa_ops *ops)
+struct ffa_device *
+ffa_device_register(const struct ffa_partition_info *part_info,
+ const struct ffa_ops *ops)
{
int id, ret;
+ uuid_t uuid;
struct device *dev;
struct ffa_device *ffa_dev;
+ if (!part_info)
+ return NULL;
+
id = ida_alloc_min(&ffa_bus_id, 1, GFP_KERNEL);
if (id < 0)
return NULL;
@@ -210,9 +215,11 @@ struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id,
dev_set_name(&ffa_dev->dev, "arm-ffa-%d", id);
ffa_dev->id = id;
- ffa_dev->vm_id = vm_id;
+ ffa_dev->vm_id = part_info->id;
+ ffa_dev->properties = part_info->properties;
ffa_dev->ops = ops;
- uuid_copy(&ffa_dev->uuid, uuid);
+ import_uuid(&uuid, (u8 *)part_info->uuid);
+ uuid_copy(&ffa_dev->uuid, &uuid);
ret = device_register(&ffa_dev->dev);
if (ret) {
diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
index b14cbdae94e8..2c2ec3c35f15 100644
--- a/drivers/firmware/arm_ffa/driver.c
+++ b/drivers/firmware/arm_ffa/driver.c
@@ -1387,7 +1387,6 @@ static struct notifier_block ffa_bus_nb = {
static int ffa_setup_partitions(void)
{
int count, idx, ret;
- uuid_t uuid;
struct ffa_device *ffa_dev;
struct ffa_dev_part_info *info;
struct ffa_partition_info *pbuf, *tpbuf;
@@ -1406,23 +1405,19 @@ static int ffa_setup_partitions(void)
xa_init(&drv_info->partition_info);
for (idx = 0, tpbuf = pbuf; idx < count; idx++, tpbuf++) {
- import_uuid(&uuid, (u8 *)tpbuf->uuid);
-
/* Note that if the UUID will be uuid_null, that will require
* ffa_bus_notifier() to find the UUID of this partition id
* with help of ffa_device_match_uuid(). FF-A v1.1 and above
* provides UUID here for each partition as part of the
* discovery API and the same is passed.
*/
- ffa_dev = ffa_device_register(&uuid, tpbuf->id, &ffa_drv_ops);
+ ffa_dev = ffa_device_register(tpbuf, &ffa_drv_ops);
if (!ffa_dev) {
pr_err("%s: failed to register partition ID 0x%x\n",
__func__, tpbuf->id);
continue;
}
- ffa_dev->properties = tpbuf->properties;
-
if (drv_info->version > FFA_VERSION_1_0 &&
!(tpbuf->properties & FFA_PARTITION_AARCH64_EXEC))
ffa_mode_32bit_set(ffa_dev);
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index cdec50a698a1..48b12f81141d 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -31,6 +31,8 @@
#define SCMI_MAX_RESPONSE_TIMEOUT (2 * MSEC_PER_SEC)
+#define SCMI_SHMEM_MAX_PAYLOAD_SIZE 104
+
enum scmi_error_codes {
SCMI_SUCCESS = 0, /* Success */
SCMI_ERR_SUPPORT = -1, /* Not supported */
@@ -165,6 +167,7 @@ void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id);
* channel
* @is_p2a: A flag to identify a channel as P2A (RX)
* @rx_timeout_ms: The configured RX timeout in milliseconds.
+ * @max_msg_size: Maximum size of message payload.
* @handle: Pointer to SCMI entity handle
* @no_completion_irq: Flag to indicate that this channel has no completion
* interrupt mechanism for synchronous commands.
@@ -177,6 +180,7 @@ struct scmi_chan_info {
struct device *dev;
bool is_p2a;
unsigned int rx_timeout_ms;
+ unsigned int max_msg_size;
struct scmi_handle *handle;
bool no_completion_irq;
void *transport_info;
@@ -224,7 +228,13 @@ struct scmi_transport_ops {
* @max_msg: Maximum number of messages for a channel type (tx or rx) that can
* be pending simultaneously in the system. May be overridden by the
* get_max_msg op.
- * @max_msg_size: Maximum size of data per message that can be handled.
+ * @max_msg_size: Maximum size of data payload per message that can be handled.
+ * @atomic_threshold: Optional system wide DT-configured threshold, expressed
+ * in microseconds, for atomic operations.
+ * Only SCMI synchronous commands reported by the platform
+ * to have an execution latency lesser-equal to the threshold
+ * should be considered for atomic mode operation: such
+ * decision is finally left up to the SCMI drivers.
* @force_polling: Flag to force this whole transport to use SCMI core polling
* mechanism instead of completion interrupts even if available.
* @sync_cmds_completed_on_ret: Flag to indicate that the transport assures
@@ -243,6 +253,7 @@ struct scmi_desc {
int max_rx_timeout_ms;
int max_msg;
int max_msg_size;
+ unsigned int atomic_threshold;
const bool force_polling;
const bool sync_cmds_completed_on_ret;
const bool atomic_enabled;
@@ -311,6 +322,26 @@ enum scmi_bad_msg {
MSG_MBOX_SPURIOUS = -5,
};
+/* Used for compactness and signature validation of the function pointers being
+ * passed.
+ */
+typedef void (*shmem_copy_toio_t)(void __iomem *to, const void *from,
+ size_t count);
+typedef void (*shmem_copy_fromio_t)(void *to, const void __iomem *from,
+ size_t count);
+
+/**
+ * struct scmi_shmem_io_ops - I/O operations to read from/write to
+ * Shared Memory
+ *
+ * @toio: Copy data to the shared memory area
+ * @fromio: Copy data from the shared memory area
+ */
+struct scmi_shmem_io_ops {
+ shmem_copy_fromio_t fromio;
+ shmem_copy_toio_t toio;
+};
+
/* shmem related declarations */
struct scmi_shared_mem;
@@ -331,13 +362,16 @@ struct scmi_shared_mem;
struct scmi_shared_mem_operations {
void (*tx_prepare)(struct scmi_shared_mem __iomem *shmem,
struct scmi_xfer *xfer,
- struct scmi_chan_info *cinfo);
+ struct scmi_chan_info *cinfo,
+ shmem_copy_toio_t toio);
u32 (*read_header)(struct scmi_shared_mem __iomem *shmem);
void (*fetch_response)(struct scmi_shared_mem __iomem *shmem,
- struct scmi_xfer *xfer);
+ struct scmi_xfer *xfer,
+ shmem_copy_fromio_t fromio);
void (*fetch_notification)(struct scmi_shared_mem __iomem *shmem,
- size_t max_len, struct scmi_xfer *xfer);
+ size_t max_len, struct scmi_xfer *xfer,
+ shmem_copy_fromio_t fromio);
void (*clear_channel)(struct scmi_shared_mem __iomem *shmem);
bool (*poll_done)(struct scmi_shared_mem __iomem *shmem,
struct scmi_xfer *xfer);
@@ -345,7 +379,8 @@ struct scmi_shared_mem_operations {
bool (*channel_intr_enabled)(struct scmi_shared_mem __iomem *shmem);
void __iomem *(*setup_iomap)(struct scmi_chan_info *cinfo,
struct device *dev,
- bool tx, struct resource *res);
+ bool tx, struct resource *res,
+ struct scmi_shmem_io_ops **ops);
};
const struct scmi_shared_mem_operations *scmi_shared_mem_operations_get(void);
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index f8934d049d68..1b5fb2c4ce86 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -149,12 +149,6 @@ struct scmi_debug_info {
* base protocol
* @active_protocols: IDR storing device_nodes for protocols actually defined
* in the DT and confirmed as implemented by fw.
- * @atomic_threshold: Optional system wide DT-configured threshold, expressed
- * in microseconds, for atomic operations.
- * Only SCMI synchronous commands reported by the platform
- * to have an execution latency lesser-equal to the threshold
- * should be considered for atomic mode operation: such
- * decision is finally left up to the SCMI drivers.
* @notify_priv: Pointer to private data structure specific to notifications.
* @node: List head
* @users: Number of users of this instance
@@ -180,7 +174,6 @@ struct scmi_info {
struct mutex protocols_mtx;
u8 *protocols_imp;
struct idr active_protocols;
- unsigned int atomic_threshold;
void *notify_priv;
struct list_head node;
int users;
@@ -2445,7 +2438,7 @@ static bool scmi_is_transport_atomic(const struct scmi_handle *handle,
ret = info->desc->atomic_enabled &&
is_transport_polling_capable(info->desc);
if (ret && atomic_threshold)
- *atomic_threshold = info->atomic_threshold;
+ *atomic_threshold = info->desc->atomic_threshold;
return ret;
}
@@ -2645,6 +2638,7 @@ static int scmi_chan_setup(struct scmi_info *info, struct device_node *of_node,
cinfo->is_p2a = !tx;
cinfo->rx_timeout_ms = info->desc->max_rx_timeout_ms;
+ cinfo->max_msg_size = info->desc->max_msg_size;
/* Create a unique name for this transport device */
snprintf(name, 32, "__scmi_transport_device_%s_%02X",
@@ -2958,7 +2952,7 @@ static struct scmi_debug_info *scmi_debugfs_common_setup(struct scmi_info *info)
(char **)&dbg->name);
debugfs_create_u32("atomic_threshold_us", 0400, top_dentry,
- &info->atomic_threshold);
+ (u32 *)&info->desc->atomic_threshold);
debugfs_create_str("type", 0400, trans, (char **)&dbg->type);
@@ -3053,8 +3047,27 @@ static const struct scmi_desc *scmi_transport_setup(struct device *dev)
if (ret && ret != -EINVAL)
dev_err(dev, "Malformed arm,max-rx-timeout-ms DT property.\n");
- dev_info(dev, "SCMI max-rx-timeout: %dms\n",
- trans->desc->max_rx_timeout_ms);
+ ret = of_property_read_u32(dev->of_node, "arm,max-msg-size",
+ &trans->desc->max_msg_size);
+ if (ret && ret != -EINVAL)
+ dev_err(dev, "Malformed arm,max-msg-size DT property.\n");
+
+ ret = of_property_read_u32(dev->of_node, "arm,max-msg",
+ &trans->desc->max_msg);
+ if (ret && ret != -EINVAL)
+ dev_err(dev, "Malformed arm,max-msg DT property.\n");
+
+ dev_info(dev,
+ "SCMI max-rx-timeout: %dms / max-msg-size: %dbytes / max-msg: %d\n",
+ trans->desc->max_rx_timeout_ms, trans->desc->max_msg_size,
+ trans->desc->max_msg);
+
+ /* System wide atomic threshold for atomic ops .. if any */
+ if (!of_property_read_u32(dev->of_node, "atomic-threshold-us",
+ &trans->desc->atomic_threshold))
+ dev_info(dev,
+ "SCMI System wide atomic threshold set to %u us\n",
+ trans->desc->atomic_threshold);
return trans->desc;
}
@@ -3105,13 +3118,6 @@ static int scmi_probe(struct platform_device *pdev)
handle->devm_protocol_acquire = scmi_devm_protocol_acquire;
handle->devm_protocol_get = scmi_devm_protocol_get;
handle->devm_protocol_put = scmi_devm_protocol_put;
-
- /* System wide atomic threshold for atomic ops .. if any */
- if (!of_property_read_u32(np, "atomic-threshold-us",
- &info->atomic_threshold))
- dev_info(dev,
- "SCMI System wide atomic threshold set to %d us\n",
- info->atomic_threshold);
handle->is_transport_atomic = scmi_is_transport_atomic;
/* Setup all channels described in the DT at first */
@@ -3327,7 +3333,7 @@ static struct platform_driver scmi_driver = {
.dev_groups = versions_groups,
},
.probe = scmi_probe,
- .remove_new = scmi_remove,
+ .remove = scmi_remove,
};
static struct dentry *scmi_debugfs_init(void)
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
index 2d77b5f40ca7..c7e5a34b254b 100644
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -373,7 +373,7 @@ static int iter_perf_levels_update_state(struct scmi_iterator_state *st,
return 0;
}
-static inline void
+static inline int
process_response_opp(struct device *dev, struct perf_dom_info *dom,
struct scmi_opp *opp, unsigned int loop_idx,
const struct scmi_msg_resp_perf_describe_levels *r)
@@ -386,12 +386,16 @@ process_response_opp(struct device *dev, struct perf_dom_info *dom,
le16_to_cpu(r->opp[loop_idx].transition_latency_us);
ret = xa_insert(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL);
- if (ret)
- dev_warn(dev, "Failed to add opps_by_lvl at %d for %s - ret:%d\n",
+ if (ret) {
+ dev_info(dev, FW_BUG "Failed to add opps_by_lvl at %d for %s - ret:%d\n",
opp->perf, dom->info.name, ret);
+ return ret;
+ }
+
+ return 0;
}
-static inline void
+static inline int
process_response_opp_v4(struct device *dev, struct perf_dom_info *dom,
struct scmi_opp *opp, unsigned int loop_idx,
const struct scmi_msg_resp_perf_describe_levels_v4 *r)
@@ -404,9 +408,11 @@ process_response_opp_v4(struct device *dev, struct perf_dom_info *dom,
le16_to_cpu(r->opp[loop_idx].transition_latency_us);
ret = xa_insert(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL);
- if (ret)
- dev_warn(dev, "Failed to add opps_by_lvl at %d for %s - ret:%d\n",
+ if (ret) {
+ dev_info(dev, FW_BUG "Failed to add opps_by_lvl at %d for %s - ret:%d\n",
opp->perf, dom->info.name, ret);
+ return ret;
+ }
/* Note that PERF v4 reports always five 32-bit words */
opp->indicative_freq = le32_to_cpu(r->opp[loop_idx].indicative_freq);
@@ -415,13 +421,21 @@ process_response_opp_v4(struct device *dev, struct perf_dom_info *dom,
ret = xa_insert(&dom->opps_by_idx, opp->level_index, opp,
GFP_KERNEL);
- if (ret)
+ if (ret) {
dev_warn(dev,
"Failed to add opps_by_idx at %d for %s - ret:%d\n",
opp->level_index, dom->info.name, ret);
+ /* Cleanup by_lvl too */
+ xa_erase(&dom->opps_by_lvl, opp->perf);
+
+ return ret;
+ }
+
hash_add(dom->opps_by_freq, &opp->hash, opp->indicative_freq);
}
+
+ return 0;
}
static int
@@ -429,16 +443,22 @@ iter_perf_levels_process_response(const struct scmi_protocol_handle *ph,
const void *response,
struct scmi_iterator_state *st, void *priv)
{
+ int ret;
struct scmi_opp *opp;
struct scmi_perf_ipriv *p = priv;
- opp = &p->perf_dom->opp[st->desc_index + st->loop_idx];
+ opp = &p->perf_dom->opp[p->perf_dom->opp_count];
if (PROTOCOL_REV_MAJOR(p->version) <= 0x3)
- process_response_opp(ph->dev, p->perf_dom, opp, st->loop_idx,
- response);
+ ret = process_response_opp(ph->dev, p->perf_dom, opp,
+ st->loop_idx, response);
else
- process_response_opp_v4(ph->dev, p->perf_dom, opp, st->loop_idx,
- response);
+ ret = process_response_opp_v4(ph->dev, p->perf_dom, opp,
+ st->loop_idx, response);
+
+ /* Skip BAD duplicates received from firmware */
+ if (ret)
+ return ret == -EBUSY ? 0 : ret;
+
p->perf_dom->opp_count++;
dev_dbg(ph->dev, "Level %d Power %d Latency %dus Ifreq %d Index %d\n",
diff --git a/drivers/firmware/arm_scmi/shmem.c b/drivers/firmware/arm_scmi/shmem.c
index 01d8a9398fe8..11c347bff766 100644
--- a/drivers/firmware/arm_scmi/shmem.c
+++ b/drivers/firmware/arm_scmi/shmem.c
@@ -16,6 +16,8 @@
#include "common.h"
+#define SCMI_SHMEM_LAYOUT_OVERHEAD 24
+
/*
* SCMI specification requires all parameters, message headers, return
* arguments or any protocol data to be expressed in little endian
@@ -34,9 +36,59 @@ struct scmi_shared_mem {
u8 msg_payload[];
};
+static inline void shmem_memcpy_fromio32(void *to,
+ const void __iomem *from,
+ size_t count)
+{
+ WARN_ON(!IS_ALIGNED((unsigned long)from, 4) ||
+ !IS_ALIGNED((unsigned long)to, 4) ||
+ count % 4);
+
+ __ioread32_copy(to, from, count / 4);
+}
+
+static inline void shmem_memcpy_toio32(void __iomem *to,
+ const void *from,
+ size_t count)
+{
+ WARN_ON(!IS_ALIGNED((unsigned long)to, 4) ||
+ !IS_ALIGNED((unsigned long)from, 4) ||
+ count % 4);
+
+ __iowrite32_copy(to, from, count / 4);
+}
+
+static struct scmi_shmem_io_ops shmem_io_ops32 = {
+ .fromio = shmem_memcpy_fromio32,
+ .toio = shmem_memcpy_toio32,
+};
+
+/* Wrappers are needed for proper memcpy_{from,to}_io expansion by the
+ * pre-processor.
+ */
+static inline void shmem_memcpy_fromio(void *to,
+ const void __iomem *from,
+ size_t count)
+{
+ memcpy_fromio(to, from, count);
+}
+
+static inline void shmem_memcpy_toio(void __iomem *to,
+ const void *from,
+ size_t count)
+{
+ memcpy_toio(to, from, count);
+}
+
+static struct scmi_shmem_io_ops shmem_io_ops_default = {
+ .fromio = shmem_memcpy_fromio,
+ .toio = shmem_memcpy_toio,
+};
+
static void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem,
struct scmi_xfer *xfer,
- struct scmi_chan_info *cinfo)
+ struct scmi_chan_info *cinfo,
+ shmem_copy_toio_t copy_toio)
{
ktime_t stop;
@@ -73,7 +125,7 @@ static void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem,
iowrite32(sizeof(shmem->msg_header) + xfer->tx.len, &shmem->length);
iowrite32(pack_scmi_header(&xfer->hdr), &shmem->msg_header);
if (xfer->tx.buf)
- memcpy_toio(shmem->msg_payload, xfer->tx.buf, xfer->tx.len);
+ copy_toio(shmem->msg_payload, xfer->tx.buf, xfer->tx.len);
}
static u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem)
@@ -82,7 +134,8 @@ static u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem)
}
static void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
- struct scmi_xfer *xfer)
+ struct scmi_xfer *xfer,
+ shmem_copy_fromio_t copy_fromio)
{
size_t len = ioread32(&shmem->length);
@@ -91,11 +144,12 @@ static void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
xfer->rx.len = min_t(size_t, xfer->rx.len, len > 8 ? len - 8 : 0);
/* Take a copy to the rx buffer.. */
- memcpy_fromio(xfer->rx.buf, shmem->msg_payload + 4, xfer->rx.len);
+ copy_fromio(xfer->rx.buf, shmem->msg_payload + 4, xfer->rx.len);
}
static void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
- size_t max_len, struct scmi_xfer *xfer)
+ size_t max_len, struct scmi_xfer *xfer,
+ shmem_copy_fromio_t copy_fromio)
{
size_t len = ioread32(&shmem->length);
@@ -103,7 +157,7 @@ static void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
xfer->rx.len = min_t(size_t, max_len, len > 4 ? len - 4 : 0);
/* Take a copy to the rx buffer.. */
- memcpy_fromio(xfer->rx.buf, shmem->msg_payload, xfer->rx.len);
+ copy_fromio(xfer->rx.buf, shmem->msg_payload, xfer->rx.len);
}
static void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem)
@@ -139,7 +193,8 @@ static bool shmem_channel_intr_enabled(struct scmi_shared_mem __iomem *shmem)
static void __iomem *shmem_setup_iomap(struct scmi_chan_info *cinfo,
struct device *dev, bool tx,
- struct resource *res)
+ struct resource *res,
+ struct scmi_shmem_io_ops **ops)
{
struct device_node *shmem __free(device_node);
const char *desc = tx ? "Tx" : "Rx";
@@ -148,6 +203,7 @@ static void __iomem *shmem_setup_iomap(struct scmi_chan_info *cinfo,
struct resource lres = {};
resource_size_t size;
void __iomem *addr;
+ u32 reg_io_width;
shmem = of_parse_phandle(cdev->of_node, "shmem", idx);
if (!shmem)
@@ -167,12 +223,27 @@ static void __iomem *shmem_setup_iomap(struct scmi_chan_info *cinfo,
}
size = resource_size(res);
+ if (cinfo->max_msg_size + SCMI_SHMEM_LAYOUT_OVERHEAD > size) {
+ dev_err(dev, "misconfigured SCMI shared memory\n");
+ return IOMEM_ERR_PTR(-ENOSPC);
+ }
+
addr = devm_ioremap(dev, res->start, size);
if (!addr) {
dev_err(dev, "failed to ioremap SCMI %s shared memory\n", desc);
return IOMEM_ERR_PTR(-EADDRNOTAVAIL);
}
+ of_property_read_u32(shmem, "reg-io-width", &reg_io_width);
+ switch (reg_io_width) {
+ case 4:
+ *ops = &shmem_io_ops32;
+ break;
+ default:
+ *ops = &shmem_io_ops_default;
+ break;
+ }
+
return addr;
}
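
The shmem.c and common.h hunks above make shmem_setup_iomap() hand back a struct scmi_shmem_io_ops chosen from the shmem node's optional reg-io-width property, and the selected toio/fromio helpers are then threaded through tx_prepare()/fetch_response(). As a rough caller-side sketch, assuming a hypothetical transport with invented names (the real consumers are the mailbox, optee and smc diffs below):

/*
 * Illustrative only: how a hypothetical shared-memory transport would use
 * the scmi_shmem_io_ops returned by the updated setup_iomap(). "core" is
 * the transport-core ops pointer the in-tree transports already use.
 */
struct scmi_example_chan {
	struct scmi_chan_info *cinfo;
	struct scmi_shared_mem __iomem *shmem;
	struct scmi_shmem_io_ops *io_ops;	/* filled in by setup_iomap() */
};

static int example_chan_setup(struct scmi_chan_info *cinfo,
			      struct device *dev, bool tx)
{
	struct scmi_example_chan *ch;

	ch = devm_kzalloc(dev, sizeof(*ch), GFP_KERNEL);
	if (!ch)
		return -ENOMEM;

	/* setup_iomap() now also reports which copy helpers to use */
	ch->shmem = core->shmem->setup_iomap(cinfo, dev, tx, NULL,
					     &ch->io_ops);
	if (IS_ERR(ch->shmem))
		return PTR_ERR(ch->shmem);

	ch->cinfo = cinfo;
	cinfo->transport_info = ch;
	return 0;
}

static void example_fetch_response(struct scmi_chan_info *cinfo,
				   struct scmi_xfer *xfer)
{
	struct scmi_example_chan *ch = cinfo->transport_info;

	/* 32-bit accessors get picked when the shmem node sets reg-io-width = <4> */
	core->shmem->fetch_response(ch->shmem, xfer, ch->io_ops->fromio);
}
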
diff --git a/drivers/firmware/arm_scmi/transports/mailbox.c b/drivers/firmware/arm_scmi/transports/mailbox.c
index e3d5f7560990..b66df2981456 100644
--- a/drivers/firmware/arm_scmi/transports/mailbox.c
+++ b/drivers/firmware/arm_scmi/transports/mailbox.c
@@ -26,6 +26,7 @@
* @cinfo: SCMI channel info
* @shmem: Transmit/Receive shared memory area
* @chan_lock: Lock that prevents multiple xfers from being queued
+ * @io_ops: Transport specific I/O operations
*/
struct scmi_mailbox {
struct mbox_client cl;
@@ -35,6 +36,7 @@ struct scmi_mailbox {
struct scmi_chan_info *cinfo;
struct scmi_shared_mem __iomem *shmem;
struct mutex chan_lock;
+ struct scmi_shmem_io_ops *io_ops;
};
#define client_to_scmi_mailbox(c) container_of(c, struct scmi_mailbox, cl)
@@ -45,7 +47,8 @@ static void tx_prepare(struct mbox_client *cl, void *m)
{
struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl);
- core->shmem->tx_prepare(smbox->shmem, m, smbox->cinfo);
+ core->shmem->tx_prepare(smbox->shmem, m, smbox->cinfo,
+ smbox->io_ops->toio);
}
static void rx_callback(struct mbox_client *cl, void *m)
@@ -197,7 +200,8 @@ static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
if (!smbox)
return -ENOMEM;
- smbox->shmem = core->shmem->setup_iomap(cinfo, dev, tx, NULL);
+ smbox->shmem = core->shmem->setup_iomap(cinfo, dev, tx, NULL,
+ &smbox->io_ops);
if (IS_ERR(smbox->shmem))
return PTR_ERR(smbox->shmem);
@@ -305,7 +309,7 @@ static void mailbox_fetch_response(struct scmi_chan_info *cinfo,
{
struct scmi_mailbox *smbox = cinfo->transport_info;
- core->shmem->fetch_response(smbox->shmem, xfer);
+ core->shmem->fetch_response(smbox->shmem, xfer, smbox->io_ops->fromio);
}
static void mailbox_fetch_notification(struct scmi_chan_info *cinfo,
@@ -313,7 +317,8 @@ static void mailbox_fetch_notification(struct scmi_chan_info *cinfo,
{
struct scmi_mailbox *smbox = cinfo->transport_info;
- core->shmem->fetch_notification(smbox->shmem, max_len, xfer);
+ core->shmem->fetch_notification(smbox->shmem, max_len, xfer,
+ smbox->io_ops->fromio);
}
static void mailbox_clear_channel(struct scmi_chan_info *cinfo)
@@ -366,7 +371,7 @@ static struct scmi_desc scmi_mailbox_desc = {
.ops = &scmi_mailbox_ops,
.max_rx_timeout_ms = 30, /* We may increase this if required */
.max_msg = 20, /* Limited by MBOX_TX_QUEUE_LEN */
- .max_msg_size = 128,
+ .max_msg_size = SCMI_SHMEM_MAX_PAYLOAD_SIZE,
};
static const struct of_device_id scmi_of_match[] = {
diff --git a/drivers/firmware/arm_scmi/transports/optee.c b/drivers/firmware/arm_scmi/transports/optee.c
index 56fc63edf51e..3949a877e17d 100644
--- a/drivers/firmware/arm_scmi/transports/optee.c
+++ b/drivers/firmware/arm_scmi/transports/optee.c
@@ -17,8 +17,6 @@
#include "../common.h"
-#define SCMI_OPTEE_MAX_MSG_SIZE 128
-
enum scmi_optee_pta_cmd {
/*
* PTA_SCMI_CMD_CAPABILITIES - Get channel capabilities
@@ -114,6 +112,7 @@ enum scmi_optee_pta_cmd {
* @req.shmem: Virtual base address of the shared memory
* @req.msg: Shared memory protocol handle for SCMI request and
* synchronous response
+ * @io_ops: Transport specific I/O operations
* @tee_shm: TEE shared memory handle @req or NULL if using IOMEM shmem
* @link: Reference in agent's channel list
*/
@@ -128,6 +127,7 @@ struct scmi_optee_channel {
struct scmi_shared_mem __iomem *shmem;
struct scmi_msg_payld *msg;
} req;
+ struct scmi_shmem_io_ops *io_ops;
struct tee_shm *tee_shm;
struct list_head link;
};
@@ -297,7 +297,7 @@ static int invoke_process_msg_channel(struct scmi_optee_channel *channel, size_t
param[2].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
param[2].u.memref.shm = channel->tee_shm;
- param[2].u.memref.size = SCMI_OPTEE_MAX_MSG_SIZE;
+ param[2].u.memref.size = SCMI_SHMEM_MAX_PAYLOAD_SIZE;
ret = tee_client_invoke_func(scmi_optee_private->tee_ctx, &arg, param);
if (ret < 0 || arg.ret) {
@@ -330,7 +330,7 @@ static void scmi_optee_clear_channel(struct scmi_chan_info *cinfo)
static int setup_dynamic_shmem(struct device *dev, struct scmi_optee_channel *channel)
{
- const size_t msg_size = SCMI_OPTEE_MAX_MSG_SIZE;
+ const size_t msg_size = SCMI_SHMEM_MAX_PAYLOAD_SIZE;
void *shbuf;
channel->tee_shm = tee_shm_alloc_kernel_buf(scmi_optee_private->tee_ctx, msg_size);
@@ -350,7 +350,8 @@ static int setup_dynamic_shmem(struct device *dev, struct scmi_optee_channel *ch
static int setup_static_shmem(struct device *dev, struct scmi_chan_info *cinfo,
struct scmi_optee_channel *channel)
{
- channel->req.shmem = core->shmem->setup_iomap(cinfo, dev, true, NULL);
+ channel->req.shmem = core->shmem->setup_iomap(cinfo, dev, true, NULL,
+ &channel->io_ops);
if (IS_ERR(channel->req.shmem))
return PTR_ERR(channel->req.shmem);
@@ -465,7 +466,8 @@ static int scmi_optee_send_message(struct scmi_chan_info *cinfo,
ret = invoke_process_msg_channel(channel,
core->msg->command_size(xfer));
} else {
- core->shmem->tx_prepare(channel->req.shmem, xfer, cinfo);
+ core->shmem->tx_prepare(channel->req.shmem, xfer, cinfo,
+ channel->io_ops->toio);
ret = invoke_process_smt_channel(channel);
}
@@ -484,7 +486,8 @@ static void scmi_optee_fetch_response(struct scmi_chan_info *cinfo,
core->msg->fetch_response(channel->req.msg,
channel->rx_len, xfer);
else
- core->shmem->fetch_response(channel->req.shmem, xfer);
+ core->shmem->fetch_response(channel->req.shmem, xfer,
+ channel->io_ops->fromio);
}
static void scmi_optee_mark_txdone(struct scmi_chan_info *cinfo, int ret,
@@ -514,7 +517,7 @@ static struct scmi_desc scmi_optee_desc = {
.ops = &scmi_optee_ops,
.max_rx_timeout_ms = 30,
.max_msg = 20,
- .max_msg_size = SCMI_OPTEE_MAX_MSG_SIZE,
+ .max_msg_size = SCMI_SHMEM_MAX_PAYLOAD_SIZE,
.sync_cmds_completed_on_ret = true,
};
diff --git a/drivers/firmware/arm_scmi/transports/smc.c b/drivers/firmware/arm_scmi/transports/smc.c
index f8dd108777f9..f632a62cfb3e 100644
--- a/drivers/firmware/arm_scmi/transports/smc.c
+++ b/drivers/firmware/arm_scmi/transports/smc.c
@@ -45,6 +45,7 @@
* @irq: An optional IRQ for completion
* @cinfo: SCMI channel info
* @shmem: Transmit/Receive shared memory area
+ * @io_ops: Transport specific I/O operations
* @shmem_lock: Lock to protect access to Tx/Rx shared memory area.
* Used when NOT operating in atomic mode.
* @inflight: Atomic flag to protect access to Tx/Rx shared memory area.
@@ -60,6 +61,7 @@ struct scmi_smc {
int irq;
struct scmi_chan_info *cinfo;
struct scmi_shared_mem __iomem *shmem;
+ struct scmi_shmem_io_ops *io_ops;
/* Protect access to shmem area */
struct mutex shmem_lock;
#define INFLIGHT_NONE MSG_TOKEN_MAX
@@ -144,7 +146,8 @@ static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
if (!scmi_info)
return -ENOMEM;
- scmi_info->shmem = core->shmem->setup_iomap(cinfo, dev, tx, &res);
+ scmi_info->shmem = core->shmem->setup_iomap(cinfo, dev, tx, &res,
+ &scmi_info->io_ops);
if (IS_ERR(scmi_info->shmem))
return PTR_ERR(scmi_info->shmem);
@@ -229,7 +232,8 @@ static int smc_send_message(struct scmi_chan_info *cinfo,
*/
smc_channel_lock_acquire(scmi_info, xfer);
- core->shmem->tx_prepare(scmi_info->shmem, xfer, cinfo);
+ core->shmem->tx_prepare(scmi_info->shmem, xfer, cinfo,
+ scmi_info->io_ops->toio);
if (scmi_info->cap_id != ULONG_MAX)
arm_smccc_1_1_invoke(scmi_info->func_id, scmi_info->cap_id, 0,
@@ -253,7 +257,8 @@ static void smc_fetch_response(struct scmi_chan_info *cinfo,
{
struct scmi_smc *scmi_info = cinfo->transport_info;
- core->shmem->fetch_response(scmi_info->shmem, xfer);
+ core->shmem->fetch_response(scmi_info->shmem, xfer,
+ scmi_info->io_ops->fromio);
}
static void smc_mark_txdone(struct scmi_chan_info *cinfo, int ret,
@@ -277,7 +282,7 @@ static struct scmi_desc scmi_smc_desc = {
.ops = &scmi_smc_ops,
.max_rx_timeout_ms = 30,
.max_msg = 20,
- .max_msg_size = 128,
+ .max_msg_size = SCMI_SHMEM_MAX_PAYLOAD_SIZE,
/*
* Setting .sync_cmds_atomic_replies to true for SMC assumes that,
* once the SMC instruction has completed successfully, the issued
diff --git a/drivers/firmware/arm_scmi/transports/virtio.c b/drivers/firmware/arm_scmi/transports/virtio.c
index d349766bc0b2..41aea33776a9 100644
--- a/drivers/firmware/arm_scmi/transports/virtio.c
+++ b/drivers/firmware/arm_scmi/transports/virtio.c
@@ -32,8 +32,8 @@
#define VIRTIO_MAX_RX_TIMEOUT_MS 60000
#define VIRTIO_SCMI_MAX_MSG_SIZE 128 /* Value may be increased. */
-#define VIRTIO_SCMI_MAX_PDU_SIZE \
- (VIRTIO_SCMI_MAX_MSG_SIZE + SCMI_MSG_MAX_PROT_OVERHEAD)
+#define VIRTIO_SCMI_MAX_PDU_SIZE(ci) \
+ ((ci)->max_msg_size + SCMI_MSG_MAX_PROT_OVERHEAD)
#define DESCRIPTORS_PER_TX_MSG 2
/**
@@ -90,6 +90,7 @@ enum poll_states {
* @input: SDU used for (delayed) responses and notifications
* @list: List which scmi_vio_msg may be part of
* @rx_len: Input SDU size in bytes, once input has been received
+ * @max_len: Maximum allowed SDU size in bytes
* @poll_idx: Last used index registered for polling purposes if this message
* transaction reply was configured for polling.
* @poll_status: Polling state for this message.
@@ -102,6 +103,7 @@ struct scmi_vio_msg {
struct scmi_msg_payld *input;
struct list_head list;
unsigned int rx_len;
+ unsigned int max_len;
unsigned int poll_idx;
enum poll_states poll_status;
/* Lock to protect access to poll_status */
@@ -234,7 +236,7 @@ static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch,
unsigned long flags;
struct device *dev = &vioch->vqueue->vdev->dev;
- sg_init_one(&sg_in, msg->input, VIRTIO_SCMI_MAX_PDU_SIZE);
+ sg_init_one(&sg_in, msg->input, msg->max_len);
spin_lock_irqsave(&vioch->lock, flags);
@@ -439,9 +441,9 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
if (!msg)
return -ENOMEM;
+ msg->max_len = VIRTIO_SCMI_MAX_PDU_SIZE(cinfo);
if (tx) {
- msg->request = devm_kzalloc(dev,
- VIRTIO_SCMI_MAX_PDU_SIZE,
+ msg->request = devm_kzalloc(dev, msg->max_len,
GFP_KERNEL);
if (!msg->request)
return -ENOMEM;
@@ -449,8 +451,7 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
refcount_set(&msg->users, 1);
}
- msg->input = devm_kzalloc(dev, VIRTIO_SCMI_MAX_PDU_SIZE,
- GFP_KERNEL);
+ msg->input = devm_kzalloc(dev, msg->max_len, GFP_KERNEL);
if (!msg->input)
return -ENOMEM;
diff --git a/drivers/firmware/arm_scmi/vendors/imx/Kconfig b/drivers/firmware/arm_scmi/vendors/imx/Kconfig
index 2883ed24a84d..a01bf5e47301 100644
--- a/drivers/firmware/arm_scmi/vendors/imx/Kconfig
+++ b/drivers/firmware/arm_scmi/vendors/imx/Kconfig
@@ -15,6 +15,7 @@ config IMX_SCMI_BBM_EXT
config IMX_SCMI_MISC_EXT
tristate "i.MX SCMI MISC EXTENSION"
depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF)
+ depends on IMX_SCMI_MISC_DRV
default y if ARCH_MXC
help
This enables i.MX System MISC control logic such as gpio expander
diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c
index 94a6b4e667de..87c323de17b9 100644
--- a/drivers/firmware/arm_scpi.c
+++ b/drivers/firmware/arm_scpi.c
@@ -630,6 +630,9 @@ static struct scpi_dvfs_info *scpi_dvfs_get_info(u8 domain)
if (ret)
return ERR_PTR(ret);
+ if (!buf.opp_count)
+ return ERR_PTR(-ENOENT);
+
info = kmalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return ERR_PTR(-ENOMEM);
@@ -1046,7 +1049,7 @@ static struct platform_driver scpi_driver = {
.dev_groups = versions_groups,
},
.probe = scpi_probe,
- .remove_new = scpi_remove,
+ .remove = scpi_remove,
};
module_platform_driver(scpi_driver);
diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
index 419220fa42fd..5365e9a43000 100644
--- a/drivers/firmware/cirrus/cs_dsp.c
+++ b/drivers/firmware/cirrus/cs_dsp.c
@@ -378,7 +378,7 @@ const char *cs_dsp_mem_region_name(unsigned int type)
return NULL;
}
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_mem_region_name, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mem_region_name, "FW_CS_DSP");
#ifdef CONFIG_DEBUG_FS
static void cs_dsp_debugfs_save_wmfwname(struct cs_dsp *dsp, const char *s)
@@ -519,7 +519,7 @@ void cs_dsp_init_debugfs(struct cs_dsp *dsp, struct dentry *debugfs_root)
dsp->debugfs_root = root;
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_init_debugfs, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_init_debugfs, "FW_CS_DSP");
/**
* cs_dsp_cleanup_debugfs() - Removes DSP representation from debugfs
@@ -531,17 +531,17 @@ void cs_dsp_cleanup_debugfs(struct cs_dsp *dsp)
debugfs_remove_recursive(dsp->debugfs_root);
dsp->debugfs_root = ERR_PTR(-ENODEV);
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_cleanup_debugfs, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_cleanup_debugfs, "FW_CS_DSP");
#else
void cs_dsp_init_debugfs(struct cs_dsp *dsp, struct dentry *debugfs_root)
{
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_init_debugfs, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_init_debugfs, "FW_CS_DSP");
void cs_dsp_cleanup_debugfs(struct cs_dsp *dsp)
{
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_cleanup_debugfs, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_cleanup_debugfs, "FW_CS_DSP");
static inline void cs_dsp_debugfs_save_wmfwname(struct cs_dsp *dsp,
const char *s)
@@ -749,7 +749,7 @@ int cs_dsp_coeff_write_acked_control(struct cs_dsp_coeff_ctl *ctl, unsigned int
return -ETIMEDOUT;
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_coeff_write_acked_control, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_coeff_write_acked_control, "FW_CS_DSP");
static int cs_dsp_coeff_write_ctrl_raw(struct cs_dsp_coeff_ctl *ctl,
unsigned int off, const void *buf, size_t len)
@@ -827,7 +827,7 @@ int cs_dsp_coeff_write_ctrl(struct cs_dsp_coeff_ctl *ctl,
return 1;
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_coeff_write_ctrl, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_coeff_write_ctrl, "FW_CS_DSP");
/**
* cs_dsp_coeff_lock_and_write_ctrl() - Writes the given buffer to the given coefficient control
@@ -926,7 +926,7 @@ int cs_dsp_coeff_read_ctrl(struct cs_dsp_coeff_ctl *ctl,
return ret;
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_coeff_read_ctrl, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_coeff_read_ctrl, "FW_CS_DSP");
/**
* cs_dsp_coeff_lock_and_read_ctrl() - Reads the given coefficient control into the given buffer
@@ -1679,7 +1679,7 @@ struct cs_dsp_coeff_ctl *cs_dsp_get_ctl(struct cs_dsp *dsp, const char *name, in
return rslt;
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_get_ctl, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_get_ctl, "FW_CS_DSP");
static void cs_dsp_ctl_fixup_base(struct cs_dsp *dsp,
const struct cs_dsp_alg_region *alg_region)
@@ -1769,7 +1769,7 @@ struct cs_dsp_alg_region *cs_dsp_find_alg_region(struct cs_dsp *dsp,
return NULL;
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_find_alg_region, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_find_alg_region, "FW_CS_DSP");
static struct cs_dsp_alg_region *cs_dsp_create_region(struct cs_dsp *dsp,
int type, __be32 id,
@@ -2404,7 +2404,7 @@ int cs_dsp_adsp1_init(struct cs_dsp *dsp)
return cs_dsp_common_init(dsp);
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp1_init, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp1_init, "FW_CS_DSP");
/**
* cs_dsp_adsp1_power_up() - Load and start the named firmware
@@ -2496,7 +2496,7 @@ err_mutex:
mutex_unlock(&dsp->pwr_lock);
return ret;
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp1_power_up, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp1_power_up, "FW_CS_DSP");
/**
* cs_dsp_adsp1_power_down() - Halts the DSP
@@ -2528,7 +2528,7 @@ void cs_dsp_adsp1_power_down(struct cs_dsp *dsp)
mutex_unlock(&dsp->pwr_lock);
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp1_power_down, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp1_power_down, "FW_CS_DSP");
static int cs_dsp_adsp2v2_enable_core(struct cs_dsp *dsp)
{
@@ -2680,7 +2680,7 @@ int cs_dsp_set_dspclk(struct cs_dsp *dsp, unsigned int freq)
return ret;
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_set_dspclk, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_set_dspclk, "FW_CS_DSP");
static void cs_dsp_stop_watchdog(struct cs_dsp *dsp)
{
@@ -2770,7 +2770,7 @@ err_mutex:
return ret;
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_power_up, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_power_up, "FW_CS_DSP");
/**
* cs_dsp_power_down() - Powers-down the DSP
@@ -2804,7 +2804,7 @@ void cs_dsp_power_down(struct cs_dsp *dsp)
cs_dsp_dbg(dsp, "Shutdown complete\n");
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_power_down, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_power_down, "FW_CS_DSP");
static int cs_dsp_adsp2_start_core(struct cs_dsp *dsp)
{
@@ -2890,7 +2890,7 @@ err:
return ret;
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_run, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_run, "FW_CS_DSP");
/**
* cs_dsp_stop() - Stops the firmware
@@ -2929,7 +2929,7 @@ void cs_dsp_stop(struct cs_dsp *dsp)
cs_dsp_dbg(dsp, "Execution stopped\n");
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_stop, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_stop, "FW_CS_DSP");
static int cs_dsp_halo_start_core(struct cs_dsp *dsp)
{
@@ -2991,7 +2991,7 @@ int cs_dsp_adsp2_init(struct cs_dsp *dsp)
return cs_dsp_common_init(dsp);
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp2_init, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp2_init, "FW_CS_DSP");
/**
* cs_dsp_halo_init() - Initialise a cs_dsp structure representing a HALO Core DSP
@@ -3008,7 +3008,7 @@ int cs_dsp_halo_init(struct cs_dsp *dsp)
return cs_dsp_common_init(dsp);
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_halo_init, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_halo_init, "FW_CS_DSP");
/**
* cs_dsp_remove() - Clean a cs_dsp before deletion
@@ -3028,7 +3028,7 @@ void cs_dsp_remove(struct cs_dsp *dsp)
cs_dsp_free_ctl_blk(ctl);
}
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_remove, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_remove, "FW_CS_DSP");
/**
* cs_dsp_read_raw_data_block() - Reads a block of data from DSP memory
@@ -3065,7 +3065,7 @@ int cs_dsp_read_raw_data_block(struct cs_dsp *dsp, int mem_type, unsigned int me
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_read_raw_data_block, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_read_raw_data_block, "FW_CS_DSP");
/**
* cs_dsp_read_data_word() - Reads a word from DSP memory
@@ -3089,7 +3089,7 @@ int cs_dsp_read_data_word(struct cs_dsp *dsp, int mem_type, unsigned int mem_add
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_read_data_word, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_read_data_word, "FW_CS_DSP");
/**
* cs_dsp_write_data_word() - Writes a word to DSP memory
@@ -3115,7 +3115,7 @@ int cs_dsp_write_data_word(struct cs_dsp *dsp, int mem_type, unsigned int mem_ad
return regmap_raw_write(dsp->regmap, reg, &val, sizeof(val));
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_write_data_word, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_write_data_word, "FW_CS_DSP");
/**
* cs_dsp_remove_padding() - Convert unpacked words to packed bytes
@@ -3139,7 +3139,7 @@ void cs_dsp_remove_padding(u32 *buf, int nwords)
*pack_out++ = (u8)(word >> 16);
}
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_remove_padding, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_remove_padding, "FW_CS_DSP");
/**
* cs_dsp_adsp2_bus_error() - Handle a DSP bus error interrupt
@@ -3209,7 +3209,7 @@ void cs_dsp_adsp2_bus_error(struct cs_dsp *dsp)
error:
mutex_unlock(&dsp->pwr_lock);
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp2_bus_error, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp2_bus_error, "FW_CS_DSP");
/**
* cs_dsp_halo_bus_error() - Handle a DSP bus error interrupt
@@ -3269,7 +3269,7 @@ void cs_dsp_halo_bus_error(struct cs_dsp *dsp)
exit_unlock:
mutex_unlock(&dsp->pwr_lock);
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_halo_bus_error, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_halo_bus_error, "FW_CS_DSP");
/**
* cs_dsp_halo_wdt_expire() - Handle DSP watchdog expiry
@@ -3289,7 +3289,7 @@ void cs_dsp_halo_wdt_expire(struct cs_dsp *dsp)
mutex_unlock(&dsp->pwr_lock);
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_halo_wdt_expire, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_halo_wdt_expire, "FW_CS_DSP");
static const struct cs_dsp_ops cs_dsp_adsp1_ops = {
.validate_version = cs_dsp_validate_version,
@@ -3419,7 +3419,7 @@ int cs_dsp_chunk_write(struct cs_dsp_chunk *ch, int nbits, u32 val)
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_chunk_write, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_chunk_write, "FW_CS_DSP");
/**
* cs_dsp_chunk_flush() - Pad remaining data with zero and commit to chunk
@@ -3438,7 +3438,7 @@ int cs_dsp_chunk_flush(struct cs_dsp_chunk *ch)
return cs_dsp_chunk_write(ch, CS_DSP_DATA_WORD_BITS - ch->cachebits, 0);
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_chunk_flush, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_chunk_flush, "FW_CS_DSP");
/**
* cs_dsp_chunk_read() - Parse data from a DSP memory chunk
@@ -3480,7 +3480,7 @@ int cs_dsp_chunk_read(struct cs_dsp_chunk *ch, int nbits)
return result;
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_chunk_read, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_chunk_read, "FW_CS_DSP");
struct cs_dsp_wseq_op {
@@ -3605,7 +3605,7 @@ int cs_dsp_wseq_init(struct cs_dsp *dsp, struct cs_dsp_wseq *wseqs, unsigned int
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_wseq_init, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_wseq_init, "FW_CS_DSP");
static struct cs_dsp_wseq_op *cs_dsp_wseq_find_op(u32 addr, u8 op_code,
struct list_head *wseq_ops)
@@ -3720,7 +3720,7 @@ op_new_free:
return ret;
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_wseq_write, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_wseq_write, "FW_CS_DSP");
/**
* cs_dsp_wseq_multi_write() - Add or update multiple entries in a write sequence
@@ -3752,7 +3752,7 @@ int cs_dsp_wseq_multi_write(struct cs_dsp *dsp, struct cs_dsp_wseq *wseq,
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_wseq_multi_write, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_wseq_multi_write, "FW_CS_DSP");
MODULE_DESCRIPTION("Cirrus Logic DSP Support");
MODULE_AUTHOR("Simon Trimmer <[email protected]>");
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 72f2537d90ca..5fe61b9ab5f9 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -76,20 +76,14 @@ config EFI_ZBOOT
bool "Enable the generic EFI decompressor"
depends on EFI_GENERIC_STUB && !ARM
select HAVE_KERNEL_GZIP
- select HAVE_KERNEL_LZ4
- select HAVE_KERNEL_LZMA
- select HAVE_KERNEL_LZO
- select HAVE_KERNEL_XZ
select HAVE_KERNEL_ZSTD
help
Create the bootable image as an EFI application that carries the
actual kernel image in compressed form, and decompresses it into
- memory before executing it via LoadImage/StartImage EFI boot service
- calls. For compatibility with non-EFI loaders, the payload can be
- decompressed and executed by the loader as well, provided that the
- loader implements the decompression algorithm and that non-EFI boot
- is supported by the encapsulated image. (The compression algorithm
- used is described in the zboot image header)
+ memory before executing it. For compatibility with non-EFI loaders,
+ the payload can be decompressed and executed by the loader as well,
+ provided that the loader implements the decompression algorithm.
+ (The compression algorithm used is described in the zboot header)
config EFI_ARMSTUB_DTB_LOADER
bool "Enable the DTB loader"
diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
index 552c78f5f059..a253b6144945 100644
--- a/drivers/firmware/efi/efi-pstore.c
+++ b/drivers/firmware/efi/efi-pstore.c
@@ -6,7 +6,7 @@
#include <linux/slab.h>
#include <linux/ucs2_string.h>
-MODULE_IMPORT_NS(EFIVAR);
+MODULE_IMPORT_NS("EFIVAR");
#define DUMP_NAME_LEN 66
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 70490bf2697b..60c64b81d2c3 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -273,6 +273,7 @@ static __init int efivar_ssdt_load(void)
efi_char16_t *name = NULL;
efi_status_t status;
efi_guid_t guid;
+ int ret = 0;
if (!efivar_ssdt[0])
return 0;
@@ -294,8 +295,8 @@ static __init int efivar_ssdt_load(void)
efi_char16_t *name_tmp =
krealloc(name, name_size, GFP_KERNEL);
if (!name_tmp) {
- kfree(name);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out;
}
name = name_tmp;
continue;
@@ -309,26 +310,38 @@ static __init int efivar_ssdt_load(void)
pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt, &guid);
status = efi.get_variable(name, &guid, NULL, &data_size, NULL);
- if (status != EFI_BUFFER_TOO_SMALL || !data_size)
- return -EIO;
+ if (status != EFI_BUFFER_TOO_SMALL || !data_size) {
+ ret = -EIO;
+ goto out;
+ }
data = kmalloc(data_size, GFP_KERNEL);
- if (!data)
- return -ENOMEM;
+ if (!data) {
+ ret = -ENOMEM;
+ goto out;
+ }
status = efi.get_variable(name, &guid, NULL, &data_size, data);
if (status == EFI_SUCCESS) {
- acpi_status ret = acpi_load_table(data, NULL);
- if (ret)
- pr_err("failed to load table: %u\n", ret);
- else
+ acpi_status acpi_ret = acpi_load_table(data, NULL);
+ if (ACPI_FAILURE(acpi_ret)) {
+ pr_err("efivar_ssdt: failed to load table: %u\n",
+ acpi_ret);
+ } else {
+ /*
+ * The @data will be in use by ACPI engine,
+ * do not free it!
+ */
continue;
+ }
} else {
- pr_err("failed to get var data: 0x%lx\n", status);
+ pr_err("efivar_ssdt: failed to get var data: 0x%lx\n", status);
}
kfree(data);
}
- return 0;
+out:
+ kfree(name);
+ return ret;
}
#else
static inline int efivar_ssdt_load(void) { return 0; }
@@ -433,7 +446,9 @@ static int __init efisubsys_init(void)
error = generic_ops_register();
if (error)
goto err_put;
- efivar_ssdt_load();
+ error = efivar_ssdt_load();
+ if (error)
+ pr_err("efi: failed to load SSDT, error %d.\n", error);
platform_device_register_simple("efivars", 0, NULL, 0);
}
diff --git a/drivers/firmware/efi/embedded-firmware.c b/drivers/firmware/efi/embedded-firmware.c
index f5be8e22305b..b49a09d7e665 100644
--- a/drivers/firmware/efi/embedded-firmware.c
+++ b/drivers/firmware/efi/embedded-firmware.c
@@ -16,9 +16,9 @@
/* Exported for use by lib/test_firmware.c only */
LIST_HEAD(efi_embedded_fw_list);
-EXPORT_SYMBOL_NS_GPL(efi_embedded_fw_list, TEST_FIRMWARE);
+EXPORT_SYMBOL_NS_GPL(efi_embedded_fw_list, "TEST_FIRMWARE");
bool efi_embedded_fw_checked;
-EXPORT_SYMBOL_NS_GPL(efi_embedded_fw_checked, TEST_FIRMWARE);
+EXPORT_SYMBOL_NS_GPL(efi_embedded_fw_checked, "TEST_FIRMWARE");
static const struct dmi_system_id * const embedded_fw_table[] = {
#ifdef CONFIG_TOUCHSCREEN_DMI
diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
index 7a81c0ce4780..4bb7b0584bc9 100644
--- a/drivers/firmware/efi/esrt.c
+++ b/drivers/firmware/efi/esrt.c
@@ -75,8 +75,6 @@ static LIST_HEAD(entry_list);
struct esre_attribute {
struct attribute attr;
ssize_t (*show)(struct esre_entry *entry, char *buf);
- ssize_t (*store)(struct esre_entry *entry,
- const char *buf, size_t count);
};
static struct esre_entry *to_entry(struct kobject *kobj)
diff --git a/drivers/firmware/efi/libstub/Makefile.zboot b/drivers/firmware/efi/libstub/Makefile.zboot
index 65ffd0b760b2..48842b5c106b 100644
--- a/drivers/firmware/efi/libstub/Makefile.zboot
+++ b/drivers/firmware/efi/libstub/Makefile.zboot
@@ -12,22 +12,16 @@ quiet_cmd_copy_and_pad = PAD $@
$(obj)/vmlinux.bin: $(obj)/$(EFI_ZBOOT_PAYLOAD) FORCE
$(call if_changed,copy_and_pad)
-comp-type-$(CONFIG_KERNEL_GZIP) := gzip
-comp-type-$(CONFIG_KERNEL_LZ4) := lz4
-comp-type-$(CONFIG_KERNEL_LZMA) := lzma
-comp-type-$(CONFIG_KERNEL_LZO) := lzo
-comp-type-$(CONFIG_KERNEL_XZ) := xzkern
-comp-type-$(CONFIG_KERNEL_ZSTD) := zstd22
-
# in GZIP, the appended le32 carrying the uncompressed size is part of the
# format, but in other cases, we just append it at the end for convenience,
# causing the original tools to complain when checking image integrity.
-# So disregard it when calculating the payload size in the zimage header.
-zboot-method-y := $(comp-type-y)_with_size
-zboot-size-len-y := 4
+comp-type-y := gzip
+zboot-method-y := gzip
+zboot-size-len-y := 0
-zboot-method-$(CONFIG_KERNEL_GZIP) := gzip
-zboot-size-len-$(CONFIG_KERNEL_GZIP) := 0
+comp-type-$(CONFIG_KERNEL_ZSTD) := zstd
+zboot-method-$(CONFIG_KERNEL_ZSTD) := zstd22_with_size
+zboot-size-len-$(CONFIG_KERNEL_ZSTD) := 4
$(obj)/vmlinuz: $(obj)/vmlinux.bin FORCE
$(call if_changed,$(zboot-method-y))
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index de659f6a815f..c0c81ca4237e 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -327,7 +327,7 @@ fail:
* Size of memory allocated return in *cmd_line_len.
* Returns NULL on error.
*/
-char *efi_convert_cmdline(efi_loaded_image_t *image, int *cmd_line_len)
+char *efi_convert_cmdline(efi_loaded_image_t *image)
{
const efi_char16_t *options = efi_table_attr(image, load_options);
u32 options_size = efi_table_attr(image, load_options_size);
@@ -405,7 +405,6 @@ char *efi_convert_cmdline(efi_loaded_image_t *image, int *cmd_line_len)
snprintf((char *)cmdline_addr, options_bytes, "%.*ls",
options_bytes - 1, options);
- *cmd_line_len = options_bytes;
return (char *)cmdline_addr;
}
@@ -621,10 +620,6 @@ efi_status_t efi_load_initrd(efi_loaded_image_t *image,
status = efi_load_initrd_dev_path(&initrd, hard_limit);
if (status == EFI_SUCCESS) {
efi_info("Loaded initrd from LINUX_EFI_INITRD_MEDIA_GUID device path\n");
- if (initrd.size > 0 &&
- efi_measure_tagged_event(initrd.base, initrd.size,
- EFISTUB_EVT_INITRD) == EFI_SUCCESS)
- efi_info("Measured initrd data into PCR 9\n");
} else if (status == EFI_NOT_FOUND) {
status = efi_load_initrd_cmdline(image, &initrd, soft_limit,
hard_limit);
@@ -637,6 +632,11 @@ efi_status_t efi_load_initrd(efi_loaded_image_t *image,
if (status != EFI_SUCCESS)
goto failed;
+ if (initrd.size > 0 &&
+ efi_measure_tagged_event(initrd.base, initrd.size,
+ EFISTUB_EVT_INITRD) == EFI_SUCCESS)
+ efi_info("Measured initrd data into PCR 9\n");
+
status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, sizeof(initrd),
(void **)&tbl);
if (status != EFI_SUCCESS)
diff --git a/drivers/firmware/efi/libstub/efi-stub.c b/drivers/firmware/efi/libstub/efi-stub.c
index 958a680e0660..382b54f40603 100644
--- a/drivers/firmware/efi/libstub/efi-stub.c
+++ b/drivers/firmware/efi/libstub/efi-stub.c
@@ -112,7 +112,6 @@ static u32 get_supported_rt_services(void)
efi_status_t efi_handle_cmdline(efi_loaded_image_t *image, char **cmdline_ptr)
{
- int cmdline_size = 0;
efi_status_t status;
char *cmdline;
@@ -121,35 +120,32 @@ efi_status_t efi_handle_cmdline(efi_loaded_image_t *image, char **cmdline_ptr)
* protocol. We are going to copy the command line into the
* device tree, so this can be allocated anywhere.
*/
- cmdline = efi_convert_cmdline(image, &cmdline_size);
+ cmdline = efi_convert_cmdline(image);
if (!cmdline) {
efi_err("getting command line via LOADED_IMAGE_PROTOCOL\n");
return EFI_OUT_OF_RESOURCES;
}
- if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) ||
- IS_ENABLED(CONFIG_CMDLINE_FORCE) ||
- cmdline_size == 0) {
- status = efi_parse_options(CONFIG_CMDLINE);
- if (status != EFI_SUCCESS) {
- efi_err("Failed to parse options\n");
+ if (!IS_ENABLED(CONFIG_CMDLINE_FORCE)) {
+ status = efi_parse_options(cmdline);
+ if (status != EFI_SUCCESS)
goto fail_free_cmdline;
- }
}
- if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && cmdline_size > 0) {
- status = efi_parse_options(cmdline);
- if (status != EFI_SUCCESS) {
- efi_err("Failed to parse options\n");
+ if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) ||
+ IS_ENABLED(CONFIG_CMDLINE_FORCE) ||
+ cmdline[0] == 0) {
+ status = efi_parse_options(CONFIG_CMDLINE);
+ if (status != EFI_SUCCESS)
goto fail_free_cmdline;
- }
}
*cmdline_ptr = cmdline;
return EFI_SUCCESS;
fail_free_cmdline:
- efi_bs_call(free_pool, cmdline_ptr);
+ efi_err("Failed to parse options\n");
+ efi_bs_call(free_pool, cmdline);
return status;
}
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index 685098f9626f..76e44c185f29 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -1056,7 +1056,7 @@ void efi_free(unsigned long size, unsigned long addr);
void efi_apply_loadoptions_quirk(const void **load_options, u32 *load_options_size);
-char *efi_convert_cmdline(efi_loaded_image_t *image, int *cmd_line_len);
+char *efi_convert_cmdline(efi_loaded_image_t *image);
efi_status_t efi_get_memory_map(struct efi_boot_memmap **map,
bool install_cfg_tbl);
diff --git a/drivers/firmware/efi/libstub/file.c b/drivers/firmware/efi/libstub/file.c
index d6a025df07dc..bd626d55dcbc 100644
--- a/drivers/firmware/efi/libstub/file.c
+++ b/drivers/firmware/efi/libstub/file.c
@@ -175,6 +175,12 @@ static efi_status_t efi_open_device_path(efi_file_protocol_t **volume,
return status;
}
+#ifndef CONFIG_CMDLINE
+#define CONFIG_CMDLINE
+#endif
+
+static const efi_char16_t builtin_cmdline[] = L"" CONFIG_CMDLINE;
+
/*
* Check the cmdline for a LILO-style file= arguments.
*
@@ -189,6 +195,8 @@ efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
unsigned long *load_addr,
unsigned long *load_size)
{
+ const bool ignore_load_options = IS_ENABLED(CONFIG_CMDLINE_OVERRIDE) ||
+ IS_ENABLED(CONFIG_CMDLINE_FORCE);
const efi_char16_t *cmdline = efi_table_attr(image, load_options);
u32 cmdline_len = efi_table_attr(image, load_options_size);
unsigned long efi_chunk_size = ULONG_MAX;
@@ -197,6 +205,7 @@ efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
unsigned long alloc_addr;
unsigned long alloc_size;
efi_status_t status;
+ bool twopass;
int offset;
if (!load_addr || !load_size)
@@ -209,6 +218,16 @@ efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
efi_chunk_size = EFI_READ_CHUNK_SIZE;
alloc_addr = alloc_size = 0;
+
+ if (!ignore_load_options && cmdline_len > 0) {
+ twopass = IS_ENABLED(CONFIG_CMDLINE_BOOL) ||
+ IS_ENABLED(CONFIG_CMDLINE_EXTEND);
+ } else {
+do_builtin: cmdline = builtin_cmdline;
+ cmdline_len = ARRAY_SIZE(builtin_cmdline) - 1;
+ twopass = false;
+ }
+
do {
struct finfo fi;
unsigned long size;
@@ -290,6 +309,9 @@ efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
efi_call_proto(volume, close);
} while (offset > 0);
+ if (twopass)
+ goto do_builtin;
+
*load_addr = alloc_addr;
*load_size = alloc_size;
diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c
index 1fd6823248ab..a5c6c4f163fc 100644
--- a/drivers/firmware/efi/libstub/tpm.c
+++ b/drivers/firmware/efi/libstub/tpm.c
@@ -57,7 +57,7 @@ static void efi_retrieve_tcg2_eventlog(int version, efi_physical_addr_t log_loca
struct linux_efi_tpm_eventlog *log_tbl = NULL;
unsigned long first_entry_addr, last_entry_addr;
size_t log_size, last_entry_size;
- int final_events_size = 0;
+ u32 final_events_size = 0;
first_entry_addr = (unsigned long) log_location;
@@ -110,9 +110,9 @@ static void efi_retrieve_tcg2_eventlog(int version, efi_physical_addr_t log_loca
*/
if (final_events_table && final_events_table->nr_events) {
struct tcg_pcr_event2_head *header;
- int offset;
+ u32 offset;
void *data;
- int event_size;
+ u32 event_size;
int i = final_events_table->nr_events;
data = (void *)final_events_table;
@@ -124,6 +124,9 @@ static void efi_retrieve_tcg2_eventlog(int version, efi_physical_addr_t log_loca
event_size = __calc_tpm2_event_size(header,
(void *)(long)log_location,
false);
+ /* If calc fails this is a malformed log */
+ if (!event_size)
+ break;
final_events_size += event_size;
i--;
}
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
index f8e465da344d..188c8000d245 100644
--- a/drivers/firmware/efi/libstub/x86-stub.c
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -537,7 +537,6 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
efi_guid_t proto = LOADED_IMAGE_PROTOCOL_GUID;
struct boot_params *boot_params;
struct setup_header *hdr;
- int options_size = 0;
efi_status_t status;
unsigned long alloc;
char *cmdline_ptr;
@@ -569,7 +568,7 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
hdr->initrd_addr_max = INT_MAX;
/* Convert unicode cmdline to ascii */
- cmdline_ptr = efi_convert_cmdline(image, &options_size);
+ cmdline_ptr = efi_convert_cmdline(image);
if (!cmdline_ptr) {
efi_free(PARAM_SIZE, alloc);
efi_exit(handle, EFI_OUT_OF_RESOURCES);
diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c
index 164203429fa7..c38b1a335590 100644
--- a/drivers/firmware/efi/memattr.c
+++ b/drivers/firmware/efi/memattr.c
@@ -22,6 +22,7 @@ unsigned long __ro_after_init efi_mem_attr_table = EFI_INVALID_TABLE_ADDR;
int __init efi_memattr_init(void)
{
efi_memory_attributes_table_t *tbl;
+ unsigned long size;
if (efi_mem_attr_table == EFI_INVALID_TABLE_ADDR)
return 0;
@@ -39,7 +40,22 @@ int __init efi_memattr_init(void)
goto unmap;
}
- tbl_size = sizeof(*tbl) + tbl->num_entries * tbl->desc_size;
+
+ /*
+ * Sanity check: the Memory Attributes Table contains up to 3 entries
+ * for each entry of type EfiRuntimeServicesCode in the EFI memory map.
+ * So if the size of the table exceeds 3x the size of the entire EFI
+ * memory map, there is clearly something wrong, and the table should
+ * just be ignored altogether.
+ */
+ size = tbl->num_entries * tbl->desc_size;
+ if (size > 3 * efi.memmap.nr_map * efi.memmap.desc_size) {
+ pr_warn(FW_BUG "Corrupted EFI Memory Attributes Table detected! (version == %u, desc_size == %u, num_entries == %u)\n",
+ tbl->version, tbl->desc_size, tbl->num_entries);
+ goto unmap;
+ }
+
+ tbl_size = sizeof(*tbl) + size;
memblock_reserve(efi_mem_attr_table, tbl_size);
set_bit(EFI_MEM_ATTR, &efi.flags);
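
The bound enforced above can be read as a simple predicate; the sketch below assumes the same efi.memmap globals and only factors out the check for clarity.

static bool __init memattr_table_size_is_sane(const efi_memory_attributes_table_t *tbl)
{
	unsigned long size = tbl->num_entries * tbl->desc_size;

	/* No more than 3 attribute entries per EFI memory map entry is plausible */
	return size <= 3 * efi.memmap.nr_map * efi.memmap.desc_size;
}
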
diff --git a/drivers/firmware/efi/tpm.c b/drivers/firmware/efi/tpm.c
index e8d69bd548f3..cdd431027065 100644
--- a/drivers/firmware/efi/tpm.c
+++ b/drivers/firmware/efi/tpm.c
@@ -19,7 +19,7 @@ EXPORT_SYMBOL(efi_tpm_final_log_size);
static int __init tpm2_calc_event_log_size(void *data, int count, void *size_info)
{
struct tcg_pcr_event2_head *header;
- int event_size, size = 0;
+ u32 event_size, size = 0;
while (count > 0) {
header = data + size;
@@ -40,7 +40,8 @@ int __init efi_tpm_eventlog_init(void)
{
struct linux_efi_tpm_eventlog *log_tbl;
struct efi_tcg2_final_events_table *final_tbl;
- int tbl_size;
+ unsigned int tbl_size;
+ int final_tbl_size;
int ret = 0;
if (efi.tpm_log == EFI_INVALID_TABLE_ADDR) {
@@ -60,7 +61,12 @@ int __init efi_tpm_eventlog_init(void)
}
tbl_size = sizeof(*log_tbl) + log_tbl->size;
- memblock_reserve(efi.tpm_log, tbl_size);
+ if (memblock_reserve(efi.tpm_log, tbl_size)) {
+ pr_err("TPM Event Log memblock reserve fails (0x%lx, 0x%x)\n",
+ efi.tpm_log, tbl_size);
+ ret = -ENOMEM;
+ goto out;
+ }
if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR) {
pr_info("TPM Final Events table not present\n");
@@ -80,26 +86,26 @@ int __init efi_tpm_eventlog_init(void)
goto out;
}
- tbl_size = 0;
+ final_tbl_size = 0;
if (final_tbl->nr_events != 0) {
void *events = (void *)efi.tpm_final_log
+ sizeof(final_tbl->version)
+ sizeof(final_tbl->nr_events);
- tbl_size = tpm2_calc_event_log_size(events,
- final_tbl->nr_events,
- log_tbl->log);
+ final_tbl_size = tpm2_calc_event_log_size(events,
+ final_tbl->nr_events,
+ log_tbl->log);
}
- if (tbl_size < 0) {
+ if (final_tbl_size < 0) {
pr_err(FW_BUG "Failed to parse event in TPM Final Events Log\n");
ret = -EINVAL;
goto out_calc;
}
memblock_reserve(efi.tpm_final_log,
- tbl_size + sizeof(*final_tbl));
- efi_tpm_final_log_size = tbl_size;
+ final_tbl_size + sizeof(*final_tbl));
+ efi_tpm_final_log_size = final_tbl_size;
out_calc:
early_memunmap(final_tbl, sizeof(*final_tbl));
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index 4056ba7f3440..3700e9869767 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -149,7 +149,7 @@ int efivar_lock(void)
}
return 0;
}
-EXPORT_SYMBOL_NS_GPL(efivar_lock, EFIVAR);
+EXPORT_SYMBOL_NS_GPL(efivar_lock, "EFIVAR");
/*
* efivar_lock() - obtain the efivar lock if it is free
@@ -165,7 +165,7 @@ int efivar_trylock(void)
}
return 0;
}
-EXPORT_SYMBOL_NS_GPL(efivar_trylock, EFIVAR);
+EXPORT_SYMBOL_NS_GPL(efivar_trylock, "EFIVAR");
/*
* efivar_unlock() - release the efivar lock
@@ -174,7 +174,7 @@ void efivar_unlock(void)
{
up(&efivars_lock);
}
-EXPORT_SYMBOL_NS_GPL(efivar_unlock, EFIVAR);
+EXPORT_SYMBOL_NS_GPL(efivar_unlock, "EFIVAR");
/*
* efivar_get_variable() - retrieve a variable identified by name/vendor
@@ -186,7 +186,7 @@ efi_status_t efivar_get_variable(efi_char16_t *name, efi_guid_t *vendor,
{
return __efivars->ops->get_variable(name, vendor, attr, size, data);
}
-EXPORT_SYMBOL_NS_GPL(efivar_get_variable, EFIVAR);
+EXPORT_SYMBOL_NS_GPL(efivar_get_variable, "EFIVAR");
/*
* efivar_get_next_variable() - enumerate the next name/vendor pair
@@ -198,7 +198,7 @@ efi_status_t efivar_get_next_variable(unsigned long *name_size,
{
return __efivars->ops->get_next_variable(name_size, name, vendor);
}
-EXPORT_SYMBOL_NS_GPL(efivar_get_next_variable, EFIVAR);
+EXPORT_SYMBOL_NS_GPL(efivar_get_next_variable, "EFIVAR");
/*
* efivar_set_variable_locked() - set a variable identified by name/vendor
@@ -230,7 +230,7 @@ efi_status_t efivar_set_variable_locked(efi_char16_t *name, efi_guid_t *vendor,
return setvar(name, vendor, attr, data_size, data);
}
-EXPORT_SYMBOL_NS_GPL(efivar_set_variable_locked, EFIVAR);
+EXPORT_SYMBOL_NS_GPL(efivar_set_variable_locked, "EFIVAR");
/*
* efivar_set_variable() - set a variable identified by name/vendor
@@ -252,7 +252,7 @@ efi_status_t efivar_set_variable(efi_char16_t *name, efi_guid_t *vendor,
efivar_unlock();
return status;
}
-EXPORT_SYMBOL_NS_GPL(efivar_set_variable, EFIVAR);
+EXPORT_SYMBOL_NS_GPL(efivar_set_variable, "EFIVAR");
efi_status_t efivar_query_variable_info(u32 attr,
u64 *storage_space,
@@ -264,4 +264,4 @@ efi_status_t efivar_query_variable_info(u32 attr,
return __efivars->ops->query_variable_info(attr, storage_space,
remaining_space, max_variable_size);
}
-EXPORT_SYMBOL_NS_GPL(efivar_query_variable_info, EFIVAR);
+EXPORT_SYMBOL_NS_GPL(efivar_query_variable_info, "EFIVAR");
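
Since the namespace is now spelled as a string literal, consumers import it the same way. A minimal consumer-module sketch (the variable walk is elided; the quoted MODULE_IMPORT_NS() form assumes the string-literal namespace convention that accompanies this change, which efivarfs already follows):

#include <linux/efi.h>
#include <linux/module.h>

static int __init efivar_consumer_init(void)
{
	int err;

	if (!efivar_is_available())
		return -ENODEV;

	err = efivar_lock();
	if (err)
		return err;
	/* ... enumerate variables with efivar_get_next_variable() ... */
	efivar_unlock();

	return 0;
}
module_init(efivar_consumer_init);

MODULE_IMPORT_NS("EFIVAR");	/* must match the quoted namespace in the exports above */
MODULE_DESCRIPTION("EFIVAR namespace consumer sketch");
MODULE_LICENSE("GPL");
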
diff --git a/drivers/firmware/google/coreboot_table.c b/drivers/firmware/google/coreboot_table.c
index 208652a8087c..882db32e51be 100644
--- a/drivers/firmware/google/coreboot_table.c
+++ b/drivers/firmware/google/coreboot_table.c
@@ -220,7 +220,7 @@ MODULE_DEVICE_TABLE(of, coreboot_of_match);
static struct platform_driver coreboot_table_driver = {
.probe = coreboot_table_probe,
- .remove_new = coreboot_table_remove,
+ .remove = coreboot_table_remove,
.driver = {
.name = "coreboot_table",
.acpi_match_table = ACPI_PTR(cros_coreboot_acpi_match),
diff --git a/drivers/firmware/google/framebuffer-coreboot.c b/drivers/firmware/google/framebuffer-coreboot.c
index daadd71d8ddd..c68c9f56370f 100644
--- a/drivers/firmware/google/framebuffer-coreboot.c
+++ b/drivers/firmware/google/framebuffer-coreboot.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/platform_data/simplefb.h>
#include <linux/platform_device.h>
+#include <linux/sysfb.h>
#include "coreboot_table.h"
@@ -36,6 +37,19 @@ static int framebuffer_probe(struct coreboot_device *dev)
.format = NULL,
};
+ /*
+ * On coreboot systems, the advertised LB_TAG_FRAMEBUFFER entry
+ * in the coreboot table should only be used if the payload did
+ * not pass framebuffer information to the Linux kernel.
+ *
+ * If the global screen_info data has been filled, the Generic
+ * System Framebuffers (sysfb) code will already register a platform
+ * device and pass that screen_info as platform_data to a driver
+ * that can scan out using the system-provided framebuffer.
+ */
+ if (sysfb_handles_screen_info())
+ return -ENODEV;
+
if (!fb->physical_address)
return -ENODEV;
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
index d304913314e4..24e666d5c3d1 100644
--- a/drivers/firmware/google/gsmi.c
+++ b/drivers/firmware/google/gsmi.c
@@ -918,7 +918,8 @@ static __init int gsmi_init(void)
gsmi_dev.pdev = platform_device_register_full(&gsmi_dev_info);
if (IS_ERR(gsmi_dev.pdev)) {
printk(KERN_ERR "gsmi: unable to register platform device\n");
- return PTR_ERR(gsmi_dev.pdev);
+ ret = PTR_ERR(gsmi_dev.pdev);
+ goto out_unregister;
}
/* SMI access needs to be serialized */
@@ -1056,10 +1057,11 @@ out_err:
gsmi_buf_free(gsmi_dev.name_buf);
kmem_cache_destroy(gsmi_dev.mem_pool);
platform_device_unregister(gsmi_dev.pdev);
- pr_info("gsmi: failed to load: %d\n", ret);
+out_unregister:
#ifdef CONFIG_PM
platform_driver_unregister(&gsmi_driver_info);
#endif
+ pr_info("gsmi: failed to load: %d\n", ret);
return ret;
}
diff --git a/drivers/firmware/imx/Kconfig b/drivers/firmware/imx/Kconfig
index 477d3f32d99a..907cd149c40a 100644
--- a/drivers/firmware/imx/Kconfig
+++ b/drivers/firmware/imx/Kconfig
@@ -25,7 +25,6 @@ config IMX_SCU
config IMX_SCMI_MISC_DRV
tristate "IMX SCMI MISC Protocol driver"
- depends on IMX_SCMI_MISC_EXT || COMPILE_TEST
default y if ARCH_MXC
help
The System Controller Management Interface firmware (SCMI FW) is
diff --git a/drivers/firmware/imx/imx-dsp.c b/drivers/firmware/imx/imx-dsp.c
index 01c8ef14eaec..ed79e823157a 100644
--- a/drivers/firmware/imx/imx-dsp.c
+++ b/drivers/firmware/imx/imx-dsp.c
@@ -180,7 +180,7 @@ static struct platform_driver imx_dsp_driver = {
.name = "imx-dsp",
},
.probe = imx_dsp_probe,
- .remove_new = imx_dsp_remove,
+ .remove = imx_dsp_remove,
};
builtin_platform_driver(imx_dsp_driver);
diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
index 8e59be3782cb..55b9cfad8a04 100644
--- a/drivers/firmware/memmap.c
+++ b/drivers/firmware/memmap.c
@@ -116,7 +116,7 @@ static void __meminit release_firmware_map_entry(struct kobject *kobj)
kfree(entry);
}
-static struct kobj_type __refdata memmap_ktype = {
+static const struct kobj_type memmap_ktype = {
.release = release_firmware_map_entry,
.sysfs_ops = &memmap_attr_ops,
.default_groups = def_groups,
diff --git a/drivers/firmware/microchip/mpfs-auto-update.c b/drivers/firmware/microchip/mpfs-auto-update.c
index 0f7ec8848202..e194f7acb2a9 100644
--- a/drivers/firmware/microchip/mpfs-auto-update.c
+++ b/drivers/firmware/microchip/mpfs-auto-update.c
@@ -402,10 +402,10 @@ static int mpfs_auto_update_available(struct mpfs_auto_update_priv *priv)
return -EIO;
/*
- * Bit 5 of byte 1 is "UL_Auto Update" & if it is set, Auto Update is
+ * Bit 5 of byte 1 is "UL_IAP" & if it is set, Auto Update is
* not possible.
*/
- if (response_msg[1] & AUTO_UPDATE_FEATURE_ENABLED)
+ if ((((u8 *)response_msg)[1] & AUTO_UPDATE_FEATURE_ENABLED))
return -EPERM;
return 0;
@@ -458,7 +458,7 @@ static struct platform_driver mpfs_auto_update_driver = {
.name = "mpfs-auto-update",
},
.probe = mpfs_auto_update_probe,
- .remove_new = mpfs_auto_update_remove,
+ .remove = mpfs_auto_update_remove,
};
module_platform_driver(mpfs_auto_update_driver);
diff --git a/drivers/firmware/mtk-adsp-ipc.c b/drivers/firmware/mtk-adsp-ipc.c
index a762302978de..2b79371c61c9 100644
--- a/drivers/firmware/mtk-adsp-ipc.c
+++ b/drivers/firmware/mtk-adsp-ipc.c
@@ -95,10 +95,9 @@ static int mtk_adsp_ipc_probe(struct platform_device *pdev)
adsp_chan->idx = i;
adsp_chan->ch = mbox_request_channel_byname(cl, adsp_mbox_ch_names[i]);
if (IS_ERR(adsp_chan->ch)) {
- ret = PTR_ERR(adsp_chan->ch);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to request mbox chan %s ret %d\n",
- adsp_mbox_ch_names[i], ret);
+ ret = dev_err_probe(dev, PTR_ERR(adsp_chan->ch),
+ "Failed to request mbox channel %s\n",
+ adsp_mbox_ch_names[i]);
for (j = 0; j < i; j++) {
adsp_chan = &adsp_ipc->chans[j];
@@ -133,7 +132,7 @@ static struct platform_driver mtk_adsp_ipc_driver = {
.name = "mtk-adsp-ipc",
},
.probe = mtk_adsp_ipc_probe,
- .remove_new = mtk_adsp_ipc_remove,
+ .remove = mtk_adsp_ipc_remove,
};
builtin_platform_driver(mtk_adsp_ipc_driver);
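
The conversion above relies on dev_err_probe() returning the error it is given while staying quiet (and recording the deferral reason) for -EPROBE_DEFER, which is why the explicit -EPROBE_DEFER check disappears. A generic sketch of the pattern, using a hypothetical clock lookup as the failing resource:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_get_clock(struct device *dev)
{
	struct clk *clk = devm_clk_get(dev, NULL);

	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk),
				     "Failed to get clock\n");

	return 0;
}
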
diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c
index 2328ca58bba6..a1ebbe9b73b1 100644
--- a/drivers/firmware/psci/psci.c
+++ b/drivers/firmware/psci/psci.c
@@ -78,6 +78,7 @@ struct psci_0_1_function_ids get_psci_0_1_function_ids(void)
static u32 psci_cpu_suspend_feature;
static bool psci_system_reset2_supported;
+static bool psci_system_off2_hibernate_supported;
static inline bool psci_has_ext_power_state(void)
{
@@ -333,6 +334,36 @@ static void psci_sys_poweroff(void)
invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
}
+#ifdef CONFIG_HIBERNATION
+static int psci_sys_hibernate(struct sys_off_data *data)
+{
+ /*
+ * If no hibernate type is specified SYSTEM_OFF2 defaults to selecting
+ * HIBERNATE_OFF.
+ *
+ * There are hypervisors in the wild that do not align with the spec and
+ * reject calls that explicitly provide a hibernate type. For
+ * compatibility with these nonstandard implementations, pass 0 as the
+ * type.
+ */
+ if (system_entering_hibernation())
+ invoke_psci_fn(PSCI_FN_NATIVE(1_3, SYSTEM_OFF2), 0, 0, 0);
+ return NOTIFY_DONE;
+}
+
+static int __init psci_hibernate_init(void)
+{
+ if (psci_system_off2_hibernate_supported) {
+ /* Higher priority than EFI shutdown, but only for hibernate */
+ register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
+ SYS_OFF_PRIO_FIRMWARE + 2,
+ psci_sys_hibernate, NULL);
+ }
+ return 0;
+}
+subsys_initcall(psci_hibernate_init);
+#endif
+
static int psci_features(u32 psci_func_id)
{
return invoke_psci_fn(PSCI_1_0_FN_PSCI_FEATURES,
@@ -364,6 +395,7 @@ static const struct {
PSCI_ID_NATIVE(1_1, SYSTEM_RESET2),
PSCI_ID(1_1, MEM_PROTECT),
PSCI_ID_NATIVE(1_1, MEM_PROTECT_CHECK_RANGE),
+ PSCI_ID_NATIVE(1_3, SYSTEM_OFF2),
};
static int psci_debugfs_read(struct seq_file *s, void *data)
@@ -525,6 +557,18 @@ static void __init psci_init_system_reset2(void)
psci_system_reset2_supported = true;
}
+static void __init psci_init_system_off2(void)
+{
+ int ret;
+
+ ret = psci_features(PSCI_FN_NATIVE(1_3, SYSTEM_OFF2));
+ if (ret < 0)
+ return;
+
+ if (ret & PSCI_1_3_OFF_TYPE_HIBERNATE_OFF)
+ psci_system_off2_hibernate_supported = true;
+}
+
static void __init psci_init_system_suspend(void)
{
int ret;
@@ -655,6 +699,7 @@ static int __init psci_probe(void)
psci_init_cpu_suspend();
psci_init_system_suspend();
psci_init_system_reset2();
+ psci_init_system_off2();
kvm_init_hyp_services();
}
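
The hibernate hook above is plain sys_off machinery: handlers registered for SYS_OFF_MODE_POWER_OFF run in priority order, so SYS_OFF_PRIO_FIRMWARE + 2 lets the SYSTEM_OFF2 path run before the EFI and plain PSCI SYSTEM_OFF handlers. A hedged sketch of the same registration pattern for an unrelated, hypothetical handler:

#include <linux/err.h>
#include <linux/init.h>
#include <linux/reboot.h>

static int example_poweroff(struct sys_off_data *data)
{
	pr_info("example: power-off handler invoked\n");
	return NOTIFY_DONE;	/* allow lower-priority handlers to run */
}

static int __init example_poweroff_init(void)
{
	return PTR_ERR_OR_ZERO(register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
							SYS_OFF_PRIO_FIRMWARE + 1,
							example_poweroff, NULL));
}
subsys_initcall(example_poweroff_init);
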
diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c
index 2e4260ba5f79..72bf87ddcd96 100644
--- a/drivers/firmware/qcom/qcom_scm.c
+++ b/drivers/firmware/qcom/qcom_scm.c
@@ -904,6 +904,32 @@ int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
}
EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg);
+#define QCOM_SCM_CP_APERTURE_CONTEXT_MASK GENMASK(7, 0)
+
+bool qcom_scm_set_gpu_smmu_aperture_is_available(void)
+{
+ return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
+ QCOM_SCM_MP_CP_SMMU_APERTURE_ID);
+}
+EXPORT_SYMBOL_GPL(qcom_scm_set_gpu_smmu_aperture_is_available);
+
+int qcom_scm_set_gpu_smmu_aperture(unsigned int context_bank)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_MP,
+ .cmd = QCOM_SCM_MP_CP_SMMU_APERTURE_ID,
+ .arginfo = QCOM_SCM_ARGS(4),
+ .args[0] = 0xffff0000 | FIELD_PREP(QCOM_SCM_CP_APERTURE_CONTEXT_MASK, context_bank),
+ .args[1] = 0xffffffff,
+ .args[2] = 0xffffffff,
+ .args[3] = 0xffffffff,
+ .owner = ARM_SMCCC_OWNER_SIP
+ };
+
+ return qcom_scm_call(__scm->dev, &desc, NULL);
+}
+EXPORT_SYMBOL_GPL(qcom_scm_set_gpu_smmu_aperture);
+
int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
{
struct qcom_scm_desc desc = {
@@ -1742,12 +1768,16 @@ EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_send);
* any potential issues with this, only allow validated machines for now.
*/
static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = {
+ { .compatible = "dell,xps13-9345" },
{ .compatible = "lenovo,flex-5g" },
{ .compatible = "lenovo,thinkpad-t14s" },
{ .compatible = "lenovo,thinkpad-x13s", },
+ { .compatible = "lenovo,yoga-slim7x" },
+ { .compatible = "microsoft,arcata", },
{ .compatible = "microsoft,romulus13", },
{ .compatible = "microsoft,romulus15", },
{ .compatible = "qcom,sc8180x-primus" },
+ { .compatible = "qcom,x1e001de-devkit" },
{ .compatible = "qcom,x1e80100-crd" },
{ .compatible = "qcom,x1e80100-qcp" },
{ }
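
For context, the new aperture call is meant to be driven from the GPU side. A hedged sketch of the expected call pattern from a (hypothetical) GPU driver helper, where context_bank is the SMMU context bank the GPU MMU was attached to:

#include <linux/firmware/qcom/qcom_scm.h>

static int example_setup_gpu_aperture(struct device *dev, unsigned int context_bank)
{
	int ret;

	if (!qcom_scm_set_gpu_smmu_aperture_is_available())
		return 0;	/* older firmware: nothing to program */

	ret = qcom_scm_set_gpu_smmu_aperture(context_bank);
	if (ret)
		dev_err(dev, "failed to set GPU SMMU aperture: %d\n", ret);

	return ret;
}
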
diff --git a/drivers/firmware/qcom/qcom_scm.h b/drivers/firmware/qcom/qcom_scm.h
index 685b8f59e7a6..e36b2f67607f 100644
--- a/drivers/firmware/qcom/qcom_scm.h
+++ b/drivers/firmware/qcom/qcom_scm.h
@@ -116,6 +116,7 @@ struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void);
#define QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE 0x05
#define QCOM_SCM_MP_VIDEO_VAR 0x08
#define QCOM_SCM_MP_ASSIGN 0x16
+#define QCOM_SCM_MP_CP_SMMU_APERTURE_ID 0x1b
#define QCOM_SCM_MP_SHM_BRIDGE_ENABLE 0x1c
#define QCOM_SCM_MP_SHM_BRIDGE_DELETE 0x1d
#define QCOM_SCM_MP_SHM_BRIDGE_CREATE 0x1e
diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
index 85c525745b31..d58da3e4500a 100644
--- a/drivers/firmware/qemu_fw_cfg.c
+++ b/drivers/firmware/qemu_fw_cfg.c
@@ -757,7 +757,7 @@ MODULE_DEVICE_TABLE(acpi, fw_cfg_sysfs_acpi_match);
static struct platform_driver fw_cfg_sysfs_driver = {
.probe = fw_cfg_sysfs_probe,
- .remove_new = fw_cfg_sysfs_remove,
+ .remove = fw_cfg_sysfs_remove,
.driver = {
.name = "fw_cfg",
.of_match_table = fw_cfg_sysfs_mmio_match,
diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c
index 18cc34987108..7ecde6921a0a 100644
--- a/drivers/firmware/raspberrypi.c
+++ b/drivers/firmware/raspberrypi.c
@@ -406,7 +406,7 @@ static struct platform_driver rpi_firmware_driver = {
},
.probe = rpi_firmware_probe,
.shutdown = rpi_firmware_shutdown,
- .remove_new = rpi_firmware_remove,
+ .remove = rpi_firmware_remove,
};
module_platform_driver(rpi_firmware_driver);
diff --git a/drivers/firmware/stratix10-rsu.c b/drivers/firmware/stratix10-rsu.c
index e20cee9c2d32..1ea39a0a76c7 100644
--- a/drivers/firmware/stratix10-rsu.c
+++ b/drivers/firmware/stratix10-rsu.c
@@ -802,7 +802,7 @@ static void stratix10_rsu_remove(struct platform_device *pdev)
static struct platform_driver stratix10_rsu_driver = {
.probe = stratix10_rsu_probe,
- .remove_new = stratix10_rsu_remove,
+ .remove = stratix10_rsu_remove,
.driver = {
.name = "stratix10-rsu",
.dev_groups = rsu_groups,
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
index 528f37417aea..c5c78b869561 100644
--- a/drivers/firmware/stratix10-svc.c
+++ b/drivers/firmware/stratix10-svc.c
@@ -1271,7 +1271,7 @@ static void stratix10_svc_drv_remove(struct platform_device *pdev)
static struct platform_driver stratix10_svc_driver = {
.probe = stratix10_svc_drv_probe,
- .remove_new = stratix10_svc_drv_remove,
+ .remove = stratix10_svc_drv_remove,
.driver = {
.name = "stratix10-svc",
.of_match_table = stratix10_svc_drv_match,
diff --git a/drivers/firmware/sysfb.c b/drivers/firmware/sysfb.c
index a3df782fa687..7c5c03f274b9 100644
--- a/drivers/firmware/sysfb.c
+++ b/drivers/firmware/sysfb.c
@@ -79,6 +79,25 @@ void sysfb_disable(struct device *dev)
}
EXPORT_SYMBOL_GPL(sysfb_disable);
+/**
+ * sysfb_handles_screen_info() - reports if sysfb handles the global screen_info
+ *
+ * Callers can use sysfb_handles_screen_info() to determine whether the Generic
+ * System Framebuffers (sysfb) can handle the global screen_info data structure
+ * or not. Drivers might need this information to know if they have to set up the
+ * system framebuffer, or if they have to delegate this action to sysfb instead.
+ *
+ * Returns:
+ * True if sysfb handles the global screen_info data structure.
+ */
+bool sysfb_handles_screen_info(void)
+{
+ const struct screen_info *si = &screen_info;
+
+ return !!screen_info_video_type(si);
+}
+EXPORT_SYMBOL_GPL(sysfb_handles_screen_info);
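
The coreboot framebuffer change earlier in this diff is the first user of this helper; any other firmware framebuffer driver can apply the same guard. A minimal, hypothetical probe sketch:

static int example_fw_fb_probe(struct platform_device *pdev)
{
	/* Defer to sysfb if it already registered a device for screen_info */
	if (sysfb_handles_screen_info())
		return -ENODEV;

	/* ... register the firmware-described framebuffer here ... */
	return 0;
}
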
+
#if defined(CONFIG_PCI)
static bool sysfb_pci_dev_is_enabled(struct pci_dev *pdev)
{
diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c
index 2bee6e918f81..c3a1dc344961 100644
--- a/drivers/firmware/tegra/bpmp.c
+++ b/drivers/firmware/tegra/bpmp.c
@@ -3,7 +3,6 @@
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*/
-#include <linux/cleanup.h>
#include <linux/clk/tegra.h>
#include <linux/genalloc.h>
#include <linux/mailbox_client.h>
@@ -35,24 +34,29 @@ channel_to_ops(struct tegra_bpmp_channel *channel)
struct tegra_bpmp *tegra_bpmp_get(struct device *dev)
{
- struct device_node *np __free(device_node);
struct platform_device *pdev;
struct tegra_bpmp *bpmp;
+ struct device_node *np;
np = of_parse_phandle(dev->of_node, "nvidia,bpmp", 0);
if (!np)
return ERR_PTR(-ENOENT);
pdev = of_find_device_by_node(np);
- if (!pdev)
- return ERR_PTR(-ENODEV);
+ if (!pdev) {
+ bpmp = ERR_PTR(-ENODEV);
+ goto put;
+ }
bpmp = platform_get_drvdata(pdev);
if (!bpmp) {
+ bpmp = ERR_PTR(-EPROBE_DEFER);
put_device(&pdev->dev);
- return ERR_PTR(-EPROBE_DEFER);
+ goto put;
}
+put:
+ of_node_put(np);
return bpmp;
}
EXPORT_SYMBOL_GPL(tegra_bpmp_get);
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
index 160968301b1f..806a975fff22 100644
--- a/drivers/firmware/ti_sci.c
+++ b/drivers/firmware/ti_sci.c
@@ -2,13 +2,14 @@
/*
* Texas Instruments System Control Interface Protocol Driver
*
- * Copyright (C) 2015-2022 Texas Instruments Incorporated - https://www.ti.com/
+ * Copyright (C) 2015-2024 Texas Instruments Incorporated - https://www.ti.com/
* Nishanth Menon
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/bitmap.h>
+#include <linux/cpu.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/io.h>
@@ -19,11 +20,14 @@
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/pm_qos.h>
#include <linux/property.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/soc/ti/ti-msgmgr.h>
#include <linux/soc/ti/ti_sci_protocol.h>
+#include <linux/suspend.h>
+#include <linux/sys_soc.h>
#include <linux/reboot.h>
#include "ti_sci.h"
@@ -98,6 +102,7 @@ struct ti_sci_desc {
* @minfo: Message info
* @node: list head
* @host_id: Host ID
+ * @fw_caps: FW/SoC low power capabilities
* @users: Number of users of this instance
*/
struct ti_sci_info {
@@ -114,6 +119,7 @@ struct ti_sci_info {
struct ti_sci_xfers_info minfo;
struct list_head node;
u8 host_id;
+ u64 fw_caps;
/* protected by ti_sci_list_mutex */
int users;
};
@@ -1651,6 +1657,364 @@ fail:
return ret;
}
+/**
+ * ti_sci_cmd_prepare_sleep() - Prepare system for system suspend
+ * @handle: pointer to TI SCI handle
+ * @mode: Low power mode to enter
+ * @ctx_lo: Low part of address for context save
+ * @ctx_hi: High part of address for context save
+ * @debug_flags: Debug flags to pass to firmware
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_prepare_sleep(const struct ti_sci_handle *handle, u8 mode,
+ u32 ctx_lo, u32 ctx_hi, u32 debug_flags)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_prepare_sleep *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PREPARE_SLEEP,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+
+ req = (struct ti_sci_msg_req_prepare_sleep *)xfer->xfer_buf;
+ req->mode = mode;
+ req->ctx_lo = ctx_lo;
+ req->ctx_hi = ctx_hi;
+ req->debug_flags = debug_flags;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp)) {
+ dev_err(dev, "Failed to prepare sleep\n");
+ ret = -ENODEV;
+ }
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_msg_cmd_query_fw_caps() - Get the FW/SoC capabilities
+ * @handle: Pointer to TI SCI handle
+ * @fw_caps: Each bit in fw_caps indicating one FW/SOC capability
+ *
+ * Check if the firmware supports any optional low power modes.
+ * Old revisions of TIFS (< 08.04) will NACK the request which results in
+ * -ENODEV being returned.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_msg_cmd_query_fw_caps(const struct ti_sci_handle *handle,
+ u64 *fw_caps)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_xfer *xfer;
+ struct ti_sci_msg_resp_query_fw_caps *resp;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_QUERY_FW_CAPS,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(struct ti_sci_msg_hdr),
+ sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_resp_query_fw_caps *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp)) {
+ dev_err(dev, "Failed to get capabilities\n");
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ if (fw_caps)
+ *fw_caps = resp->fw_caps;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_set_io_isolation() - Set the IO isolation state for LPM
+ * @handle: Pointer to TI SCI handle
+ * @state: The desired state of the IO isolation
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_set_io_isolation(const struct ti_sci_handle *handle,
+ u8 state)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_set_io_isolation *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_IO_ISOLATION,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_set_io_isolation *)xfer->xfer_buf;
+ req->state = state;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp)) {
+ dev_err(dev, "Failed to set IO isolation\n");
+ ret = -ENODEV;
+ }
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_msg_cmd_lpm_wake_reason() - Get the wakeup source from LPM
+ * @handle: Pointer to TI SCI handle
+ * @source: The wakeup source that woke the SoC from LPM
+ * @timestamp: Timestamp of the wakeup event
+ * @pin: The pin that has triggered wake up
+ * @mode: The last entered low power mode
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_msg_cmd_lpm_wake_reason(const struct ti_sci_handle *handle,
+ u32 *source, u64 *timestamp, u8 *pin, u8 *mode)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_xfer *xfer;
+ struct ti_sci_msg_resp_lpm_wake_reason *resp;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_LPM_WAKE_REASON,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(struct ti_sci_msg_hdr),
+ sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_resp_lpm_wake_reason *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp)) {
+ dev_err(dev, "Failed to get wake reason\n");
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ if (source)
+ *source = resp->wake_source;
+ if (timestamp)
+ *timestamp = resp->wake_timestamp;
+ if (pin)
+ *pin = resp->wake_pin;
+ if (mode)
+ *mode = resp->mode;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_set_device_constraint() - Set LPM constraint on behalf of a device
+ * @handle: pointer to TI SCI handle
+ * @id: Device identifier
+ * @state: The desired state of device constraint: set or clear
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_set_device_constraint(const struct ti_sci_handle *handle,
+ u32 id, u8 state)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_lpm_set_device_constraint *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_LPM_SET_DEVICE_CONSTRAINT,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_lpm_set_device_constraint *)xfer->xfer_buf;
+ req->id = id;
+ req->state = state;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp)) {
+ dev_err(dev, "Failed to set device constraint\n");
+ ret = -ENODEV;
+ }
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_set_latency_constraint() - Set LPM resume latency constraint
+ * @handle: pointer to TI SCI handle
+ * @latency: maximum acceptable latency (in ms) to wake up from LPM
+ * @state: The desired state of latency constraint: set or clear
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_set_latency_constraint(const struct ti_sci_handle *handle,
+ u16 latency, u8 state)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_lpm_set_latency_constraint *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_LPM_SET_LATENCY_CONSTRAINT,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_lpm_set_latency_constraint *)xfer->xfer_buf;
+ req->latency = latency;
+ req->state = state;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp)) {
+ dev_err(dev, "Failed to set latency constraint\n");
+ ret = -ENODEV;
+ }
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
{
struct ti_sci_info *info;
@@ -2793,6 +3157,7 @@ static void ti_sci_setup_ops(struct ti_sci_info *info)
struct ti_sci_core_ops *core_ops = &ops->core_ops;
struct ti_sci_dev_ops *dops = &ops->dev_ops;
struct ti_sci_clk_ops *cops = &ops->clk_ops;
+ struct ti_sci_pm_ops *pmops = &ops->pm_ops;
struct ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops;
struct ti_sci_rm_irq_ops *iops = &ops->rm_irq_ops;
struct ti_sci_rm_ringacc_ops *rops = &ops->rm_ring_ops;
@@ -2832,6 +3197,13 @@ static void ti_sci_setup_ops(struct ti_sci_info *info)
cops->set_freq = ti_sci_cmd_clk_set_freq;
cops->get_freq = ti_sci_cmd_clk_get_freq;
+ if (info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED) {
+ pr_debug("detected DM managed LPM in fw_caps\n");
+ pmops->lpm_wake_reason = ti_sci_msg_cmd_lpm_wake_reason;
+ pmops->set_device_constraint = ti_sci_cmd_set_device_constraint;
+ pmops->set_latency_constraint = ti_sci_cmd_set_latency_constraint;
+ }
+
rm_core_ops->get_range = ti_sci_cmd_get_resource_range;
rm_core_ops->get_range_from_shost =
ti_sci_cmd_get_resource_range_from_shost;
@@ -3262,6 +3634,111 @@ static int tisci_reboot_handler(struct sys_off_data *data)
return NOTIFY_BAD;
}
+static int ti_sci_prepare_system_suspend(struct ti_sci_info *info)
+{
+ /*
+ * Map and validate the target Linux suspend state to TISCI LPM.
+ * Default is to let Device Manager select the low power mode.
+ */
+ switch (pm_suspend_target_state) {
+ case PM_SUSPEND_MEM:
+ if (info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED) {
+ /*
+ * For the DM_MANAGED mode the context is reserved for
+ * internal use and can be 0
+ */
+ return ti_sci_cmd_prepare_sleep(&info->handle,
+ TISCI_MSG_VALUE_SLEEP_MODE_DM_MANAGED,
+ 0, 0, 0);
+ } else {
+ /* DM Managed is not supported by the firmware. */
+ dev_err(info->dev, "Suspend to memory is not supported by the firmware\n");
+ return -EOPNOTSUPP;
+ }
+ break;
+ default:
+ /*
+ * Do not fail if we don't have action to take for a
+ * specific suspend mode.
+ */
+ return 0;
+ }
+}
+
+static int __maybe_unused ti_sci_suspend(struct device *dev)
+{
+ struct ti_sci_info *info = dev_get_drvdata(dev);
+ struct device *cpu_dev, *cpu_dev_max = NULL;
+ s32 val, cpu_lat = 0;
+ int i, ret;
+
+ if (info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED) {
+ for_each_possible_cpu(i) {
+ cpu_dev = get_cpu_device(i);
+ val = dev_pm_qos_read_value(cpu_dev, DEV_PM_QOS_RESUME_LATENCY);
+ if (val != PM_QOS_RESUME_LATENCY_NO_CONSTRAINT) {
+ cpu_lat = max(cpu_lat, val);
+ cpu_dev_max = cpu_dev;
+ }
+ }
+ if (cpu_dev_max) {
+ dev_dbg(cpu_dev_max, "%s: sending max CPU latency=%u\n", __func__, cpu_lat);
+ ret = ti_sci_cmd_set_latency_constraint(&info->handle,
+ cpu_lat, TISCI_MSG_CONSTRAINT_SET);
+ if (ret)
+ return ret;
+ }
+ }
+
+ ret = ti_sci_prepare_system_suspend(info);
+ if (ret)
+ return ret;
+
+ return 0;
+}
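
The aggregation above picks up per-CPU resume-latency QoS requests. For reference, a hedged sketch of how such a request could be expressed by another driver (userspace can do the same through the per-CPU pm_qos_resume_latency_us attribute); example_req and the helper are hypothetical:

#include <linux/cpu.h>
#include <linux/pm_qos.h>

static struct dev_pm_qos_request example_req;

static int example_limit_cpu0_resume_latency(s32 max_latency_us)
{
	struct device *cpu_dev = get_cpu_device(0);

	if (!cpu_dev)
		return -ENODEV;

	/* ti_sci_suspend() reads this back via DEV_PM_QOS_RESUME_LATENCY */
	return dev_pm_qos_add_request(cpu_dev, &example_req,
				      DEV_PM_QOS_RESUME_LATENCY,
				      max_latency_us);
}
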
+
+static int __maybe_unused ti_sci_suspend_noirq(struct device *dev)
+{
+ struct ti_sci_info *info = dev_get_drvdata(dev);
+ int ret = 0;
+
+ ret = ti_sci_cmd_set_io_isolation(&info->handle, TISCI_MSG_VALUE_IO_ENABLE);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int __maybe_unused ti_sci_resume_noirq(struct device *dev)
+{
+ struct ti_sci_info *info = dev_get_drvdata(dev);
+ int ret = 0;
+ u32 source;
+ u64 time;
+ u8 pin;
+ u8 mode;
+
+ ret = ti_sci_cmd_set_io_isolation(&info->handle, TISCI_MSG_VALUE_IO_DISABLE);
+ if (ret)
+ return ret;
+
+ ret = ti_sci_msg_cmd_lpm_wake_reason(&info->handle, &source, &time, &pin, &mode);
+ /* Do not fail to resume on error as the wake reason is not critical */
+ if (!ret)
+ dev_info(dev, "ti_sci: wakeup source:0x%x, pin:0x%x, mode:0x%x\n",
+ source, pin, mode);
+
+ return 0;
+}
+
+static const struct dev_pm_ops ti_sci_pm_ops = {
+#ifdef CONFIG_PM_SLEEP
+ .suspend = ti_sci_suspend,
+ .suspend_noirq = ti_sci_suspend_noirq,
+ .resume_noirq = ti_sci_resume_noirq,
+#endif
+};
+
/* Description for K2G */
static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
.default_host_id = 2,
@@ -3390,6 +3867,13 @@ static int ti_sci_probe(struct platform_device *pdev)
goto out;
}
+ ti_sci_msg_cmd_query_fw_caps(&info->handle, &info->fw_caps);
+ dev_dbg(dev, "Detected firmware capabilities: %s%s%s\n",
+ info->fw_caps & MSG_FLAG_CAPS_GENERIC ? "Generic" : "",
+ info->fw_caps & MSG_FLAG_CAPS_LPM_PARTIAL_IO ? " Partial-IO" : "",
+ info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED ? " DM-Managed" : ""
+ );
+
ti_sci_setup_ops(info);
ret = devm_register_restart_handler(dev, tisci_reboot_handler, info);
@@ -3421,8 +3905,9 @@ static struct platform_driver ti_sci_driver = {
.probe = ti_sci_probe,
.driver = {
.name = "ti-sci",
- .of_match_table = of_match_ptr(ti_sci_of_match),
+ .of_match_table = ti_sci_of_match,
.suppress_bind_attrs = true,
+ .pm = &ti_sci_pm_ops,
},
};
module_platform_driver(ti_sci_driver);
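
The new pm_ops are reached through the TI SCI handle like the other op tables. A hedged sketch of how a client driver could pin its device across a DM-managed suspend, assuming it already obtained a handle (for example via ti_sci_get_by_phandle()) and knows its TISCI device ID:

static int example_pin_device_for_lpm(const struct ti_sci_handle *handle, u32 dev_id)
{
	const struct ti_sci_pm_ops *pm_ops = &handle->ops.pm_ops;

	if (!pm_ops->set_device_constraint)
		return -EOPNOTSUPP;	/* firmware without DM-managed LPM */

	/* Ask the DM not to power off or reset this device in the selected LPM */
	return pm_ops->set_device_constraint(handle, dev_id,
					     TISCI_MSG_CONSTRAINT_SET);
}
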
diff --git a/drivers/firmware/ti_sci.h b/drivers/firmware/ti_sci.h
index 5846c60220f5..053387d7baa0 100644
--- a/drivers/firmware/ti_sci.h
+++ b/drivers/firmware/ti_sci.h
@@ -6,7 +6,7 @@
* The system works in a message response protocol
* See: https://software-dl.ti.com/tisci/esd/latest/index.html for details
*
- * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/
+ * Copyright (C) 2015-2024 Texas Instruments Incorporated - https://www.ti.com/
*/
#ifndef __TI_SCI_H
@@ -19,6 +19,7 @@
#define TI_SCI_MSG_WAKE_REASON 0x0003
#define TI_SCI_MSG_GOODBYE 0x0004
#define TI_SCI_MSG_SYS_RESET 0x0005
+#define TI_SCI_MSG_QUERY_FW_CAPS 0x0022
/* Device requests */
#define TI_SCI_MSG_SET_DEVICE_STATE 0x0200
@@ -35,6 +36,13 @@
#define TI_SCI_MSG_QUERY_CLOCK_FREQ 0x010d
#define TI_SCI_MSG_GET_CLOCK_FREQ 0x010e
+/* Low Power Mode Requests */
+#define TI_SCI_MSG_PREPARE_SLEEP 0x0300
+#define TI_SCI_MSG_LPM_WAKE_REASON 0x0306
+#define TI_SCI_MSG_SET_IO_ISOLATION 0x0307
+#define TI_SCI_MSG_LPM_SET_DEVICE_CONSTRAINT 0x0309
+#define TI_SCI_MSG_LPM_SET_LATENCY_CONSTRAINT 0x030A
+
/* Resource Management Requests */
#define TI_SCI_MSG_GET_RESOURCE_RANGE 0x1500
@@ -133,6 +141,27 @@ struct ti_sci_msg_req_reboot {
} __packed;
/**
+ * struct ti_sci_msg_resp_query_fw_caps - Response for query firmware caps
+ * @hdr: Generic header
+ * @fw_caps: Each bit in fw_caps indicating one FW/SOC capability
+ * MSG_FLAG_CAPS_GENERIC: Generic capability (LPM not supported)
+ * MSG_FLAG_CAPS_LPM_PARTIAL_IO: Partial IO in LPM
+ * MSG_FLAG_CAPS_LPM_DM_MANAGED: LPM can be managed by DM
+ *
+ * Response to a generic message with message type TI_SCI_MSG_QUERY_FW_CAPS
+ * providing currently available SoC/firmware capabilities. SoCs that don't
+ * support low power modes return only the MSG_FLAG_CAPS_GENERIC capability.
+ */
+struct ti_sci_msg_resp_query_fw_caps {
+ struct ti_sci_msg_hdr hdr;
+#define MSG_FLAG_CAPS_GENERIC TI_SCI_MSG_FLAG(0)
+#define MSG_FLAG_CAPS_LPM_PARTIAL_IO TI_SCI_MSG_FLAG(4)
+#define MSG_FLAG_CAPS_LPM_DM_MANAGED TI_SCI_MSG_FLAG(5)
+#define MSG_MASK_CAPS_LPM GENMASK_ULL(4, 1)
+ u64 fw_caps;
+} __packed;
+
+/**
* struct ti_sci_msg_req_set_device_state - Set the desired state of the device
* @hdr: Generic header
* @id: Indicates which device to modify
@@ -545,6 +574,118 @@ struct ti_sci_msg_resp_get_clock_freq {
u64 freq_hz;
} __packed;
+/**
+ * struct ti_sci_msg_req_prepare_sleep - Request for TI_SCI_MSG_PREPARE_SLEEP.
+ *
+ * @hdr: TISCI header to provide ACK/NAK flags to the host.
+ * @mode: Low power mode to enter.
+ * @ctx_lo: Low 32-bits of physical pointer to address to use for context save.
+ * @ctx_hi: High 32-bits of physical pointer to address to use for context save.
+ * @debug_flags: Flags that can be set to halt the sequence during suspend or
+ * resume to allow JTAG connection and debug.
+ *
+ * This message is used as the first step of entering a low power mode. It
+ * allows configurable information, including which state to enter, to be
+ * easily shared from the application, as this is a non-secure message and
+ * therefore can be sent by anyone.
+ */
+struct ti_sci_msg_req_prepare_sleep {
+ struct ti_sci_msg_hdr hdr;
+
+#define TISCI_MSG_VALUE_SLEEP_MODE_DM_MANAGED 0xfd
+ u8 mode;
+ u32 ctx_lo;
+ u32 ctx_hi;
+ u32 debug_flags;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_set_io_isolation - Request for TI_SCI_MSG_SET_IO_ISOLATION.
+ *
+ * @hdr: Generic header
+ * @state: The desired state of the IO isolation.
+ *
+ * This message is used to enable/disable IO isolation for low power modes.
+ * Response is generic ACK / NACK message.
+ */
+struct ti_sci_msg_req_set_io_isolation {
+ struct ti_sci_msg_hdr hdr;
+ u8 state;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_lpm_wake_reason - Response for TI_SCI_MSG_LPM_WAKE_REASON.
+ *
+ * @hdr: Generic header.
+ * @wake_source: The wake-up source that woke the SoC from LPM.
+ * @wake_timestamp: Timestamp at which the SoC woke.
+ * @wake_pin: The pin that has triggered wake up.
+ * @mode: The last entered low power mode.
+ * @rsvd: Reserved for future use.
+ *
+ * Response to a generic message with message type TI_SCI_MSG_LPM_WAKE_REASON,
+ * used to query the wake up source, pin and entered low power mode.
+ */
+struct ti_sci_msg_resp_lpm_wake_reason {
+ struct ti_sci_msg_hdr hdr;
+ u32 wake_source;
+ u64 wake_timestamp;
+ u8 wake_pin;
+ u8 mode;
+ u32 rsvd[2];
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_lpm_set_device_constraint - Request for
+ * TI_SCI_MSG_LPM_SET_DEVICE_CONSTRAINT.
+ *
+ * @hdr: TISCI header to provide ACK/NAK flags to the host.
+ * @id: Device ID of device whose constraint has to be modified.
+ * @state: The desired state of device constraint: set or clear.
+ * @rsvd: Reserved for future use.
+ *
+ * This message is used by the host to set a constraint on a device. This can be
+ * sent anytime after boot before prepare sleep message. Any device can set a
+ * constraint on the low power mode that the SoC can enter. It allows
+ * configurable information to be easily shared from the application, as this
+ * is a non-secure message and therefore can be sent by anyone. By setting a
+ * constraint, the device ensures that it will not be powered off or reset in
+ * the selected mode. Note: Access Restriction: Exclusivity flag of Device will
+ * be honored. If some other host already has constraint on this device ID,
+ * NACK will be returned.
+ */
+struct ti_sci_msg_req_lpm_set_device_constraint {
+ struct ti_sci_msg_hdr hdr;
+ u32 id;
+ u8 state;
+ u32 rsvd[2];
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_lpm_set_latency_constraint - Request for
+ * TI_SCI_MSG_LPM_SET_LATENCY_CONSTRAINT.
+ *
+ * @hdr: TISCI header to provide ACK/NAK flags to the host.
+ * @latency: The maximum acceptable latency to wake up from low power mode
+ * in milliseconds. The deeper the state, the higher the latency.
+ * @state: The desired state of wakeup latency constraint: set or clear.
+ * @rsvd: Reserved for future use.
+ *
+ * This message is used by the host to set the wakeup latency from low power mode. This can
+ * be sent anytime after boot before prepare sleep message, and can be sent after
+ * current low power mode is exited. Any device can set a constraint on the low power
+ * mode that the SoC can enter. It allows configurable information to be easily shared
+ * from the application, as this is a non-secure message and therefore can be sent by
+ * anyone. By setting a wakeup latency constraint, the host ensures that the resume time
+ * from selected low power mode will be less than the constraint value.
+ */
+struct ti_sci_msg_req_lpm_set_latency_constraint {
+ struct ti_sci_msg_hdr hdr;
+ u16 latency;
+ u8 state;
+ u32 rsvd;
+} __packed;
+
#define TI_SCI_IRQ_SECONDARY_HOST_INVALID 0xff
/**
diff --git a/drivers/firmware/turris-mox-rwtm.c b/drivers/firmware/turris-mox-rwtm.c
index f3bc0d427825..47fe6261f5a3 100644
--- a/drivers/firmware/turris-mox-rwtm.c
+++ b/drivers/firmware/turris-mox-rwtm.c
@@ -61,6 +61,27 @@ enum mbox_cmd {
MBOX_CMD_OTP_WRITE = 8,
};
+/**
+ * struct mox_rwtm - driver private data structure
+ * @mbox_client: rWTM mailbox client
+ * @mbox: rWTM mailbox channel
+ * @hwrng: RNG driver structure
+ * @reply: last mailbox reply, filled in receive callback
+ * @buf: DMA buffer
+ * @buf_phys: physical address of the DMA buffer
+ * @busy: mutex to protect mailbox command execution
+ * @cmd_done: command done completion
+ * @has_board_info: whether board information is present
+ * @serial_number: serial number of the device
+ * @board_version: board version / revision of the device
+ * @ram_size: RAM size of the device
+ * @mac_address1: first MAC address of the device
+ * @mac_address2: second MAC address of the device
+ * @has_pubkey: whether board ECDSA public key is present
+ * @pubkey: board ECDSA public key
+ * @last_sig: last ECDSA signature generated with board ECDSA private key
+ * @last_sig_done: whether the last ECDSA signing is complete
+ */
struct mox_rwtm {
struct mbox_client mbox_client;
struct mbox_chan *mbox;
@@ -74,13 +95,11 @@ struct mox_rwtm {
struct mutex busy;
struct completion cmd_done;
- /* board information */
bool has_board_info;
u64 serial_number;
int board_version, ram_size;
u8 mac_address1[ETH_ALEN], mac_address2[ETH_ALEN];
- /* public key burned in eFuse */
bool has_pubkey;
u8 pubkey[135];
diff --git a/drivers/firmware/xilinx/zynqmp-debug.c b/drivers/firmware/xilinx/zynqmp-debug.c
index 8528850af889..22853ae0efdf 100644
--- a/drivers/firmware/xilinx/zynqmp-debug.c
+++ b/drivers/firmware/xilinx/zynqmp-debug.c
@@ -31,13 +31,51 @@ static char debugfs_buf[PAGE_SIZE];
#define PM_API(id) {id, #id, strlen(#id)}
static struct pm_api_info pm_api_list[] = {
+ PM_API(PM_FORCE_POWERDOWN),
+ PM_API(PM_REQUEST_WAKEUP),
+ PM_API(PM_SYSTEM_SHUTDOWN),
+ PM_API(PM_REQUEST_NODE),
+ PM_API(PM_RELEASE_NODE),
+ PM_API(PM_SET_REQUIREMENT),
PM_API(PM_GET_API_VERSION),
+ PM_API(PM_REGISTER_NOTIFIER),
+ PM_API(PM_RESET_ASSERT),
+ PM_API(PM_RESET_GET_STATUS),
+ PM_API(PM_GET_CHIPID),
+ PM_API(PM_PINCTRL_SET_FUNCTION),
+ PM_API(PM_PINCTRL_CONFIG_PARAM_GET),
+ PM_API(PM_PINCTRL_CONFIG_PARAM_SET),
+ PM_API(PM_IOCTL),
+ PM_API(PM_CLOCK_ENABLE),
+ PM_API(PM_CLOCK_DISABLE),
+ PM_API(PM_CLOCK_GETSTATE),
+ PM_API(PM_CLOCK_SETDIVIDER),
+ PM_API(PM_CLOCK_GETDIVIDER),
+ PM_API(PM_CLOCK_SETPARENT),
+ PM_API(PM_CLOCK_GETPARENT),
PM_API(PM_QUERY_DATA),
};
static struct dentry *firmware_debugfs_root;
/**
+ * zynqmp_pm_ioctl - PM IOCTL for device control and configs
+ * @node: Node ID of the device
+ * @ioctl: ID of the requested IOCTL
+ * @arg1: Argument 1 of requested IOCTL call
+ * @arg2: Argument 2 of requested IOCTL call
+ * @arg3: Argument 3 of requested IOCTL call
+ * @out: Returned output value
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_ioctl(const u32 node, const u32 ioctl, const u32 arg1,
+ const u32 arg2, const u32 arg3, u32 *out)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, out, 5, node, ioctl, arg1, arg2, arg3);
+}
+
+/**
* zynqmp_pm_argument_value() - Extract argument value from a PM-API request
* @arg: Entered PM-API argument in string format
*
@@ -95,6 +133,128 @@ static int process_api_request(u32 pm_id, u64 *pm_api_arg, u32 *pm_api_ret)
sprintf(debugfs_buf, "PM-API Version = %d.%d\n",
pm_api_version >> 16, pm_api_version & 0xffff);
break;
+ case PM_FORCE_POWERDOWN:
+ ret = zynqmp_pm_force_pwrdwn(pm_api_arg[0],
+ pm_api_arg[1] ? pm_api_arg[1] :
+ ZYNQMP_PM_REQUEST_ACK_NO);
+ break;
+ case PM_REQUEST_WAKEUP:
+ ret = zynqmp_pm_request_wake(pm_api_arg[0],
+ pm_api_arg[1], pm_api_arg[2],
+ pm_api_arg[3] ? pm_api_arg[3] :
+ ZYNQMP_PM_REQUEST_ACK_NO);
+ break;
+ case PM_SYSTEM_SHUTDOWN:
+ ret = zynqmp_pm_system_shutdown(pm_api_arg[0], pm_api_arg[1]);
+ break;
+ case PM_REQUEST_NODE:
+ ret = zynqmp_pm_request_node(pm_api_arg[0],
+ pm_api_arg[1] ? pm_api_arg[1] :
+ ZYNQMP_PM_CAPABILITY_ACCESS,
+ pm_api_arg[2] ? pm_api_arg[2] : 0,
+ pm_api_arg[3] ? pm_api_arg[3] :
+ ZYNQMP_PM_REQUEST_ACK_BLOCKING);
+ break;
+ case PM_RELEASE_NODE:
+ ret = zynqmp_pm_release_node(pm_api_arg[0]);
+ break;
+ case PM_SET_REQUIREMENT:
+ ret = zynqmp_pm_set_requirement(pm_api_arg[0],
+ pm_api_arg[1] ? pm_api_arg[1] :
+ ZYNQMP_PM_CAPABILITY_CONTEXT,
+ pm_api_arg[2] ?
+ pm_api_arg[2] : 0,
+ pm_api_arg[3] ? pm_api_arg[3] :
+ ZYNQMP_PM_REQUEST_ACK_BLOCKING);
+ break;
+ case PM_REGISTER_NOTIFIER:
+ ret = zynqmp_pm_register_notifier(pm_api_arg[0],
+ pm_api_arg[1] ?
+ pm_api_arg[1] : 0,
+ pm_api_arg[2] ?
+ pm_api_arg[2] : 0,
+ pm_api_arg[3] ?
+ pm_api_arg[3] : 0);
+ break;
+ case PM_RESET_ASSERT:
+ ret = zynqmp_pm_reset_assert(pm_api_arg[0], pm_api_arg[1]);
+ break;
+ case PM_RESET_GET_STATUS:
+ ret = zynqmp_pm_reset_get_status(pm_api_arg[0], &pm_api_ret[0]);
+ if (!ret)
+ sprintf(debugfs_buf, "Reset status: %u\n",
+ pm_api_ret[0]);
+ break;
+ case PM_GET_CHIPID:
+ ret = zynqmp_pm_get_chipid(&pm_api_ret[0], &pm_api_ret[1]);
+ if (!ret)
+ sprintf(debugfs_buf, "Idcode: %#x, Version:%#x\n",
+ pm_api_ret[0], pm_api_ret[1]);
+ break;
+ case PM_PINCTRL_SET_FUNCTION:
+ ret = zynqmp_pm_pinctrl_set_function(pm_api_arg[0],
+ pm_api_arg[1]);
+ break;
+ case PM_PINCTRL_CONFIG_PARAM_GET:
+ ret = zynqmp_pm_pinctrl_get_config(pm_api_arg[0], pm_api_arg[1],
+ &pm_api_ret[0]);
+ if (!ret)
+ sprintf(debugfs_buf,
+ "Pin: %llu, Param: %llu, Value: %u\n",
+ pm_api_arg[0], pm_api_arg[1],
+ pm_api_ret[0]);
+ break;
+ case PM_PINCTRL_CONFIG_PARAM_SET:
+ ret = zynqmp_pm_pinctrl_set_config(pm_api_arg[0],
+ pm_api_arg[1],
+ pm_api_arg[2]);
+ break;
+ case PM_IOCTL:
+ ret = zynqmp_pm_ioctl(pm_api_arg[0], pm_api_arg[1],
+ pm_api_arg[2], pm_api_arg[3],
+ pm_api_arg[4], &pm_api_ret[0]);
+ if (!ret && (pm_api_arg[1] == IOCTL_GET_RPU_OPER_MODE ||
+ pm_api_arg[1] == IOCTL_GET_PLL_FRAC_MODE ||
+ pm_api_arg[1] == IOCTL_GET_PLL_FRAC_DATA ||
+ pm_api_arg[1] == IOCTL_READ_GGS ||
+ pm_api_arg[1] == IOCTL_READ_PGGS ||
+ pm_api_arg[1] == IOCTL_READ_REG))
+ sprintf(debugfs_buf, "IOCTL return value: %u\n",
+ pm_api_ret[1]);
+ if (!ret && pm_api_arg[1] == IOCTL_GET_QOS)
+ sprintf(debugfs_buf, "Default QoS: %u\nCurrent QoS: %u\n",
+ pm_api_ret[1], pm_api_ret[2]);
+ break;
+ case PM_CLOCK_ENABLE:
+ ret = zynqmp_pm_clock_enable(pm_api_arg[0]);
+ break;
+ case PM_CLOCK_DISABLE:
+ ret = zynqmp_pm_clock_disable(pm_api_arg[0]);
+ break;
+ case PM_CLOCK_GETSTATE:
+ ret = zynqmp_pm_clock_getstate(pm_api_arg[0], &pm_api_ret[0]);
+ if (!ret)
+ sprintf(debugfs_buf, "Clock state: %u\n",
+ pm_api_ret[0]);
+ break;
+ case PM_CLOCK_SETDIVIDER:
+ ret = zynqmp_pm_clock_setdivider(pm_api_arg[0], pm_api_arg[1]);
+ break;
+ case PM_CLOCK_GETDIVIDER:
+ ret = zynqmp_pm_clock_getdivider(pm_api_arg[0], &pm_api_ret[0]);
+ if (!ret)
+ sprintf(debugfs_buf, "Divider Value: %d\n",
+ pm_api_ret[0]);
+ break;
+ case PM_CLOCK_SETPARENT:
+ ret = zynqmp_pm_clock_setparent(pm_api_arg[0], pm_api_arg[1]);
+ break;
+ case PM_CLOCK_GETPARENT:
+ ret = zynqmp_pm_clock_getparent(pm_api_arg[0], &pm_api_ret[0]);
+ if (!ret)
+ sprintf(debugfs_buf,
+ "Clock parent Index: %u\n", pm_api_ret[0]);
+ break;
case PM_QUERY_DATA:
qdata.qid = pm_api_arg[0];
qdata.arg1 = pm_api_arg[1];
@@ -150,7 +310,7 @@ static ssize_t zynqmp_pm_debugfs_api_write(struct file *file,
char *kern_buff, *tmp_buff;
char *pm_api_req;
u32 pm_id = 0;
- u64 pm_api_arg[4] = {0, 0, 0, 0};
+ u64 pm_api_arg[5] = {0, 0, 0, 0, 0};
/* Return values from PM APIs calls */
u32 pm_api_ret[4] = {0, 0, 0, 0};
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index add8acf66a9c..720fa8b5d8e9 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -3,7 +3,7 @@
* Xilinx Zynq MPSoC Firmware layer
*
* Copyright (C) 2014-2022 Xilinx, Inc.
- * Copyright (C) 2022 - 2023, Advanced Micro Devices, Inc.
+ * Copyright (C) 2022 - 2024, Advanced Micro Devices, Inc.
*
* Michal Simek <[email protected]>
* Davorin Mista <[email protected]>
@@ -46,6 +46,7 @@ static DEFINE_HASHTABLE(pm_api_features_map, PM_API_FEATURE_CHECK_MAX_ORDER);
static u32 ioctl_features[FEATURE_PAYLOAD_SIZE];
static u32 query_features[FEATURE_PAYLOAD_SIZE];
+static u32 sip_svc_version;
static struct platform_device *em_dev;
/**
@@ -151,6 +152,9 @@ static noinline int do_fw_call_smc(u32 *ret_payload, u32 num_args, ...)
ret_payload[1] = upper_32_bits(res.a0);
ret_payload[2] = lower_32_bits(res.a1);
ret_payload[3] = upper_32_bits(res.a1);
+ ret_payload[4] = lower_32_bits(res.a2);
+ ret_payload[5] = upper_32_bits(res.a2);
+ ret_payload[6] = lower_32_bits(res.a3);
}
return zynqmp_pm_ret_code((enum pm_ret_status)res.a0);
@@ -191,6 +195,9 @@ static noinline int do_fw_call_hvc(u32 *ret_payload, u32 num_args, ...)
ret_payload[1] = upper_32_bits(res.a0);
ret_payload[2] = lower_32_bits(res.a1);
ret_payload[3] = upper_32_bits(res.a1);
+ ret_payload[4] = lower_32_bits(res.a2);
+ ret_payload[5] = upper_32_bits(res.a2);
+ ret_payload[6] = lower_32_bits(res.a3);
}
return zynqmp_pm_ret_code((enum pm_ret_status)res.a0);
@@ -218,11 +225,14 @@ static int __do_feature_check_call(const u32 api_id, u32 *ret_payload)
* Feature check of TF-A APIs is done in the TF-A layer and it expects for
* MODULE_ID_MASK bits of SMC's arg[0] to be the same as PM_MODULE_ID.
*/
- if (module_id == TF_A_MODULE_ID)
+ if (module_id == TF_A_MODULE_ID) {
module_id = PM_MODULE_ID;
+ smc_arg[1] = api_id;
+ } else {
+ smc_arg[1] = (api_id & API_ID_MASK);
+ }
smc_arg[0] = PM_SIP_SVC | FIELD_PREP(MODULE_ID_MASK, module_id) | feature_check_api_id;
- smc_arg[1] = (api_id & API_ID_MASK);
ret = do_fw_call(ret_payload, 2, smc_arg[0], smc_arg[1]);
if (ret)
@@ -332,6 +342,70 @@ int zynqmp_pm_is_function_supported(const u32 api_id, const u32 id)
EXPORT_SYMBOL_GPL(zynqmp_pm_is_function_supported);
/**
+ * zynqmp_pm_invoke_fw_fn() - Invoke the system-level platform management layer
+ * caller function depending on the configuration
+ * @pm_api_id: Requested PM-API call
+ * @ret_payload: Returned value array
+ * @num_args: Number of arguments to requested PM-API call
+ *
+ * Invoke platform management function for SMC or HVC call, depending on
+ * configuration.
+ * Following SMC Calling Convention (SMCCC) for SMC64:
+ * Pm Function Identifier,
+ * PM_SIP_SVC + PASS_THROUGH_FW_CMD_ID =
+ * ((SMC_TYPE_FAST << FUNCID_TYPE_SHIFT)
+ * ((SMC_64) << FUNCID_CC_SHIFT)
+ * ((SIP_START) << FUNCID_OEN_SHIFT)
+ * (PASS_THROUGH_FW_CMD_ID))
+ *
+ * PM_SIP_SVC - Registered ZynqMP SIP Service Call.
+ * PASS_THROUGH_FW_CMD_ID - Fixed SiP SVC call ID for FW specific calls.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_invoke_fw_fn(u32 pm_api_id, u32 *ret_payload, u32 num_args, ...)
+{
+ /*
+ * Added SIP service call Function Identifier
+ * Make sure to stay in x0 register
+ */
+ u64 smc_arg[SMC_ARG_CNT_64];
+ int ret, i;
+ va_list arg_list;
+ u32 args[SMC_ARG_CNT_32] = {0};
+ u32 module_id;
+
+ if (num_args > SMC_ARG_CNT_32)
+ return -EINVAL;
+
+ va_start(arg_list, num_args);
+
+ /* Check if feature is supported or not */
+ ret = zynqmp_pm_feature(pm_api_id);
+ if (ret < 0) {
+ va_end(arg_list);
+ return ret;
+ }
+
+ for (i = 0; i < num_args; i++)
+ args[i] = va_arg(arg_list, u32);
+
+ va_end(arg_list);
+
+ module_id = FIELD_GET(PLM_MODULE_ID_MASK, pm_api_id);
+
+ if (module_id == 0)
+ module_id = XPM_MODULE_ID;
+
+ smc_arg[0] = PM_SIP_SVC | PASS_THROUGH_FW_CMD_ID;
+ smc_arg[1] = ((u64)args[0] << 32U) | FIELD_PREP(PLM_MODULE_ID_MASK, module_id) |
+ (pm_api_id & API_ID_MASK);
+ for (i = 1; i < (SMC_ARG_CNT_64 - 1); i++)
+ smc_arg[i + 1] = ((u64)args[(i * 2)] << 32U) | args[(i * 2) - 1];
+
+ return do_fw_call(ret_payload, 8, smc_arg[0], smc_arg[1], smc_arg[2], smc_arg[3],
+ smc_arg[4], smc_arg[5], smc_arg[6], smc_arg[7]);
+}
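
A restatement of the register packing used by the pass-through path, factored into a stand-alone helper for clarity (sketch only, reusing the macros above): 32-bit PM arguments are packed two per 64-bit SMC register, with the lower-numbered argument in the lower 32 bits, and args[0] sharing x1 with the module and API identifiers.

static void example_pack_passthrough(u64 smc_arg[SMC_ARG_CNT_64],
				     u32 pm_api_id, u32 module_id,
				     const u32 args[SMC_ARG_CNT_32])
{
	int i;

	smc_arg[0] = PM_SIP_SVC | PASS_THROUGH_FW_CMD_ID;
	smc_arg[1] = ((u64)args[0] << 32) |
		     FIELD_PREP(PLM_MODULE_ID_MASK, module_id) |
		     (pm_api_id & API_ID_MASK);
	for (i = 2; i < SMC_ARG_CNT_64; i++)
		smc_arg[i] = ((u64)args[2 * i - 2] << 32) | args[2 * i - 3];
}
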
+
+/**
* zynqmp_pm_invoke_fn() - Invoke the system-level platform management layer
* caller function depending on the configuration
* @pm_api_id: Requested PM-API call
@@ -489,6 +563,35 @@ int zynqmp_pm_get_family_info(u32 *family, u32 *subfamily)
EXPORT_SYMBOL_GPL(zynqmp_pm_get_family_info);
/**
+ * zynqmp_pm_get_sip_svc_version() - Get SiP service call version
+ * @version: Returned version value
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_get_sip_svc_version(u32 *version)
+{
+ struct arm_smccc_res res;
+ u64 args[SMC_ARG_CNT_64] = {0};
+
+ if (!version)
+ return -EINVAL;
+
+ /* Check if SiP SVC version already verified */
+ if (sip_svc_version > 0) {
+ *version = sip_svc_version;
+ return 0;
+ }
+
+ args[0] = GET_SIP_SVC_VERSION;
+
+ arm_smccc_smc(args[0], args[1], args[2], args[3], args[4], args[5], args[6], args[7], &res);
+
+ *version = ((lower_32_bits(res.a0) << 16U) | lower_32_bits(res.a1));
+
+ return zynqmp_pm_ret_code(XST_PM_SUCCESS);
+}
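
Downstream code gates the pass-through path on this packed (major << 16 | minor) value; a small hedged sketch of that check, mirroring what zynqmp_pm_query_data() does further below:

static bool example_sip_svc_has_passthrough(void)
{
	u32 version;

	if (zynqmp_pm_get_sip_svc_version(&version))
		return false;

	pr_debug("SiP SVC v%u.%u\n", version >> 16, version & 0xffff);

	return version >= SIP_SVC_PASSTHROUGH_VERSION;
}
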
+
+/**
* zynqmp_pm_get_trustzone_version() - Get secure trustzone firmware version
* @version: Returned version value
*
@@ -552,10 +655,34 @@ static int get_set_conduit_method(struct device_node *np)
*/
int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata, u32 *out)
{
- int ret;
+ int ret, i = 0;
+ u32 ret_payload[PAYLOAD_ARG_CNT] = {0};
+
+ if (sip_svc_version >= SIP_SVC_PASSTHROUGH_VERSION) {
+ ret = zynqmp_pm_invoke_fw_fn(PM_QUERY_DATA, ret_payload, 4,
+ qdata.qid, qdata.arg1,
+ qdata.arg2, qdata.arg3);
+ /* To support backward compatibility */
+ if (!ret && !ret_payload[0]) {
+ /*
+ * TF-A passes the return status in the 0th word, but the
+ * clock and pinctrl name queries expect the name data at
+ * the start of the output, so skip the status word and
+ * copy the data instead
+ */
+ if (qdata.qid == PM_QID_CLOCK_GET_NAME ||
+ qdata.qid == PM_QID_PINCTRL_GET_FUNCTION_NAME)
+ i = 1;
+
+ for (; i < PAYLOAD_ARG_CNT; i++, out++)
+ *out = ret_payload[i];
+
+ return ret;
+ }
+ }
- ret = zynqmp_pm_invoke_fn(PM_QUERY_DATA, out, 4, qdata.qid, qdata.arg1, qdata.arg2,
- qdata.arg3);
+ ret = zynqmp_pm_invoke_fn(PM_QUERY_DATA, out, 4, qdata.qid,
+ qdata.arg1, qdata.arg2, qdata.arg3);
/*
* For clock name query, all bytes in SMC response are clock name
@@ -920,7 +1047,7 @@ int zynqmp_pm_set_boot_health_status(u32 value)
*
* Return: Returns status, either success or error+reason
*/
-int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset,
+int zynqmp_pm_reset_assert(const u32 reset,
const enum zynqmp_pm_reset_action assert_flag)
{
return zynqmp_pm_invoke_fn(PM_RESET_ASSERT, NULL, 2, reset, assert_flag);
@@ -934,7 +1061,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_reset_assert);
*
* Return: Returns status, either success or error+reason
*/
-int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset, u32 *status)
+int zynqmp_pm_reset_get_status(const u32 reset, u32 *status)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
@@ -1118,8 +1245,11 @@ int zynqmp_pm_pinctrl_set_config(const u32 pin, const u32 param,
if (pm_family_code == ZYNQMP_FAMILY_CODE &&
param == PM_PINCTRL_CONFIG_TRI_STATE) {
ret = zynqmp_pm_feature(PM_PINCTRL_CONFIG_PARAM_SET);
- if (ret < PM_PINCTRL_PARAM_SET_VERSION)
+ if (ret < PM_PINCTRL_PARAM_SET_VERSION) {
+ pr_warn("The requested pinctrl feature is not supported in the current firmware.\n"
+ "Expected firmware version is 2023.1 and above for this feature to work.\n");
return -EOPNOTSUPP;
+ }
}
return zynqmp_pm_invoke_fn(PM_PINCTRL_CONFIG_PARAM_SET, NULL, 3, pin, param, value);
@@ -1887,6 +2017,11 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
if (ret)
return ret;
+ /* Get SiP SVC version number */
+ ret = zynqmp_pm_get_sip_svc_version(&sip_svc_version);
+ if (ret)
+ return ret;
+
ret = do_feature_check_call(PM_FEATURE_CHECK);
if (ret >= 0 && ((ret & FIRMWARE_VERSION_MASK) >= PM_API_VERSION_1))
feature_check_enabled = true;
@@ -1983,6 +2118,6 @@ static struct platform_driver zynqmp_firmware_driver = {
.dev_groups = zynqmp_firmware_groups,
},
.probe = zynqmp_firmware_probe,
- .remove_new = zynqmp_firmware_remove,
+ .remove = zynqmp_firmware_remove,
};
module_platform_driver(zynqmp_firmware_driver);