Diffstat (limited to 'drivers/soc/qcom')
-rw-r--r--  drivers/soc/qcom/Kconfig       |   6
-rw-r--r--  drivers/soc/qcom/Makefile      |   1
-rw-r--r--  drivers/soc/qcom/icc-bwmon.c   | 231
-rw-r--r--  drivers/soc/qcom/ice.c         | 366
-rw-r--r--  drivers/soc/qcom/llcc-qcom.c   | 104
-rw-r--r--  drivers/soc/qcom/pmic_glink.c  |  65
-rw-r--r--  drivers/soc/qcom/qcom_aoss.c   |   2
-rw-r--r--  drivers/soc/qcom/qcom_gsbi.c   |   2
-rw-r--r--  drivers/soc/qcom/rmtfs_mem.c   |   3
-rw-r--r--  drivers/soc/qcom/rpmh-rsc.c    |   2
-rw-r--r--  drivers/soc/qcom/rpmpd.c       | 833
-rw-r--r--  drivers/soc/qcom/smd-rpm.c     |   2
-rw-r--r--  drivers/soc/qcom/smem.c        |   4
-rw-r--r--  drivers/soc/qcom/smsm.c        |  11
-rw-r--r--  drivers/soc/qcom/socinfo.c     |  16
15 files changed, 1298 insertions(+), 350 deletions(-)
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index a8f283086a21..a491718f8064 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -72,7 +72,7 @@ config QCOM_LLCC
config QCOM_KRYO_L2_ACCESSORS
bool
- depends on ARCH_QCOM && ARM64 || COMPILE_TEST
+ depends on (ARCH_QCOM || COMPILE_TEST) && ARM64
config QCOM_MDT_LOADER
tristate
@@ -275,4 +275,8 @@ config QCOM_ICC_BWMON
the fixed bandwidth votes from cpufreq (CPU nodes) thus achieve high
memory throughput even with lower CPU frequencies.
+config QCOM_INLINE_CRYPTO_ENGINE
+ tristate
+ select QCOM_SCM
+
endmenu
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 6e88da899f60..0f43a88b4894 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -32,3 +32,4 @@ obj-$(CONFIG_QCOM_RPMHPD) += rpmhpd.o
obj-$(CONFIG_QCOM_RPMPD) += rpmpd.o
obj-$(CONFIG_QCOM_KRYO_L2_ACCESSORS) += kryo-l2-accessors.o
obj-$(CONFIG_QCOM_ICC_BWMON) += icc-bwmon.o
+obj-$(CONFIG_QCOM_INLINE_CRYPTO_ENGINE) += ice.o
diff --git a/drivers/soc/qcom/icc-bwmon.c b/drivers/soc/qcom/icc-bwmon.c
index d07be3700db6..fd58c5b69897 100644
--- a/drivers/soc/qcom/icc-bwmon.c
+++ b/drivers/soc/qcom/icc-bwmon.c
@@ -34,14 +34,27 @@
/* Internal sampling clock frequency */
#define HW_TIMER_HZ 19200000
-#define BWMON_V4_GLOBAL_IRQ_CLEAR 0x008
-#define BWMON_V4_GLOBAL_IRQ_ENABLE 0x00c
+#define BWMON_V4_GLOBAL_IRQ_CLEAR 0x108
+#define BWMON_V4_GLOBAL_IRQ_ENABLE 0x10c
/*
* All values here and further are matching regmap fields, so without absolute
* register offsets.
*/
#define BWMON_V4_GLOBAL_IRQ_ENABLE_ENABLE BIT(0)
+/*
+ * Starting with SDM845, the BWMON4 register space has changed a bit:
+ * the global registers were jammed into the beginning of the monitor region.
+ * To keep the proper offsets, one would have to map <GLOBAL_BASE 0x200> and
+ * <GLOBAL_BASE+0x100 0x300>, which is straight up wrong.
+ * To accommodate that, while allowing the older, arguably more proper
+ * implementations to work, offset the global registers by -0x100 to avoid
+ * having to map half of the global registers twice.
+ */
+#define BWMON_V4_845_OFFSET 0x100
+#define BWMON_V4_GLOBAL_IRQ_CLEAR_845 (BWMON_V4_GLOBAL_IRQ_CLEAR - BWMON_V4_845_OFFSET)
+#define BWMON_V4_GLOBAL_IRQ_ENABLE_845 (BWMON_V4_GLOBAL_IRQ_ENABLE - BWMON_V4_845_OFFSET)
+
#define BWMON_V4_IRQ_STATUS 0x100
#define BWMON_V4_IRQ_CLEAR 0x108
@@ -118,9 +131,13 @@
#define BWMON_NEEDS_FORCE_CLEAR BIT(1)
enum bwmon_fields {
+ /* Global region fields, keep them at the top */
F_GLOBAL_IRQ_CLEAR,
F_GLOBAL_IRQ_ENABLE,
- F_IRQ_STATUS,
+ F_NUM_GLOBAL_FIELDS,
+
+ /* Monitor region fields */
+ F_IRQ_STATUS = F_NUM_GLOBAL_FIELDS,
F_IRQ_CLEAR,
F_IRQ_ENABLE,
F_ENABLE,
@@ -157,6 +174,9 @@ struct icc_bwmon_data {
const struct regmap_config *regmap_cfg;
const struct reg_field *regmap_fields;
+
+ const struct regmap_config *global_regmap_cfg;
+ const struct reg_field *global_regmap_fields;
};
struct icc_bwmon {
@@ -164,8 +184,8 @@ struct icc_bwmon {
const struct icc_bwmon_data *data;
int irq;
- struct regmap *regmap;
struct regmap_field *regs[F_NUM_FIELDS];
+ struct regmap_field *global_regs[F_NUM_GLOBAL_FIELDS];
unsigned int max_bw_kbps;
unsigned int min_bw_kbps;
@@ -175,8 +195,8 @@ struct icc_bwmon {
/* BWMON v4 */
static const struct reg_field msm8998_bwmon_reg_fields[] = {
- [F_GLOBAL_IRQ_CLEAR] = REG_FIELD(BWMON_V4_GLOBAL_IRQ_CLEAR, 0, 0),
- [F_GLOBAL_IRQ_ENABLE] = REG_FIELD(BWMON_V4_GLOBAL_IRQ_ENABLE, 0, 0),
+ [F_GLOBAL_IRQ_CLEAR] = {},
+ [F_GLOBAL_IRQ_ENABLE] = {},
[F_IRQ_STATUS] = REG_FIELD(BWMON_V4_IRQ_STATUS, 4, 7),
[F_IRQ_CLEAR] = REG_FIELD(BWMON_V4_IRQ_CLEAR, 4, 7),
[F_IRQ_ENABLE] = REG_FIELD(BWMON_V4_IRQ_ENABLE, 4, 7),
@@ -202,7 +222,6 @@ static const struct reg_field msm8998_bwmon_reg_fields[] = {
};
static const struct regmap_range msm8998_bwmon_reg_noread_ranges[] = {
- regmap_reg_range(BWMON_V4_GLOBAL_IRQ_CLEAR, BWMON_V4_GLOBAL_IRQ_CLEAR),
regmap_reg_range(BWMON_V4_IRQ_CLEAR, BWMON_V4_IRQ_CLEAR),
regmap_reg_range(BWMON_V4_CLEAR, BWMON_V4_CLEAR),
};
@@ -222,16 +241,33 @@ static const struct regmap_access_table msm8998_bwmon_reg_volatile_table = {
.n_yes_ranges = ARRAY_SIZE(msm8998_bwmon_reg_volatile_ranges),
};
+static const struct reg_field msm8998_bwmon_global_reg_fields[] = {
+ [F_GLOBAL_IRQ_CLEAR] = REG_FIELD(BWMON_V4_GLOBAL_IRQ_CLEAR, 0, 0),
+ [F_GLOBAL_IRQ_ENABLE] = REG_FIELD(BWMON_V4_GLOBAL_IRQ_ENABLE, 0, 0),
+};
+
+static const struct regmap_range msm8998_bwmon_global_reg_noread_ranges[] = {
+ regmap_reg_range(BWMON_V4_GLOBAL_IRQ_CLEAR, BWMON_V4_GLOBAL_IRQ_CLEAR),
+};
+
+static const struct regmap_access_table msm8998_bwmon_global_reg_read_table = {
+ .no_ranges = msm8998_bwmon_global_reg_noread_ranges,
+ .n_no_ranges = ARRAY_SIZE(msm8998_bwmon_global_reg_noread_ranges),
+};
+
/*
* Fill the cache for non-readable registers only as rest does not really
* matter and can be read from the device.
*/
static const struct reg_default msm8998_bwmon_reg_defaults[] = {
- { BWMON_V4_GLOBAL_IRQ_CLEAR, 0x0 },
{ BWMON_V4_IRQ_CLEAR, 0x0 },
{ BWMON_V4_CLEAR, 0x0 },
};
+static const struct reg_default msm8998_bwmon_global_reg_defaults[] = {
+ { BWMON_V4_GLOBAL_IRQ_CLEAR, 0x0 },
+};
+
static const struct regmap_config msm8998_bwmon_regmap_cfg = {
.reg_bits = 32,
.reg_stride = 4,
@@ -252,6 +288,93 @@ static const struct regmap_config msm8998_bwmon_regmap_cfg = {
.cache_type = REGCACHE_RBTREE,
};
+static const struct regmap_config msm8998_bwmon_global_regmap_cfg = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ /*
+ * No concurrent access expected - driver has one interrupt handler,
+ * regmap is not shared, no driver or user-space API.
+ */
+ .disable_locking = true,
+ .rd_table = &msm8998_bwmon_global_reg_read_table,
+ .reg_defaults = msm8998_bwmon_global_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(msm8998_bwmon_global_reg_defaults),
+ /*
+ * Cache is necessary for using regmap fields with non-readable
+ * registers.
+ */
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static const struct reg_field sdm845_cpu_bwmon_reg_fields[] = {
+ [F_GLOBAL_IRQ_CLEAR] = REG_FIELD(BWMON_V4_GLOBAL_IRQ_CLEAR_845, 0, 0),
+ [F_GLOBAL_IRQ_ENABLE] = REG_FIELD(BWMON_V4_GLOBAL_IRQ_ENABLE_845, 0, 0),
+ [F_IRQ_STATUS] = REG_FIELD(BWMON_V4_IRQ_STATUS, 4, 7),
+ [F_IRQ_CLEAR] = REG_FIELD(BWMON_V4_IRQ_CLEAR, 4, 7),
+ [F_IRQ_ENABLE] = REG_FIELD(BWMON_V4_IRQ_ENABLE, 4, 7),
+ /* F_ENABLE covers entire register to disable other features */
+ [F_ENABLE] = REG_FIELD(BWMON_V4_ENABLE, 0, 31),
+ [F_CLEAR] = REG_FIELD(BWMON_V4_CLEAR, 0, 1),
+ [F_SAMPLE_WINDOW] = REG_FIELD(BWMON_V4_SAMPLE_WINDOW, 0, 23),
+ [F_THRESHOLD_HIGH] = REG_FIELD(BWMON_V4_THRESHOLD_HIGH, 0, 11),
+ [F_THRESHOLD_MED] = REG_FIELD(BWMON_V4_THRESHOLD_MED, 0, 11),
+ [F_THRESHOLD_LOW] = REG_FIELD(BWMON_V4_THRESHOLD_LOW, 0, 11),
+ [F_ZONE_ACTIONS_ZONE0] = REG_FIELD(BWMON_V4_ZONE_ACTIONS, 0, 7),
+ [F_ZONE_ACTIONS_ZONE1] = REG_FIELD(BWMON_V4_ZONE_ACTIONS, 8, 15),
+ [F_ZONE_ACTIONS_ZONE2] = REG_FIELD(BWMON_V4_ZONE_ACTIONS, 16, 23),
+ [F_ZONE_ACTIONS_ZONE3] = REG_FIELD(BWMON_V4_ZONE_ACTIONS, 24, 31),
+ [F_THRESHOLD_COUNT_ZONE0] = REG_FIELD(BWMON_V4_THRESHOLD_COUNT, 0, 7),
+ [F_THRESHOLD_COUNT_ZONE1] = REG_FIELD(BWMON_V4_THRESHOLD_COUNT, 8, 15),
+ [F_THRESHOLD_COUNT_ZONE2] = REG_FIELD(BWMON_V4_THRESHOLD_COUNT, 16, 23),
+ [F_THRESHOLD_COUNT_ZONE3] = REG_FIELD(BWMON_V4_THRESHOLD_COUNT, 24, 31),
+ [F_ZONE0_MAX] = REG_FIELD(BWMON_V4_ZONE_MAX(0), 0, 11),
+ [F_ZONE1_MAX] = REG_FIELD(BWMON_V4_ZONE_MAX(1), 0, 11),
+ [F_ZONE2_MAX] = REG_FIELD(BWMON_V4_ZONE_MAX(2), 0, 11),
+ [F_ZONE3_MAX] = REG_FIELD(BWMON_V4_ZONE_MAX(3), 0, 11),
+};
+
+static const struct regmap_range sdm845_cpu_bwmon_reg_noread_ranges[] = {
+ regmap_reg_range(BWMON_V4_GLOBAL_IRQ_CLEAR_845, BWMON_V4_GLOBAL_IRQ_CLEAR_845),
+ regmap_reg_range(BWMON_V4_IRQ_CLEAR, BWMON_V4_IRQ_CLEAR),
+ regmap_reg_range(BWMON_V4_CLEAR, BWMON_V4_CLEAR),
+};
+
+static const struct regmap_access_table sdm845_cpu_bwmon_reg_read_table = {
+ .no_ranges = sdm845_cpu_bwmon_reg_noread_ranges,
+ .n_no_ranges = ARRAY_SIZE(sdm845_cpu_bwmon_reg_noread_ranges),
+};
+
+/*
+ * Fill the cache for non-readable registers only as rest does not really
+ * matter and can be read from the device.
+ */
+static const struct reg_default sdm845_cpu_bwmon_reg_defaults[] = {
+ { BWMON_V4_GLOBAL_IRQ_CLEAR_845, 0x0 },
+ { BWMON_V4_IRQ_CLEAR, 0x0 },
+ { BWMON_V4_CLEAR, 0x0 },
+};
+
+static const struct regmap_config sdm845_cpu_bwmon_regmap_cfg = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ /*
+ * No concurrent access expected - driver has one interrupt handler,
+ * regmap is not shared, no driver or user-space API.
+ */
+ .disable_locking = true,
+ .rd_table = &sdm845_cpu_bwmon_reg_read_table,
+ .volatile_table = &msm8998_bwmon_reg_volatile_table,
+ .reg_defaults = sdm845_cpu_bwmon_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(sdm845_cpu_bwmon_reg_defaults),
+ /*
+ * Cache is necessary for using regmap fields with non-readable
+ * registers.
+ */
+ .cache_type = REGCACHE_RBTREE,
+};
+
/* BWMON v5 */
static const struct reg_field sdm845_llcc_bwmon_reg_fields[] = {
[F_GLOBAL_IRQ_CLEAR] = {},
@@ -350,6 +473,13 @@ static void bwmon_clear_counters(struct icc_bwmon *bwmon, bool clear_all)
static void bwmon_clear_irq(struct icc_bwmon *bwmon)
{
+ struct regmap_field *global_irq_clr;
+
+ if (bwmon->data->global_regmap_fields)
+ global_irq_clr = bwmon->global_regs[F_GLOBAL_IRQ_CLEAR];
+ else
+ global_irq_clr = bwmon->regs[F_GLOBAL_IRQ_CLEAR];
+
/*
* Clear zone and global interrupts. The order and barriers are
* important. Quoting downstream Qualcomm msm-4.9 tree:
@@ -370,15 +500,22 @@ static void bwmon_clear_irq(struct icc_bwmon *bwmon)
if (bwmon->data->quirks & BWMON_NEEDS_FORCE_CLEAR)
regmap_field_force_write(bwmon->regs[F_IRQ_CLEAR], 0);
if (bwmon->data->quirks & BWMON_HAS_GLOBAL_IRQ)
- regmap_field_force_write(bwmon->regs[F_GLOBAL_IRQ_CLEAR],
+ regmap_field_force_write(global_irq_clr,
BWMON_V4_GLOBAL_IRQ_ENABLE_ENABLE);
}
static void bwmon_disable(struct icc_bwmon *bwmon)
{
+ struct regmap_field *global_irq_en;
+
+ if (bwmon->data->global_regmap_fields)
+ global_irq_en = bwmon->global_regs[F_GLOBAL_IRQ_ENABLE];
+ else
+ global_irq_en = bwmon->regs[F_GLOBAL_IRQ_ENABLE];
+
/* Disable interrupts. Strict ordering, see bwmon_clear_irq(). */
if (bwmon->data->quirks & BWMON_HAS_GLOBAL_IRQ)
- regmap_field_write(bwmon->regs[F_GLOBAL_IRQ_ENABLE], 0x0);
+ regmap_field_write(global_irq_en, 0x0);
regmap_field_write(bwmon->regs[F_IRQ_ENABLE], 0x0);
/*
@@ -390,10 +527,18 @@ static void bwmon_disable(struct icc_bwmon *bwmon)
static void bwmon_enable(struct icc_bwmon *bwmon, unsigned int irq_enable)
{
+ struct regmap_field *global_irq_en;
+
+ if (bwmon->data->global_regmap_fields)
+ global_irq_en = bwmon->global_regs[F_GLOBAL_IRQ_ENABLE];
+ else
+ global_irq_en = bwmon->regs[F_GLOBAL_IRQ_ENABLE];
+
/* Enable interrupts */
if (bwmon->data->quirks & BWMON_HAS_GLOBAL_IRQ)
- regmap_field_write(bwmon->regs[F_GLOBAL_IRQ_ENABLE],
+ regmap_field_write(global_irq_en,
BWMON_V4_GLOBAL_IRQ_ENABLE_ENABLE);
+
regmap_field_write(bwmon->regs[F_IRQ_ENABLE], irq_enable);
/* Enable bwmon */
@@ -556,7 +701,9 @@ static int bwmon_init_regmap(struct platform_device *pdev,
struct device *dev = &pdev->dev;
void __iomem *base;
struct regmap *map;
+ int ret;
+ /* Map the monitor base */
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return dev_err_probe(dev, PTR_ERR(base),
@@ -567,12 +714,35 @@ static int bwmon_init_regmap(struct platform_device *pdev,
return dev_err_probe(dev, PTR_ERR(map),
"failed to initialize regmap\n");
+ BUILD_BUG_ON(ARRAY_SIZE(msm8998_bwmon_global_reg_fields) != F_NUM_GLOBAL_FIELDS);
BUILD_BUG_ON(ARRAY_SIZE(msm8998_bwmon_reg_fields) != F_NUM_FIELDS);
+ BUILD_BUG_ON(ARRAY_SIZE(sdm845_cpu_bwmon_reg_fields) != F_NUM_FIELDS);
BUILD_BUG_ON(ARRAY_SIZE(sdm845_llcc_bwmon_reg_fields) != F_NUM_FIELDS);
- return devm_regmap_field_bulk_alloc(dev, map, bwmon->regs,
+ ret = devm_regmap_field_bulk_alloc(dev, map, bwmon->regs,
bwmon->data->regmap_fields,
F_NUM_FIELDS);
+ if (ret)
+ return ret;
+
+ if (bwmon->data->global_regmap_cfg) {
+ /* Map the global base, if separate */
+ base = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(base))
+ return dev_err_probe(dev, PTR_ERR(base),
+ "failed to map bwmon global registers\n");
+
+ map = devm_regmap_init_mmio(dev, base, bwmon->data->global_regmap_cfg);
+ if (IS_ERR(map))
+ return dev_err_probe(dev, PTR_ERR(map),
+ "failed to initialize global regmap\n");
+
+ ret = devm_regmap_field_bulk_alloc(dev, map, bwmon->global_regs,
+ bwmon->data->global_regmap_fields,
+ F_NUM_GLOBAL_FIELDS);
+ }
+
+ return ret;
}
static int bwmon_probe(struct platform_device *pdev)
@@ -645,6 +815,21 @@ static const struct icc_bwmon_data msm8998_bwmon_data = {
.quirks = BWMON_HAS_GLOBAL_IRQ,
.regmap_fields = msm8998_bwmon_reg_fields,
.regmap_cfg = &msm8998_bwmon_regmap_cfg,
+ .global_regmap_fields = msm8998_bwmon_global_reg_fields,
+ .global_regmap_cfg = &msm8998_bwmon_global_regmap_cfg,
+};
+
+static const struct icc_bwmon_data sdm845_cpu_bwmon_data = {
+ .sample_ms = 4,
+ .count_unit_kb = 64,
+ .default_highbw_kbps = 4800 * 1024, /* 4.8 GBps */
+ .default_medbw_kbps = 512 * 1024, /* 512 MBps */
+ .default_lowbw_kbps = 0,
+ .zone1_thres_count = 16,
+ .zone3_thres_count = 1,
+ .quirks = BWMON_HAS_GLOBAL_IRQ,
+ .regmap_fields = sdm845_cpu_bwmon_reg_fields,
+ .regmap_cfg = &sdm845_cpu_bwmon_regmap_cfg,
};
static const struct icc_bwmon_data sdm845_llcc_bwmon_data = {
@@ -673,16 +858,18 @@ static const struct icc_bwmon_data sc7280_llcc_bwmon_data = {
};
static const struct of_device_id bwmon_of_match[] = {
- {
- .compatible = "qcom,msm8998-bwmon",
- .data = &msm8998_bwmon_data
- }, {
- .compatible = "qcom,sdm845-llcc-bwmon",
- .data = &sdm845_llcc_bwmon_data
- }, {
- .compatible = "qcom,sc7280-llcc-bwmon",
- .data = &sc7280_llcc_bwmon_data
- },
+ /* BWMONv4, separate monitor and global register spaces */
+ { .compatible = "qcom,msm8998-bwmon", .data = &msm8998_bwmon_data },
+ /* BWMONv4, unified register space */
+ { .compatible = "qcom,sdm845-bwmon", .data = &sdm845_cpu_bwmon_data },
+ /* BWMONv5 */
+ { .compatible = "qcom,sdm845-llcc-bwmon", .data = &sdm845_llcc_bwmon_data },
+ { .compatible = "qcom,sc7280-llcc-bwmon", .data = &sc7280_llcc_bwmon_data },
+
+ /* Compatibles kept for legacy reasons */
+ { .compatible = "qcom,sc7280-cpu-bwmon", .data = &sdm845_cpu_bwmon_data },
+ { .compatible = "qcom,sc8280xp-cpu-bwmon", .data = &sdm845_cpu_bwmon_data },
+ { .compatible = "qcom,sm8550-cpu-bwmon", .data = &sdm845_cpu_bwmon_data },
{}
};
MODULE_DEVICE_TABLE(of, bwmon_of_match);
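
A minimal sketch (not part of the patch) of how the split regmap support above is meant to be consumed: a BWMONv4 variant with a separate global register space fills in both the per-monitor and the global regmap hooks in its icc_bwmon_data, and bwmon_init_regmap() then maps the second MMIO resource for the global fields. The "newsoc" name is a placeholder, not real platform data; it simply reuses the msm8998 field and config tables defined above.

	/*
	 * Hypothetical BWMONv4 variant with a separate global register space.
	 * Monitor fields come from ->regmap_cfg/->regmap_fields; the two
	 * global IRQ fields come from the second MMIO resource described by
	 * ->global_regmap_cfg/->global_regmap_fields (see bwmon_init_regmap()).
	 */
	static const struct icc_bwmon_data newsoc_bwmon_data = {
		.sample_ms = 4,
		.count_unit_kb = 64,
		.default_highbw_kbps = 4800 * 1024,	/* 4.8 GBps */
		.default_medbw_kbps = 512 * 1024,	/* 512 MBps */
		.default_lowbw_kbps = 0,
		.zone1_thres_count = 16,
		.zone3_thres_count = 1,
		.quirks = BWMON_HAS_GLOBAL_IRQ,
		.regmap_fields = msm8998_bwmon_reg_fields,
		.regmap_cfg = &msm8998_bwmon_regmap_cfg,
		.global_regmap_fields = msm8998_bwmon_global_reg_fields,
		.global_regmap_cfg = &msm8998_bwmon_global_regmap_cfg,
	};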
diff --git a/drivers/soc/qcom/ice.c b/drivers/soc/qcom/ice.c
new file mode 100644
index 000000000000..a6123ea96272
--- /dev/null
+++ b/drivers/soc/qcom/ice.c
@@ -0,0 +1,366 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Qualcomm ICE (Inline Crypto Engine) support.
+ *
+ * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2019, Google LLC
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include <linux/of_platform.h>
+
+#include <linux/firmware/qcom/qcom_scm.h>
+
+#include <soc/qcom/ice.h>
+
+#define AES_256_XTS_KEY_SIZE 64
+
+/* QCOM ICE registers */
+#define QCOM_ICE_REG_VERSION 0x0008
+#define QCOM_ICE_REG_FUSE_SETTING 0x0010
+#define QCOM_ICE_REG_BIST_STATUS 0x0070
+#define QCOM_ICE_REG_ADVANCED_CONTROL 0x1000
+
+/* BIST ("built-in self-test") status flags */
+#define QCOM_ICE_BIST_STATUS_MASK GENMASK(31, 28)
+
+#define QCOM_ICE_FUSE_SETTING_MASK 0x1
+#define QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK 0x2
+#define QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK 0x4
+
+#define qcom_ice_writel(engine, val, reg) \
+ writel((val), (engine)->base + (reg))
+
+#define qcom_ice_readl(engine, reg) \
+ readl((engine)->base + (reg))
+
+struct qcom_ice {
+ struct device *dev;
+ void __iomem *base;
+ struct device_link *link;
+
+ struct clk *core_clk;
+};
+
+static bool qcom_ice_check_supported(struct qcom_ice *ice)
+{
+ u32 regval = qcom_ice_readl(ice, QCOM_ICE_REG_VERSION);
+ struct device *dev = ice->dev;
+ int major = FIELD_GET(GENMASK(31, 24), regval);
+ int minor = FIELD_GET(GENMASK(23, 16), regval);
+ int step = FIELD_GET(GENMASK(15, 0), regval);
+
+ /* For now this driver only supports ICE version 3 and 4. */
+ if (major != 3 && major != 4) {
+ dev_warn(dev, "Unsupported ICE version: v%d.%d.%d\n",
+ major, minor, step);
+ return false;
+ }
+
+ dev_info(dev, "Found QC Inline Crypto Engine (ICE) v%d.%d.%d\n",
+ major, minor, step);
+
+ /* If fuses are blown, ICE might not work in the standard way. */
+ regval = qcom_ice_readl(ice, QCOM_ICE_REG_FUSE_SETTING);
+ if (regval & (QCOM_ICE_FUSE_SETTING_MASK |
+ QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK |
+ QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK)) {
+ dev_warn(dev, "Fuses are blown; ICE is unusable!\n");
+ return false;
+ }
+
+ return true;
+}
+
+static void qcom_ice_low_power_mode_enable(struct qcom_ice *ice)
+{
+ u32 regval;
+
+ regval = qcom_ice_readl(ice, QCOM_ICE_REG_ADVANCED_CONTROL);
+
+ /* Enable low power mode sequence */
+ regval |= 0x7000;
+ qcom_ice_writel(ice, regval, QCOM_ICE_REG_ADVANCED_CONTROL);
+}
+
+static void qcom_ice_optimization_enable(struct qcom_ice *ice)
+{
+ u32 regval;
+
+ /* ICE Optimizations Enable Sequence */
+ regval = qcom_ice_readl(ice, QCOM_ICE_REG_ADVANCED_CONTROL);
+ regval |= 0xd807100;
+ /* ICE HPG requires delay before writing */
+ udelay(5);
+ qcom_ice_writel(ice, regval, QCOM_ICE_REG_ADVANCED_CONTROL);
+ udelay(5);
+}
+
+/*
+ * Wait until the ICE BIST (built-in self-test) has completed.
+ *
+ * This may be necessary before ICE can be used.
+ * Note that we don't really care whether the BIST passed or failed;
+ * we really just want to make sure that it isn't still running. This is
+ * because (a) the BIST is a FIPS compliance thing that never fails in
+ * practice, (b) ICE is documented to reject crypto requests if the BIST
+ * fails, so we needn't do it in software too, and (c) properly testing
+ * storage encryption requires testing the full storage stack anyway,
+ * and not relying on hardware-level self-tests.
+ */
+static int qcom_ice_wait_bist_status(struct qcom_ice *ice)
+{
+ u32 regval;
+ int err;
+
+ err = readl_poll_timeout(ice->base + QCOM_ICE_REG_BIST_STATUS,
+ regval, !(regval & QCOM_ICE_BIST_STATUS_MASK),
+ 50, 5000);
+ if (err)
+ dev_err(ice->dev, "Timed out waiting for ICE self-test to complete\n");
+
+ return err;
+}
+
+int qcom_ice_enable(struct qcom_ice *ice)
+{
+ qcom_ice_low_power_mode_enable(ice);
+ qcom_ice_optimization_enable(ice);
+
+ return qcom_ice_wait_bist_status(ice);
+}
+EXPORT_SYMBOL_GPL(qcom_ice_enable);
+
+int qcom_ice_resume(struct qcom_ice *ice)
+{
+ struct device *dev = ice->dev;
+ int err;
+
+ err = clk_prepare_enable(ice->core_clk);
+ if (err) {
+ dev_err(dev, "failed to enable core clock (%d)\n",
+ err);
+ return err;
+ }
+
+ return qcom_ice_wait_bist_status(ice);
+}
+EXPORT_SYMBOL_GPL(qcom_ice_resume);
+
+int qcom_ice_suspend(struct qcom_ice *ice)
+{
+ clk_disable_unprepare(ice->core_clk);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qcom_ice_suspend);
+
+int qcom_ice_program_key(struct qcom_ice *ice,
+ u8 algorithm_id, u8 key_size,
+ const u8 crypto_key[], u8 data_unit_size,
+ int slot)
+{
+ struct device *dev = ice->dev;
+ union {
+ u8 bytes[AES_256_XTS_KEY_SIZE];
+ u32 words[AES_256_XTS_KEY_SIZE / sizeof(u32)];
+ } key;
+ int i;
+ int err;
+
+ /* Only AES-256-XTS has been tested so far. */
+ if (algorithm_id != QCOM_ICE_CRYPTO_ALG_AES_XTS ||
+ key_size != QCOM_ICE_CRYPTO_KEY_SIZE_256) {
+ dev_err_ratelimited(dev,
+ "Unhandled crypto capability; algorithm_id=%d, key_size=%d\n",
+ algorithm_id, key_size);
+ return -EINVAL;
+ }
+
+ memcpy(key.bytes, crypto_key, AES_256_XTS_KEY_SIZE);
+
+ /* The SCM call requires that the key words are encoded in big endian */
+ for (i = 0; i < ARRAY_SIZE(key.words); i++)
+ __cpu_to_be32s(&key.words[i]);
+
+ err = qcom_scm_ice_set_key(slot, key.bytes, AES_256_XTS_KEY_SIZE,
+ QCOM_SCM_ICE_CIPHER_AES_256_XTS,
+ data_unit_size);
+
+ memzero_explicit(&key, sizeof(key));
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(qcom_ice_program_key);
+
+int qcom_ice_evict_key(struct qcom_ice *ice, int slot)
+{
+ return qcom_scm_ice_invalidate_key(slot);
+}
+EXPORT_SYMBOL_GPL(qcom_ice_evict_key);
+
+static struct qcom_ice *qcom_ice_create(struct device *dev,
+ void __iomem *base)
+{
+ struct qcom_ice *engine;
+
+ if (!qcom_scm_is_available())
+ return ERR_PTR(-EPROBE_DEFER);
+
+ if (!qcom_scm_ice_available()) {
+ dev_warn(dev, "ICE SCM interface not found\n");
+ return NULL;
+ }
+
+ engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
+ if (!engine)
+ return ERR_PTR(-ENOMEM);
+
+ engine->dev = dev;
+ engine->base = base;
+
+ /*
+ * Legacy DT binding uses different clk names for each consumer,
+ * so let's try those first. If none of those match, it means we only
+ * have one clock and it is part of the dedicated DT node.
+ * Also, enable the clock before we check what HW version the driver
+ * supports.
+ */
+ engine->core_clk = devm_clk_get_optional_enabled(dev, "ice_core_clk");
+ if (!engine->core_clk)
+ engine->core_clk = devm_clk_get_optional_enabled(dev, "ice");
+ if (!engine->core_clk)
+ engine->core_clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(engine->core_clk))
+ return ERR_CAST(engine->core_clk);
+
+ if (!qcom_ice_check_supported(engine))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ dev_dbg(dev, "Registered Qualcomm Inline Crypto Engine\n");
+
+ return engine;
+}
+
+/**
+ * of_qcom_ice_get() - get an ICE instance from a DT node
+ * @dev: device pointer for the consumer device
+ *
+ * This function provides an ICE instance either by creating one for the
+ * consumer device, if its DT node provides the 'ice' reg range and the 'ice'
+ * clock (legacy DT style), or, if the consumer provides a phandle to an ICE
+ * DT node via the 'qcom,ice' property, by returning the instance already
+ * created for that node.
+ *
+ * Return: ICE pointer on success, NULL if there is no ICE data provided by the
+ * consumer or ERR_PTR() on error.
+ */
+struct qcom_ice *of_qcom_ice_get(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct qcom_ice *ice;
+ struct device_node *node;
+ struct resource *res;
+ void __iomem *base;
+
+ if (!dev || !dev->of_node)
+ return ERR_PTR(-ENODEV);
+
+ /*
+ * In order to support legacy style devicetree bindings, we need
+ * to create the ICE instance using the consumer device and the reg
+ * range called 'ice' it provides.
+ */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ice");
+ if (res) {
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return ERR_CAST(base);
+
+ /* create ICE instance using consumer dev */
+ return qcom_ice_create(&pdev->dev, base);
+ }
+
+ /*
+ * If the consumer node does not provide an 'ice' reg range
+ * (legacy DT binding), then it must at least provide a phandle
+ * to the ICE devicetree node, otherwise ICE is not supported.
+ */
+ node = of_parse_phandle(dev->of_node, "qcom,ice", 0);
+ if (!node)
+ return NULL;
+
+ pdev = of_find_device_by_node(node);
+ if (!pdev) {
+ dev_err(dev, "Cannot find device node %s\n", node->name);
+ ice = ERR_PTR(-EPROBE_DEFER);
+ goto out;
+ }
+
+ ice = platform_get_drvdata(pdev);
+ if (!ice) {
+ dev_err(dev, "Cannot get ice instance from %s\n",
+ dev_name(&pdev->dev));
+ platform_device_put(pdev);
+ ice = ERR_PTR(-EPROBE_DEFER);
+ goto out;
+ }
+
+ ice->link = device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER);
+ if (!ice->link) {
+ dev_err(&pdev->dev,
+ "Failed to create device link to consumer %s\n",
+ dev_name(dev));
+ platform_device_put(pdev);
+ ice = ERR_PTR(-EINVAL);
+ }
+
+out:
+ of_node_put(node);
+
+ return ice;
+}
+EXPORT_SYMBOL_GPL(of_qcom_ice_get);
+
+static int qcom_ice_probe(struct platform_device *pdev)
+{
+ struct qcom_ice *engine;
+ void __iomem *base;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base)) {
+ dev_warn(&pdev->dev, "ICE registers not found\n");
+ return PTR_ERR(base);
+ }
+
+ engine = qcom_ice_create(&pdev->dev, base);
+ if (IS_ERR(engine))
+ return PTR_ERR(engine);
+
+ platform_set_drvdata(pdev, engine);
+
+ return 0;
+}
+
+static const struct of_device_id qcom_ice_of_match_table[] = {
+ { .compatible = "qcom,inline-crypto-engine" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, qcom_ice_of_match_table);
+
+static struct platform_driver qcom_ice_driver = {
+ .probe = qcom_ice_probe,
+ .driver = {
+ .name = "qcom-ice",
+ .of_match_table = qcom_ice_of_match_table,
+ },
+};
+
+module_platform_driver(qcom_ice_driver);
+
+MODULE_DESCRIPTION("Qualcomm Inline Crypto Engine driver");
+MODULE_LICENSE("GPL");
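
As a rough consumer-side sketch (not part of the patch), a storage controller driver could use the exported ICE API along these lines. The function name example_ice_setup, the error handling and the 4096-byte data unit size are illustrative assumptions; only of_qcom_ice_get() and the qcom_ice_* helpers come from the code above.

	#include <linux/device.h>
	#include <linux/err.h>
	#include <soc/qcom/ice.h>

	/*
	 * Hypothetical consumer: look up the ICE instance referenced by the
	 * consumer's "qcom,ice" phandle (or legacy "ice" reg range), enable it
	 * (which also waits for the BIST), then program an AES-256-XTS key.
	 */
	static int example_ice_setup(struct device *dev, const u8 *key, int slot)
	{
		struct qcom_ice *ice;
		int err;

		ice = of_qcom_ice_get(dev);
		if (IS_ERR(ice))
			return PTR_ERR(ice);
		if (!ice)
			return 0;	/* no ICE described in DT, nothing to do */

		err = qcom_ice_enable(ice);
		if (err)
			return err;

		/* data unit size assumed here to be given in 512-byte units */
		return qcom_ice_program_key(ice, QCOM_ICE_CRYPTO_ALG_AES_XTS,
					    QCOM_ICE_CRYPTO_KEY_SIZE_256,
					    key, 4096 / 512, slot);
	}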
diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
index 26efe12012a0..67c19ed2219a 100644
--- a/drivers/soc/qcom/llcc-qcom.c
+++ b/drivers/soc/qcom/llcc-qcom.c
@@ -62,8 +62,6 @@
#define LLCC_TRP_WRSC_CACHEABLE_EN 0x21f2c
#define LLCC_TRP_ALGO_CFG8 0x21f30
-#define BANK_OFFSET_STRIDE 0x80000
-
#define LLCC_VERSION_2_0_0_0 0x02000000
#define LLCC_VERSION_2_1_0_0 0x02010000
#define LLCC_VERSION_4_1_0_0 0x04010000
@@ -122,10 +120,11 @@ struct llcc_slice_config {
struct qcom_llcc_config {
const struct llcc_slice_config *sct_data;
- int size;
- bool need_llcc_cfg;
const u32 *reg_offset;
const struct llcc_edac_reg_offset *edac_reg_offset;
+ int size;
+ bool need_llcc_cfg;
+ bool no_edac;
};
enum llcc_reg_offset {
@@ -227,6 +226,14 @@ static const struct llcc_slice_config sm6350_data[] = {
{ LLCC_MODPE, 29, 64, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0 },
};
+static const struct llcc_slice_config sm7150_data[] = {
+ { LLCC_CPUSS, 1, 512, 1, 0, 0xF, 0x0, 0, 0, 0, 1, 1 },
+ { LLCC_MDM, 8, 128, 2, 0, 0xF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_GPUHTW, 11, 256, 1, 1, 0xF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_GPU, 12, 256, 1, 1, 0xF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_NPU, 23, 512, 1, 0, 0xF, 0x0, 0, 0, 0, 1, 0 },
+};
+
static const struct llcc_slice_config sm8150_data[] = {
{ LLCC_CPUSS, 1, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 1 },
{ LLCC_VIDSC0, 2, 512, 2, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
@@ -454,6 +461,7 @@ static const struct qcom_llcc_config sdm845_cfg = {
.need_llcc_cfg = false,
.reg_offset = llcc_v1_reg_offset,
.edac_reg_offset = &llcc_v1_edac_reg_offset,
+ .no_edac = true,
};
static const struct qcom_llcc_config sm6350_cfg = {
@@ -464,6 +472,14 @@ static const struct qcom_llcc_config sm6350_cfg = {
.edac_reg_offset = &llcc_v1_edac_reg_offset,
};
+static const struct qcom_llcc_config sm7150_cfg = {
+ .sct_data = sm7150_data,
+ .size = ARRAY_SIZE(sm7150_data),
+ .need_llcc_cfg = true,
+ .reg_offset = llcc_v1_reg_offset,
+ .edac_reg_offset = &llcc_v1_edac_reg_offset,
+};
+
static const struct qcom_llcc_config sm8150_cfg = {
.sct_data = sm8150_data,
.size = ARRAY_SIZE(sm8150_data),
@@ -898,8 +914,8 @@ static int qcom_llcc_remove(struct platform_device *pdev)
return 0;
}
-static struct regmap *qcom_llcc_init_mmio(struct platform_device *pdev,
- const char *name)
+static struct regmap *qcom_llcc_init_mmio(struct platform_device *pdev, u8 index,
+ const char *name)
{
void __iomem *base;
struct regmap_config llcc_regmap_config = {
@@ -909,7 +925,7 @@ static struct regmap *qcom_llcc_init_mmio(struct platform_device *pdev,
.fast_io = true,
};
- base = devm_platform_ioremap_resource_byname(pdev, name);
+ base = devm_platform_ioremap_resource(pdev, index);
if (IS_ERR(base))
return ERR_CAST(base);
@@ -927,6 +943,7 @@ static int qcom_llcc_probe(struct platform_device *pdev)
const struct llcc_slice_config *llcc_cfg;
u32 sz;
u32 version;
+ struct regmap *regmap;
drv_data = devm_kzalloc(dev, sizeof(*drv_data), GFP_KERNEL);
if (!drv_data) {
@@ -934,21 +951,51 @@ static int qcom_llcc_probe(struct platform_device *pdev)
goto err;
}
- drv_data->regmap = qcom_llcc_init_mmio(pdev, "llcc_base");
- if (IS_ERR(drv_data->regmap)) {
- ret = PTR_ERR(drv_data->regmap);
+ /* Initialize the first LLCC bank regmap */
+ regmap = qcom_llcc_init_mmio(pdev, 0, "llcc0_base");
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ goto err;
+ }
+
+ cfg = of_device_get_match_data(&pdev->dev);
+
+ ret = regmap_read(regmap, cfg->reg_offset[LLCC_COMMON_STATUS0], &num_banks);
+ if (ret)
+ goto err;
+
+ num_banks &= LLCC_LB_CNT_MASK;
+ num_banks >>= LLCC_LB_CNT_SHIFT;
+ drv_data->num_banks = num_banks;
+
+ drv_data->regmaps = devm_kcalloc(dev, num_banks, sizeof(*drv_data->regmaps), GFP_KERNEL);
+ if (!drv_data->regmaps) {
+ ret = -ENOMEM;
goto err;
}
- drv_data->bcast_regmap =
- qcom_llcc_init_mmio(pdev, "llcc_broadcast_base");
+ drv_data->regmaps[0] = regmap;
+
+ /* Initialize rest of LLCC bank regmaps */
+ for (i = 1; i < num_banks; i++) {
+ char *base = kasprintf(GFP_KERNEL, "llcc%d_base", i);
+
+ drv_data->regmaps[i] = qcom_llcc_init_mmio(pdev, i, base);
+ if (IS_ERR(drv_data->regmaps[i])) {
+ ret = PTR_ERR(drv_data->regmaps[i]);
+ kfree(base);
+ goto err;
+ }
+
+ kfree(base);
+ }
+
+ drv_data->bcast_regmap = qcom_llcc_init_mmio(pdev, i, "llcc_broadcast_base");
if (IS_ERR(drv_data->bcast_regmap)) {
ret = PTR_ERR(drv_data->bcast_regmap);
goto err;
}
- cfg = of_device_get_match_data(&pdev->dev);
-
/* Extract version of the IP */
ret = regmap_read(drv_data->bcast_regmap, cfg->reg_offset[LLCC_COMMON_HW_INFO],
&version);
@@ -957,15 +1004,6 @@ static int qcom_llcc_probe(struct platform_device *pdev)
drv_data->version = version;
- ret = regmap_read(drv_data->regmap, cfg->reg_offset[LLCC_COMMON_STATUS0],
- &num_banks);
- if (ret)
- goto err;
-
- num_banks &= LLCC_LB_CNT_MASK;
- num_banks >>= LLCC_LB_CNT_SHIFT;
- drv_data->num_banks = num_banks;
-
llcc_cfg = cfg->sct_data;
sz = cfg->size;
@@ -973,16 +1011,6 @@ static int qcom_llcc_probe(struct platform_device *pdev)
if (llcc_cfg[i].slice_id > drv_data->max_slices)
drv_data->max_slices = llcc_cfg[i].slice_id;
- drv_data->offsets = devm_kcalloc(dev, num_banks, sizeof(u32),
- GFP_KERNEL);
- if (!drv_data->offsets) {
- ret = -ENOMEM;
- goto err;
- }
-
- for (i = 0; i < num_banks; i++)
- drv_data->offsets[i] = i * BANK_OFFSET_STRIDE;
-
drv_data->bitmap = devm_bitmap_zalloc(dev, drv_data->max_slices,
GFP_KERNEL);
if (!drv_data->bitmap) {
@@ -1001,7 +1029,14 @@ static int qcom_llcc_probe(struct platform_device *pdev)
goto err;
drv_data->ecc_irq = platform_get_irq_optional(pdev, 0);
- if (drv_data->ecc_irq >= 0) {
+
+ /*
+ * On some platforms, the access to EDAC registers will be locked by
+ * the bootloader. So probing the EDAC driver will result in a crash.
+ * Hence, disable the creation of EDAC platform device for the
+ * problematic platforms.
+ */
+ if (!cfg->no_edac) {
llcc_edac = platform_device_register_data(&pdev->dev,
"qcom_llcc_edac", -1, drv_data,
sizeof(*drv_data));
@@ -1022,6 +1057,7 @@ static const struct of_device_id qcom_llcc_of_match[] = {
{ .compatible = "qcom,sc8280xp-llcc", .data = &sc8280xp_cfg },
{ .compatible = "qcom,sdm845-llcc", .data = &sdm845_cfg },
{ .compatible = "qcom,sm6350-llcc", .data = &sm6350_cfg },
+ { .compatible = "qcom,sm7150-llcc", .data = &sm7150_cfg },
{ .compatible = "qcom,sm8150-llcc", .data = &sm8150_cfg },
{ .compatible = "qcom,sm8250-llcc", .data = &sm8250_cfg },
{ .compatible = "qcom,sm8350-llcc", .data = &sm8350_cfg },
diff --git a/drivers/soc/qcom/pmic_glink.c b/drivers/soc/qcom/pmic_glink.c
index bb3fb57abcc6..8bf95df0a56a 100644
--- a/drivers/soc/qcom/pmic_glink.c
+++ b/drivers/soc/qcom/pmic_glink.c
@@ -4,6 +4,7 @@
* Copyright (c) 2022, Linaro Ltd
*/
#include <linux/auxiliary_bus.h>
+#include <linux/of_device.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/rpmsg.h>
@@ -11,12 +12,23 @@
#include <linux/soc/qcom/pdr.h>
#include <linux/soc/qcom/pmic_glink.h>
+enum {
+ PMIC_GLINK_CLIENT_BATT = 0,
+ PMIC_GLINK_CLIENT_ALTMODE,
+ PMIC_GLINK_CLIENT_UCSI,
+};
+
+#define PMIC_GLINK_CLIENT_DEFAULT (BIT(PMIC_GLINK_CLIENT_BATT) | \
+ BIT(PMIC_GLINK_CLIENT_ALTMODE))
+
struct pmic_glink {
struct device *dev;
struct pdr_handle *pdr;
struct rpmsg_endpoint *ept;
+ unsigned long client_mask;
+
struct auxiliary_device altmode_aux;
struct auxiliary_device ps_aux;
struct auxiliary_device ucsi_aux;
@@ -233,6 +245,7 @@ static struct rpmsg_driver pmic_glink_rpmsg_driver = {
static int pmic_glink_probe(struct platform_device *pdev)
{
+ const unsigned long *match_data;
struct pdr_service *service;
struct pmic_glink *pg;
int ret;
@@ -249,12 +262,27 @@ static int pmic_glink_probe(struct platform_device *pdev)
mutex_init(&pg->client_lock);
mutex_init(&pg->state_lock);
- ret = pmic_glink_add_aux_device(pg, &pg->altmode_aux, "altmode");
- if (ret)
- return ret;
- ret = pmic_glink_add_aux_device(pg, &pg->ps_aux, "power-supply");
- if (ret)
- goto out_release_altmode_aux;
+ match_data = (unsigned long *)of_device_get_match_data(&pdev->dev);
+ if (match_data)
+ pg->client_mask = *match_data;
+ else
+ pg->client_mask = PMIC_GLINK_CLIENT_DEFAULT;
+
+ if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_UCSI)) {
+ ret = pmic_glink_add_aux_device(pg, &pg->ucsi_aux, "ucsi");
+ if (ret)
+ return ret;
+ }
+ if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_ALTMODE)) {
+ ret = pmic_glink_add_aux_device(pg, &pg->altmode_aux, "altmode");
+ if (ret)
+ goto out_release_ucsi_aux;
+ }
+ if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_BATT)) {
+ ret = pmic_glink_add_aux_device(pg, &pg->ps_aux, "power-supply");
+ if (ret)
+ goto out_release_altmode_aux;
+ }
pg->pdr = pdr_handle_alloc(pmic_glink_pdr_callback, pg);
if (IS_ERR(pg->pdr)) {
@@ -278,9 +306,14 @@ static int pmic_glink_probe(struct platform_device *pdev)
out_release_pdr_handle:
pdr_handle_release(pg->pdr);
out_release_aux_devices:
- pmic_glink_del_aux_device(pg, &pg->ps_aux);
+ if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_BATT))
+ pmic_glink_del_aux_device(pg, &pg->ps_aux);
out_release_altmode_aux:
- pmic_glink_del_aux_device(pg, &pg->altmode_aux);
+ if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_ALTMODE))
+ pmic_glink_del_aux_device(pg, &pg->altmode_aux);
+out_release_ucsi_aux:
+ if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_UCSI))
+ pmic_glink_del_aux_device(pg, &pg->ucsi_aux);
return ret;
}
@@ -291,8 +324,12 @@ static int pmic_glink_remove(struct platform_device *pdev)
pdr_handle_release(pg->pdr);
- pmic_glink_del_aux_device(pg, &pg->ps_aux);
- pmic_glink_del_aux_device(pg, &pg->altmode_aux);
+ if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_BATT))
+ pmic_glink_del_aux_device(pg, &pg->ps_aux);
+ if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_ALTMODE))
+ pmic_glink_del_aux_device(pg, &pg->altmode_aux);
+ if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_UCSI))
+ pmic_glink_del_aux_device(pg, &pg->ucsi_aux);
mutex_lock(&__pmic_glink_lock);
__pmic_glink = NULL;
@@ -301,8 +338,14 @@ static int pmic_glink_remove(struct platform_device *pdev)
return 0;
}
+/* Do not handle altmode for now on those platforms */
+static const unsigned long pmic_glink_sm8450_client_mask = BIT(PMIC_GLINK_CLIENT_BATT) |
+ BIT(PMIC_GLINK_CLIENT_UCSI);
+
static const struct of_device_id pmic_glink_of_match[] = {
- { .compatible = "qcom,pmic-glink", },
+ { .compatible = "qcom,sm8450-pmic-glink", .data = &pmic_glink_sm8450_client_mask },
+ { .compatible = "qcom,sm8550-pmic-glink", .data = &pmic_glink_sm8450_client_mask },
+ { .compatible = "qcom,pmic-glink" },
{}
};
MODULE_DEVICE_TABLE(of, pmic_glink_of_match);
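
A small hypothetical sketch (not part of the patch) of how another platform would restrict the client set using the new match data: define a mask and hang it off a dedicated compatible, exactly like the sm8450/sm8550 entries above. The "example" names are placeholders.

	/* Hypothetical platform that only needs the battery client. */
	static const unsigned long pmic_glink_example_client_mask = BIT(PMIC_GLINK_CLIENT_BATT);

	/* ...and its of_device_id entry, added alongside the existing ones: */
	{ .compatible = "qcom,example-pmic-glink", .data = &pmic_glink_example_client_mask },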
diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c
index 18c856056475..e376c32cc16e 100644
--- a/drivers/soc/qcom/qcom_aoss.c
+++ b/drivers/soc/qcom/qcom_aoss.c
@@ -395,7 +395,7 @@ static int qmp_cooling_devices_register(struct qmp *qmp)
return -ENOMEM;
for_each_available_child_of_node(np, child) {
- if (!of_find_property(child, "#cooling-cells", NULL))
+ if (!of_property_present(child, "#cooling-cells"))
continue;
ret = qmp_cooling_device_add(qmp, &qmp->cooling_devs[count++],
child);
diff --git a/drivers/soc/qcom/qcom_gsbi.c b/drivers/soc/qcom/qcom_gsbi.c
index 290bdefbf28a..f1742e5bddb9 100644
--- a/drivers/soc/qcom/qcom_gsbi.c
+++ b/drivers/soc/qcom/qcom_gsbi.c
@@ -114,7 +114,7 @@ struct gsbi_info {
struct regmap *tcsr;
};
-static const struct of_device_id tcsr_dt_match[] = {
+static const struct of_device_id tcsr_dt_match[] __maybe_unused = {
{ .compatible = "qcom,tcsr-ipq8064", .data = &config_ipq8064},
{ .compatible = "qcom,tcsr-apq8064", .data = &config_apq8064},
{ .compatible = "qcom,tcsr-msm8960", .data = &config_msm8960},
diff --git a/drivers/soc/qcom/rmtfs_mem.c b/drivers/soc/qcom/rmtfs_mem.c
index 538fa182169a..ce48a9f3b4c8 100644
--- a/drivers/soc/qcom/rmtfs_mem.c
+++ b/drivers/soc/qcom/rmtfs_mem.c
@@ -31,7 +31,7 @@ struct qcom_rmtfs_mem {
unsigned int client_id;
- unsigned int perms;
+ u64 perms;
};
static ssize_t qcom_rmtfs_mem_show(struct device *dev,
@@ -126,7 +126,6 @@ static int qcom_rmtfs_mem_release(struct inode *inode, struct file *filp)
}
static struct class rmtfs_class = {
- .owner = THIS_MODULE,
.name = "rmtfs",
};
diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
index 0f8b2249f889..f93544f6d796 100644
--- a/drivers/soc/qcom/rpmh-rsc.c
+++ b/drivers/soc/qcom/rpmh-rsc.c
@@ -1073,7 +1073,7 @@ static int rpmh_rsc_probe(struct platform_device *pdev)
drv->ver.minor = rsc_id & (MINOR_VER_MASK << MINOR_VER_SHIFT);
drv->ver.minor >>= MINOR_VER_SHIFT;
- if (drv->ver.major == 3 && drv->ver.minor == 0)
+ if (drv->ver.major == 3 && drv->ver.minor >= 0)
drv->regs = rpmh_rsc_reg_offset_ver_3_0;
else
drv->regs = rpmh_rsc_reg_offset_ver_2_7;
diff --git a/drivers/soc/qcom/rpmpd.c b/drivers/soc/qcom/rpmpd.c
index 337b1ad1cd3b..f8397dcb146c 100644
--- a/drivers/soc/qcom/rpmpd.c
+++ b/drivers/soc/qcom/rpmpd.c
@@ -40,57 +40,6 @@
#define MAX_CORNER_RPMPD_STATE 6
-#define DEFINE_RPMPD_PAIR(_platform, _name, _active, r_type, r_key, \
- r_id) \
- static struct rpmpd _platform##_##_active; \
- static struct rpmpd _platform##_##_name = { \
- .pd = { .name = #_name, }, \
- .peer = &_platform##_##_active, \
- .res_type = RPMPD_##r_type, \
- .res_id = r_id, \
- .key = KEY_##r_key, \
- }; \
- static struct rpmpd _platform##_##_active = { \
- .pd = { .name = #_active, }, \
- .peer = &_platform##_##_name, \
- .active_only = true, \
- .res_type = RPMPD_##r_type, \
- .res_id = r_id, \
- .key = KEY_##r_key, \
- }
-
-#define DEFINE_RPMPD_CORNER(_platform, _name, r_type, r_id) \
- static struct rpmpd _platform##_##_name = { \
- .pd = { .name = #_name, }, \
- .res_type = RPMPD_##r_type, \
- .res_id = r_id, \
- .key = KEY_CORNER, \
- }
-
-#define DEFINE_RPMPD_LEVEL(_platform, _name, r_type, r_id) \
- static struct rpmpd _platform##_##_name = { \
- .pd = { .name = #_name, }, \
- .res_type = RPMPD_##r_type, \
- .res_id = r_id, \
- .key = KEY_LEVEL, \
- }
-
-#define DEFINE_RPMPD_VFC(_platform, _name, r_type, r_id) \
- static struct rpmpd _platform##_##_name = { \
- .pd = { .name = #_name, }, \
- .res_type = RPMPD_##r_type, \
- .res_id = r_id, \
- .key = KEY_FLOOR_CORNER, \
- }
-
-#define DEFINE_RPMPD_VFL(_platform, _name, r_type, r_id) \
- static struct rpmpd _platform##_##_name = { \
- .pd = { .name = #_name, }, \
- .res_type = RPMPD_##r_type, \
- .res_id = r_id, \
- .key = KEY_FLOOR_LEVEL, \
- }
-
struct rpmpd_req {
__le32 key;
__le32 nbytes;
@@ -99,6 +48,7 @@ struct rpmpd_req {
struct rpmpd {
struct generic_pm_domain pd;
+ struct generic_pm_domain *parent;
struct rpmpd *peer;
const bool active_only;
unsigned int corner;
@@ -118,19 +68,459 @@ struct rpmpd_desc {
static DEFINE_MUTEX(rpmpd_lock);
-/* mdm9607 RPM Power Domains */
-DEFINE_RPMPD_PAIR(mdm9607, vddcx, vddcx_ao, SMPA, LEVEL, 3);
-DEFINE_RPMPD_VFL(mdm9607, vddcx_vfl, SMPA, 3);
+/* CX */
+static struct rpmpd cx_rwcx0_lvl_ao;
+static struct rpmpd cx_rwcx0_lvl = {
+ .pd = { .name = "cx", },
+ .peer = &cx_rwcx0_lvl_ao,
+ .res_type = RPMPD_RWCX,
+ .res_id = 0,
+ .key = KEY_LEVEL,
+};
+
+static struct rpmpd cx_rwcx0_lvl_ao = {
+ .pd = { .name = "cx_ao", },
+ .peer = &cx_rwcx0_lvl,
+ .active_only = true,
+ .res_type = RPMPD_RWCX,
+ .res_id = 0,
+ .key = KEY_LEVEL,
+};
+
+static struct rpmpd cx_s1a_corner_ao;
+static struct rpmpd cx_s1a_corner = {
+ .pd = { .name = "cx", },
+ .peer = &cx_s1a_corner_ao,
+ .res_type = RPMPD_SMPA,
+ .res_id = 1,
+ .key = KEY_CORNER,
+};
+
+static struct rpmpd cx_s1a_corner_ao = {
+ .pd = { .name = "cx_ao", },
+ .peer = &cx_s1a_corner,
+ .active_only = true,
+ .res_type = RPMPD_SMPA,
+ .res_id = 1,
+ .key = KEY_CORNER,
+};
+
+static struct rpmpd cx_s2a_corner_ao;
+static struct rpmpd cx_s2a_corner = {
+ .pd = { .name = "cx", },
+ .peer = &cx_s2a_corner_ao,
+ .res_type = RPMPD_SMPA,
+ .res_id = 2,
+ .key = KEY_CORNER,
+};
+
+static struct rpmpd cx_s2a_corner_ao = {
+ .pd = { .name = "cx_ao", },
+ .peer = &cx_s2a_corner,
+ .active_only = true,
+ .res_type = RPMPD_SMPA,
+ .res_id = 2,
+ .key = KEY_CORNER,
+};
+
+static struct rpmpd cx_s2a_lvl_ao;
+static struct rpmpd cx_s2a_lvl = {
+ .pd = { .name = "cx", },
+ .peer = &cx_s2a_lvl_ao,
+ .res_type = RPMPD_SMPA,
+ .res_id = 2,
+ .key = KEY_LEVEL,
+};
+
+static struct rpmpd cx_s2a_lvl_ao = {
+ .pd = { .name = "cx_ao", },
+ .peer = &cx_s2a_lvl,
+ .active_only = true,
+ .res_type = RPMPD_SMPA,
+ .res_id = 2,
+ .key = KEY_LEVEL,
+};
+
+static struct rpmpd cx_s3a_lvl_ao;
+static struct rpmpd cx_s3a_lvl = {
+ .pd = { .name = "cx", },
+ .peer = &cx_s3a_lvl_ao,
+ .res_type = RPMPD_SMPA,
+ .res_id = 3,
+ .key = KEY_LEVEL,
+};
+
+static struct rpmpd cx_s3a_lvl_ao = {
+ .pd = { .name = "cx_ao", },
+ .peer = &cx_s3a_lvl,
+ .active_only = true,
+ .res_type = RPMPD_SMPA,
+ .res_id = 3,
+ .key = KEY_LEVEL,
+};
+
+static struct rpmpd cx_rwcx0_vfl = {
+ .pd = { .name = "cx_vfl", },
+ .res_type = RPMPD_RWCX,
+ .res_id = 0,
+ .key = KEY_FLOOR_LEVEL,
+};
+
+static struct rpmpd cx_rwsc2_vfl = {
+ .pd = { .name = "cx_vfl", },
+ .res_type = RPMPD_RWSC,
+ .res_id = 2,
+ .key = KEY_FLOOR_LEVEL,
+};
+
+static struct rpmpd cx_s1a_vfc = {
+ .pd = { .name = "cx_vfc", },
+ .res_type = RPMPD_SMPA,
+ .res_id = 1,
+ .key = KEY_FLOOR_CORNER,
+};
+
+static struct rpmpd cx_s2a_vfc = {
+ .pd = { .name = "cx_vfc", },
+ .res_type = RPMPD_SMPA,
+ .res_id = 2,
+ .key = KEY_FLOOR_CORNER,
+};
+
+static struct rpmpd cx_s2a_vfl = {
+ .pd = { .name = "cx_vfl", },
+ .res_type = RPMPD_SMPA,
+ .res_id = 2,
+ .key = KEY_FLOOR_LEVEL,
+};
+
+static struct rpmpd cx_s3a_vfl = {
+ .pd = { .name = "cx_vfl", },
+ .res_type = RPMPD_SMPA,
+ .res_id = 3,
+ .key = KEY_FLOOR_LEVEL,
+};
+
+/* G(F)X */
+static struct rpmpd gfx_s2b_corner = {
+ .pd = { .name = "gfx", },
+ .res_type = RPMPD_SMPB,
+ .res_id = 2,
+ .key = KEY_CORNER,
+};
+
+static struct rpmpd gfx_s2b_vfc = {
+ .pd = { .name = "gfx_vfc", },
+ .res_type = RPMPD_SMPB,
+ .res_id = 2,
+ .key = KEY_FLOOR_CORNER,
+};
+
+static struct rpmpd mx_rwmx0_lvl;
+static struct rpmpd gx_rwgx0_lvl_ao;
+static struct rpmpd gx_rwgx0_lvl = {
+ .pd = { .name = "gx", },
+ .peer = &gx_rwgx0_lvl_ao,
+ .res_type = RPMPD_RWGX,
+ .parent = &mx_rwmx0_lvl.pd,
+ .res_id = 0,
+ .key = KEY_LEVEL,
+};
+
+static struct rpmpd mx_rwmx0_lvl_ao;
+static struct rpmpd gx_rwgx0_lvl_ao = {
+ .pd = { .name = "gx_ao", },
+ .peer = &gx_rwgx0_lvl,
+ .parent = &mx_rwmx0_lvl_ao.pd,
+ .active_only = true,
+ .res_type = RPMPD_RWGX,
+ .res_id = 0,
+ .key = KEY_LEVEL,
+};
+
+/* MX */
+static struct rpmpd mx_l3a_corner_ao;
+static struct rpmpd mx_l3a_corner = {
+ .pd = { .name = "mx", },
+ .peer = &mx_l3a_corner_ao,
+ .res_type = RPMPD_LDOA,
+ .res_id = 3,
+ .key = KEY_CORNER,
+};
+
+static struct rpmpd mx_l3a_corner_ao = {
+ .pd = { .name = "mx_ao", },
+ .peer = &mx_l3a_corner,
+ .active_only = true,
+ .res_type = RPMPD_LDOA,
+ .res_id = 3,
+ .key = KEY_CORNER,
+};
+
+static struct rpmpd mx_l12a_lvl_ao;
+static struct rpmpd mx_l12a_lvl = {
+ .pd = { .name = "mx", },
+ .peer = &mx_l12a_lvl_ao,
+ .res_type = RPMPD_LDOA,
+ .res_id = 12,
+ .key = KEY_LEVEL,
+};
+
+static struct rpmpd mx_l12a_lvl_ao = {
+ .pd = { .name = "mx_ao", },
+ .peer = &mx_l12a_lvl,
+ .active_only = true,
+ .res_type = RPMPD_LDOA,
+ .res_id = 12,
+ .key = KEY_LEVEL,
+};
+
+static struct rpmpd mx_s2a_corner_ao;
+static struct rpmpd mx_s2a_corner = {
+ .pd = { .name = "mx", },
+ .peer = &mx_s2a_corner_ao,
+ .res_type = RPMPD_SMPA,
+ .res_id = 2,
+ .key = KEY_CORNER,
+};
+
+static struct rpmpd mx_s2a_corner_ao = {
+ .pd = { .name = "mx_ao", },
+ .peer = &mx_s2a_corner,
+ .active_only = true,
+ .res_type = RPMPD_SMPA,
+ .res_id = 2,
+ .key = KEY_CORNER,
+};
+
+static struct rpmpd mx_rwmx0_lvl_ao;
+static struct rpmpd mx_rwmx0_lvl = {
+ .pd = { .name = "mx", },
+ .peer = &mx_rwmx0_lvl_ao,
+ .res_type = RPMPD_RWMX,
+ .res_id = 0,
+ .key = KEY_LEVEL,
+};
+
+static struct rpmpd mx_rwmx0_lvl_ao = {
+ .pd = { .name = "mx_ao", },
+ .peer = &mx_rwmx0_lvl,
+ .active_only = true,
+ .res_type = RPMPD_RWMX,
+ .res_id = 0,
+ .key = KEY_LEVEL,
+};
+
+static struct rpmpd mx_s6a_lvl_ao;
+static struct rpmpd mx_s6a_lvl = {
+ .pd = { .name = "mx", },
+ .peer = &mx_s6a_lvl_ao,
+ .res_type = RPMPD_SMPA,
+ .res_id = 6,
+ .key = KEY_LEVEL,
+};
+
+static struct rpmpd mx_s6a_lvl_ao = {
+ .pd = { .name = "mx_ao", },
+ .peer = &mx_s6a_lvl,
+ .active_only = true,
+ .res_type = RPMPD_SMPA,
+ .res_id = 6,
+ .key = KEY_LEVEL,
+};
+
+static struct rpmpd mx_s7a_lvl_ao;
+static struct rpmpd mx_s7a_lvl = {
+ .pd = { .name = "mx", },
+ .peer = &mx_s7a_lvl_ao,
+ .res_type = RPMPD_SMPA,
+ .res_id = 7,
+ .key = KEY_LEVEL,
+};
+
+static struct rpmpd mx_s7a_lvl_ao = {
+ .pd = { .name = "mx_ao", },
+ .peer = &mx_s7a_lvl,
+ .active_only = true,
+ .res_type = RPMPD_SMPA,
+ .res_id = 7,
+ .key = KEY_LEVEL,
+};
+
+static struct rpmpd mx_l12a_vfl = {
+ .pd = { .name = "mx_vfl", },
+ .res_type = RPMPD_LDOA,
+ .res_id = 12,
+ .key = KEY_FLOOR_LEVEL,
+};
+
+static struct rpmpd mx_rwmx0_vfl = {
+ .pd = { .name = "mx_vfl", },
+ .res_type = RPMPD_RWMX,
+ .res_id = 0,
+ .key = KEY_FLOOR_LEVEL,
+};
+
+static struct rpmpd mx_rwsm6_vfl = {
+ .pd = { .name = "mx_vfl", },
+ .res_type = RPMPD_RWSM,
+ .res_id = 6,
+ .key = KEY_FLOOR_LEVEL,
+};
+
+/* MD */
+static struct rpmpd md_s1a_corner_ao;
+static struct rpmpd md_s1a_corner = {
+ .pd = { .name = "md", },
+ .peer = &md_s1a_corner_ao,
+ .res_type = RPMPD_SMPA,
+ .res_id = 1,
+ .key = KEY_CORNER,
+};
+
+static struct rpmpd md_s1a_corner_ao = {
+ .pd = { .name = "md_ao", },
+ .peer = &md_s1a_corner,
+ .active_only = true,
+ .res_type = RPMPD_SMPA,
+ .res_id = 1,
+ .key = KEY_CORNER,
+};
+
+static struct rpmpd md_s1a_lvl_ao;
+static struct rpmpd md_s1a_lvl = {
+ .pd = { .name = "md", },
+ .peer = &md_s1a_lvl_ao,
+ .res_type = RPMPD_SMPA,
+ .res_id = 1,
+ .key = KEY_LEVEL,
+};
+
+static struct rpmpd md_s1a_lvl_ao = {
+ .pd = { .name = "md_ao", },
+ .peer = &md_s1a_lvl,
+ .active_only = true,
+ .res_type = RPMPD_SMPA,
+ .res_id = 1,
+ .key = KEY_LEVEL,
+};
+
+static struct rpmpd md_s1a_vfc = {
+ .pd = { .name = "md_vfc", },
+ .res_type = RPMPD_SMPA,
+ .res_id = 1,
+ .key = KEY_FLOOR_CORNER,
+};
+
+/* LPI_CX */
+static struct rpmpd lpi_cx_rwlc0_lvl = {
+ .pd = { .name = "lpi_cx", },
+ .res_type = RPMPD_RWLC,
+ .res_id = 0,
+ .key = KEY_LEVEL,
+};
+
+static struct rpmpd lpi_cx_rwlc0_vfl = {
+ .pd = { .name = "lpi_cx_vfl", },
+ .res_type = RPMPD_RWLC,
+ .res_id = 0,
+ .key = KEY_FLOOR_LEVEL,
+};
+
+/* LPI_MX */
+static struct rpmpd lpi_mx_rwlm0_lvl = {
+ .pd = { .name = "lpi_mx", },
+ .res_type = RPMPD_RWLM,
+ .res_id = 0,
+ .key = KEY_LEVEL,
+};
+
+static struct rpmpd lpi_mx_rwlm0_vfl = {
+ .pd = { .name = "lpi_mx_vfl", },
+ .res_type = RPMPD_RWLM,
+ .res_id = 0,
+ .key = KEY_FLOOR_LEVEL,
+};
+
+/* SSC_CX */
+static struct rpmpd ssc_cx_l26a_corner = {
+ .pd = { .name = "ssc_cx", },
+ .res_type = RPMPD_LDOA,
+ .res_id = 26,
+ .key = KEY_CORNER,
+};
+
+static struct rpmpd ssc_cx_rwlc0_lvl = {
+ .pd = { .name = "ssc_cx", },
+ .res_type = RPMPD_RWLC,
+ .res_id = 0,
+ .key = KEY_LEVEL,
+};
+
+static struct rpmpd ssc_cx_rwsc0_lvl = {
+ .pd = { .name = "ssc_cx", },
+ .res_type = RPMPD_RWSC,
+ .res_id = 0,
+ .key = KEY_LEVEL,
+};
+
+static struct rpmpd ssc_cx_l26a_vfc = {
+ .pd = { .name = "ssc_cx_vfc", },
+ .res_type = RPMPD_LDOA,
+ .res_id = 26,
+ .key = KEY_FLOOR_CORNER,
+};
+
+static struct rpmpd ssc_cx_rwlc0_vfl = {
+ .pd = { .name = "ssc_cx_vfl", },
+ .res_type = RPMPD_RWLC,
+ .res_id = 0,
+ .key = KEY_FLOOR_LEVEL,
+};
+
+static struct rpmpd ssc_cx_rwsc0_vfl = {
+ .pd = { .name = "ssc_cx_vfl", },
+ .res_type = RPMPD_RWSC,
+ .res_id = 0,
+ .key = KEY_FLOOR_LEVEL,
+};
+
+/* SSC_MX */
+static struct rpmpd ssc_mx_rwlm0_lvl = {
+ .pd = { .name = "ssc_mx", },
+ .res_type = RPMPD_RWLM,
+ .res_id = 0,
+ .key = KEY_LEVEL,
+};
+
+static struct rpmpd ssc_mx_rwsm0_lvl = {
+ .pd = { .name = "ssc_mx", },
+ .res_type = RPMPD_RWSM,
+ .res_id = 0,
+ .key = KEY_LEVEL,
+};
+
+static struct rpmpd ssc_mx_rwlm0_vfl = {
+ .pd = { .name = "ssc_mx_vfl", },
+ .res_type = RPMPD_RWLM,
+ .res_id = 0,
+ .key = KEY_FLOOR_LEVEL,
+};
+
+static struct rpmpd ssc_mx_rwsm0_vfl = {
+ .pd = { .name = "ssc_mx_vfl", },
+ .res_type = RPMPD_RWSM,
+ .res_id = 0,
+ .key = KEY_FLOOR_LEVEL,
+};
-DEFINE_RPMPD_PAIR(mdm9607, vddmx, vddmx_ao, LDOA, LEVEL, 12);
-DEFINE_RPMPD_VFL(mdm9607, vddmx_vfl, LDOA, 12);
static struct rpmpd *mdm9607_rpmpds[] = {
- [MDM9607_VDDCX] = &mdm9607_vddcx,
- [MDM9607_VDDCX_AO] = &mdm9607_vddcx_ao,
- [MDM9607_VDDCX_VFL] = &mdm9607_vddcx_vfl,
- [MDM9607_VDDMX] = &mdm9607_vddmx,
- [MDM9607_VDDMX_AO] = &mdm9607_vddmx_ao,
- [MDM9607_VDDMX_VFL] = &mdm9607_vddmx_vfl,
+ [MDM9607_VDDCX] = &cx_s3a_lvl,
+ [MDM9607_VDDCX_AO] = &cx_s3a_lvl_ao,
+ [MDM9607_VDDCX_VFL] = &cx_s3a_vfl,
+ [MDM9607_VDDMX] = &mx_l12a_lvl,
+ [MDM9607_VDDMX_AO] = &mx_l12a_lvl_ao,
+ [MDM9607_VDDMX_VFL] = &mx_l12a_vfl,
};
static const struct rpmpd_desc mdm9607_desc = {
@@ -139,14 +529,10 @@ static const struct rpmpd_desc mdm9607_desc = {
.max_state = RPM_SMD_LEVEL_TURBO,
};
-/* msm8226 RPM Power Domains */
-DEFINE_RPMPD_PAIR(msm8226, vddcx, vddcx_ao, SMPA, CORNER, 1);
-DEFINE_RPMPD_VFC(msm8226, vddcx_vfc, SMPA, 1);
-
static struct rpmpd *msm8226_rpmpds[] = {
- [MSM8226_VDDCX] = &msm8226_vddcx,
- [MSM8226_VDDCX_AO] = &msm8226_vddcx_ao,
- [MSM8226_VDDCX_VFC] = &msm8226_vddcx_vfc,
+ [MSM8226_VDDCX] = &cx_s1a_corner,
+ [MSM8226_VDDCX_AO] = &cx_s1a_corner_ao,
+ [MSM8226_VDDCX_VFC] = &cx_s1a_vfc,
};
static const struct rpmpd_desc msm8226_desc = {
@@ -155,24 +541,15 @@ static const struct rpmpd_desc msm8226_desc = {
.max_state = MAX_CORNER_RPMPD_STATE,
};
-/* msm8939 RPM Power Domains */
-DEFINE_RPMPD_PAIR(msm8939, vddmd, vddmd_ao, SMPA, CORNER, 1);
-DEFINE_RPMPD_VFC(msm8939, vddmd_vfc, SMPA, 1);
-
-DEFINE_RPMPD_PAIR(msm8939, vddcx, vddcx_ao, SMPA, CORNER, 2);
-DEFINE_RPMPD_VFC(msm8939, vddcx_vfc, SMPA, 2);
-
-DEFINE_RPMPD_PAIR(msm8939, vddmx, vddmx_ao, LDOA, CORNER, 3);
-
static struct rpmpd *msm8939_rpmpds[] = {
- [MSM8939_VDDMDCX] = &msm8939_vddmd,
- [MSM8939_VDDMDCX_AO] = &msm8939_vddmd_ao,
- [MSM8939_VDDMDCX_VFC] = &msm8939_vddmd_vfc,
- [MSM8939_VDDCX] = &msm8939_vddcx,
- [MSM8939_VDDCX_AO] = &msm8939_vddcx_ao,
- [MSM8939_VDDCX_VFC] = &msm8939_vddcx_vfc,
- [MSM8939_VDDMX] = &msm8939_vddmx,
- [MSM8939_VDDMX_AO] = &msm8939_vddmx_ao,
+ [MSM8939_VDDMDCX] = &md_s1a_corner,
+ [MSM8939_VDDMDCX_AO] = &md_s1a_corner_ao,
+ [MSM8939_VDDMDCX_VFC] = &md_s1a_vfc,
+ [MSM8939_VDDCX] = &cx_s2a_corner,
+ [MSM8939_VDDCX_AO] = &cx_s2a_corner_ao,
+ [MSM8939_VDDCX_VFC] = &cx_s2a_vfc,
+ [MSM8939_VDDMX] = &mx_l3a_corner,
+ [MSM8939_VDDMX_AO] = &mx_l3a_corner_ao,
};
static const struct rpmpd_desc msm8939_desc = {
@@ -181,18 +558,12 @@ static const struct rpmpd_desc msm8939_desc = {
.max_state = MAX_CORNER_RPMPD_STATE,
};
-/* msm8916 RPM Power Domains */
-DEFINE_RPMPD_PAIR(msm8916, vddcx, vddcx_ao, SMPA, CORNER, 1);
-DEFINE_RPMPD_PAIR(msm8916, vddmx, vddmx_ao, LDOA, CORNER, 3);
-
-DEFINE_RPMPD_VFC(msm8916, vddcx_vfc, SMPA, 1);
-
static struct rpmpd *msm8916_rpmpds[] = {
- [MSM8916_VDDCX] = &msm8916_vddcx,
- [MSM8916_VDDCX_AO] = &msm8916_vddcx_ao,
- [MSM8916_VDDCX_VFC] = &msm8916_vddcx_vfc,
- [MSM8916_VDDMX] = &msm8916_vddmx,
- [MSM8916_VDDMX_AO] = &msm8916_vddmx_ao,
+ [MSM8916_VDDCX] = &cx_s1a_corner,
+ [MSM8916_VDDCX_AO] = &cx_s1a_corner_ao,
+ [MSM8916_VDDCX_VFC] = &cx_s1a_vfc,
+ [MSM8916_VDDMX] = &mx_l3a_corner,
+ [MSM8916_VDDMX_AO] = &mx_l3a_corner_ao,
};
static const struct rpmpd_desc msm8916_desc = {
@@ -201,21 +572,14 @@ static const struct rpmpd_desc msm8916_desc = {
.max_state = MAX_CORNER_RPMPD_STATE,
};
-/* msm8953 RPM Power Domains */
-DEFINE_RPMPD_PAIR(msm8953, vddmd, vddmd_ao, SMPA, LEVEL, 1);
-DEFINE_RPMPD_PAIR(msm8953, vddcx, vddcx_ao, SMPA, LEVEL, 2);
-DEFINE_RPMPD_PAIR(msm8953, vddmx, vddmx_ao, SMPA, LEVEL, 7);
-
-DEFINE_RPMPD_VFL(msm8953, vddcx_vfl, SMPA, 2);
-
static struct rpmpd *msm8953_rpmpds[] = {
- [MSM8953_VDDMD] = &msm8953_vddmd,
- [MSM8953_VDDMD_AO] = &msm8953_vddmd_ao,
- [MSM8953_VDDCX] = &msm8953_vddcx,
- [MSM8953_VDDCX_AO] = &msm8953_vddcx_ao,
- [MSM8953_VDDCX_VFL] = &msm8953_vddcx_vfl,
- [MSM8953_VDDMX] = &msm8953_vddmx,
- [MSM8953_VDDMX_AO] = &msm8953_vddmx_ao,
+ [MSM8953_VDDMD] = &md_s1a_lvl,
+ [MSM8953_VDDMD_AO] = &md_s1a_lvl_ao,
+ [MSM8953_VDDCX] = &cx_s2a_lvl,
+ [MSM8953_VDDCX_AO] = &cx_s2a_lvl_ao,
+ [MSM8953_VDDCX_VFL] = &cx_s2a_vfl,
+ [MSM8953_VDDMX] = &mx_s7a_lvl,
+ [MSM8953_VDDMX_AO] = &mx_s7a_lvl_ao,
};
static const struct rpmpd_desc msm8953_desc = {
@@ -224,20 +588,13 @@ static const struct rpmpd_desc msm8953_desc = {
.max_state = RPM_SMD_LEVEL_TURBO,
};
-/* msm8976 RPM Power Domains */
-DEFINE_RPMPD_PAIR(msm8976, vddcx, vddcx_ao, SMPA, LEVEL, 2);
-DEFINE_RPMPD_PAIR(msm8976, vddmx, vddmx_ao, SMPA, LEVEL, 6);
-
-DEFINE_RPMPD_VFL(msm8976, vddcx_vfl, RWSC, 2);
-DEFINE_RPMPD_VFL(msm8976, vddmx_vfl, RWSM, 6);
-
static struct rpmpd *msm8976_rpmpds[] = {
- [MSM8976_VDDCX] = &msm8976_vddcx,
- [MSM8976_VDDCX_AO] = &msm8976_vddcx_ao,
- [MSM8976_VDDCX_VFL] = &msm8976_vddcx_vfl,
- [MSM8976_VDDMX] = &msm8976_vddmx,
- [MSM8976_VDDMX_AO] = &msm8976_vddmx_ao,
- [MSM8976_VDDMX_VFL] = &msm8976_vddmx_vfl,
+ [MSM8976_VDDCX] = &cx_s2a_lvl,
+ [MSM8976_VDDCX_AO] = &cx_s2a_lvl_ao,
+ [MSM8976_VDDCX_VFL] = &cx_rwsc2_vfl,
+ [MSM8976_VDDMX] = &mx_s6a_lvl,
+ [MSM8976_VDDMX_AO] = &mx_s6a_lvl_ao,
+ [MSM8976_VDDMX_VFL] = &mx_rwsm6_vfl,
};
static const struct rpmpd_desc msm8976_desc = {
@@ -246,23 +603,16 @@ static const struct rpmpd_desc msm8976_desc = {
.max_state = RPM_SMD_LEVEL_TURBO_HIGH,
};
-/* msm8994 RPM Power domains */
-DEFINE_RPMPD_PAIR(msm8994, vddcx, vddcx_ao, SMPA, CORNER, 1);
-DEFINE_RPMPD_PAIR(msm8994, vddmx, vddmx_ao, SMPA, CORNER, 2);
-/* Attention! *Some* 8994 boards with pm8004 may use SMPC here! */
-DEFINE_RPMPD_CORNER(msm8994, vddgfx, SMPB, 2);
-
-DEFINE_RPMPD_VFC(msm8994, vddcx_vfc, SMPA, 1);
-DEFINE_RPMPD_VFC(msm8994, vddgfx_vfc, SMPB, 2);
-
static struct rpmpd *msm8994_rpmpds[] = {
- [MSM8994_VDDCX] = &msm8994_vddcx,
- [MSM8994_VDDCX_AO] = &msm8994_vddcx_ao,
- [MSM8994_VDDCX_VFC] = &msm8994_vddcx_vfc,
- [MSM8994_VDDMX] = &msm8994_vddmx,
- [MSM8994_VDDMX_AO] = &msm8994_vddmx_ao,
- [MSM8994_VDDGFX] = &msm8994_vddgfx,
- [MSM8994_VDDGFX_VFC] = &msm8994_vddgfx_vfc,
+ [MSM8994_VDDCX] = &cx_s1a_corner,
+ [MSM8994_VDDCX_AO] = &cx_s1a_corner_ao,
+ [MSM8994_VDDCX_VFC] = &cx_s1a_vfc,
+ [MSM8994_VDDMX] = &mx_s2a_corner,
+ [MSM8994_VDDMX_AO] = &mx_s2a_corner_ao,
+
+ /* Attention! *Some* 8994 boards with pm8004 may use SMPC here! */
+ [MSM8994_VDDGFX] = &gfx_s2b_corner,
+ [MSM8994_VDDGFX_VFC] = &gfx_s2b_vfc,
};
static const struct rpmpd_desc msm8994_desc = {
@@ -271,22 +621,14 @@ static const struct rpmpd_desc msm8994_desc = {
.max_state = MAX_CORNER_RPMPD_STATE,
};
-/* msm8996 RPM Power domains */
-DEFINE_RPMPD_PAIR(msm8996, vddcx, vddcx_ao, SMPA, CORNER, 1);
-DEFINE_RPMPD_PAIR(msm8996, vddmx, vddmx_ao, SMPA, CORNER, 2);
-DEFINE_RPMPD_CORNER(msm8996, vddsscx, LDOA, 26);
-
-DEFINE_RPMPD_VFC(msm8996, vddcx_vfc, SMPA, 1);
-DEFINE_RPMPD_VFC(msm8996, vddsscx_vfc, LDOA, 26);
-
static struct rpmpd *msm8996_rpmpds[] = {
- [MSM8996_VDDCX] = &msm8996_vddcx,
- [MSM8996_VDDCX_AO] = &msm8996_vddcx_ao,
- [MSM8996_VDDCX_VFC] = &msm8996_vddcx_vfc,
- [MSM8996_VDDMX] = &msm8996_vddmx,
- [MSM8996_VDDMX_AO] = &msm8996_vddmx_ao,
- [MSM8996_VDDSSCX] = &msm8996_vddsscx,
- [MSM8996_VDDSSCX_VFC] = &msm8996_vddsscx_vfc,
+ [MSM8996_VDDCX] = &cx_s1a_corner,
+ [MSM8996_VDDCX_AO] = &cx_s1a_corner_ao,
+ [MSM8996_VDDCX_VFC] = &cx_s1a_vfc,
+ [MSM8996_VDDMX] = &mx_s2a_corner,
+ [MSM8996_VDDMX_AO] = &mx_s2a_corner_ao,
+ [MSM8996_VDDSSCX] = &ssc_cx_l26a_corner,
+ [MSM8996_VDDSSCX_VFC] = &ssc_cx_l26a_vfc,
};
static const struct rpmpd_desc msm8996_desc = {
@@ -295,30 +637,17 @@ static const struct rpmpd_desc msm8996_desc = {
.max_state = MAX_CORNER_RPMPD_STATE,
};
-/* msm8998 RPM Power domains */
-DEFINE_RPMPD_PAIR(msm8998, vddcx, vddcx_ao, RWCX, LEVEL, 0);
-DEFINE_RPMPD_VFL(msm8998, vddcx_vfl, RWCX, 0);
-
-DEFINE_RPMPD_PAIR(msm8998, vddmx, vddmx_ao, RWMX, LEVEL, 0);
-DEFINE_RPMPD_VFL(msm8998, vddmx_vfl, RWMX, 0);
-
-DEFINE_RPMPD_LEVEL(msm8998, vdd_ssccx, RWSC, 0);
-DEFINE_RPMPD_VFL(msm8998, vdd_ssccx_vfl, RWSC, 0);
-
-DEFINE_RPMPD_LEVEL(msm8998, vdd_sscmx, RWSM, 0);
-DEFINE_RPMPD_VFL(msm8998, vdd_sscmx_vfl, RWSM, 0);
-
static struct rpmpd *msm8998_rpmpds[] = {
- [MSM8998_VDDCX] = &msm8998_vddcx,
- [MSM8998_VDDCX_AO] = &msm8998_vddcx_ao,
- [MSM8998_VDDCX_VFL] = &msm8998_vddcx_vfl,
- [MSM8998_VDDMX] = &msm8998_vddmx,
- [MSM8998_VDDMX_AO] = &msm8998_vddmx_ao,
- [MSM8998_VDDMX_VFL] = &msm8998_vddmx_vfl,
- [MSM8998_SSCCX] = &msm8998_vdd_ssccx,
- [MSM8998_SSCCX_VFL] = &msm8998_vdd_ssccx_vfl,
- [MSM8998_SSCMX] = &msm8998_vdd_sscmx,
- [MSM8998_SSCMX_VFL] = &msm8998_vdd_sscmx_vfl,
+ [MSM8998_VDDCX] = &cx_rwcx0_lvl,
+ [MSM8998_VDDCX_AO] = &cx_rwcx0_lvl_ao,
+ [MSM8998_VDDCX_VFL] = &cx_rwcx0_vfl,
+ [MSM8998_VDDMX] = &mx_rwmx0_lvl,
+ [MSM8998_VDDMX_AO] = &mx_rwmx0_lvl_ao,
+ [MSM8998_VDDMX_VFL] = &mx_rwmx0_vfl,
+ [MSM8998_SSCCX] = &ssc_cx_rwsc0_lvl,
+ [MSM8998_SSCCX_VFL] = &ssc_cx_rwsc0_vfl,
+ [MSM8998_SSCMX] = &ssc_mx_rwsm0_lvl,
+ [MSM8998_SSCMX_VFL] = &ssc_mx_rwsm0_vfl,
};
static const struct rpmpd_desc msm8998_desc = {
@@ -327,24 +656,14 @@ static const struct rpmpd_desc msm8998_desc = {
.max_state = RPM_SMD_LEVEL_BINNING,
};
-/* qcs404 RPM Power domains */
-DEFINE_RPMPD_PAIR(qcs404, vddmx, vddmx_ao, RWMX, LEVEL, 0);
-DEFINE_RPMPD_VFL(qcs404, vddmx_vfl, RWMX, 0);
-
-DEFINE_RPMPD_LEVEL(qcs404, vdd_lpicx, RWLC, 0);
-DEFINE_RPMPD_VFL(qcs404, vdd_lpicx_vfl, RWLC, 0);
-
-DEFINE_RPMPD_LEVEL(qcs404, vdd_lpimx, RWLM, 0);
-DEFINE_RPMPD_VFL(qcs404, vdd_lpimx_vfl, RWLM, 0);
-
static struct rpmpd *qcs404_rpmpds[] = {
- [QCS404_VDDMX] = &qcs404_vddmx,
- [QCS404_VDDMX_AO] = &qcs404_vddmx_ao,
- [QCS404_VDDMX_VFL] = &qcs404_vddmx_vfl,
- [QCS404_LPICX] = &qcs404_vdd_lpicx,
- [QCS404_LPICX_VFL] = &qcs404_vdd_lpicx_vfl,
- [QCS404_LPIMX] = &qcs404_vdd_lpimx,
- [QCS404_LPIMX_VFL] = &qcs404_vdd_lpimx_vfl,
+ [QCS404_VDDMX] = &mx_rwmx0_lvl,
+ [QCS404_VDDMX_AO] = &mx_rwmx0_lvl_ao,
+ [QCS404_VDDMX_VFL] = &mx_rwmx0_vfl,
+ [QCS404_LPICX] = &lpi_cx_rwlc0_lvl,
+ [QCS404_LPICX_VFL] = &lpi_cx_rwlc0_vfl,
+ [QCS404_LPIMX] = &lpi_mx_rwlm0_lvl,
+ [QCS404_LPIMX_VFL] = &lpi_mx_rwlm0_vfl,
};
static const struct rpmpd_desc qcs404_desc = {
@@ -353,30 +672,17 @@ static const struct rpmpd_desc qcs404_desc = {
.max_state = RPM_SMD_LEVEL_BINNING,
};
-/* sdm660 RPM Power domains */
-DEFINE_RPMPD_PAIR(sdm660, vddcx, vddcx_ao, RWCX, LEVEL, 0);
-DEFINE_RPMPD_VFL(sdm660, vddcx_vfl, RWCX, 0);
-
-DEFINE_RPMPD_PAIR(sdm660, vddmx, vddmx_ao, RWMX, LEVEL, 0);
-DEFINE_RPMPD_VFL(sdm660, vddmx_vfl, RWMX, 0);
-
-DEFINE_RPMPD_LEVEL(sdm660, vdd_ssccx, RWLC, 0);
-DEFINE_RPMPD_VFL(sdm660, vdd_ssccx_vfl, RWLC, 0);
-
-DEFINE_RPMPD_LEVEL(sdm660, vdd_sscmx, RWLM, 0);
-DEFINE_RPMPD_VFL(sdm660, vdd_sscmx_vfl, RWLM, 0);
-
static struct rpmpd *sdm660_rpmpds[] = {
- [SDM660_VDDCX] = &sdm660_vddcx,
- [SDM660_VDDCX_AO] = &sdm660_vddcx_ao,
- [SDM660_VDDCX_VFL] = &sdm660_vddcx_vfl,
- [SDM660_VDDMX] = &sdm660_vddmx,
- [SDM660_VDDMX_AO] = &sdm660_vddmx_ao,
- [SDM660_VDDMX_VFL] = &sdm660_vddmx_vfl,
- [SDM660_SSCCX] = &sdm660_vdd_ssccx,
- [SDM660_SSCCX_VFL] = &sdm660_vdd_ssccx_vfl,
- [SDM660_SSCMX] = &sdm660_vdd_sscmx,
- [SDM660_SSCMX_VFL] = &sdm660_vdd_sscmx_vfl,
+ [SDM660_VDDCX] = &cx_rwcx0_lvl,
+ [SDM660_VDDCX_AO] = &cx_rwcx0_lvl_ao,
+ [SDM660_VDDCX_VFL] = &cx_rwcx0_vfl,
+ [SDM660_VDDMX] = &mx_rwmx0_lvl,
+ [SDM660_VDDMX_AO] = &mx_rwmx0_lvl_ao,
+ [SDM660_VDDMX_VFL] = &mx_rwmx0_vfl,
+ [SDM660_SSCCX] = &ssc_cx_rwlc0_lvl,
+ [SDM660_SSCCX_VFL] = &ssc_cx_rwlc0_vfl,
+ [SDM660_SSCMX] = &ssc_mx_rwlm0_lvl,
+ [SDM660_SSCMX_VFL] = &ssc_mx_rwlm0_vfl,
};
static const struct rpmpd_desc sdm660_desc = {
@@ -385,25 +691,15 @@ static const struct rpmpd_desc sdm660_desc = {
.max_state = RPM_SMD_LEVEL_TURBO,
};
-/* sm4250/6115 RPM Power domains */
-DEFINE_RPMPD_PAIR(sm6115, vddcx, vddcx_ao, RWCX, LEVEL, 0);
-DEFINE_RPMPD_VFL(sm6115, vddcx_vfl, RWCX, 0);
-
-DEFINE_RPMPD_PAIR(sm6115, vddmx, vddmx_ao, RWMX, LEVEL, 0);
-DEFINE_RPMPD_VFL(sm6115, vddmx_vfl, RWMX, 0);
-
-DEFINE_RPMPD_LEVEL(sm6115, vdd_lpi_cx, RWLC, 0);
-DEFINE_RPMPD_LEVEL(sm6115, vdd_lpi_mx, RWLM, 0);
-
static struct rpmpd *sm6115_rpmpds[] = {
- [SM6115_VDDCX] = &sm6115_vddcx,
- [SM6115_VDDCX_AO] = &sm6115_vddcx_ao,
- [SM6115_VDDCX_VFL] = &sm6115_vddcx_vfl,
- [SM6115_VDDMX] = &sm6115_vddmx,
- [SM6115_VDDMX_AO] = &sm6115_vddmx_ao,
- [SM6115_VDDMX_VFL] = &sm6115_vddmx_vfl,
- [SM6115_VDD_LPI_CX] = &sm6115_vdd_lpi_cx,
- [SM6115_VDD_LPI_MX] = &sm6115_vdd_lpi_mx,
+ [SM6115_VDDCX] = &cx_rwcx0_lvl,
+ [SM6115_VDDCX_AO] = &cx_rwcx0_lvl_ao,
+ [SM6115_VDDCX_VFL] = &cx_rwcx0_vfl,
+ [SM6115_VDDMX] = &mx_rwmx0_lvl,
+ [SM6115_VDDMX_AO] = &mx_rwmx0_lvl_ao,
+ [SM6115_VDDMX_VFL] = &mx_rwmx0_vfl,
+ [SM6115_VDD_LPI_CX] = &lpi_cx_rwlc0_lvl,
+ [SM6115_VDD_LPI_MX] = &lpi_mx_rwlm0_lvl,
};
static const struct rpmpd_desc sm6115_desc = {
@@ -412,20 +708,13 @@ static const struct rpmpd_desc sm6115_desc = {
.max_state = RPM_SMD_LEVEL_TURBO_NO_CPR,
};
-/* sm6125 RPM Power domains */
-DEFINE_RPMPD_PAIR(sm6125, vddcx, vddcx_ao, RWCX, LEVEL, 0);
-DEFINE_RPMPD_VFL(sm6125, vddcx_vfl, RWCX, 0);
-
-DEFINE_RPMPD_PAIR(sm6125, vddmx, vddmx_ao, RWMX, LEVEL, 0);
-DEFINE_RPMPD_VFL(sm6125, vddmx_vfl, RWMX, 0);
-
static struct rpmpd *sm6125_rpmpds[] = {
- [SM6125_VDDCX] = &sm6125_vddcx,
- [SM6125_VDDCX_AO] = &sm6125_vddcx_ao,
- [SM6125_VDDCX_VFL] = &sm6125_vddcx_vfl,
- [SM6125_VDDMX] = &sm6125_vddmx,
- [SM6125_VDDMX_AO] = &sm6125_vddmx_ao,
- [SM6125_VDDMX_VFL] = &sm6125_vddmx_vfl,
+ [SM6125_VDDCX] = &cx_rwcx0_lvl,
+ [SM6125_VDDCX_AO] = &cx_rwcx0_lvl_ao,
+ [SM6125_VDDCX_VFL] = &cx_rwcx0_vfl,
+ [SM6125_VDDMX] = &mx_rwmx0_lvl,
+ [SM6125_VDDMX_AO] = &mx_rwmx0_lvl_ao,
+ [SM6125_VDDMX_VFL] = &mx_rwmx0_vfl,
};
static const struct rpmpd_desc sm6125_desc = {
@@ -434,18 +723,17 @@ static const struct rpmpd_desc sm6125_desc = {
.max_state = RPM_SMD_LEVEL_BINNING,
};
-DEFINE_RPMPD_PAIR(sm6375, vddgx, vddgx_ao, RWGX, LEVEL, 0);
static struct rpmpd *sm6375_rpmpds[] = {
- [SM6375_VDDCX] = &sm6125_vddcx,
- [SM6375_VDDCX_AO] = &sm6125_vddcx_ao,
- [SM6375_VDDCX_VFL] = &sm6125_vddcx_vfl,
- [SM6375_VDDMX] = &sm6125_vddmx,
- [SM6375_VDDMX_AO] = &sm6125_vddmx_ao,
- [SM6375_VDDMX_VFL] = &sm6125_vddmx_vfl,
- [SM6375_VDDGX] = &sm6375_vddgx,
- [SM6375_VDDGX_AO] = &sm6375_vddgx_ao,
- [SM6375_VDD_LPI_CX] = &sm6115_vdd_lpi_cx,
- [SM6375_VDD_LPI_MX] = &sm6115_vdd_lpi_mx,
+ [SM6375_VDDCX] = &cx_rwcx0_lvl,
+ [SM6375_VDDCX_AO] = &cx_rwcx0_lvl_ao,
+ [SM6375_VDDCX_VFL] = &cx_rwcx0_vfl,
+ [SM6375_VDDMX] = &mx_rwmx0_lvl,
+ [SM6375_VDDMX_AO] = &mx_rwmx0_lvl_ao,
+ [SM6375_VDDMX_VFL] = &mx_rwmx0_vfl,
+ [SM6375_VDDGX] = &gx_rwgx0_lvl,
+ [SM6375_VDDGX_AO] = &gx_rwgx0_lvl_ao,
+ [SM6375_VDD_LPI_CX] = &lpi_cx_rwlc0_lvl,
+ [SM6375_VDD_LPI_MX] = &lpi_mx_rwlm0_lvl,
};
static const struct rpmpd_desc sm6375_desc = {
@@ -455,14 +743,14 @@ static const struct rpmpd_desc sm6375_desc = {
};
static struct rpmpd *qcm2290_rpmpds[] = {
- [QCM2290_VDDCX] = &sm6115_vddcx,
- [QCM2290_VDDCX_AO] = &sm6115_vddcx_ao,
- [QCM2290_VDDCX_VFL] = &sm6115_vddcx_vfl,
- [QCM2290_VDDMX] = &sm6115_vddmx,
- [QCM2290_VDDMX_AO] = &sm6115_vddmx_ao,
- [QCM2290_VDDMX_VFL] = &sm6115_vddmx_vfl,
- [QCM2290_VDD_LPI_CX] = &sm6115_vdd_lpi_cx,
- [QCM2290_VDD_LPI_MX] = &sm6115_vdd_lpi_mx,
+ [QCM2290_VDDCX] = &cx_rwcx0_lvl,
+ [QCM2290_VDDCX_AO] = &cx_rwcx0_lvl_ao,
+ [QCM2290_VDDCX_VFL] = &cx_rwcx0_vfl,
+ [QCM2290_VDDMX] = &mx_rwmx0_lvl,
+ [QCM2290_VDDMX_AO] = &mx_rwmx0_lvl_ao,
+ [QCM2290_VDDMX_VFL] = &mx_rwmx0_vfl,
+ [QCM2290_VDD_LPI_CX] = &lpi_cx_rwlc0_lvl,
+ [QCM2290_VDD_LPI_MX] = &lpi_mx_rwlm0_lvl,
};
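The per-SoC tables above now index shared, resource-named domain objects (cx_rwcx0_lvl, mx_rwmx0_lvl, lpi_cx_rwlc0_lvl and friends) instead of each SoC carrying its own DEFINE_RPMPD_*-generated copies. A minimal sketch of that sharing pattern, using hypothetical demo_* names rather than the driver's real struct rpmpd:

struct demo_domain {
	const char *res_type;	/* RPM resource type, e.g. "rwcx" */
	int res_id;		/* RPM resource id, e.g. 0 */
};

/* One shared definition... */
static struct demo_domain demo_cx_rwcx0 = {
	.res_type = "rwcx",
	.res_id = 0,
};

/* ...referenced from several per-SoC index tables. */
static struct demo_domain *demo_soc_a_domains[] = {
	[0] = &demo_cx_rwcx0,
};

static struct demo_domain *demo_soc_b_domains[] = {
	[0] = &demo_cx_rwcx0,
};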
static const struct rpmpd_desc qcm2290_desc = {
@@ -673,6 +961,15 @@ static int rpmpd_probe(struct platform_device *pdev)
data->domains[i] = &rpmpds[i]->pd;
}
+ /* Add subdomains */
+ for (i = 0; i < num; i++) {
+ if (!rpmpds[i])
+ continue;
+
+ if (rpmpds[i]->parent)
+ pm_genpd_add_subdomain(rpmpds[i]->parent, &rpmpds[i]->pd);
+ }
+
return of_genpd_add_provider_onecell(pdev->dev.of_node, data);
}
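The probe hunk above wires up optional parent/child links between power domains: any domain whose parent pointer is set gets registered as a subdomain of that parent through pm_genpd_add_subdomain(). A minimal sketch of the same pattern, using a hypothetical demo_pd structure instead of the driver's struct rpmpd and, unlike the hunk above, checking the return value:

#include <linux/pm_domain.h>

struct demo_pd {
	struct generic_pm_domain pd;
	struct generic_pm_domain *parent;	/* NULL when there is no parent */
};

static int demo_link_subdomains(struct demo_pd **pds, unsigned int num)
{
	unsigned int i;
	int ret;

	for (i = 0; i < num; i++) {
		if (!pds[i] || !pds[i]->parent)
			continue;

		/* Make pds[i]->pd a child of its parent genpd */
		ret = pm_genpd_add_subdomain(pds[i]->parent, &pds[i]->pd);
		if (ret)
			return ret;
	}

	return 0;
}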
diff --git a/drivers/soc/qcom/smd-rpm.c b/drivers/soc/qcom/smd-rpm.c
index 523627d5d398..0c1aa809cc4e 100644
--- a/drivers/soc/qcom/smd-rpm.c
+++ b/drivers/soc/qcom/smd-rpm.c
@@ -113,7 +113,7 @@ int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm,
if (WARN_ON(size >= 256))
return -EINVAL;
- pkt = kmalloc(size, GFP_KERNEL);
+ pkt = kmalloc(size, GFP_ATOMIC);
if (!pkt)
return -ENOMEM;
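The kmalloc() flag change above presumably lets callers issue RPM writes from non-sleepable context: GFP_ATOMIC never blocks, whereas GFP_KERNEL may sleep to reclaim memory. Since atomic allocations fail more readily, the existing -ENOMEM check remains important. A hedged illustration of the general rule (hypothetical demo_* names, not this driver's code):

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static void *demo_alloc_under_lock(spinlock_t *lock, size_t size)
{
	unsigned long flags;
	void *buf;

	spin_lock_irqsave(lock, flags);
	/*
	 * Sleeping is forbidden while the spinlock is held, so GFP_KERNEL
	 * would be a bug here; GFP_ATOMIC is the non-sleeping alternative.
	 */
	buf = kmalloc(size, GFP_ATOMIC);
	spin_unlock_irqrestore(lock, flags);

	return buf;
}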
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
index 4f163d62942c..6be7ea93c78c 100644
--- a/drivers/soc/qcom/smem.c
+++ b/drivers/soc/qcom/smem.c
@@ -85,7 +85,7 @@
#define SMEM_GLOBAL_HOST 0xfffe
/* Max number of processors/hosts in a system */
-#define SMEM_HOST_COUNT 15
+#define SMEM_HOST_COUNT 20
/**
* struct smem_proc_comm - proc_comm communication struct (legacy)
@@ -1045,7 +1045,7 @@ static int qcom_smem_probe(struct platform_device *pdev)
int i;
num_regions = 1;
- if (of_find_property(pdev->dev.of_node, "qcom,rpm-msg-ram", NULL))
+ if (of_property_present(pdev->dev.of_node, "qcom,rpm-msg-ram"))
num_regions++;
array_size = num_regions * sizeof(struct smem_region);
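of_property_present() returns a bool and makes the intent (a pure existence check) clearer than comparing of_find_property(..., NULL) against NULL. A minimal sketch reusing the property name from the hunk above:

#include <linux/of.h>

static bool demo_has_rpm_msg_ram(struct device_node *np)
{
	/* True when the node carries a "qcom,rpm-msg-ram" property */
	return of_property_present(np, "qcom,rpm-msg-ram");
}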
diff --git a/drivers/soc/qcom/smsm.c b/drivers/soc/qcom/smsm.c
index 3e8994d6110e..c58cfff64856 100644
--- a/drivers/soc/qcom/smsm.c
+++ b/drivers/soc/qcom/smsm.c
@@ -452,11 +452,10 @@ static int smsm_get_size_info(struct qcom_smsm *smsm)
} *info;
info = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_SIZE_INFO, &size);
- if (IS_ERR(info) && PTR_ERR(info) != -ENOENT) {
- if (PTR_ERR(info) != -EPROBE_DEFER)
- dev_err(smsm->dev, "unable to retrieve smsm size info\n");
- return PTR_ERR(info);
- } else if (IS_ERR(info) || size != sizeof(*info)) {
+ if (IS_ERR(info) && PTR_ERR(info) != -ENOENT)
+ return dev_err_probe(smsm->dev, PTR_ERR(info),
+ "unable to retrieve smsm size info\n");
+ else if (IS_ERR(info) || size != sizeof(*info)) {
dev_warn(smsm->dev, "no smsm size info, using defaults\n");
smsm->num_entries = SMSM_DEFAULT_NUM_ENTRIES;
smsm->num_hosts = SMSM_DEFAULT_NUM_HOSTS;
@@ -510,7 +509,7 @@ static int qcom_smsm_probe(struct platform_device *pdev)
return -ENOMEM;
for_each_child_of_node(pdev->dev.of_node, local_node) {
- if (of_find_property(local_node, "#qcom,smem-state-cells", NULL))
+ if (of_property_present(local_node, "#qcom,smem-state-cells"))
break;
}
if (!local_node) {
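dev_err_probe() folds the open-coded -EPROBE_DEFER handling into a single call: it returns the error code, prints the message only for real errors, and records a deferral reason instead of logging when the error is -EPROBE_DEFER. A minimal sketch of the pattern (demo_* names are hypothetical):

#include <linux/device.h>
#include <linux/err.h>

static int demo_check_lookup(struct device *dev, void *obj)
{
	if (IS_ERR(obj))
		return dev_err_probe(dev, PTR_ERR(obj),
				     "unable to retrieve smsm size info\n");

	return 0;
}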
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index e9012ca1a87b..c2e4a57dd666 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -109,15 +109,20 @@ static const char *const pmic_models[] = {
[32] = "PM8150B",
[33] = "PMK8002",
[36] = "PM8009",
+ [37] = "PMI632",
[38] = "PM8150C",
+ [40] = "PM6150",
[41] = "SMB2351",
+ [44] = "PM8008",
[45] = "PM6125",
+ [46] = "PM7250B",
[47] = "PMK8350",
[48] = "PM8350",
[49] = "PM8350C",
[50] = "PM8350B",
[51] = "PMR735A",
[52] = "PMR735B",
+ [55] = "PM2250",
[58] = "PM8450",
[65] = "PM8010",
};
@@ -405,6 +410,7 @@ static const struct soc_id soc_id[] = {
{ qcom_board_id(SA8155) },
{ qcom_board_id(SDA439) },
{ qcom_board_id(SDA429) },
+ { qcom_board_id(SM7150) },
{ qcom_board_id(IPQ8070) },
{ qcom_board_id(IPQ8071) },
{ qcom_board_id(QM215) },
@@ -426,6 +432,7 @@ static const struct soc_id soc_id[] = {
{ qcom_board_id(QCM2150) },
{ qcom_board_id(SDA429W) },
{ qcom_board_id(SM8350) },
+ { qcom_board_id(QCM2290) },
{ qcom_board_id(SM6115) },
{ qcom_board_id(SC8280XP) },
{ qcom_board_id(IPQ6005) },
@@ -441,7 +448,16 @@ static const struct soc_id soc_id[] = {
{ qcom_board_id(SC7280) },
{ qcom_board_id(SC7180P) },
{ qcom_board_id(SM6375) },
+ { qcom_board_id(IPQ9514) },
+ { qcom_board_id(IPQ9550) },
+ { qcom_board_id(IPQ9554) },
+ { qcom_board_id(IPQ9570) },
+ { qcom_board_id(IPQ9574) },
{ qcom_board_id(SM8550) },
+ { qcom_board_id(IPQ9510) },
+ { qcom_board_id(QRB4210) },
+ { qcom_board_id(QRB2210) },
+ { qcom_board_id(SA8775P) },
{ qcom_board_id(QRU1000) },
{ qcom_board_id(QDU1000) },
{ qcom_board_id(QDU1010) },