Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--  drivers/cpufreq/Kconfig                     |  12
-rw-r--r--  drivers/cpufreq/Kconfig.arm                 |  94
-rw-r--r--  drivers/cpufreq/Makefile                    |   9
-rw-r--r--  drivers/cpufreq/acpi-cpufreq.c              |  43
-rw-r--r--  drivers/cpufreq/amd-pstate-ut.c             |   2
-rw-r--r--  drivers/cpufreq/amd-pstate.c                | 706
-rw-r--r--  drivers/cpufreq/amd_freq_sensitivity.c      |   2
-rw-r--r--  drivers/cpufreq/apple-soc-cpufreq.c         | 353
-rw-r--r--  drivers/cpufreq/armada-37xx-cpufreq.c       |   2
-rw-r--r--  drivers/cpufreq/brcmstb-avs-cpufreq.c       |   5
-rw-r--r--  drivers/cpufreq/cppc_cpufreq.c              |  11
-rw-r--r--  drivers/cpufreq/cpufreq-dt-platdev.c        |   5
-rw-r--r--  drivers/cpufreq/cpufreq.c                   |  12
-rw-r--r--  drivers/cpufreq/cpufreq_stats.c             |  16
-rw-r--r--  drivers/cpufreq/davinci-cpufreq.c           |   4
-rw-r--r--  drivers/cpufreq/intel_pstate.c              |  24
-rw-r--r--  drivers/cpufreq/longhaul.c                  |   4
-rw-r--r--  drivers/cpufreq/loongson1-cpufreq.c         | 222
-rw-r--r--  drivers/cpufreq/mediatek-cpufreq-hw.c       |  19
-rw-r--r--  drivers/cpufreq/omap-cpufreq.c              |   4
-rw-r--r--  drivers/cpufreq/qcom-cpufreq-hw.c           | 234
-rw-r--r--  drivers/cpufreq/s3c2410-cpufreq.c           | 155
-rw-r--r--  drivers/cpufreq/s3c2412-cpufreq.c           | 240
-rw-r--r--  drivers/cpufreq/s3c2416-cpufreq.c           | 492
-rw-r--r--  drivers/cpufreq/s3c2440-cpufreq.c           | 321
-rw-r--r--  drivers/cpufreq/s3c24xx-cpufreq-debugfs.c   | 163
-rw-r--r--  drivers/cpufreq/s3c24xx-cpufreq.c           | 648
-rw-r--r--  drivers/cpufreq/sa1100-cpufreq.c            | 206
-rw-r--r--  drivers/cpufreq/sa1110-cpufreq.c            |   6
-rw-r--r--  drivers/cpufreq/spear-cpufreq.c             |   2
-rw-r--r--  drivers/cpufreq/tegra186-cpufreq.c          |  11
-rw-r--r--  drivers/cpufreq/tegra194-cpufreq.c          |   3
-rw-r--r--  drivers/cpufreq/ti-cpufreq.c                |  36
33 files changed, 1311 insertions, 2755 deletions
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 2a84fc63371e..2c839bd2b051 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -3,7 +3,6 @@ menu "CPU Frequency scaling"
config CPU_FREQ
bool "CPU Frequency scaling"
- select SRCU
help
CPU Frequency scaling allows you to change the clock speed of
CPUs on the fly. This is a nice method to save power, because
@@ -37,7 +36,7 @@ config CPU_FREQ_STAT
choice
prompt "Default CPUFreq governor"
- default CPU_FREQ_DEFAULT_GOV_USERSPACE if ARM_SA1100_CPUFREQ || ARM_SA1110_CPUFREQ
+ default CPU_FREQ_DEFAULT_GOV_USERSPACE if ARM_SA1110_CPUFREQ
default CPU_FREQ_DEFAULT_GOV_SCHEDUTIL if ARM64 || ARM
default CPU_FREQ_DEFAULT_GOV_SCHEDUTIL if X86_INTEL_PSTATE && SMP
default CPU_FREQ_DEFAULT_GOV_PERFORMANCE
@@ -271,15 +270,6 @@ config LOONGSON2_CPUFREQ
Loongson2F and its successors support this feature.
If in doubt, say N.
-
-config LOONGSON1_CPUFREQ
- tristate "Loongson1 CPUFreq Driver"
- depends on LOONGSON1_LS1B
- help
- This option adds a CPUFreq driver for loongson1 processors which
- support software configurable cpu frequency.
-
- If in doubt, say N.
endif
if SPARC64
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 82e5de1f6f8c..97acaa2136fd 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -41,6 +41,15 @@ config ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM
To compile this driver as a module, choose M here: the
module will be called sun50i-cpufreq-nvmem.
+config ARM_APPLE_SOC_CPUFREQ
+ tristate "Apple Silicon SoC CPUFreq support"
+ depends on ARCH_APPLE || (COMPILE_TEST && 64BIT)
+ select PM_OPP
+ default ARCH_APPLE
+ help
+ This adds the CPUFreq driver for Apple Silicon machines
+ (e.g. Apple M1).
+
config ARM_ARMADA_37XX_CPUFREQ
tristate "Armada 37xx CPUFreq support"
depends on ARCH_MVEBU && CPUFREQ_DT
@@ -180,84 +189,6 @@ config ARM_RASPBERRYPI_CPUFREQ
If in doubt, say N.
-config ARM_S3C_CPUFREQ
- bool
- help
- Internal configuration node for common cpufreq on Samsung SoC
-
-config ARM_S3C24XX_CPUFREQ
- bool "CPUfreq driver for Samsung S3C24XX series CPUs (EXPERIMENTAL)"
- depends on ARCH_S3C24XX
- select ARM_S3C_CPUFREQ
- help
- This enables the CPUfreq driver for the Samsung S3C24XX family
- of CPUs.
-
- For details, take a look at <file:Documentation/cpu-freq>.
-
- If in doubt, say N.
-
-config ARM_S3C24XX_CPUFREQ_DEBUG
- bool "Debug CPUfreq Samsung driver core"
- depends on ARM_S3C24XX_CPUFREQ
- help
- Enable s3c_freq_dbg for the Samsung S3C CPUfreq core
-
-config ARM_S3C24XX_CPUFREQ_IODEBUG
- bool "Debug CPUfreq Samsung driver IO timing"
- depends on ARM_S3C24XX_CPUFREQ
- help
- Enable s3c_freq_iodbg for the Samsung S3C CPUfreq core
-
-config ARM_S3C24XX_CPUFREQ_DEBUGFS
- bool "Export debugfs for CPUFreq"
- depends on ARM_S3C24XX_CPUFREQ && DEBUG_FS
- help
- Export status information via debugfs.
-
-config ARM_S3C2410_CPUFREQ
- bool
- depends on ARM_S3C24XX_CPUFREQ && CPU_S3C2410
- help
- CPU Frequency scaling support for S3C2410
-
-config ARM_S3C2412_CPUFREQ
- bool
- depends on ARM_S3C24XX_CPUFREQ && CPU_S3C2412
- default y
- select S3C2412_IOTIMING
- help
- CPU Frequency scaling support for S3C2412 and S3C2413 SoC CPUs.
-
-config ARM_S3C2416_CPUFREQ
- bool "S3C2416 CPU Frequency scaling support"
- depends on CPU_S3C2416
- help
- This adds the CPUFreq driver for the Samsung S3C2416 and
- S3C2450 SoC. The S3C2416 supports changing the rate of the
- armdiv clock source and also entering a so called dynamic
- voltage scaling mode in which it is possible to reduce the
- core voltage of the CPU.
-
- If in doubt, say N.
-
-config ARM_S3C2416_CPUFREQ_VCORESCALE
- bool "Allow voltage scaling for S3C2416 arm core"
- depends on ARM_S3C2416_CPUFREQ && REGULATOR
- help
- Enable CPU voltage scaling when entering the dvs mode.
- It uses information gathered through existing hardware and
- tests but not documented in any datasheet.
-
- If in doubt, say N.
-
-config ARM_S3C2440_CPUFREQ
- bool "S3C2440/S3C2442 CPU Frequency scaling support"
- depends on ARM_S3C24XX_CPUFREQ && (CPU_S3C2440 || CPU_S3C2442)
- default y
- help
- CPU Frequency scaling support for S3C2440 and S3C2442 SoC CPUs.
-
config ARM_S3C64XX_CPUFREQ
bool "Samsung S3C64XX"
depends on CPU_S3C6410
@@ -277,9 +208,6 @@ config ARM_S5PV210_CPUFREQ
If in doubt, say N.
-config ARM_SA1100_CPUFREQ
- bool
-
config ARM_SA1110_CPUFREQ
bool
@@ -340,8 +268,8 @@ config ARM_TEGRA194_CPUFREQ
config ARM_TI_CPUFREQ
bool "Texas Instruments CPUFreq support"
- depends on ARCH_OMAP2PLUS
- default ARCH_OMAP2PLUS
+ depends on ARCH_OMAP2PLUS || ARCH_K3
+ default y
help
This driver enables valid OPPs on the running platform based on
values contained within the SoC in use. Enable this in order to
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 49b98c62c5af..ef8510774913 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -52,6 +52,7 @@ obj-$(CONFIG_X86_AMD_FREQ_SENSITIVITY) += amd_freq_sensitivity.o
##################################################################################
# ARM SoC drivers
+obj-$(CONFIG_ARM_APPLE_SOC_CPUFREQ) += apple-soc-cpufreq.o
obj-$(CONFIG_ARM_ARMADA_37XX_CPUFREQ) += armada-37xx-cpufreq.o
obj-$(CONFIG_ARM_ARMADA_8K_CPUFREQ) += armada-8k-cpufreq.o
obj-$(CONFIG_ARM_BRCMSTB_AVS_CPUFREQ) += brcmstb-avs-cpufreq.o
@@ -70,15 +71,8 @@ obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o
obj-$(CONFIG_ARM_QCOM_CPUFREQ_HW) += qcom-cpufreq-hw.o
obj-$(CONFIG_ARM_QCOM_CPUFREQ_NVMEM) += qcom-cpufreq-nvmem.o
obj-$(CONFIG_ARM_RASPBERRYPI_CPUFREQ) += raspberrypi-cpufreq.o
-obj-$(CONFIG_ARM_S3C2410_CPUFREQ) += s3c2410-cpufreq.o
-obj-$(CONFIG_ARM_S3C2412_CPUFREQ) += s3c2412-cpufreq.o
-obj-$(CONFIG_ARM_S3C2416_CPUFREQ) += s3c2416-cpufreq.o
-obj-$(CONFIG_ARM_S3C2440_CPUFREQ) += s3c2440-cpufreq.o
obj-$(CONFIG_ARM_S3C64XX_CPUFREQ) += s3c64xx-cpufreq.o
-obj-$(CONFIG_ARM_S3C24XX_CPUFREQ) += s3c24xx-cpufreq.o
-obj-$(CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS) += s3c24xx-cpufreq-debugfs.o
obj-$(CONFIG_ARM_S5PV210_CPUFREQ) += s5pv210-cpufreq.o
-obj-$(CONFIG_ARM_SA1100_CPUFREQ) += sa1100-cpufreq.o
obj-$(CONFIG_ARM_SA1110_CPUFREQ) += sa1110-cpufreq.o
obj-$(CONFIG_ARM_SCMI_CPUFREQ) += scmi-cpufreq.o
obj-$(CONFIG_ARM_SCPI_CPUFREQ) += scpi-cpufreq.o
@@ -110,7 +104,6 @@ obj-$(CONFIG_POWERNV_CPUFREQ) += powernv-cpufreq.o
obj-$(CONFIG_BMIPS_CPUFREQ) += bmips-cpufreq.o
obj-$(CONFIG_IA64_ACPI_CPUFREQ) += ia64-acpi-cpufreq.o
obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o
-obj-$(CONFIG_LOONGSON1_CPUFREQ) += loongson1-cpufreq.o
obj-$(CONFIG_SH_CPU_FREQ) += sh-cpufreq.o
obj-$(CONFIG_SPARC_US2E_CPUFREQ) += sparc-us2e-cpufreq.o
obj-$(CONFIG_SPARC_US3_CPUFREQ) += sparc-us3-cpufreq.o
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 1bb2b90ebb21..78adfb2ffff6 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -19,6 +19,7 @@
#include <linux/compiler.h>
#include <linux/dmi.h>
#include <linux/slab.h>
+#include <linux/string_helpers.h>
#include <linux/acpi.h>
#include <linux/io.h>
@@ -135,8 +136,8 @@ static int set_boost(struct cpufreq_policy *policy, int val)
{
on_each_cpu_mask(policy->cpus, boost_set_msr_each,
(void *)(long)val, 1);
- pr_debug("CPU %*pbl: Core Boosting %sabled.\n",
- cpumask_pr_args(policy->cpus), val ? "en" : "dis");
+ pr_debug("CPU %*pbl: Core Boosting %s.\n",
+ cpumask_pr_args(policy->cpus), str_enabled_disabled(val));
return 0;
}
@@ -535,15 +536,6 @@ static void free_acpi_perf_data(void)
free_percpu(acpi_perf_data);
}
-static int cpufreq_boost_online(unsigned int cpu)
-{
- /*
- * On the CPU_UP path we simply keep the boost-disable flag
- * in sync with the current global state.
- */
- return boost_set_msr(acpi_cpufreq_driver.boost_enabled);
-}
-
static int cpufreq_boost_down_prep(unsigned int cpu)
{
/*
@@ -897,6 +889,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
if (perf->states[0].core_frequency * 1000 != freq_table[0].frequency)
pr_warn(FW_WARN "P-state 0 is not max freq\n");
+ if (acpi_cpufreq_driver.set_boost)
+ set_boost(policy, acpi_cpufreq_driver.boost_enabled);
+
return result;
err_unreg:
@@ -916,6 +911,7 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
pr_debug("%s\n", __func__);
+ cpufreq_boost_down_prep(policy->cpu);
policy->fast_switch_possible = false;
policy->driver_data = NULL;
acpi_processor_unregister_performance(data->acpi_perf_cpu);
@@ -958,12 +954,8 @@ static struct cpufreq_driver acpi_cpufreq_driver = {
.attr = acpi_cpufreq_attr,
};
-static enum cpuhp_state acpi_cpufreq_online;
-
static void __init acpi_cpufreq_boost_init(void)
{
- int ret;
-
if (!(boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA))) {
pr_debug("Boost capabilities not present in the processor\n");
return;
@@ -971,24 +963,6 @@ static void __init acpi_cpufreq_boost_init(void)
acpi_cpufreq_driver.set_boost = set_boost;
acpi_cpufreq_driver.boost_enabled = boost_state(0);
-
- /*
- * This calls the online callback on all online cpu and forces all
- * MSRs to the same value.
- */
- ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "cpufreq/acpi:online",
- cpufreq_boost_online, cpufreq_boost_down_prep);
- if (ret < 0) {
- pr_err("acpi_cpufreq: failed to register hotplug callbacks\n");
- return;
- }
- acpi_cpufreq_online = ret;
-}
-
-static void acpi_cpufreq_boost_exit(void)
-{
- if (acpi_cpufreq_online > 0)
- cpuhp_remove_state_nocalls(acpi_cpufreq_online);
}
static int __init acpi_cpufreq_init(void)
@@ -1032,7 +1006,6 @@ static int __init acpi_cpufreq_init(void)
ret = cpufreq_register_driver(&acpi_cpufreq_driver);
if (ret) {
free_acpi_perf_data();
- acpi_cpufreq_boost_exit();
}
return ret;
}
@@ -1041,8 +1014,6 @@ static void __exit acpi_cpufreq_exit(void)
{
pr_debug("%s\n", __func__);
- acpi_cpufreq_boost_exit();
-
cpufreq_unregister_driver(&acpi_cpufreq_driver);
free_acpi_perf_data();
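
Taken together, the acpi-cpufreq hunks move the boost-MSR synchronisation out of the dedicated CPU-hotplug callbacks and into the policy init/exit path. A minimal sketch of the resulting flow, assuming the helpers named in the hunks (not the literal upstream code):

	static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
	{
		/* ... existing policy setup ... */

		/* Propagate the current global boost state to this policy's CPUs. */
		if (acpi_cpufreq_driver.set_boost)
			set_boost(policy, acpi_cpufreq_driver.boost_enabled);

		return 0;
	}

	static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
	{
		/* Run the down-prep handler directly from the exit path,
		 * taking over from the removed CPUHP callback. */
		cpufreq_boost_down_prep(policy->cpu);

		/* ... existing teardown ... */
		return 0;
	}
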
diff --git a/drivers/cpufreq/amd-pstate-ut.c b/drivers/cpufreq/amd-pstate-ut.c
index e4a5b4d90f83..7f3fe2048981 100644
--- a/drivers/cpufreq/amd-pstate-ut.c
+++ b/drivers/cpufreq/amd-pstate-ut.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-1.0-or-later
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* AMD Processor P-state Frequency Driver Unit Test
*
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 204e39006dda..45c88894fd8e 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -59,8 +59,171 @@
* we disable it by default to go acpi-cpufreq on these processors and add a
* module parameter to be able to enable it manually for debugging.
*/
+static struct cpufreq_driver *current_pstate_driver;
static struct cpufreq_driver amd_pstate_driver;
-static int cppc_load __initdata;
+static struct cpufreq_driver amd_pstate_epp_driver;
+static int cppc_state = AMD_PSTATE_DISABLE;
+struct kobject *amd_pstate_kobj;
+
+/*
+ * AMD Energy Preference Performance (EPP)
+ * The EPP is used in the CCLK DPM controller to drive
+ * the frequency that a core is going to operate during
+ * short periods of activity. EPP values will be utilized for
+ * different OS profiles (balanced, performance, power savings)
+ * display strings corresponding to EPP index in the
+ * energy_perf_strings[]
+ * index String
+ *-------------------------------------
+ * 0 default
+ * 1 performance
+ * 2 balance_performance
+ * 3 balance_power
+ * 4 power
+ */
+enum energy_perf_value_index {
+ EPP_INDEX_DEFAULT = 0,
+ EPP_INDEX_PERFORMANCE,
+ EPP_INDEX_BALANCE_PERFORMANCE,
+ EPP_INDEX_BALANCE_POWERSAVE,
+ EPP_INDEX_POWERSAVE,
+};
+
+static const char * const energy_perf_strings[] = {
+ [EPP_INDEX_DEFAULT] = "default",
+ [EPP_INDEX_PERFORMANCE] = "performance",
+ [EPP_INDEX_BALANCE_PERFORMANCE] = "balance_performance",
+ [EPP_INDEX_BALANCE_POWERSAVE] = "balance_power",
+ [EPP_INDEX_POWERSAVE] = "power",
+ NULL
+};
+
+static unsigned int epp_values[] = {
+ [EPP_INDEX_DEFAULT] = 0,
+ [EPP_INDEX_PERFORMANCE] = AMD_CPPC_EPP_PERFORMANCE,
+ [EPP_INDEX_BALANCE_PERFORMANCE] = AMD_CPPC_EPP_BALANCE_PERFORMANCE,
+ [EPP_INDEX_BALANCE_POWERSAVE] = AMD_CPPC_EPP_BALANCE_POWERSAVE,
+ [EPP_INDEX_POWERSAVE] = AMD_CPPC_EPP_POWERSAVE,
+ };
+
+static inline int get_mode_idx_from_str(const char *str, size_t size)
+{
+ int i;
+
+ for (i=0; i < AMD_PSTATE_MAX; i++) {
+ if (!strncmp(str, amd_pstate_mode_string[i], size))
+ return i;
+ }
+ return -EINVAL;
+}
+
+static DEFINE_MUTEX(amd_pstate_limits_lock);
+static DEFINE_MUTEX(amd_pstate_driver_lock);
+
+static s16 amd_pstate_get_epp(struct amd_cpudata *cpudata, u64 cppc_req_cached)
+{
+ u64 epp;
+ int ret;
+
+ if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ if (!cppc_req_cached) {
+ epp = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
+ &cppc_req_cached);
+ if (epp)
+ return epp;
+ }
+ epp = (cppc_req_cached >> 24) & 0xFF;
+ } else {
+ ret = cppc_get_epp_perf(cpudata->cpu, &epp);
+ if (ret < 0) {
+ pr_debug("Could not retrieve energy perf value (%d)\n", ret);
+ return -EIO;
+ }
+ }
+
+ return (s16)(epp & 0xff);
+}
+
+static int amd_pstate_get_energy_pref_index(struct amd_cpudata *cpudata)
+{
+ s16 epp;
+ int index = -EINVAL;
+
+ epp = amd_pstate_get_epp(cpudata, 0);
+ if (epp < 0)
+ return epp;
+
+ switch (epp) {
+ case AMD_CPPC_EPP_PERFORMANCE:
+ index = EPP_INDEX_PERFORMANCE;
+ break;
+ case AMD_CPPC_EPP_BALANCE_PERFORMANCE:
+ index = EPP_INDEX_BALANCE_PERFORMANCE;
+ break;
+ case AMD_CPPC_EPP_BALANCE_POWERSAVE:
+ index = EPP_INDEX_BALANCE_POWERSAVE;
+ break;
+ case AMD_CPPC_EPP_POWERSAVE:
+ index = EPP_INDEX_POWERSAVE;
+ break;
+ default:
+ break;
+ }
+
+ return index;
+}
+
+static int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp)
+{
+ int ret;
+ struct cppc_perf_ctrls perf_ctrls;
+
+ if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ u64 value = READ_ONCE(cpudata->cppc_req_cached);
+
+ value &= ~GENMASK_ULL(31, 24);
+ value |= (u64)epp << 24;
+ WRITE_ONCE(cpudata->cppc_req_cached, value);
+
+ ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+ if (!ret)
+ cpudata->epp_cached = epp;
+ } else {
+ perf_ctrls.energy_perf = epp;
+ ret = cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
+ if (ret) {
+ pr_debug("failed to set energy perf value (%d)\n", ret);
+ return ret;
+ }
+ cpudata->epp_cached = epp;
+ }
+
+ return ret;
+}
+
+static int amd_pstate_set_energy_pref_index(struct amd_cpudata *cpudata,
+ int pref_index)
+{
+ int epp = -EINVAL;
+ int ret;
+
+ if (!pref_index) {
+ pr_debug("EPP pref_index is invalid\n");
+ return -EINVAL;
+ }
+
+ if (epp == -EINVAL)
+ epp = epp_values[pref_index];
+
+ if (epp > 0 && cpudata->policy == CPUFREQ_POLICY_PERFORMANCE) {
+ pr_debug("EPP cannot be set under performance policy\n");
+ return -EBUSY;
+ }
+
+ ret = amd_pstate_set_epp(cpudata, epp);
+
+ return ret;
+}
static inline int pstate_enable(bool enable)
{
@@ -70,11 +233,21 @@ static inline int pstate_enable(bool enable)
static int cppc_enable(bool enable)
{
int cpu, ret = 0;
+ struct cppc_perf_ctrls perf_ctrls;
for_each_present_cpu(cpu) {
ret = cppc_set_enable(cpu, enable);
if (ret)
return ret;
+
+ /* Enable autonomous mode for EPP */
+ if (cppc_state == AMD_PSTATE_ACTIVE) {
+ /* Set desired perf as zero to allow EPP firmware control */
+ perf_ctrls.desired_perf = 0;
+ ret = cppc_set_perf(cpu, &perf_ctrls);
+ if (ret)
+ return ret;
+ }
}
return ret;
@@ -307,6 +480,7 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
max_perf = min_perf;
amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true);
+ cpufreq_cpu_put(policy);
}
static int amd_get_min_freq(struct amd_cpudata *cpudata)
@@ -417,7 +591,7 @@ static void amd_pstate_boost_init(struct amd_cpudata *cpudata)
return;
cpudata->boost_supported = true;
- amd_pstate_driver.boost_enabled = true;
+ current_pstate_driver->boost_enabled = true;
}
static void amd_perf_ctl_reset(unsigned int cpu)
@@ -500,6 +674,8 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
policy->driver_data = cpudata;
amd_pstate_boost_init(cpudata);
+ if (!current_pstate_driver->adjust_perf)
+ current_pstate_driver->adjust_perf = amd_pstate_adjust_perf;
return 0;
@@ -560,7 +736,7 @@ static ssize_t show_amd_pstate_max_freq(struct cpufreq_policy *policy,
if (max_freq < 0)
return max_freq;
- return sprintf(&buf[0], "%u\n", max_freq);
+ return sysfs_emit(buf, "%u\n", max_freq);
}
static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *policy,
@@ -573,7 +749,7 @@ static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *poli
if (freq < 0)
return freq;
- return sprintf(&buf[0], "%u\n", freq);
+ return sysfs_emit(buf, "%u\n", freq);
}
/*
@@ -588,13 +764,151 @@ static ssize_t show_amd_pstate_highest_perf(struct cpufreq_policy *policy,
perf = READ_ONCE(cpudata->highest_perf);
- return sprintf(&buf[0], "%u\n", perf);
+ return sysfs_emit(buf, "%u\n", perf);
+}
+
+static ssize_t show_energy_performance_available_preferences(
+ struct cpufreq_policy *policy, char *buf)
+{
+ int i = 0;
+ int offset = 0;
+
+ while (energy_perf_strings[i] != NULL)
+ offset += sysfs_emit_at(buf, offset, "%s ", energy_perf_strings[i++]);
+
+ sysfs_emit_at(buf, offset, "\n");
+
+ return offset;
+}
+
+static ssize_t store_energy_performance_preference(
+ struct cpufreq_policy *policy, const char *buf, size_t count)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+ char str_preference[21];
+ ssize_t ret;
+
+ ret = sscanf(buf, "%20s", str_preference);
+ if (ret != 1)
+ return -EINVAL;
+
+ ret = match_string(energy_perf_strings, -1, str_preference);
+ if (ret < 0)
+ return -EINVAL;
+
+ mutex_lock(&amd_pstate_limits_lock);
+ ret = amd_pstate_set_energy_pref_index(cpudata, ret);
+ mutex_unlock(&amd_pstate_limits_lock);
+
+ return ret ?: count;
+}
+
+static ssize_t show_energy_performance_preference(
+ struct cpufreq_policy *policy, char *buf)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+ int preference;
+
+ preference = amd_pstate_get_energy_pref_index(cpudata);
+ if (preference < 0)
+ return preference;
+
+ return sysfs_emit(buf, "%s\n", energy_perf_strings[preference]);
+}
+
+static ssize_t amd_pstate_show_status(char *buf)
+{
+ if (!current_pstate_driver)
+ return sysfs_emit(buf, "disable\n");
+
+ return sysfs_emit(buf, "%s\n", amd_pstate_mode_string[cppc_state]);
+}
+
+static void amd_pstate_driver_cleanup(void)
+{
+ current_pstate_driver = NULL;
+}
+
+static int amd_pstate_update_status(const char *buf, size_t size)
+{
+ int ret = 0;
+ int mode_idx;
+
+ if (size > 7 || size < 6)
+ return -EINVAL;
+ mode_idx = get_mode_idx_from_str(buf, size);
+
+ switch(mode_idx) {
+ case AMD_PSTATE_DISABLE:
+ if (!current_pstate_driver)
+ return -EINVAL;
+ if (cppc_state == AMD_PSTATE_ACTIVE)
+ return -EBUSY;
+ cpufreq_unregister_driver(current_pstate_driver);
+ amd_pstate_driver_cleanup();
+ break;
+ case AMD_PSTATE_PASSIVE:
+ if (current_pstate_driver) {
+ if (current_pstate_driver == &amd_pstate_driver)
+ return 0;
+ cpufreq_unregister_driver(current_pstate_driver);
+ cppc_state = AMD_PSTATE_PASSIVE;
+ current_pstate_driver = &amd_pstate_driver;
+ }
+
+ ret = cpufreq_register_driver(current_pstate_driver);
+ break;
+ case AMD_PSTATE_ACTIVE:
+ if (current_pstate_driver) {
+ if (current_pstate_driver == &amd_pstate_epp_driver)
+ return 0;
+ cpufreq_unregister_driver(current_pstate_driver);
+ current_pstate_driver = &amd_pstate_epp_driver;
+ cppc_state = AMD_PSTATE_ACTIVE;
+ }
+
+ ret = cpufreq_register_driver(current_pstate_driver);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static ssize_t show_status(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ ssize_t ret;
+
+ mutex_lock(&amd_pstate_driver_lock);
+ ret = amd_pstate_show_status(buf);
+ mutex_unlock(&amd_pstate_driver_lock);
+
+ return ret;
+}
+
+static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
+ const char *buf, size_t count)
+{
+ char *p = memchr(buf, '\n', count);
+ int ret;
+
+ mutex_lock(&amd_pstate_driver_lock);
+ ret = amd_pstate_update_status(buf, p ? p - buf : count);
+ mutex_unlock(&amd_pstate_driver_lock);
+
+ return ret < 0 ? ret : count;
}
cpufreq_freq_attr_ro(amd_pstate_max_freq);
cpufreq_freq_attr_ro(amd_pstate_lowest_nonlinear_freq);
cpufreq_freq_attr_ro(amd_pstate_highest_perf);
+cpufreq_freq_attr_rw(energy_performance_preference);
+cpufreq_freq_attr_ro(energy_performance_available_preferences);
+define_one_global_rw(status);
static struct freq_attr *amd_pstate_attr[] = {
&amd_pstate_max_freq,
@@ -603,6 +917,313 @@ static struct freq_attr *amd_pstate_attr[] = {
NULL,
};
+static struct freq_attr *amd_pstate_epp_attr[] = {
+ &amd_pstate_max_freq,
+ &amd_pstate_lowest_nonlinear_freq,
+ &amd_pstate_highest_perf,
+ &energy_performance_preference,
+ &energy_performance_available_preferences,
+ NULL,
+};
+
+static struct attribute *pstate_global_attributes[] = {
+ &status.attr,
+ NULL
+};
+
+static const struct attribute_group amd_pstate_global_attr_group = {
+ .attrs = pstate_global_attributes,
+};
+
+static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
+{
+ int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret;
+ struct amd_cpudata *cpudata;
+ struct device *dev;
+ u64 value;
+
+ /*
+ * Resetting PERF_CTL_MSR will put the CPU in P0 frequency,
+ * which is ideal for initialization process.
+ */
+ amd_perf_ctl_reset(policy->cpu);
+ dev = get_cpu_device(policy->cpu);
+ if (!dev)
+ return -ENODEV;
+
+ cpudata = kzalloc(sizeof(*cpudata), GFP_KERNEL);
+ if (!cpudata)
+ return -ENOMEM;
+
+ cpudata->cpu = policy->cpu;
+ cpudata->epp_policy = 0;
+
+ ret = amd_pstate_init_perf(cpudata);
+ if (ret)
+ goto free_cpudata1;
+
+ min_freq = amd_get_min_freq(cpudata);
+ max_freq = amd_get_max_freq(cpudata);
+ nominal_freq = amd_get_nominal_freq(cpudata);
+ lowest_nonlinear_freq = amd_get_lowest_nonlinear_freq(cpudata);
+ if (min_freq < 0 || max_freq < 0 || min_freq > max_freq) {
+ dev_err(dev, "min_freq(%d) or max_freq(%d) value is incorrect\n",
+ min_freq, max_freq);
+ ret = -EINVAL;
+ goto free_cpudata1;
+ }
+
+ policy->cpuinfo.min_freq = min_freq;
+ policy->cpuinfo.max_freq = max_freq;
+ /* It will be updated by governor */
+ policy->cur = policy->cpuinfo.min_freq;
+
+ /* Initial processor data capability frequencies */
+ cpudata->max_freq = max_freq;
+ cpudata->min_freq = min_freq;
+ cpudata->nominal_freq = nominal_freq;
+ cpudata->lowest_nonlinear_freq = lowest_nonlinear_freq;
+
+ policy->driver_data = cpudata;
+
+ cpudata->epp_cached = amd_pstate_get_epp(cpudata, 0);
+
+ policy->min = policy->cpuinfo.min_freq;
+ policy->max = policy->cpuinfo.max_freq;
+
+ /*
+ * Set the policy to powersave to provide a valid fallback value in case
+ * the default cpufreq governor is neither powersave nor performance.
+ */
+ policy->policy = CPUFREQ_POLICY_POWERSAVE;
+
+ if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ policy->fast_switch_possible = true;
+ ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
+ if (ret)
+ return ret;
+ WRITE_ONCE(cpudata->cppc_req_cached, value);
+
+ ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1, &value);
+ if (ret)
+ return ret;
+ WRITE_ONCE(cpudata->cppc_cap1_cached, value);
+ }
+ amd_pstate_boost_init(cpudata);
+
+ return 0;
+
+free_cpudata1:
+ kfree(cpudata);
+ return ret;
+}
+
+static int amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
+{
+ pr_debug("CPU %d exiting\n", policy->cpu);
+ policy->fast_switch_possible = false;
+ return 0;
+}
+
+static void amd_pstate_epp_init(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ struct amd_cpudata *cpudata = policy->driver_data;
+ u32 max_perf, min_perf;
+ u64 value;
+ s16 epp;
+
+ max_perf = READ_ONCE(cpudata->highest_perf);
+ min_perf = READ_ONCE(cpudata->lowest_perf);
+
+ value = READ_ONCE(cpudata->cppc_req_cached);
+
+ if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
+ min_perf = max_perf;
+
+ /* Initial min/max values for CPPC Performance Controls Register */
+ value &= ~AMD_CPPC_MIN_PERF(~0L);
+ value |= AMD_CPPC_MIN_PERF(min_perf);
+
+ value &= ~AMD_CPPC_MAX_PERF(~0L);
+ value |= AMD_CPPC_MAX_PERF(max_perf);
+
+ /* CPPC EPP feature require to set zero to the desire perf bit */
+ value &= ~AMD_CPPC_DES_PERF(~0L);
+ value |= AMD_CPPC_DES_PERF(0);
+
+ if (cpudata->epp_policy == cpudata->policy)
+ goto skip_epp;
+
+ cpudata->epp_policy = cpudata->policy;
+
+ /* Get BIOS pre-defined epp value */
+ epp = amd_pstate_get_epp(cpudata, value);
+ if (epp < 0) {
+ /**
+ * This return value can only be negative for shared_memory
+ * systems where EPP register read/write not supported.
+ */
+ goto skip_epp;
+ }
+
+ if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
+ epp = 0;
+
+ /* Set initial EPP value */
+ if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ value &= ~GENMASK_ULL(31, 24);
+ value |= (u64)epp << 24;
+ }
+
+ WRITE_ONCE(cpudata->cppc_req_cached, value);
+ amd_pstate_set_epp(cpudata, epp);
+skip_epp:
+ cpufreq_cpu_put(policy);
+}
+
+static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+
+ if (!policy->cpuinfo.max_freq)
+ return -ENODEV;
+
+ pr_debug("set_policy: cpuinfo.max %u policy->max %u\n",
+ policy->cpuinfo.max_freq, policy->max);
+
+ cpudata->policy = policy->policy;
+
+ amd_pstate_epp_init(policy->cpu);
+
+ return 0;
+}
+
+static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
+{
+ struct cppc_perf_ctrls perf_ctrls;
+ u64 value, max_perf;
+ int ret;
+
+ ret = amd_pstate_enable(true);
+ if (ret)
+ pr_err("failed to enable amd pstate during resume, return %d\n", ret);
+
+ value = READ_ONCE(cpudata->cppc_req_cached);
+ max_perf = READ_ONCE(cpudata->highest_perf);
+
+ if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+ } else {
+ perf_ctrls.max_perf = max_perf;
+ perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(cpudata->epp_cached);
+ cppc_set_perf(cpudata->cpu, &perf_ctrls);
+ }
+}
+
+static int amd_pstate_epp_cpu_online(struct cpufreq_policy *policy)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+
+ pr_debug("AMD CPU Core %d going online\n", cpudata->cpu);
+
+ if (cppc_state == AMD_PSTATE_ACTIVE) {
+ amd_pstate_epp_reenable(cpudata);
+ cpudata->suspended = false;
+ }
+
+ return 0;
+}
+
+static void amd_pstate_epp_offline(struct cpufreq_policy *policy)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+ struct cppc_perf_ctrls perf_ctrls;
+ int min_perf;
+ u64 value;
+
+ min_perf = READ_ONCE(cpudata->lowest_perf);
+ value = READ_ONCE(cpudata->cppc_req_cached);
+
+ mutex_lock(&amd_pstate_limits_lock);
+ if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ cpudata->epp_policy = CPUFREQ_POLICY_UNKNOWN;
+
+ /* Set max perf same as min perf */
+ value &= ~AMD_CPPC_MAX_PERF(~0L);
+ value |= AMD_CPPC_MAX_PERF(min_perf);
+ value &= ~AMD_CPPC_MIN_PERF(~0L);
+ value |= AMD_CPPC_MIN_PERF(min_perf);
+ wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+ } else {
+ perf_ctrls.desired_perf = 0;
+ perf_ctrls.max_perf = min_perf;
+ perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(HWP_EPP_BALANCE_POWERSAVE);
+ cppc_set_perf(cpudata->cpu, &perf_ctrls);
+ }
+ mutex_unlock(&amd_pstate_limits_lock);
+}
+
+static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+
+ pr_debug("AMD CPU Core %d going offline\n", cpudata->cpu);
+
+ if (cpudata->suspended)
+ return 0;
+
+ if (cppc_state == AMD_PSTATE_ACTIVE)
+ amd_pstate_epp_offline(policy);
+
+ return 0;
+}
+
+static int amd_pstate_epp_verify_policy(struct cpufreq_policy_data *policy)
+{
+ cpufreq_verify_within_cpu_limits(policy);
+ pr_debug("policy_max =%d, policy_min=%d\n", policy->max, policy->min);
+ return 0;
+}
+
+static int amd_pstate_epp_suspend(struct cpufreq_policy *policy)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+ int ret;
+
+ /* avoid suspending when EPP is not enabled */
+ if (cppc_state != AMD_PSTATE_ACTIVE)
+ return 0;
+
+ /* set this flag to avoid setting core offline*/
+ cpudata->suspended = true;
+
+ /* disable CPPC in lowlevel firmware */
+ ret = amd_pstate_enable(false);
+ if (ret)
+ pr_err("failed to suspend, return %d\n", ret);
+
+ return 0;
+}
+
+static int amd_pstate_epp_resume(struct cpufreq_policy *policy)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+
+ if (cpudata->suspended) {
+ mutex_lock(&amd_pstate_limits_lock);
+
+ /* enable amd pstate from suspend state*/
+ amd_pstate_epp_reenable(cpudata);
+
+ mutex_unlock(&amd_pstate_limits_lock);
+
+ cpudata->suspended = false;
+ }
+
+ return 0;
+}
+
static struct cpufreq_driver amd_pstate_driver = {
.flags = CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_UPDATE_LIMITS,
.verify = amd_pstate_verify,
@@ -616,6 +1237,20 @@ static struct cpufreq_driver amd_pstate_driver = {
.attr = amd_pstate_attr,
};
+static struct cpufreq_driver amd_pstate_epp_driver = {
+ .flags = CPUFREQ_CONST_LOOPS,
+ .verify = amd_pstate_epp_verify_policy,
+ .setpolicy = amd_pstate_epp_set_policy,
+ .init = amd_pstate_epp_cpu_init,
+ .exit = amd_pstate_epp_cpu_exit,
+ .offline = amd_pstate_epp_cpu_offline,
+ .online = amd_pstate_epp_cpu_online,
+ .suspend = amd_pstate_epp_suspend,
+ .resume = amd_pstate_epp_resume,
+ .name = "amd_pstate_epp",
+ .attr = amd_pstate_epp_attr,
+};
+
static int __init amd_pstate_init(void)
{
int ret;
@@ -625,10 +1260,10 @@ static int __init amd_pstate_init(void)
/*
* by default the pstate driver is disabled to load
* enable the amd_pstate passive mode driver explicitly
- * with amd_pstate=passive in kernel command line
+ * with amd_pstate=passive or other modes in kernel command line
*/
- if (!cppc_load) {
- pr_debug("driver load is disabled, boot with amd_pstate=passive to enable this\n");
+ if (cppc_state == AMD_PSTATE_DISABLE) {
+ pr_debug("driver load is disabled, boot with specific mode to enable this\n");
return -ENODEV;
}
@@ -644,7 +1279,8 @@ static int __init amd_pstate_init(void)
/* capability check */
if (boot_cpu_has(X86_FEATURE_CPPC)) {
pr_debug("AMD CPPC MSR based functionality is supported\n");
- amd_pstate_driver.adjust_perf = amd_pstate_adjust_perf;
+ if (cppc_state == AMD_PSTATE_PASSIVE)
+ current_pstate_driver->adjust_perf = amd_pstate_adjust_perf;
} else {
pr_debug("AMD CPPC shared memory based functionality is supported\n");
static_call_update(amd_pstate_enable, cppc_enable);
@@ -655,31 +1291,63 @@ static int __init amd_pstate_init(void)
/* enable amd pstate feature */
ret = amd_pstate_enable(true);
if (ret) {
- pr_err("failed to enable amd-pstate with return %d\n", ret);
+ pr_err("failed to enable with return %d\n", ret);
return ret;
}
- ret = cpufreq_register_driver(&amd_pstate_driver);
+ ret = cpufreq_register_driver(current_pstate_driver);
if (ret)
- pr_err("failed to register amd_pstate_driver with return %d\n",
- ret);
+ pr_err("failed to register with return %d\n", ret);
+
+ amd_pstate_kobj = kobject_create_and_add("amd_pstate", &cpu_subsys.dev_root->kobj);
+ if (!amd_pstate_kobj) {
+ ret = -EINVAL;
+ pr_err("global sysfs registration failed.\n");
+ goto kobject_free;
+ }
+
+ ret = sysfs_create_group(amd_pstate_kobj, &amd_pstate_global_attr_group);
+ if (ret) {
+ pr_err("sysfs attribute export failed with error %d.\n", ret);
+ goto global_attr_free;
+ }
return ret;
+
+global_attr_free:
+ kobject_put(amd_pstate_kobj);
+kobject_free:
+ cpufreq_unregister_driver(current_pstate_driver);
+ return ret;
}
device_initcall(amd_pstate_init);
static int __init amd_pstate_param(char *str)
{
+ size_t size;
+ int mode_idx;
+
if (!str)
return -EINVAL;
- if (!strcmp(str, "disable")) {
- cppc_load = 0;
- pr_info("driver is explicitly disabled\n");
- } else if (!strcmp(str, "passive"))
- cppc_load = 1;
+ size = strlen(str);
+ mode_idx = get_mode_idx_from_str(str, size);
- return 0;
+ if (mode_idx >= AMD_PSTATE_DISABLE && mode_idx < AMD_PSTATE_MAX) {
+ cppc_state = mode_idx;
+ if (cppc_state == AMD_PSTATE_DISABLE)
+ pr_info("driver is explicitly disabled\n");
+
+ if (cppc_state == AMD_PSTATE_ACTIVE)
+ current_pstate_driver = &amd_pstate_epp_driver;
+
+ if (cppc_state == AMD_PSTATE_PASSIVE)
+ current_pstate_driver = &amd_pstate_driver;
+
+ return 0;
+ }
+
+ return -EINVAL;
}
early_param("amd_pstate", amd_pstate_param);
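
The amd-pstate changes grow a small mode state machine: the amd_pstate= boot parameter and the new global status attribute (expected at /sys/devices/system/cpu/amd_pstate/status, given the kobject created above) both map a mode string onto one of the AMD_PSTATE_{DISABLE,PASSIVE,ACTIVE} states, and switching to "disable" while the EPP ("active") driver is loaded is refused with -EBUSY. A self-contained sketch of that lookup; the string table below is an assumption mirroring those constants, since the real amd_pstate_mode_string[] is defined in a header outside this diff:

	/*
	 * Standalone illustration of the mode-string lookup; mode_string[] is a
	 * stand-in for amd_pstate_mode_string[], which is not part of this diff.
	 */
	#include <stdio.h>
	#include <string.h>

	enum { MODE_DISABLE, MODE_PASSIVE, MODE_ACTIVE, MODE_MAX };

	static const char * const mode_string[MODE_MAX] = {
		[MODE_DISABLE] = "disable",
		[MODE_PASSIVE] = "passive",
		[MODE_ACTIVE]  = "active",
	};

	static int get_mode_idx(const char *str, size_t size)
	{
		for (int i = 0; i < MODE_MAX; i++)
			if (!strncmp(str, mode_string[i], size))
				return i;
		return -1;
	}

	int main(void)
	{
		/* e.g. booting with amd_pstate=active selects the EPP driver */
		printf("active -> %d\n", get_mode_idx("active", strlen("active")));
		printf("bogus  -> %d\n", get_mode_idx("bogus", strlen("bogus")));
		return 0;
	}

The length guard in amd_pstate_update_status() (size between 6 and 7) matches the shortest and longest of these strings.
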
diff --git a/drivers/cpufreq/amd_freq_sensitivity.c b/drivers/cpufreq/amd_freq_sensitivity.c
index 6448e03bcf48..59b19b9975e8 100644
--- a/drivers/cpufreq/amd_freq_sensitivity.c
+++ b/drivers/cpufreq/amd_freq_sensitivity.c
@@ -125,6 +125,8 @@ static int __init amd_freq_sensitivity_init(void)
if (!pcidev) {
if (!boot_cpu_has(X86_FEATURE_PROC_FEEDBACK))
return -ENODEV;
+ } else {
+ pci_dev_put(pcidev);
}
if (rdmsrl_safe(MSR_AMD64_FREQ_SENSITIVITY_ACTUAL, &val))
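
The amd_freq_sensitivity hunk plugs a device reference leak: pci_get_device() returns its match with an elevated reference count, and this probe only needs to know that the device exists. A minimal sketch of the pattern, under that assumption:

	#include <linux/pci.h>

	/* Look up a PCI device purely as an existence check and drop the
	 * reference taken by pci_get_device() right away. */
	static bool pci_device_present(unsigned int vendor, unsigned int device)
	{
		struct pci_dev *pcidev = pci_get_device(vendor, device, NULL);

		if (!pcidev)
			return false;

		pci_dev_put(pcidev);
		return true;
	}
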
diff --git a/drivers/cpufreq/apple-soc-cpufreq.c b/drivers/cpufreq/apple-soc-cpufreq.c
new file mode 100644
index 000000000000..c11d22fd84c3
--- /dev/null
+++ b/drivers/cpufreq/apple-soc-cpufreq.c
@@ -0,0 +1,353 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Apple SoC CPU cluster performance state driver
+ *
+ * Copyright The Asahi Linux Contributors
+ *
+ * Based on scpi-cpufreq.c
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
+
+#define APPLE_DVFS_CMD 0x20
+#define APPLE_DVFS_CMD_BUSY BIT(31)
+#define APPLE_DVFS_CMD_SET BIT(25)
+#define APPLE_DVFS_CMD_PS2 GENMASK(16, 12)
+#define APPLE_DVFS_CMD_PS1 GENMASK(4, 0)
+
+/* Same timebase as CPU counter (24MHz) */
+#define APPLE_DVFS_LAST_CHG_TIME 0x38
+
+/*
+ * Apple ran out of bits and had to shift this in T8112...
+ */
+#define APPLE_DVFS_STATUS 0x50
+#define APPLE_DVFS_STATUS_CUR_PS_T8103 GENMASK(7, 4)
+#define APPLE_DVFS_STATUS_CUR_PS_SHIFT_T8103 4
+#define APPLE_DVFS_STATUS_TGT_PS_T8103 GENMASK(3, 0)
+#define APPLE_DVFS_STATUS_CUR_PS_T8112 GENMASK(9, 5)
+#define APPLE_DVFS_STATUS_CUR_PS_SHIFT_T8112 5
+#define APPLE_DVFS_STATUS_TGT_PS_T8112 GENMASK(4, 0)
+
+/*
+ * Div is +1, base clock is 12MHz on existing SoCs.
+ * For documentation purposes. We use the OPP table to
+ * get the frequency.
+ */
+#define APPLE_DVFS_PLL_STATUS 0xc0
+#define APPLE_DVFS_PLL_FACTOR 0xc8
+#define APPLE_DVFS_PLL_FACTOR_MULT GENMASK(31, 16)
+#define APPLE_DVFS_PLL_FACTOR_DIV GENMASK(15, 0)
+
+#define APPLE_DVFS_TRANSITION_TIMEOUT 100
+
+struct apple_soc_cpufreq_info {
+ u64 max_pstate;
+ u64 cur_pstate_mask;
+ u64 cur_pstate_shift;
+};
+
+struct apple_cpu_priv {
+ struct device *cpu_dev;
+ void __iomem *reg_base;
+ const struct apple_soc_cpufreq_info *info;
+};
+
+static struct cpufreq_driver apple_soc_cpufreq_driver;
+
+static const struct apple_soc_cpufreq_info soc_t8103_info = {
+ .max_pstate = 15,
+ .cur_pstate_mask = APPLE_DVFS_STATUS_CUR_PS_T8103,
+ .cur_pstate_shift = APPLE_DVFS_STATUS_CUR_PS_SHIFT_T8103,
+};
+
+static const struct apple_soc_cpufreq_info soc_t8112_info = {
+ .max_pstate = 31,
+ .cur_pstate_mask = APPLE_DVFS_STATUS_CUR_PS_T8112,
+ .cur_pstate_shift = APPLE_DVFS_STATUS_CUR_PS_SHIFT_T8112,
+};
+
+static const struct apple_soc_cpufreq_info soc_default_info = {
+ .max_pstate = 15,
+ .cur_pstate_mask = 0, /* fallback */
+};
+
+static const struct of_device_id apple_soc_cpufreq_of_match[] = {
+ {
+ .compatible = "apple,t8103-cluster-cpufreq",
+ .data = &soc_t8103_info,
+ },
+ {
+ .compatible = "apple,t8112-cluster-cpufreq",
+ .data = &soc_t8112_info,
+ },
+ {
+ .compatible = "apple,cluster-cpufreq",
+ .data = &soc_default_info,
+ },
+ {}
+};
+
+static unsigned int apple_soc_cpufreq_get_rate(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
+ struct apple_cpu_priv *priv = policy->driver_data;
+ struct cpufreq_frequency_table *p;
+ unsigned int pstate;
+
+ if (priv->info->cur_pstate_mask) {
+ u64 reg = readq_relaxed(priv->reg_base + APPLE_DVFS_STATUS);
+
+ pstate = (reg & priv->info->cur_pstate_mask) >> priv->info->cur_pstate_shift;
+ } else {
+ /*
+ * For the fallback case we might not know the layout of DVFS_STATUS,
+ * so just use the command register value (which ignores boost limitations).
+ */
+ u64 reg = readq_relaxed(priv->reg_base + APPLE_DVFS_CMD);
+
+ pstate = FIELD_GET(APPLE_DVFS_CMD_PS1, reg);
+ }
+
+ cpufreq_for_each_valid_entry(p, policy->freq_table)
+ if (p->driver_data == pstate)
+ return p->frequency;
+
+ dev_err(priv->cpu_dev, "could not find frequency for pstate %d\n",
+ pstate);
+ return 0;
+}
+
+static int apple_soc_cpufreq_set_target(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ struct apple_cpu_priv *priv = policy->driver_data;
+ unsigned int pstate = policy->freq_table[index].driver_data;
+ u64 reg;
+
+ /* Fallback for newer SoCs */
+ if (index > priv->info->max_pstate)
+ index = priv->info->max_pstate;
+
+ if (readq_poll_timeout_atomic(priv->reg_base + APPLE_DVFS_CMD, reg,
+ !(reg & APPLE_DVFS_CMD_BUSY), 2,
+ APPLE_DVFS_TRANSITION_TIMEOUT)) {
+ return -EIO;
+ }
+
+ reg &= ~(APPLE_DVFS_CMD_PS1 | APPLE_DVFS_CMD_PS2);
+ reg |= FIELD_PREP(APPLE_DVFS_CMD_PS1, pstate);
+ reg |= FIELD_PREP(APPLE_DVFS_CMD_PS2, pstate);
+ reg |= APPLE_DVFS_CMD_SET;
+
+ writeq_relaxed(reg, priv->reg_base + APPLE_DVFS_CMD);
+
+ return 0;
+}
+
+static unsigned int apple_soc_cpufreq_fast_switch(struct cpufreq_policy *policy,
+ unsigned int target_freq)
+{
+ if (apple_soc_cpufreq_set_target(policy, policy->cached_resolved_idx) < 0)
+ return 0;
+
+ return policy->freq_table[policy->cached_resolved_idx].frequency;
+}
+
+static int apple_soc_cpufreq_find_cluster(struct cpufreq_policy *policy,
+ void __iomem **reg_base,
+ const struct apple_soc_cpufreq_info **info)
+{
+ struct of_phandle_args args;
+ const struct of_device_id *match;
+ int ret = 0;
+
+ ret = of_perf_domain_get_sharing_cpumask(policy->cpu, "performance-domains",
+ "#performance-domain-cells",
+ policy->cpus, &args);
+ if (ret < 0)
+ return ret;
+
+ match = of_match_node(apple_soc_cpufreq_of_match, args.np);
+ of_node_put(args.np);
+ if (!match)
+ return -ENODEV;
+
+ *info = match->data;
+
+ *reg_base = of_iomap(args.np, 0);
+ if (IS_ERR(*reg_base))
+ return PTR_ERR(*reg_base);
+
+ return 0;
+}
+
+static struct freq_attr *apple_soc_cpufreq_hw_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL, /* Filled in below if boost is enabled */
+ NULL,
+};
+
+static int apple_soc_cpufreq_init(struct cpufreq_policy *policy)
+{
+ int ret, i;
+ unsigned int transition_latency;
+ void __iomem *reg_base;
+ struct device *cpu_dev;
+ struct apple_cpu_priv *priv;
+ const struct apple_soc_cpufreq_info *info;
+ struct cpufreq_frequency_table *freq_table;
+
+ cpu_dev = get_cpu_device(policy->cpu);
+ if (!cpu_dev) {
+ pr_err("failed to get cpu%d device\n", policy->cpu);
+ return -ENODEV;
+ }
+
+ ret = dev_pm_opp_of_add_table(cpu_dev);
+ if (ret < 0) {
+ dev_err(cpu_dev, "%s: failed to add OPP table: %d\n", __func__, ret);
+ return ret;
+ }
+
+ ret = apple_soc_cpufreq_find_cluster(policy, &reg_base, &info);
+ if (ret) {
+ dev_err(cpu_dev, "%s: failed to get cluster info: %d\n", __func__, ret);
+ return ret;
+ }
+
+ ret = dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
+ if (ret) {
+ dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n", __func__, ret);
+ goto out_iounmap;
+ }
+
+ ret = dev_pm_opp_get_opp_count(cpu_dev);
+ if (ret <= 0) {
+ dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n");
+ ret = -EPROBE_DEFER;
+ goto out_free_opp;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ ret = -ENOMEM;
+ goto out_free_opp;
+ }
+
+ ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
+ if (ret) {
+ dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
+ goto out_free_priv;
+ }
+
+ /* Get OPP levels (p-state indexes) and stash them in driver_data */
+ for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
+ unsigned long rate = freq_table[i].frequency * 1000 + 999;
+ struct dev_pm_opp *opp = dev_pm_opp_find_freq_floor(cpu_dev, &rate);
+
+ if (IS_ERR(opp)) {
+ ret = PTR_ERR(opp);
+ goto out_free_cpufreq_table;
+ }
+ freq_table[i].driver_data = dev_pm_opp_get_level(opp);
+ dev_pm_opp_put(opp);
+ }
+
+ priv->cpu_dev = cpu_dev;
+ priv->reg_base = reg_base;
+ priv->info = info;
+ policy->driver_data = priv;
+ policy->freq_table = freq_table;
+
+ transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev);
+ if (!transition_latency)
+ transition_latency = CPUFREQ_ETERNAL;
+
+ policy->cpuinfo.transition_latency = transition_latency;
+ policy->dvfs_possible_from_any_cpu = true;
+ policy->fast_switch_possible = true;
+ policy->suspend_freq = freq_table[0].frequency;
+
+ if (policy_has_boost_freq(policy)) {
+ ret = cpufreq_enable_boost_support();
+ if (ret) {
+ dev_warn(cpu_dev, "failed to enable boost: %d\n", ret);
+ } else {
+ apple_soc_cpufreq_hw_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
+ apple_soc_cpufreq_driver.boost_enabled = true;
+ }
+ }
+
+ return 0;
+
+out_free_cpufreq_table:
+ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
+out_free_priv:
+ kfree(priv);
+out_free_opp:
+ dev_pm_opp_remove_all_dynamic(cpu_dev);
+out_iounmap:
+ iounmap(reg_base);
+ return ret;
+}
+
+static int apple_soc_cpufreq_exit(struct cpufreq_policy *policy)
+{
+ struct apple_cpu_priv *priv = policy->driver_data;
+
+ dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
+ dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
+ iounmap(priv->reg_base);
+ kfree(priv);
+
+ return 0;
+}
+
+static struct cpufreq_driver apple_soc_cpufreq_driver = {
+ .name = "apple-cpufreq",
+ .flags = CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+ CPUFREQ_NEED_INITIAL_FREQ_CHECK | CPUFREQ_IS_COOLING_DEV,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .get = apple_soc_cpufreq_get_rate,
+ .init = apple_soc_cpufreq_init,
+ .exit = apple_soc_cpufreq_exit,
+ .target_index = apple_soc_cpufreq_set_target,
+ .fast_switch = apple_soc_cpufreq_fast_switch,
+ .register_em = cpufreq_register_em_with_opp,
+ .attr = apple_soc_cpufreq_hw_attr,
+ .suspend = cpufreq_generic_suspend,
+};
+
+static int __init apple_soc_cpufreq_module_init(void)
+{
+ if (!of_machine_is_compatible("apple,arm-platform"))
+ return -ENODEV;
+
+ return cpufreq_register_driver(&apple_soc_cpufreq_driver);
+}
+module_init(apple_soc_cpufreq_module_init);
+
+static void __exit apple_soc_cpufreq_module_exit(void)
+{
+ cpufreq_unregister_driver(&apple_soc_cpufreq_driver);
+}
+module_exit(apple_soc_cpufreq_module_exit);
+
+MODULE_DEVICE_TABLE(of, apple_soc_cpufreq_of_match);
+MODULE_AUTHOR("Hector Martin <[email protected]>");
+MODULE_DESCRIPTION("Apple SoC CPU cluster DVFS driver");
+MODULE_LICENSE("GPL");
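
The new Apple driver reads the current performance state from APPLE_DVFS_STATUS, whose field layout differs between T8103 and T8112, and falls back to the PS1 field of APPLE_DVFS_CMD on unknown SoCs. A minimal sketch of the status decode, using the mask and shift values defined in the file above:

	#include <linux/bits.h>
	#include <linux/types.h>

	#define CUR_PS_T8103		GENMASK(7, 4)
	#define CUR_PS_SHIFT_T8103	4
	#define CUR_PS_T8112		GENMASK(9, 5)
	#define CUR_PS_SHIFT_T8112	5

	/* Extract the currently active p-state from a DVFS status word. */
	static unsigned int apple_dvfs_cur_pstate(u64 status, bool is_t8112)
	{
		if (is_t8112)
			return (status & CUR_PS_T8112) >> CUR_PS_SHIFT_T8112;

		return (status & CUR_PS_T8103) >> CUR_PS_SHIFT_T8103;
	}
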
diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
index c10fc33b29b1..b74289a95a17 100644
--- a/drivers/cpufreq/armada-37xx-cpufreq.c
+++ b/drivers/cpufreq/armada-37xx-cpufreq.c
@@ -445,7 +445,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
return -ENODEV;
}
- clk = clk_get(cpu_dev, 0);
+ clk = clk_get(cpu_dev, NULL);
if (IS_ERR(clk)) {
dev_err(cpu_dev, "Cannot get clock for CPU0\n");
return PTR_ERR(clk);
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
index 4153150e20db..ffea6402189d 100644
--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -751,10 +751,7 @@ static int brcm_avs_cpufreq_probe(struct platform_device *pdev)
static int brcm_avs_cpufreq_remove(struct platform_device *pdev)
{
- int ret;
-
- ret = cpufreq_unregister_driver(&brcm_avs_driver);
- WARN_ON(ret);
+ cpufreq_unregister_driver(&brcm_avs_driver);
brcm_avs_prepare_uninit(pdev);
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 432dfb4e8027..022e3555407c 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -487,7 +487,8 @@ static unsigned int get_perf_level_count(struct cpufreq_policy *policy)
cpu_data = policy->driver_data;
perf_caps = &cpu_data->perf_caps;
max_cap = arch_scale_cpu_capacity(cpu);
- min_cap = div_u64(max_cap * perf_caps->lowest_perf, perf_caps->highest_perf);
+ min_cap = div_u64((u64)max_cap * perf_caps->lowest_perf,
+ perf_caps->highest_perf);
if ((min_cap == 0) || (max_cap < min_cap))
return 0;
return 1 + max_cap / CPPC_EM_CAP_STEP - min_cap / CPPC_EM_CAP_STEP;
@@ -519,10 +520,10 @@ static int cppc_get_cpu_power(struct device *cpu_dev,
cpu_data = policy->driver_data;
perf_caps = &cpu_data->perf_caps;
max_cap = arch_scale_cpu_capacity(cpu_dev->id);
- min_cap = div_u64(max_cap * perf_caps->lowest_perf,
- perf_caps->highest_perf);
-
- perf_step = CPPC_EM_CAP_STEP * perf_caps->highest_perf / max_cap;
+ min_cap = div_u64((u64)max_cap * perf_caps->lowest_perf,
+ perf_caps->highest_perf);
+ perf_step = div_u64((u64)CPPC_EM_CAP_STEP * perf_caps->highest_perf,
+ max_cap);
min_step = min_cap / CPPC_EM_CAP_STEP;
max_step = max_cap / CPPC_EM_CAP_STEP;
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index 6ac3800db450..e85703651098 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -103,6 +103,8 @@ static const struct of_device_id allowlist[] __initconst = {
static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "allwinner,sun50i-h6", },
+ { .compatible = "apple,arm-platform", },
+
{ .compatible = "arm,vexpress", },
{ .compatible = "calxeda,highbank", },
@@ -135,6 +137,7 @@ static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "nvidia,tegra30", },
{ .compatible = "nvidia,tegra124", },
{ .compatible = "nvidia,tegra210", },
+ { .compatible = "nvidia,tegra234", },
{ .compatible = "qcom,apq8096", },
{ .compatible = "qcom,msm8996", },
@@ -148,6 +151,7 @@ static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "qcom,sdm845", },
{ .compatible = "qcom,sm6115", },
{ .compatible = "qcom,sm6350", },
+ { .compatible = "qcom,sm6375", },
{ .compatible = "qcom,sm8150", },
{ .compatible = "qcom,sm8250", },
{ .compatible = "qcom,sm8350", },
@@ -160,6 +164,7 @@ static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "ti,am43", },
{ .compatible = "ti,dra7", },
{ .compatible = "ti,omap3", },
+ { .compatible = "ti,am625", },
{ .compatible = "qcom,ipq8064", },
{ .compatible = "qcom,apq8064", },
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 69b3d61852ac..6d8fd3b8dcb5 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -993,7 +993,7 @@ static const struct sysfs_ops sysfs_ops = {
.store = store,
};
-static struct kobj_type ktype_cpufreq = {
+static const struct kobj_type ktype_cpufreq = {
.sysfs_ops = &sysfs_ops,
.default_groups = cpufreq_groups,
.release = cpufreq_sysfs_release,
@@ -1207,6 +1207,7 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
goto err_free_rcpumask;
+ init_completion(&policy->kobj_unregister);
ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
cpufreq_global_kobject, "policy%u", cpu);
if (ret) {
@@ -1245,7 +1246,6 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
init_rwsem(&policy->rwsem);
spin_lock_init(&policy->transition_lock);
init_waitqueue_head(&policy->transition_wait);
- init_completion(&policy->kobj_unregister);
INIT_WORK(&policy->update, handle_update);
policy->cpu = cpu;
@@ -2904,12 +2904,12 @@ EXPORT_SYMBOL_GPL(cpufreq_register_driver);
* Returns zero if successful, and -EINVAL if the cpufreq_driver is
* currently not initialised.
*/
-int cpufreq_unregister_driver(struct cpufreq_driver *driver)
+void cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
unsigned long flags;
- if (!cpufreq_driver || (driver != cpufreq_driver))
- return -EINVAL;
+ if (WARN_ON(!cpufreq_driver || (driver != cpufreq_driver)))
+ return;
pr_debug("unregistering driver %s\n", driver->name);
@@ -2926,8 +2926,6 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
cpus_read_unlock();
-
- return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 1570d6f3e75d..55c7ffd37d1c 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -128,25 +128,23 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
ssize_t len = 0;
int i, j, count;
- len += scnprintf(buf + len, PAGE_SIZE - len, " From : To\n");
- len += scnprintf(buf + len, PAGE_SIZE - len, " : ");
+ len += sysfs_emit_at(buf, len, " From : To\n");
+ len += sysfs_emit_at(buf, len, " : ");
for (i = 0; i < stats->state_num; i++) {
if (len >= PAGE_SIZE)
break;
- len += scnprintf(buf + len, PAGE_SIZE - len, "%9u ",
- stats->freq_table[i]);
+ len += sysfs_emit_at(buf, len, "%9u ", stats->freq_table[i]);
}
if (len >= PAGE_SIZE)
return PAGE_SIZE;
- len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
+ len += sysfs_emit_at(buf, len, "\n");
for (i = 0; i < stats->state_num; i++) {
if (len >= PAGE_SIZE)
break;
- len += scnprintf(buf + len, PAGE_SIZE - len, "%9u: ",
- stats->freq_table[i]);
+ len += sysfs_emit_at(buf, len, "%9u: ", stats->freq_table[i]);
for (j = 0; j < stats->state_num; j++) {
if (len >= PAGE_SIZE)
@@ -157,11 +155,11 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
else
count = stats->trans_table[i * stats->max_state + j];
- len += scnprintf(buf + len, PAGE_SIZE - len, "%9u ", count);
+ len += sysfs_emit_at(buf, len, "%9u ", count);
}
if (len >= PAGE_SIZE)
break;
- len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
+ len += sysfs_emit_at(buf, len, "\n");
}
if (len >= PAGE_SIZE) {
diff --git a/drivers/cpufreq/davinci-cpufreq.c b/drivers/cpufreq/davinci-cpufreq.c
index 9e97f60f8199..ebb3a8102681 100644
--- a/drivers/cpufreq/davinci-cpufreq.c
+++ b/drivers/cpufreq/davinci-cpufreq.c
@@ -133,12 +133,14 @@ static int __init davinci_cpufreq_probe(struct platform_device *pdev)
static int __exit davinci_cpufreq_remove(struct platform_device *pdev)
{
+ cpufreq_unregister_driver(&davinci_driver);
+
clk_put(cpufreq.armclk);
if (cpufreq.asyncclk)
clk_put(cpufreq.asyncclk);
- return cpufreq_unregister_driver(&davinci_driver);
+ return 0;
}
static struct platform_driver davinci_cpufreq_driver = {
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 6ff73c30769f..cb4beec27555 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -298,6 +298,7 @@ static int hwp_active __read_mostly;
static int hwp_mode_bdw __read_mostly;
static bool per_cpu_limits __read_mostly;
static bool hwp_boost __read_mostly;
+static bool hwp_forced __read_mostly;
static struct cpufreq_driver *intel_pstate_driver __read_mostly;
@@ -451,20 +452,6 @@ static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
(u32) cpu->acpi_perf_data.states[i].control);
}
- /*
- * The _PSS table doesn't contain whole turbo frequency range.
- * This just contains +1 MHZ above the max non turbo frequency,
- * with control value corresponding to max turbo ratio. But
- * when cpufreq set policy is called, it will call with this
- * max frequency, which will cause a reduced performance as
- * this driver uses real max turbo frequency as the max
- * frequency. So correct this frequency in _PSS table to
- * correct max turbo frequency based on the turbo state.
- * Also need to convert to MHz as _PSS freq is in MHz.
- */
- if (!global.turbo_disabled)
- cpu->acpi_perf_data.states[0].core_frequency =
- policy->cpuinfo.max_freq / 1000;
cpu->valid_pss_table = true;
pr_debug("_PPC limits will be enforced\n");
@@ -1679,12 +1666,12 @@ static void intel_pstate_update_epp_defaults(struct cpudata *cpudata)
return;
/*
- * If powerup EPP is something other than chipset default 0x80 and
- * - is more performance oriented than 0x80 (default balance_perf EPP)
+ * If the EPP is set by firmware, which means that firmware enabled HWP
+ * - Is equal or less than 0x80 (default balance_perf EPP)
* - But less performance oriented than performance EPP
* then use this as new balance_perf EPP.
*/
- if (cpudata->epp_default < HWP_EPP_BALANCE_PERFORMANCE &&
+ if (hwp_forced && cpudata->epp_default <= HWP_EPP_BALANCE_PERFORMANCE &&
cpudata->epp_default > HWP_EPP_PERFORMANCE) {
epp_values[EPP_INDEX_BALANCE_PERFORMANCE] = cpudata->epp_default;
return;
@@ -2378,6 +2365,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
X86_MATCH(COMETLAKE, core_funcs),
X86_MATCH(ICELAKE_X, core_funcs),
X86_MATCH(TIGERLAKE, core_funcs),
+ X86_MATCH(SAPPHIRERAPIDS_X, core_funcs),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
@@ -3384,7 +3372,7 @@ static int __init intel_pstate_init(void)
id = x86_match_cpu(hwp_support_ids);
if (id) {
- bool hwp_forced = intel_pstate_hwp_is_enabled();
+ hwp_forced = intel_pstate_hwp_is_enabled();
if (hwp_forced)
pr_info("HWP enabled by BIOS\n");
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index 3e000e1a75c6..4c57c6725c13 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -407,10 +407,10 @@ static int guess_fsb(int mult)
{
int speed = cpu_khz / 1000;
int i;
- int speeds[] = { 666, 1000, 1333, 2000 };
+ static const int speeds[] = { 666, 1000, 1333, 2000 };
int f_max, f_min;
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < ARRAY_SIZE(speeds); i++) {
f_max = ((speeds[i] * mult) + 50) / 100;
f_max += (ROUNDING / 2);
f_min = f_max - ROUNDING;
diff --git a/drivers/cpufreq/loongson1-cpufreq.c b/drivers/cpufreq/loongson1-cpufreq.c
deleted file mode 100644
index fb72d709db56..000000000000
--- a/drivers/cpufreq/loongson1-cpufreq.c
+++ /dev/null
@@ -1,222 +0,0 @@
-/*
- * CPU Frequency Scaling for Loongson 1 SoC
- *
- * Copyright (C) 2014-2016 Zhang, Keguang <[email protected]>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/clk.h>
-#include <linux/clk-provider.h>
-#include <linux/cpu.h>
-#include <linux/cpufreq.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-
-#include <cpufreq.h>
-#include <loongson1.h>
-
-struct ls1x_cpufreq {
- struct device *dev;
- struct clk *clk; /* CPU clk */
- struct clk *mux_clk; /* MUX of CPU clk */
- struct clk *pll_clk; /* PLL clk */
- struct clk *osc_clk; /* OSC clk */
- unsigned int max_freq;
- unsigned int min_freq;
-};
-
-static struct ls1x_cpufreq *cpufreq;
-
-static int ls1x_cpufreq_notifier(struct notifier_block *nb,
- unsigned long val, void *data)
-{
- if (val == CPUFREQ_POSTCHANGE)
- current_cpu_data.udelay_val = loops_per_jiffy;
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block ls1x_cpufreq_notifier_block = {
- .notifier_call = ls1x_cpufreq_notifier
-};
-
-static int ls1x_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int index)
-{
- struct device *cpu_dev = get_cpu_device(policy->cpu);
- unsigned int old_freq, new_freq;
-
- old_freq = policy->cur;
- new_freq = policy->freq_table[index].frequency;
-
- /*
- * The procedure of reconfiguring CPU clk is as below.
- *
- * - Reparent CPU clk to OSC clk
- * - Reset CPU clock (very important)
- * - Reconfigure CPU DIV
- * - Reparent CPU clk back to CPU DIV clk
- */
-
- clk_set_parent(policy->clk, cpufreq->osc_clk);
- __raw_writel(__raw_readl(LS1X_CLK_PLL_DIV) | RST_CPU_EN | RST_CPU,
- LS1X_CLK_PLL_DIV);
- __raw_writel(__raw_readl(LS1X_CLK_PLL_DIV) & ~(RST_CPU_EN | RST_CPU),
- LS1X_CLK_PLL_DIV);
- clk_set_rate(cpufreq->mux_clk, new_freq * 1000);
- clk_set_parent(policy->clk, cpufreq->mux_clk);
- dev_dbg(cpu_dev, "%u KHz --> %u KHz\n", old_freq, new_freq);
-
- return 0;
-}
-
-static int ls1x_cpufreq_init(struct cpufreq_policy *policy)
-{
- struct device *cpu_dev = get_cpu_device(policy->cpu);
- struct cpufreq_frequency_table *freq_tbl;
- unsigned int pll_freq, freq;
- int steps, i;
-
- pll_freq = clk_get_rate(cpufreq->pll_clk) / 1000;
-
- steps = 1 << DIV_CPU_WIDTH;
- freq_tbl = kcalloc(steps, sizeof(*freq_tbl), GFP_KERNEL);
- if (!freq_tbl)
- return -ENOMEM;
-
- for (i = 0; i < (steps - 1); i++) {
- freq = pll_freq / (i + 1);
- if ((freq < cpufreq->min_freq) || (freq > cpufreq->max_freq))
- freq_tbl[i].frequency = CPUFREQ_ENTRY_INVALID;
- else
- freq_tbl[i].frequency = freq;
- dev_dbg(cpu_dev,
- "cpufreq table: index %d: frequency %d\n", i,
- freq_tbl[i].frequency);
- }
- freq_tbl[i].frequency = CPUFREQ_TABLE_END;
-
- policy->clk = cpufreq->clk;
- cpufreq_generic_init(policy, freq_tbl, 0);
-
- return 0;
-}
-
-static int ls1x_cpufreq_exit(struct cpufreq_policy *policy)
-{
- kfree(policy->freq_table);
- return 0;
-}
-
-static struct cpufreq_driver ls1x_cpufreq_driver = {
- .name = "cpufreq-ls1x",
- .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
- .verify = cpufreq_generic_frequency_table_verify,
- .target_index = ls1x_cpufreq_target,
- .get = cpufreq_generic_get,
- .init = ls1x_cpufreq_init,
- .exit = ls1x_cpufreq_exit,
- .attr = cpufreq_generic_attr,
-};
-
-static int ls1x_cpufreq_remove(struct platform_device *pdev)
-{
- cpufreq_unregister_notifier(&ls1x_cpufreq_notifier_block,
- CPUFREQ_TRANSITION_NOTIFIER);
- cpufreq_unregister_driver(&ls1x_cpufreq_driver);
-
- return 0;
-}
-
-static int ls1x_cpufreq_probe(struct platform_device *pdev)
-{
- struct plat_ls1x_cpufreq *pdata = dev_get_platdata(&pdev->dev);
- struct clk *clk;
- int ret;
-
- if (!pdata || !pdata->clk_name || !pdata->osc_clk_name) {
- dev_err(&pdev->dev, "platform data missing\n");
- return -EINVAL;
- }
-
- cpufreq =
- devm_kzalloc(&pdev->dev, sizeof(struct ls1x_cpufreq), GFP_KERNEL);
- if (!cpufreq)
- return -ENOMEM;
-
- cpufreq->dev = &pdev->dev;
-
- clk = devm_clk_get(&pdev->dev, pdata->clk_name);
- if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "unable to get %s clock\n",
- pdata->clk_name);
- return PTR_ERR(clk);
- }
- cpufreq->clk = clk;
-
- clk = clk_get_parent(clk);
- if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "unable to get parent of %s clock\n",
- __clk_get_name(cpufreq->clk));
- return PTR_ERR(clk);
- }
- cpufreq->mux_clk = clk;
-
- clk = clk_get_parent(clk);
- if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "unable to get parent of %s clock\n",
- __clk_get_name(cpufreq->mux_clk));
- return PTR_ERR(clk);
- }
- cpufreq->pll_clk = clk;
-
- clk = devm_clk_get(&pdev->dev, pdata->osc_clk_name);
- if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "unable to get %s clock\n",
- pdata->osc_clk_name);
- return PTR_ERR(clk);
- }
- cpufreq->osc_clk = clk;
-
- cpufreq->max_freq = pdata->max_freq;
- cpufreq->min_freq = pdata->min_freq;
-
- ret = cpufreq_register_driver(&ls1x_cpufreq_driver);
- if (ret) {
- dev_err(&pdev->dev,
- "failed to register CPUFreq driver: %d\n", ret);
- return ret;
- }
-
- ret = cpufreq_register_notifier(&ls1x_cpufreq_notifier_block,
- CPUFREQ_TRANSITION_NOTIFIER);
-
- if (ret) {
- dev_err(&pdev->dev,
- "failed to register CPUFreq notifier: %d\n",ret);
- cpufreq_unregister_driver(&ls1x_cpufreq_driver);
- }
-
- return ret;
-}
-
-static struct platform_driver ls1x_cpufreq_platdrv = {
- .probe = ls1x_cpufreq_probe,
- .remove = ls1x_cpufreq_remove,
- .driver = {
- .name = "ls1x-cpufreq",
- },
-};
-
-module_platform_driver(ls1x_cpufreq_platdrv);
-
-MODULE_ALIAS("platform:ls1x-cpufreq");
-MODULE_AUTHOR("Kelvin Cheung <[email protected]>");
-MODULE_DESCRIPTION("Loongson1 CPUFreq driver");
-MODULE_LICENSE("GPL");
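For reference, the removed loongson1 driver built its frequency table as one entry per CPU divider value dividing the PLL rate, invalidating entries outside the board's window. A minimal sketch of that construction; the divider width and the kHz limits below are made-up example numbers, not Loongson values:

#include <stdio.h>

#define DIV_WIDTH	4		/* hypothetical CPU divider width */
#define ENTRY_INVALID	0

int main(void)
{
	unsigned int pll_khz = 504000;	/* example 504 MHz PLL */
	unsigned int min_khz = 63000, max_khz = 504000;
	unsigned int steps = 1u << DIV_WIDTH;
	unsigned int i, freq;

	for (i = 0; i < steps - 1; i++) {
		freq = pll_khz / (i + 1);		/* divider i+1 */
		if (freq < min_khz || freq > max_khz)
			freq = ENTRY_INVALID;
		printf("index %u: %u kHz\n", i, freq);
	}
	return 0;
}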
diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c
index f0e0a35c7f21..b22f5cc8a463 100644
--- a/drivers/cpufreq/mediatek-cpufreq-hw.c
+++ b/drivers/cpufreq/mediatek-cpufreq-hw.c
@@ -160,6 +160,7 @@ static int mtk_cpu_resources_init(struct platform_device *pdev,
struct mtk_cpufreq_data *data;
struct device *dev = &pdev->dev;
struct resource *res;
+ struct of_phandle_args args;
void __iomem *base;
int ret, i;
int index;
@@ -168,11 +169,14 @@ static int mtk_cpu_resources_init(struct platform_device *pdev,
if (!data)
return -ENOMEM;
- index = of_perf_domain_get_sharing_cpumask(policy->cpu, "performance-domains",
- "#performance-domain-cells",
- policy->cpus);
- if (index < 0)
- return index;
+ ret = of_perf_domain_get_sharing_cpumask(policy->cpu, "performance-domains",
+ "#performance-domain-cells",
+ policy->cpus, &args);
+ if (ret < 0)
+ return ret;
+
+ index = args.args[0];
+ of_node_put(args.np);
res = platform_get_resource(pdev, IORESOURCE_MEM, index);
if (!res) {
@@ -313,13 +317,16 @@ static int mtk_cpufreq_hw_driver_probe(struct platform_device *pdev)
static int mtk_cpufreq_hw_driver_remove(struct platform_device *pdev)
{
- return cpufreq_unregister_driver(&cpufreq_mtk_hw_driver);
+ cpufreq_unregister_driver(&cpufreq_mtk_hw_driver);
+
+ return 0;
}
static const struct of_device_id mtk_cpufreq_hw_match[] = {
{ .compatible = "mediatek,cpufreq-hw", .data = &cpufreq_mtk_offsets },
{}
};
+MODULE_DEVICE_TABLE(of, mtk_cpufreq_hw_match);
static struct platform_driver mtk_cpufreq_hw_driver = {
.probe = mtk_cpufreq_hw_driver_probe,
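The mediatek hunks adapt the driver to a lookup that now reports status and a phandle-args descriptor separately: the caller reads the domain index from args.args[0] and must drop the node reference afterwards. A user-space analogue of that ownership pattern, assuming nothing beyond what the hunk shows; the struct and function names below are stand-ins, not the kernel's:

#include <stdio.h>

struct fake_node { int refcount; };

struct fake_phandle_args {
	struct fake_node *np;
	unsigned int args[1];
};

static struct fake_node domain_node = { .refcount = 0 };

/* pretend lookup: takes a reference on the node and fills in the args */
static int lookup_perf_domain(struct fake_phandle_args *args)
{
	domain_node.refcount++;
	args->np = &domain_node;
	args->args[0] = 2;		/* e.g. third frequency domain */
	return 0;
}

static void node_put(struct fake_node *np) { np->refcount--; }

int main(void)
{
	struct fake_phandle_args args;
	int index, ret;

	ret = lookup_perf_domain(&args);
	if (ret < 0)
		return 1;

	index = args.args[0];		/* index used to pick the MMIO resource */
	node_put(args.np);		/* caller owns the reference, so drop it */

	printf("index %d, refcount back to %d\n", index, domain_node.refcount);
	return 0;
}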
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index 1b50df06c6bc..81649a1969b6 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -184,7 +184,9 @@ static int omap_cpufreq_probe(struct platform_device *pdev)
static int omap_cpufreq_remove(struct platform_device *pdev)
{
- return cpufreq_unregister_driver(&omap_driver);
+ cpufreq_unregister_driver(&omap_driver);
+
+ return 0;
}
static struct platform_driver omap_cpufreq_platdrv = {
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index 833589bc95e4..2f581d2d617d 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -4,6 +4,7 @@
*/
#include <linux/bitfield.h>
+#include <linux/clk-provider.h>
#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/interconnect.h>
@@ -43,7 +44,6 @@ struct qcom_cpufreq_soc_data {
struct qcom_cpufreq_data {
void __iomem *base;
struct resource *res;
- const struct qcom_cpufreq_soc_data *soc_data;
/*
* Mutex to synchronize between de-init sequence and re-starting LMh
@@ -55,12 +55,18 @@ struct qcom_cpufreq_data {
bool cancel_throttle;
struct delayed_work throttle_work;
struct cpufreq_policy *policy;
+ struct clk_hw cpu_clk;
bool per_core_dcvs;
struct freq_qos_request throttle_freq_req;
};
+static struct {
+ struct qcom_cpufreq_data *data;
+ const struct qcom_cpufreq_soc_data *soc_data;
+} qcom_cpufreq;
+
static unsigned long cpu_hw_rate, xo_rate;
static bool icc_scaling_enabled;
@@ -109,7 +115,7 @@ static int qcom_cpufreq_hw_target_index(struct cpufreq_policy *policy,
unsigned int index)
{
struct qcom_cpufreq_data *data = policy->driver_data;
- const struct qcom_cpufreq_soc_data *soc_data = data->soc_data;
+ const struct qcom_cpufreq_soc_data *soc_data = qcom_cpufreq.soc_data;
unsigned long freq = policy->freq_table[index].frequency;
unsigned int i;
@@ -125,7 +131,20 @@ static int qcom_cpufreq_hw_target_index(struct cpufreq_policy *policy,
return 0;
}
-static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
+static unsigned long qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
+{
+ unsigned int lval;
+
+ if (qcom_cpufreq.soc_data->reg_current_vote)
+ lval = readl_relaxed(data->base + qcom_cpufreq.soc_data->reg_current_vote) & 0x3ff;
+ else
+ lval = readl_relaxed(data->base + qcom_cpufreq.soc_data->reg_domain_state) & 0xff;
+
+ return lval * xo_rate;
+}
+
+/* Get the frequency requested by the cpufreq core for the CPU */
+static unsigned int qcom_cpufreq_get_freq(unsigned int cpu)
{
struct qcom_cpufreq_data *data;
const struct qcom_cpufreq_soc_data *soc_data;
@@ -137,7 +156,7 @@ static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
return 0;
data = policy->driver_data;
- soc_data = data->soc_data;
+ soc_data = qcom_cpufreq.soc_data;
index = readl_relaxed(data->base + soc_data->reg_perf_state);
index = min(index, LUT_MAX_ENTRIES - 1);
@@ -145,11 +164,28 @@ static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
return policy->freq_table[index].frequency;
}
+static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
+{
+ struct qcom_cpufreq_data *data;
+ struct cpufreq_policy *policy;
+
+ policy = cpufreq_cpu_get_raw(cpu);
+ if (!policy)
+ return 0;
+
+ data = policy->driver_data;
+
+ if (data->throttle_irq >= 0)
+ return qcom_lmh_get_throttle_freq(data) / HZ_PER_KHZ;
+
+ return qcom_cpufreq_get_freq(cpu);
+}
+
static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq)
{
struct qcom_cpufreq_data *data = policy->driver_data;
- const struct qcom_cpufreq_soc_data *soc_data = data->soc_data;
+ const struct qcom_cpufreq_soc_data *soc_data = qcom_cpufreq.soc_data;
unsigned int index;
unsigned int i;
@@ -173,7 +209,7 @@ static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev,
unsigned long rate;
int ret;
struct qcom_cpufreq_data *drv_data = policy->driver_data;
- const struct qcom_cpufreq_soc_data *soc_data = drv_data->soc_data;
+ const struct qcom_cpufreq_soc_data *soc_data = qcom_cpufreq.soc_data;
table = kcalloc(LUT_MAX_ENTRIES + 1, sizeof(*table), GFP_KERNEL);
if (!table)
@@ -193,6 +229,7 @@ static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev,
}
} else if (ret != -ENODEV) {
dev_err(cpu_dev, "Invalid opp table in device tree\n");
+ kfree(table);
return ret;
} else {
policy->fast_switch_possible = true;
@@ -286,18 +323,6 @@ static void qcom_get_related_cpus(int index, struct cpumask *m)
}
}
-static unsigned long qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
-{
- unsigned int lval;
-
- if (data->soc_data->reg_current_vote)
- lval = readl_relaxed(data->base + data->soc_data->reg_current_vote) & 0x3ff;
- else
- lval = readl_relaxed(data->base + data->soc_data->reg_domain_state) & 0xff;
-
- return lval * xo_rate;
-}
-
static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
{
struct cpufreq_policy *policy = data->policy;
@@ -341,7 +366,7 @@ static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
* If h/w throttled frequency is higher than what cpufreq has requested
* for, then stop polling and switch back to interrupt mechanism.
*/
- if (throttled_freq >= qcom_cpufreq_hw_get(cpu))
+ if (throttled_freq >= qcom_cpufreq_get_freq(cpu))
enable_irq(data->throttle_irq);
else
mod_delayed_work(system_highpri_wq, &data->throttle_work,
@@ -367,9 +392,9 @@ static irqreturn_t qcom_lmh_dcvs_handle_irq(int irq, void *data)
disable_irq_nosync(c_data->throttle_irq);
schedule_delayed_work(&c_data->throttle_work, 0);
- if (c_data->soc_data->reg_intr_clr)
+ if (qcom_cpufreq.soc_data->reg_intr_clr)
writel_relaxed(GT_IRQ_STATUS,
- c_data->base + c_data->soc_data->reg_intr_clr);
+ c_data->base + qcom_cpufreq.soc_data->reg_intr_clr);
return IRQ_HANDLED;
}
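The LMh paths above derive the throttled frequency from a level value (lval) expressed in units of the XO reference clock, so frequency = lval * xo_rate, with the field width depending on the SoC variant. A small sketch of that derivation; the raw register value and the 19.2 MHz XO rate below are illustrative (19.2 MHz is merely a typical value):

#include <stdbool.h>
#include <stdio.h>

#define HZ_PER_KHZ	1000UL

static unsigned long throttle_freq_hz(unsigned int reg, bool has_current_vote,
				      unsigned long xo_rate)
{
	unsigned int lval;

	if (has_current_vote)
		lval = reg & 0x3ff;	/* older layout: 10-bit current vote */
	else
		lval = reg & 0xff;	/* newer layout: 8-bit domain state */

	return lval * xo_rate;
}

int main(void)
{
	unsigned long xo_rate = 19200000;	/* assumed 19.2 MHz XO */

	/* lval = 75 -> 75 * 19.2 MHz = 1.44 GHz */
	printf("%lu kHz\n", throttle_freq_hz(75, false, xo_rate) / HZ_PER_KHZ);
	return 0;
}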
@@ -503,8 +528,6 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
struct of_phandle_args args;
struct device_node *cpu_np;
struct device *cpu_dev;
- struct resource *res;
- void __iomem *base;
struct qcom_cpufreq_data *data;
int ret, index;
@@ -526,51 +549,18 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
return ret;
index = args.args[0];
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, index);
- if (!res) {
- dev_err(dev, "failed to get mem resource %d\n", index);
- return -ENODEV;
- }
-
- if (!request_mem_region(res->start, resource_size(res), res->name)) {
- dev_err(dev, "failed to request resource %pR\n", res);
- return -EBUSY;
- }
-
- base = ioremap(res->start, resource_size(res));
- if (!base) {
- dev_err(dev, "failed to map resource %pR\n", res);
- ret = -ENOMEM;
- goto release_region;
- }
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data) {
- ret = -ENOMEM;
- goto unmap_base;
- }
-
- data->soc_data = of_device_get_match_data(&pdev->dev);
- data->base = base;
- data->res = res;
+ data = &qcom_cpufreq.data[index];
/* HW should be in enabled state to proceed */
- if (!(readl_relaxed(base + data->soc_data->reg_enable) & 0x1)) {
+ if (!(readl_relaxed(data->base + qcom_cpufreq.soc_data->reg_enable) & 0x1)) {
dev_err(dev, "Domain-%d cpufreq hardware not enabled\n", index);
- ret = -ENODEV;
- goto error;
+ return -ENODEV;
}
- if (readl_relaxed(base + data->soc_data->reg_dcvs_ctrl) & 0x1)
+ if (readl_relaxed(data->base + qcom_cpufreq.soc_data->reg_dcvs_ctrl) & 0x1)
data->per_core_dcvs = true;
qcom_get_related_cpus(index, policy->cpus);
- if (cpumask_empty(policy->cpus)) {
- dev_err(dev, "Domain-%d failed to get related CPUs\n", index);
- ret = -ENOENT;
- goto error;
- }
policy->driver_data = data;
policy->dvfs_possible_from_any_cpu = true;
@@ -578,14 +568,13 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
ret = qcom_cpufreq_hw_read_lut(cpu_dev, policy);
if (ret) {
dev_err(dev, "Domain-%d failed to read LUT\n", index);
- goto error;
+ return ret;
}
ret = dev_pm_opp_get_opp_count(cpu_dev);
if (ret <= 0) {
dev_err(cpu_dev, "Failed to add OPPs\n");
- ret = -ENODEV;
- goto error;
+ return -ENODEV;
}
if (policy_has_boost_freq(policy)) {
@@ -594,18 +583,7 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
dev_warn(cpu_dev, "failed to enable boost: %d\n", ret);
}
- ret = qcom_cpufreq_hw_lmh_init(policy, index);
- if (ret)
- goto error;
-
- return 0;
-error:
- kfree(data);
-unmap_base:
- iounmap(base);
-release_region:
- release_mem_region(res->start, resource_size(res));
- return ret;
+ return qcom_cpufreq_hw_lmh_init(policy, index);
}
static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
@@ -658,20 +636,34 @@ static struct cpufreq_driver cpufreq_qcom_hw_driver = {
.ready = qcom_cpufreq_ready,
};
+static unsigned long qcom_cpufreq_hw_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct qcom_cpufreq_data *data = container_of(hw, struct qcom_cpufreq_data, cpu_clk);
+
+ return qcom_lmh_get_throttle_freq(data);
+}
+
+static const struct clk_ops qcom_cpufreq_hw_clk_ops = {
+ .recalc_rate = qcom_cpufreq_hw_recalc_rate,
+};
+
static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
{
+ struct clk_hw_onecell_data *clk_data;
+ struct device *dev = &pdev->dev;
+ struct device_node *soc_node;
struct device *cpu_dev;
struct clk *clk;
- int ret;
+ int ret, i, num_domains, reg_sz;
- clk = clk_get(&pdev->dev, "xo");
+ clk = clk_get(dev, "xo");
if (IS_ERR(clk))
return PTR_ERR(clk);
xo_rate = clk_get_rate(clk);
clk_put(clk);
- clk = clk_get(&pdev->dev, "alternate");
+ clk = clk_get(dev, "alternate");
if (IS_ERR(clk))
return PTR_ERR(clk);
@@ -689,18 +681,98 @@ static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
if (ret)
return ret;
+ /* Allocate qcom_cpufreq_data based on the available frequency domains in DT */
+ soc_node = of_get_parent(dev->of_node);
+ if (!soc_node)
+ return -EINVAL;
+
+ ret = of_property_read_u32(soc_node, "#address-cells", &reg_sz);
+ if (ret)
+ goto of_exit;
+
+ ret = of_property_read_u32(soc_node, "#size-cells", &i);
+ if (ret)
+ goto of_exit;
+
+ reg_sz += i;
+
+ num_domains = of_property_count_elems_of_size(dev->of_node, "reg", sizeof(u32) * reg_sz);
+ if (num_domains <= 0)
+ return num_domains;
+
+ qcom_cpufreq.data = devm_kzalloc(dev, sizeof(struct qcom_cpufreq_data) * num_domains,
+ GFP_KERNEL);
+ if (!qcom_cpufreq.data)
+ return -ENOMEM;
+
+ qcom_cpufreq.soc_data = of_device_get_match_data(dev);
+ if (!qcom_cpufreq.soc_data)
+ return -ENODEV;
+
+ clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, num_domains), GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+
+ clk_data->num = num_domains;
+
+ for (i = 0; i < num_domains; i++) {
+ struct qcom_cpufreq_data *data = &qcom_cpufreq.data[i];
+ struct clk_init_data clk_init = {};
+ struct resource *res;
+ void __iomem *base;
+
+ base = devm_platform_get_and_ioremap_resource(pdev, i, &res);
+ if (IS_ERR(base)) {
+ dev_err(dev, "Failed to map resource %pR\n", res);
+ return PTR_ERR(base);
+ }
+
+ data->base = base;
+ data->res = res;
+
+ /* Register CPU clock for each frequency domain */
+ clk_init.name = kasprintf(GFP_KERNEL, "qcom_cpufreq%d", i);
+ if (!clk_init.name)
+ return -ENOMEM;
+
+ clk_init.flags = CLK_GET_RATE_NOCACHE;
+ clk_init.ops = &qcom_cpufreq_hw_clk_ops;
+ data->cpu_clk.init = &clk_init;
+
+ ret = devm_clk_hw_register(dev, &data->cpu_clk);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register clock %d: %d\n", i, ret);
+ kfree(clk_init.name);
+ return ret;
+ }
+
+ clk_data->hws[i] = &data->cpu_clk;
+ kfree(clk_init.name);
+ }
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_data);
+ if (ret < 0) {
+ dev_err(dev, "Failed to add clock provider\n");
+ return ret;
+ }
+
ret = cpufreq_register_driver(&cpufreq_qcom_hw_driver);
if (ret)
- dev_err(&pdev->dev, "CPUFreq HW driver failed to register\n");
+ dev_err(dev, "CPUFreq HW driver failed to register\n");
else
- dev_dbg(&pdev->dev, "QCOM CPUFreq HW driver initialized\n");
+ dev_dbg(dev, "QCOM CPUFreq HW driver initialized\n");
+
+of_exit:
+ of_node_put(soc_node);
return ret;
}
static int qcom_cpufreq_hw_driver_remove(struct platform_device *pdev)
{
- return cpufreq_unregister_driver(&cpufreq_qcom_hw_driver);
+ cpufreq_unregister_driver(&cpufreq_qcom_hw_driver);
+
+ return 0;
}
static struct platform_driver qcom_cpufreq_hw_driver = {
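The probe hunk above counts frequency domains straight from the device tree: each "reg" entry is (#address-cells + #size-cells) 32-bit cells long, so the number of domains is the total cell count divided by that entry size. A sketch of just that arithmetic, with example cell counts (the real values come from the parent SoC node):

#include <stdio.h>

int main(void)
{
	unsigned int address_cells = 2, size_cells = 1;	/* from the parent node */
	unsigned int reg_total_cells = 9;		/* length of "reg" in u32 cells */
	unsigned int entry_cells = address_cells + size_cells;
	unsigned int num_domains = reg_total_cells / entry_cells;

	printf("%u cells per entry -> %u frequency domains\n",
	       entry_cells, num_domains);
	return 0;
}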
diff --git a/drivers/cpufreq/s3c2410-cpufreq.c b/drivers/cpufreq/s3c2410-cpufreq.c
deleted file mode 100644
index 5dcfbf0bfb74..000000000000
--- a/drivers/cpufreq/s3c2410-cpufreq.c
+++ /dev/null
@@ -1,155 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2006-2008 Simtec Electronics
- * http://armlinux.simtec.co.uk/
- * Ben Dooks <[email protected]>
- *
- * S3C2410 CPU Frequency scaling
-*/
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/cpufreq.h>
-#include <linux/device.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/soc/samsung/s3c-cpufreq-core.h>
-#include <linux/soc/samsung/s3c-pm.h>
-
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-
-#define S3C2410_CLKDIVN_PDIVN (1<<0)
-#define S3C2410_CLKDIVN_HDIVN (1<<1)
-
-/* Note, 2410A has an extra mode for 1:4:4 ratio, bit 2 of CLKDIV */
-
-static void s3c2410_cpufreq_setdivs(struct s3c_cpufreq_config *cfg)
-{
- u32 clkdiv = 0;
-
- if (cfg->divs.h_divisor == 2)
- clkdiv |= S3C2410_CLKDIVN_HDIVN;
-
- if (cfg->divs.p_divisor != cfg->divs.h_divisor)
- clkdiv |= S3C2410_CLKDIVN_PDIVN;
-
- s3c24xx_write_clkdivn(clkdiv);
-}
-
-static int s3c2410_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg)
-{
- unsigned long hclk, fclk, pclk;
- unsigned int hdiv, pdiv;
- unsigned long hclk_max;
-
- fclk = cfg->freq.fclk;
- hclk_max = cfg->max.hclk;
-
- cfg->freq.armclk = fclk;
-
- s3c_freq_dbg("%s: fclk is %lu, max hclk %lu\n",
- __func__, fclk, hclk_max);
-
- hdiv = (fclk > cfg->max.hclk) ? 2 : 1;
- hclk = fclk / hdiv;
-
- if (hclk > cfg->max.hclk) {
- s3c_freq_dbg("%s: hclk too big\n", __func__);
- return -EINVAL;
- }
-
- pdiv = (hclk > cfg->max.pclk) ? 2 : 1;
- pclk = hclk / pdiv;
-
- if (pclk > cfg->max.pclk) {
- s3c_freq_dbg("%s: pclk too big\n", __func__);
- return -EINVAL;
- }
-
- pdiv *= hdiv;
-
- /* record the result */
- cfg->divs.p_divisor = pdiv;
- cfg->divs.h_divisor = hdiv;
-
- return 0;
-}
-
-static struct s3c_cpufreq_info s3c2410_cpufreq_info = {
- .max = {
- .fclk = 200000000,
- .hclk = 100000000,
- .pclk = 50000000,
- },
-
- /* transition latency is about 5ms worst-case, so
- * set 10ms to be sure */
- .latency = 10000000,
-
- .locktime_m = 150,
- .locktime_u = 150,
- .locktime_bits = 12,
-
- .need_pll = 1,
-
- .name = "s3c2410",
- .calc_iotiming = s3c2410_iotiming_calc,
- .set_iotiming = s3c2410_iotiming_set,
- .get_iotiming = s3c2410_iotiming_get,
-
- .set_fvco = s3c2410_set_fvco,
- .set_refresh = s3c2410_cpufreq_setrefresh,
- .set_divs = s3c2410_cpufreq_setdivs,
- .calc_divs = s3c2410_cpufreq_calcdivs,
-
- .debug_io_show = s3c_cpufreq_debugfs_call(s3c2410_iotiming_debugfs),
-};
-
-static int s3c2410_cpufreq_add(struct device *dev,
- struct subsys_interface *sif)
-{
- return s3c_cpufreq_register(&s3c2410_cpufreq_info);
-}
-
-static struct subsys_interface s3c2410_cpufreq_interface = {
- .name = "s3c2410_cpufreq",
- .subsys = &s3c2410_subsys,
- .add_dev = s3c2410_cpufreq_add,
-};
-
-static int __init s3c2410_cpufreq_init(void)
-{
- return subsys_interface_register(&s3c2410_cpufreq_interface);
-}
-arch_initcall(s3c2410_cpufreq_init);
-
-static int s3c2410a_cpufreq_add(struct device *dev,
- struct subsys_interface *sif)
-{
- /* alter the maximum freq settings for S3C2410A. If a board knows
- * it only has a maximum of 200, then it should register its own
- * limits. */
-
- s3c2410_cpufreq_info.max.fclk = 266000000;
- s3c2410_cpufreq_info.max.hclk = 133000000;
- s3c2410_cpufreq_info.max.pclk = 66500000;
- s3c2410_cpufreq_info.name = "s3c2410a";
-
- return s3c2410_cpufreq_add(dev, sif);
-}
-
-static struct subsys_interface s3c2410a_cpufreq_interface = {
- .name = "s3c2410a_cpufreq",
- .subsys = &s3c2410a_subsys,
- .add_dev = s3c2410a_cpufreq_add,
-};
-
-static int __init s3c2410a_cpufreq_init(void)
-{
- return subsys_interface_register(&s3c2410a_cpufreq_interface);
-}
-arch_initcall(s3c2410a_cpufreq_init);
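For readers of the deleted s3c2410 code, the divisor selection is simple enough to show as a worked sketch: HCLK may be FCLK/1 or /2, PCLK may be HCLK/1 or /2, and the PCLK divisor is finally expressed relative to FCLK. The clock limits below mirror the 200 MHz part in the deleted table:

#include <stdio.h>

int main(void)
{
	unsigned long fclk = 200000000, max_hclk = 100000000, max_pclk = 50000000;
	unsigned long hclk, pclk;
	unsigned int hdiv, pdiv;

	hdiv = (fclk > max_hclk) ? 2 : 1;
	hclk = fclk / hdiv;
	if (hclk > max_hclk)
		return 1;			/* cannot satisfy the HCLK limit */

	pdiv = (hclk > max_pclk) ? 2 : 1;
	pclk = hclk / pdiv;
	if (pclk > max_pclk)
		return 1;			/* cannot satisfy the PCLK limit */

	pdiv *= hdiv;				/* PCLK divisor relative to FCLK */

	printf("h_divisor=%u p_divisor=%u (hclk=%lu pclk=%lu)\n",
	       hdiv, pdiv, hclk, pclk);
	return 0;
}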
diff --git a/drivers/cpufreq/s3c2412-cpufreq.c b/drivers/cpufreq/s3c2412-cpufreq.c
deleted file mode 100644
index 5945945ead7c..000000000000
--- a/drivers/cpufreq/s3c2412-cpufreq.c
+++ /dev/null
@@ -1,240 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright 2008 Simtec Electronics
- * http://armlinux.simtec.co.uk/
- * Ben Dooks <[email protected]>
- *
- * S3C2412 CPU Frequency scalling
-*/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/cpufreq.h>
-#include <linux/device.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/soc/samsung/s3c-cpufreq-core.h>
-#include <linux/soc/samsung/s3c-pm.h>
-
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-
-#define S3C2412_CLKDIVN_PDIVN (1<<2)
-#define S3C2412_CLKDIVN_HDIVN_MASK (3<<0)
-#define S3C2412_CLKDIVN_ARMDIVN (1<<3)
-#define S3C2412_CLKDIVN_DVSEN (1<<4)
-#define S3C2412_CLKDIVN_HALFHCLK (1<<5)
-#define S3C2412_CLKDIVN_USB48DIV (1<<6)
-#define S3C2412_CLKDIVN_UARTDIV_MASK (15<<8)
-#define S3C2412_CLKDIVN_UARTDIV_SHIFT (8)
-#define S3C2412_CLKDIVN_I2SDIV_MASK (15<<12)
-#define S3C2412_CLKDIVN_I2SDIV_SHIFT (12)
-#define S3C2412_CLKDIVN_CAMDIV_MASK (15<<16)
-#define S3C2412_CLKDIVN_CAMDIV_SHIFT (16)
-
-/* our clock resources. */
-static struct clk *xtal;
-static struct clk *fclk;
-static struct clk *hclk;
-static struct clk *armclk;
-
-/* HDIV: 1, 2, 3, 4, 6, 8 */
-
-static int s3c2412_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg)
-{
- unsigned int hdiv, pdiv, armdiv, dvs;
- unsigned long hclk, fclk, armclk, armdiv_clk;
- unsigned long hclk_max;
-
- fclk = cfg->freq.fclk;
- armclk = cfg->freq.armclk;
- hclk_max = cfg->max.hclk;
-
- /* We can't run hclk above armclk as at the best we have to
- * have armclk and hclk in dvs mode. */
-
- if (hclk_max > armclk)
- hclk_max = armclk;
-
- s3c_freq_dbg("%s: fclk=%lu, armclk=%lu, hclk_max=%lu\n",
- __func__, fclk, armclk, hclk_max);
- s3c_freq_dbg("%s: want f=%lu, arm=%lu, h=%lu, p=%lu\n",
- __func__, cfg->freq.fclk, cfg->freq.armclk,
- cfg->freq.hclk, cfg->freq.pclk);
-
- armdiv = fclk / armclk;
-
- if (armdiv < 1)
- armdiv = 1;
- if (armdiv > 2)
- armdiv = 2;
-
- cfg->divs.arm_divisor = armdiv;
- armdiv_clk = fclk / armdiv;
-
- hdiv = armdiv_clk / hclk_max;
- if (hdiv < 1)
- hdiv = 1;
-
- cfg->freq.hclk = hclk = armdiv_clk / hdiv;
-
- /* set dvs depending on whether we reached armclk or not. */
- cfg->divs.dvs = dvs = armclk < armdiv_clk;
-
- /* update the actual armclk we achieved. */
- cfg->freq.armclk = dvs ? hclk : armdiv_clk;
-
- s3c_freq_dbg("%s: armclk %lu, hclk %lu, armdiv %d, hdiv %d, dvs %d\n",
- __func__, armclk, hclk, armdiv, hdiv, cfg->divs.dvs);
-
- if (hdiv > 4)
- goto invalid;
-
- pdiv = (hclk > cfg->max.pclk) ? 2 : 1;
-
- if ((hclk / pdiv) > cfg->max.pclk)
- pdiv++;
-
- cfg->freq.pclk = hclk / pdiv;
-
- s3c_freq_dbg("%s: pdiv %d\n", __func__, pdiv);
-
- if (pdiv > 2)
- goto invalid;
-
- pdiv *= hdiv;
-
- /* store the result, and then return */
-
- cfg->divs.h_divisor = hdiv * armdiv;
- cfg->divs.p_divisor = pdiv * armdiv;
-
- return 0;
-
-invalid:
- return -EINVAL;
-}
-
-static void s3c2412_cpufreq_setdivs(struct s3c_cpufreq_config *cfg)
-{
- unsigned long clkdiv;
- unsigned long olddiv;
-
- olddiv = clkdiv = s3c24xx_read_clkdivn();
-
- /* clear off current clock info */
-
- clkdiv &= ~S3C2412_CLKDIVN_ARMDIVN;
- clkdiv &= ~S3C2412_CLKDIVN_HDIVN_MASK;
- clkdiv &= ~S3C2412_CLKDIVN_PDIVN;
-
- if (cfg->divs.arm_divisor == 2)
- clkdiv |= S3C2412_CLKDIVN_ARMDIVN;
-
- clkdiv |= ((cfg->divs.h_divisor / cfg->divs.arm_divisor) - 1);
-
- if (cfg->divs.p_divisor != cfg->divs.h_divisor)
- clkdiv |= S3C2412_CLKDIVN_PDIVN;
-
- s3c_freq_dbg("%s: div %08lx => %08lx\n", __func__, olddiv, clkdiv);
- s3c24xx_write_clkdivn(clkdiv);
-
- clk_set_parent(armclk, cfg->divs.dvs ? hclk : fclk);
-}
-
-/* set the default cpu frequency information, based on an 200MHz part
- * as we have no other way of detecting the speed rating in software.
- */
-
-static struct s3c_cpufreq_info s3c2412_cpufreq_info = {
- .max = {
- .fclk = 200000000,
- .hclk = 100000000,
- .pclk = 50000000,
- },
-
- .latency = 5000000, /* 5ms */
-
- .locktime_m = 150,
- .locktime_u = 150,
- .locktime_bits = 16,
-
- .name = "s3c2412",
- .set_refresh = s3c2412_cpufreq_setrefresh,
- .set_divs = s3c2412_cpufreq_setdivs,
- .calc_divs = s3c2412_cpufreq_calcdivs,
-
- .calc_iotiming = s3c2412_iotiming_calc,
- .set_iotiming = s3c2412_iotiming_set,
- .get_iotiming = s3c2412_iotiming_get,
-
- .debug_io_show = s3c_cpufreq_debugfs_call(s3c2412_iotiming_debugfs),
-};
-
-static int s3c2412_cpufreq_add(struct device *dev,
- struct subsys_interface *sif)
-{
- unsigned long fclk_rate;
-
- hclk = clk_get(NULL, "hclk");
- if (IS_ERR(hclk)) {
- pr_err("cannot find hclk clock\n");
- return -ENOENT;
- }
-
- fclk = clk_get(NULL, "fclk");
- if (IS_ERR(fclk)) {
- pr_err("cannot find fclk clock\n");
- goto err_fclk;
- }
-
- fclk_rate = clk_get_rate(fclk);
- if (fclk_rate > 200000000) {
- pr_info("fclk %ld MHz, assuming 266MHz capable part\n",
- fclk_rate / 1000000);
- s3c2412_cpufreq_info.max.fclk = 266000000;
- s3c2412_cpufreq_info.max.hclk = 133000000;
- s3c2412_cpufreq_info.max.pclk = 66000000;
- }
-
- armclk = clk_get(NULL, "armclk");
- if (IS_ERR(armclk)) {
- pr_err("cannot find arm clock\n");
- goto err_armclk;
- }
-
- xtal = clk_get(NULL, "xtal");
- if (IS_ERR(xtal)) {
- pr_err("cannot find xtal clock\n");
- goto err_xtal;
- }
-
- return s3c_cpufreq_register(&s3c2412_cpufreq_info);
-
-err_xtal:
- clk_put(armclk);
-err_armclk:
- clk_put(fclk);
-err_fclk:
- clk_put(hclk);
-
- return -ENOENT;
-}
-
-static struct subsys_interface s3c2412_cpufreq_interface = {
- .name = "s3c2412_cpufreq",
- .subsys = &s3c2412_subsys,
- .add_dev = s3c2412_cpufreq_add,
-};
-
-static int s3c2412_cpufreq_init(void)
-{
- return subsys_interface_register(&s3c2412_cpufreq_interface);
-}
-arch_initcall(s3c2412_cpufreq_init);
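The core of the deleted s3c2412 calculation is the choice between the 1/2 ARM divider and "dvs" mode (ARMCLK parented to HCLK) when a slower ARM clock is requested than the divider can provide. A sketch of that decision with example numbers (200 MHz FCLK, 50 MHz ARMCLK requested, 100 MHz HCLK limit):

#include <stdio.h>

int main(void)
{
	unsigned long fclk = 200000000, want_armclk = 50000000;
	unsigned long hclk_max = 100000000;
	unsigned long armdiv_clk, hclk;
	unsigned int armdiv, hdiv, dvs;

	if (hclk_max > want_armclk)		/* HCLK may not outrun ARMCLK */
		hclk_max = want_armclk;

	armdiv = fclk / want_armclk;
	if (armdiv < 1)
		armdiv = 1;
	if (armdiv > 2)
		armdiv = 2;			/* hardware only divides by 1 or 2 */
	armdiv_clk = fclk / armdiv;

	hdiv = armdiv_clk / hclk_max;
	if (hdiv < 1)
		hdiv = 1;
	hclk = armdiv_clk / hdiv;

	dvs = want_armclk < armdiv_clk;		/* slower than the divider gives? */

	printf("armdiv=%u hdiv=%u dvs=%u -> armclk=%lu hclk=%lu\n",
	       armdiv, hdiv, dvs, dvs ? hclk : armdiv_clk, hclk);
	return 0;
}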
diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c
deleted file mode 100644
index 5c221bc90210..000000000000
--- a/drivers/cpufreq/s3c2416-cpufreq.c
+++ /dev/null
@@ -1,492 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * S3C2416/2450 CPUfreq Support
- *
- * Copyright 2011 Heiko Stuebner <[email protected]>
- *
- * based on s3c64xx_cpufreq.c
- *
- * Copyright 2009 Wolfson Microelectronics plc
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/cpufreq.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/regulator/consumer.h>
-#include <linux/reboot.h>
-#include <linux/module.h>
-
-static DEFINE_MUTEX(cpufreq_lock);
-
-struct s3c2416_data {
- struct clk *armdiv;
- struct clk *armclk;
- struct clk *hclk;
-
- unsigned long regulator_latency;
-#ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
- struct regulator *vddarm;
-#endif
-
- struct cpufreq_frequency_table *freq_table;
-
- bool is_dvs;
- bool disable_dvs;
-};
-
-static struct s3c2416_data s3c2416_cpufreq;
-
-struct s3c2416_dvfs {
- unsigned int vddarm_min;
- unsigned int vddarm_max;
-};
-
-/* pseudo-frequency for dvs mode */
-#define FREQ_DVS 132333
-
-/* frequency to sleep and reboot in
- * it's essential to leave dvs, as some boards do not reconfigure the
- * regulator on reboot
- */
-#define FREQ_SLEEP 133333
-
-/* Sources for the ARMCLK */
-#define SOURCE_HCLK 0
-#define SOURCE_ARMDIV 1
-
-#ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
-/* S3C2416 only supports changing the voltage in the dvs-mode.
- * Voltages down to 1.0V seem to work, so we take what the regulator
- * can get us.
- */
-static struct s3c2416_dvfs s3c2416_dvfs_table[] = {
- [SOURCE_HCLK] = { 950000, 1250000 },
- [SOURCE_ARMDIV] = { 1250000, 1350000 },
-};
-#endif
-
-static struct cpufreq_frequency_table s3c2416_freq_table[] = {
- { 0, SOURCE_HCLK, FREQ_DVS },
- { 0, SOURCE_ARMDIV, 133333 },
- { 0, SOURCE_ARMDIV, 266666 },
- { 0, SOURCE_ARMDIV, 400000 },
- { 0, 0, CPUFREQ_TABLE_END },
-};
-
-static struct cpufreq_frequency_table s3c2450_freq_table[] = {
- { 0, SOURCE_HCLK, FREQ_DVS },
- { 0, SOURCE_ARMDIV, 133500 },
- { 0, SOURCE_ARMDIV, 267000 },
- { 0, SOURCE_ARMDIV, 534000 },
- { 0, 0, CPUFREQ_TABLE_END },
-};
-
-static unsigned int s3c2416_cpufreq_get_speed(unsigned int cpu)
-{
- struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
-
- if (cpu != 0)
- return 0;
-
- /* return our pseudo-frequency when in dvs mode */
- if (s3c_freq->is_dvs)
- return FREQ_DVS;
-
- return clk_get_rate(s3c_freq->armclk) / 1000;
-}
-
-static int s3c2416_cpufreq_set_armdiv(struct s3c2416_data *s3c_freq,
- unsigned int freq)
-{
- int ret;
-
- if (clk_get_rate(s3c_freq->armdiv) / 1000 != freq) {
- ret = clk_set_rate(s3c_freq->armdiv, freq * 1000);
- if (ret < 0) {
- pr_err("cpufreq: Failed to set armdiv rate %dkHz: %d\n",
- freq, ret);
- return ret;
- }
- }
-
- return 0;
-}
-
-static int s3c2416_cpufreq_enter_dvs(struct s3c2416_data *s3c_freq, int idx)
-{
-#ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
- struct s3c2416_dvfs *dvfs;
-#endif
- int ret;
-
- if (s3c_freq->is_dvs) {
- pr_debug("cpufreq: already in dvs mode, nothing to do\n");
- return 0;
- }
-
- pr_debug("cpufreq: switching armclk to hclk (%lukHz)\n",
- clk_get_rate(s3c_freq->hclk) / 1000);
- ret = clk_set_parent(s3c_freq->armclk, s3c_freq->hclk);
- if (ret < 0) {
- pr_err("cpufreq: Failed to switch armclk to hclk: %d\n", ret);
- return ret;
- }
-
-#ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
- /* changing the core voltage is only allowed when in dvs mode */
- if (s3c_freq->vddarm) {
- dvfs = &s3c2416_dvfs_table[idx];
-
- pr_debug("cpufreq: setting regulator to %d-%d\n",
- dvfs->vddarm_min, dvfs->vddarm_max);
- ret = regulator_set_voltage(s3c_freq->vddarm,
- dvfs->vddarm_min,
- dvfs->vddarm_max);
-
- /* when lowering the voltage failed, there is nothing to do */
- if (ret != 0)
- pr_err("cpufreq: Failed to set VDDARM: %d\n", ret);
- }
-#endif
-
- s3c_freq->is_dvs = 1;
-
- return 0;
-}
-
-static int s3c2416_cpufreq_leave_dvs(struct s3c2416_data *s3c_freq, int idx)
-{
-#ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
- struct s3c2416_dvfs *dvfs;
-#endif
- int ret;
-
- if (!s3c_freq->is_dvs) {
- pr_debug("cpufreq: not in dvs mode, so can't leave\n");
- return 0;
- }
-
-#ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
- if (s3c_freq->vddarm) {
- dvfs = &s3c2416_dvfs_table[idx];
-
- pr_debug("cpufreq: setting regulator to %d-%d\n",
- dvfs->vddarm_min, dvfs->vddarm_max);
- ret = regulator_set_voltage(s3c_freq->vddarm,
- dvfs->vddarm_min,
- dvfs->vddarm_max);
- if (ret != 0) {
- pr_err("cpufreq: Failed to set VDDARM: %d\n", ret);
- return ret;
- }
- }
-#endif
-
- /* force armdiv to hclk frequency for transition from dvs*/
- if (clk_get_rate(s3c_freq->armdiv) > clk_get_rate(s3c_freq->hclk)) {
- pr_debug("cpufreq: force armdiv to hclk frequency (%lukHz)\n",
- clk_get_rate(s3c_freq->hclk) / 1000);
- ret = s3c2416_cpufreq_set_armdiv(s3c_freq,
- clk_get_rate(s3c_freq->hclk) / 1000);
- if (ret < 0) {
- pr_err("cpufreq: Failed to set the armdiv to %lukHz: %d\n",
- clk_get_rate(s3c_freq->hclk) / 1000, ret);
- return ret;
- }
- }
-
- pr_debug("cpufreq: switching armclk parent to armdiv (%lukHz)\n",
- clk_get_rate(s3c_freq->armdiv) / 1000);
-
- ret = clk_set_parent(s3c_freq->armclk, s3c_freq->armdiv);
- if (ret < 0) {
- pr_err("cpufreq: Failed to switch armclk clock parent to armdiv: %d\n",
- ret);
- return ret;
- }
-
- s3c_freq->is_dvs = 0;
-
- return 0;
-}
-
-static int s3c2416_cpufreq_set_target(struct cpufreq_policy *policy,
- unsigned int index)
-{
- struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
- unsigned int new_freq;
- int idx, ret, to_dvs = 0;
-
- mutex_lock(&cpufreq_lock);
-
- idx = s3c_freq->freq_table[index].driver_data;
-
- if (idx == SOURCE_HCLK)
- to_dvs = 1;
-
- /* switching to dvs when it's not allowed */
- if (to_dvs && s3c_freq->disable_dvs) {
- pr_debug("cpufreq: entering dvs mode not allowed\n");
- ret = -EINVAL;
- goto out;
- }
-
- /* When leavin dvs mode, always switch the armdiv to the hclk rate
- * The S3C2416 has stability issues when switching directly to
- * higher frequencies.
- */
- new_freq = (s3c_freq->is_dvs && !to_dvs)
- ? clk_get_rate(s3c_freq->hclk) / 1000
- : s3c_freq->freq_table[index].frequency;
-
- if (to_dvs) {
- pr_debug("cpufreq: enter dvs\n");
- ret = s3c2416_cpufreq_enter_dvs(s3c_freq, idx);
- } else if (s3c_freq->is_dvs) {
- pr_debug("cpufreq: leave dvs\n");
- ret = s3c2416_cpufreq_leave_dvs(s3c_freq, idx);
- } else {
- pr_debug("cpufreq: change armdiv to %dkHz\n", new_freq);
- ret = s3c2416_cpufreq_set_armdiv(s3c_freq, new_freq);
- }
-
-out:
- mutex_unlock(&cpufreq_lock);
-
- return ret;
-}
-
-#ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
-static void s3c2416_cpufreq_cfg_regulator(struct s3c2416_data *s3c_freq)
-{
- int count, v, i, found;
- struct cpufreq_frequency_table *pos;
- struct s3c2416_dvfs *dvfs;
-
- count = regulator_count_voltages(s3c_freq->vddarm);
- if (count < 0) {
- pr_err("cpufreq: Unable to check supported voltages\n");
- return;
- }
-
- if (!count)
- goto out;
-
- cpufreq_for_each_valid_entry(pos, s3c_freq->freq_table) {
- dvfs = &s3c2416_dvfs_table[pos->driver_data];
- found = 0;
-
- /* Check only the min-voltage, more is always ok on S3C2416 */
- for (i = 0; i < count; i++) {
- v = regulator_list_voltage(s3c_freq->vddarm, i);
- if (v >= dvfs->vddarm_min)
- found = 1;
- }
-
- if (!found) {
- pr_debug("cpufreq: %dkHz unsupported by regulator\n",
- pos->frequency);
- pos->frequency = CPUFREQ_ENTRY_INVALID;
- }
- }
-
-out:
- /* Guessed */
- s3c_freq->regulator_latency = 1 * 1000 * 1000;
-}
-#endif
-
-static int s3c2416_cpufreq_reboot_notifier_evt(struct notifier_block *this,
- unsigned long event, void *ptr)
-{
- struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
- int ret;
- struct cpufreq_policy *policy;
-
- mutex_lock(&cpufreq_lock);
-
- /* disable further changes */
- s3c_freq->disable_dvs = 1;
-
- mutex_unlock(&cpufreq_lock);
-
- /* some boards don't reconfigure the regulator on reboot, which
- * could lead to undervolting the cpu when the clock is reset.
- * Therefore we always leave the DVS mode on reboot.
- */
- if (s3c_freq->is_dvs) {
- pr_debug("cpufreq: leave dvs on reboot\n");
-
- policy = cpufreq_cpu_get(0);
- if (!policy) {
- pr_debug("cpufreq: get no policy for cpu0\n");
- return NOTIFY_BAD;
- }
-
- ret = cpufreq_driver_target(policy, FREQ_SLEEP, 0);
- cpufreq_cpu_put(policy);
-
- if (ret < 0)
- return NOTIFY_BAD;
- }
-
- return NOTIFY_DONE;
-}
-
-static struct notifier_block s3c2416_cpufreq_reboot_notifier = {
- .notifier_call = s3c2416_cpufreq_reboot_notifier_evt,
-};
-
-static int s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy)
-{
- struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
- struct cpufreq_frequency_table *pos;
- struct clk *msysclk;
- unsigned long rate;
- int ret;
-
- if (policy->cpu != 0)
- return -EINVAL;
-
- msysclk = clk_get(NULL, "msysclk");
- if (IS_ERR(msysclk)) {
- ret = PTR_ERR(msysclk);
- pr_err("cpufreq: Unable to obtain msysclk: %d\n", ret);
- return ret;
- }
-
- /*
- * S3C2416 and S3C2450 share the same processor-ID and also provide no
- * other means to distinguish them other than through the rate of
- * msysclk. On S3C2416 msysclk runs at 800MHz and on S3C2450 at 533MHz.
- */
- rate = clk_get_rate(msysclk);
- if (rate == 800 * 1000 * 1000) {
- pr_info("cpufreq: msysclk running at %lukHz, using S3C2416 frequency table\n",
- rate / 1000);
- s3c_freq->freq_table = s3c2416_freq_table;
- policy->cpuinfo.max_freq = 400000;
- } else if (rate / 1000 == 534000) {
- pr_info("cpufreq: msysclk running at %lukHz, using S3C2450 frequency table\n",
- rate / 1000);
- s3c_freq->freq_table = s3c2450_freq_table;
- policy->cpuinfo.max_freq = 534000;
- }
-
- /* not needed anymore */
- clk_put(msysclk);
-
- if (s3c_freq->freq_table == NULL) {
- pr_err("cpufreq: No frequency information for this CPU, msysclk at %lukHz\n",
- rate / 1000);
- return -ENODEV;
- }
-
- s3c_freq->is_dvs = 0;
-
- s3c_freq->armdiv = clk_get(NULL, "armdiv");
- if (IS_ERR(s3c_freq->armdiv)) {
- ret = PTR_ERR(s3c_freq->armdiv);
- pr_err("cpufreq: Unable to obtain ARMDIV: %d\n", ret);
- return ret;
- }
-
- s3c_freq->hclk = clk_get(NULL, "hclk");
- if (IS_ERR(s3c_freq->hclk)) {
- ret = PTR_ERR(s3c_freq->hclk);
- pr_err("cpufreq: Unable to obtain HCLK: %d\n", ret);
- goto err_hclk;
- }
-
- /* chech hclk rate, we only support the common 133MHz for now
- * hclk could also run at 66MHz, but this not often used
- */
- rate = clk_get_rate(s3c_freq->hclk);
- if (rate < 133 * 1000 * 1000) {
- pr_err("cpufreq: HCLK not at 133MHz\n");
- ret = -EINVAL;
- goto err_armclk;
- }
-
- s3c_freq->armclk = clk_get(NULL, "armclk");
- if (IS_ERR(s3c_freq->armclk)) {
- ret = PTR_ERR(s3c_freq->armclk);
- pr_err("cpufreq: Unable to obtain ARMCLK: %d\n", ret);
- goto err_armclk;
- }
-
-#ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
- s3c_freq->vddarm = regulator_get(NULL, "vddarm");
- if (IS_ERR(s3c_freq->vddarm)) {
- ret = PTR_ERR(s3c_freq->vddarm);
- pr_err("cpufreq: Failed to obtain VDDARM: %d\n", ret);
- goto err_vddarm;
- }
-
- s3c2416_cpufreq_cfg_regulator(s3c_freq);
-#else
- s3c_freq->regulator_latency = 0;
-#endif
-
- cpufreq_for_each_entry(pos, s3c_freq->freq_table) {
- /* special handling for dvs mode */
- if (pos->driver_data == 0) {
- if (!s3c_freq->hclk) {
- pr_debug("cpufreq: %dkHz unsupported as it would need unavailable dvs mode\n",
- pos->frequency);
- pos->frequency = CPUFREQ_ENTRY_INVALID;
- } else {
- continue;
- }
- }
-
- /* Check for frequencies we can generate */
- rate = clk_round_rate(s3c_freq->armdiv,
- pos->frequency * 1000);
- rate /= 1000;
- if (rate != pos->frequency) {
- pr_debug("cpufreq: %dkHz unsupported by clock (clk_round_rate return %lu)\n",
- pos->frequency, rate);
- pos->frequency = CPUFREQ_ENTRY_INVALID;
- }
- }
-
- /* Datasheet says PLL stabalisation time must be at least 300us,
- * so but add some fudge. (reference in LOCKCON0 register description)
- */
- cpufreq_generic_init(policy, s3c_freq->freq_table,
- (500 * 1000) + s3c_freq->regulator_latency);
- register_reboot_notifier(&s3c2416_cpufreq_reboot_notifier);
-
- return 0;
-
-#ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
-err_vddarm:
- clk_put(s3c_freq->armclk);
-#endif
-err_armclk:
- clk_put(s3c_freq->hclk);
-err_hclk:
- clk_put(s3c_freq->armdiv);
-
- return ret;
-}
-
-static struct cpufreq_driver s3c2416_cpufreq_driver = {
- .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
- .verify = cpufreq_generic_frequency_table_verify,
- .target_index = s3c2416_cpufreq_set_target,
- .get = s3c2416_cpufreq_get_speed,
- .init = s3c2416_cpufreq_driver_init,
- .name = "s3c2416",
- .attr = cpufreq_generic_attr,
-};
-
-static int __init s3c2416_cpufreq_init(void)
-{
- return cpufreq_register_driver(&s3c2416_cpufreq_driver);
-}
-module_init(s3c2416_cpufreq_init);
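One detail of the deleted s3c2416 driver worth isolating is the regulator-compatibility filter: a table entry stays valid only if the regulator can supply at least the entry's minimum VDDARM. A sketch of that loop; the voltages and table values below are example numbers in microvolts and kHz, not the real DVFS table:

#include <stdio.h>

#define ENTRY_INVALID 0

struct entry { unsigned int khz, vddarm_min_uv; };

int main(void)
{
	/* pretend list of selectable regulator voltages, in uV */
	const unsigned int reg_voltages[] = { 1000000, 1100000, 1200000 };
	struct entry table[] = {
		{ 133333, 950000 },
		{ 400000, 1250000 },	/* regulator tops out below this */
	};
	unsigned int i, j, found;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		found = 0;
		for (j = 0; j < sizeof(reg_voltages) / sizeof(reg_voltages[0]); j++)
			if (reg_voltages[j] >= table[i].vddarm_min_uv)
				found = 1;
		if (!found)
			table[i].khz = ENTRY_INVALID;	/* drop unsupportable rate */
		printf("entry %u: %u kHz\n", i, table[i].khz);
	}
	return 0;
}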
diff --git a/drivers/cpufreq/s3c2440-cpufreq.c b/drivers/cpufreq/s3c2440-cpufreq.c
deleted file mode 100644
index 2011fb9c03a4..000000000000
--- a/drivers/cpufreq/s3c2440-cpufreq.c
+++ /dev/null
@@ -1,321 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2006-2009 Simtec Electronics
- * http://armlinux.simtec.co.uk/
- * Ben Dooks <[email protected]>
- * Vincent Sanders <[email protected]>
- *
- * S3C2440/S3C2442 CPU Frequency scaling
-*/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/cpufreq.h>
-#include <linux/device.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/soc/samsung/s3c-cpufreq-core.h>
-#include <linux/soc/samsung/s3c-pm.h>
-
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-
-#define S3C2440_CLKDIVN_PDIVN (1<<0)
-#define S3C2440_CLKDIVN_HDIVN_MASK (3<<1)
-#define S3C2440_CLKDIVN_HDIVN_1 (0<<1)
-#define S3C2440_CLKDIVN_HDIVN_2 (1<<1)
-#define S3C2440_CLKDIVN_HDIVN_4_8 (2<<1)
-#define S3C2440_CLKDIVN_HDIVN_3_6 (3<<1)
-#define S3C2440_CLKDIVN_UCLK (1<<3)
-
-#define S3C2440_CAMDIVN_CAMCLK_MASK (0xf<<0)
-#define S3C2440_CAMDIVN_CAMCLK_SEL (1<<4)
-#define S3C2440_CAMDIVN_HCLK3_HALF (1<<8)
-#define S3C2440_CAMDIVN_HCLK4_HALF (1<<9)
-#define S3C2440_CAMDIVN_DVSEN (1<<12)
-
-#define S3C2442_CAMDIVN_CAMCLK_DIV3 (1<<5)
-
-static struct clk *xtal;
-static struct clk *fclk;
-static struct clk *hclk;
-static struct clk *armclk;
-
-/* HDIV: 1, 2, 3, 4, 6, 8 */
-
-static inline int within_khz(unsigned long a, unsigned long b)
-{
- long diff = a - b;
-
- return (diff >= -1000 && diff <= 1000);
-}
-
-/**
- * s3c2440_cpufreq_calcdivs - calculate divider settings
- * @cfg: The cpu frequency settings.
- *
- * Calcualte the divider values for the given frequency settings
- * specified in @cfg. The values are stored in @cfg for later use
- * by the relevant set routine if the request settings can be reached.
- */
-static int s3c2440_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg)
-{
- unsigned int hdiv, pdiv;
- unsigned long hclk, fclk, armclk;
- unsigned long hclk_max;
-
- fclk = cfg->freq.fclk;
- armclk = cfg->freq.armclk;
- hclk_max = cfg->max.hclk;
-
- s3c_freq_dbg("%s: fclk is %lu, armclk %lu, max hclk %lu\n",
- __func__, fclk, armclk, hclk_max);
-
- if (armclk > fclk) {
- pr_warn("%s: armclk > fclk\n", __func__);
- armclk = fclk;
- }
-
- /* if we are in DVS, we need HCLK to be <= ARMCLK */
- if (armclk < fclk && armclk < hclk_max)
- hclk_max = armclk;
-
- for (hdiv = 1; hdiv < 9; hdiv++) {
- if (hdiv == 5 || hdiv == 7)
- hdiv++;
-
- hclk = (fclk / hdiv);
- if (hclk <= hclk_max || within_khz(hclk, hclk_max))
- break;
- }
-
- s3c_freq_dbg("%s: hclk %lu, div %d\n", __func__, hclk, hdiv);
-
- if (hdiv > 8)
- goto invalid;
-
- pdiv = (hclk > cfg->max.pclk) ? 2 : 1;
-
- if ((hclk / pdiv) > cfg->max.pclk)
- pdiv++;
-
- s3c_freq_dbg("%s: pdiv %d\n", __func__, pdiv);
-
- if (pdiv > 2)
- goto invalid;
-
- pdiv *= hdiv;
-
- /* calculate a valid armclk */
-
- if (armclk < hclk)
- armclk = hclk;
-
- /* if we're running armclk lower than fclk, this really means
- * that the system should go into dvs mode, which means that
- * armclk is connected to hclk. */
- if (armclk < fclk) {
- cfg->divs.dvs = 1;
- armclk = hclk;
- } else
- cfg->divs.dvs = 0;
-
- cfg->freq.armclk = armclk;
-
- /* store the result, and then return */
-
- cfg->divs.h_divisor = hdiv;
- cfg->divs.p_divisor = pdiv;
-
- return 0;
-
- invalid:
- return -EINVAL;
-}
-
-#define CAMDIVN_HCLK_HALF (S3C2440_CAMDIVN_HCLK3_HALF | \
- S3C2440_CAMDIVN_HCLK4_HALF)
-
-/**
- * s3c2440_cpufreq_setdivs - set the cpu frequency divider settings
- * @cfg: The cpu frequency settings.
- *
- * Set the divisors from the settings in @cfg, which where generated
- * during the calculation phase by s3c2440_cpufreq_calcdivs().
- */
-static void s3c2440_cpufreq_setdivs(struct s3c_cpufreq_config *cfg)
-{
- unsigned long clkdiv, camdiv;
-
- s3c_freq_dbg("%s: divisors: h=%d, p=%d\n", __func__,
- cfg->divs.h_divisor, cfg->divs.p_divisor);
-
- clkdiv = s3c24xx_read_clkdivn();
- camdiv = s3c2440_read_camdivn();
-
- clkdiv &= ~(S3C2440_CLKDIVN_HDIVN_MASK | S3C2440_CLKDIVN_PDIVN);
- camdiv &= ~CAMDIVN_HCLK_HALF;
-
- switch (cfg->divs.h_divisor) {
- case 1:
- clkdiv |= S3C2440_CLKDIVN_HDIVN_1;
- break;
-
- case 2:
- clkdiv |= S3C2440_CLKDIVN_HDIVN_2;
- break;
-
- case 6:
- camdiv |= S3C2440_CAMDIVN_HCLK3_HALF;
- fallthrough;
- case 3:
- clkdiv |= S3C2440_CLKDIVN_HDIVN_3_6;
- break;
-
- case 8:
- camdiv |= S3C2440_CAMDIVN_HCLK4_HALF;
- fallthrough;
- case 4:
- clkdiv |= S3C2440_CLKDIVN_HDIVN_4_8;
- break;
-
- default:
- BUG(); /* we don't expect to get here. */
- }
-
- if (cfg->divs.p_divisor != cfg->divs.h_divisor)
- clkdiv |= S3C2440_CLKDIVN_PDIVN;
-
- /* todo - set pclk. */
-
- /* Write the divisors first with hclk intentionally halved so that
- * when we write clkdiv we will under-frequency instead of over. We
- * then make a short delay and remove the hclk halving if necessary.
- */
-
- s3c2440_write_camdivn(camdiv | CAMDIVN_HCLK_HALF);
- s3c24xx_write_clkdivn(clkdiv);
-
- ndelay(20);
- s3c2440_write_camdivn(camdiv);
-
- clk_set_parent(armclk, cfg->divs.dvs ? hclk : fclk);
-}
-
-static int run_freq_for(unsigned long max_hclk, unsigned long fclk,
- int *divs,
- struct cpufreq_frequency_table *table,
- size_t table_size)
-{
- unsigned long freq;
- int index = 0;
- int div;
-
- for (div = *divs; div > 0; div = *divs++) {
- freq = fclk / div;
-
- if (freq > max_hclk && div != 1)
- continue;
-
- freq /= 1000; /* table is in kHz */
- index = s3c_cpufreq_addfreq(table, index, table_size, freq);
- if (index < 0)
- break;
- }
-
- return index;
-}
-
-static int hclk_divs[] = { 1, 2, 3, 4, 6, 8, -1 };
-
-static int s3c2440_cpufreq_calctable(struct s3c_cpufreq_config *cfg,
- struct cpufreq_frequency_table *table,
- size_t table_size)
-{
- int ret;
-
- WARN_ON(cfg->info == NULL);
- WARN_ON(cfg->board == NULL);
-
- ret = run_freq_for(cfg->info->max.hclk,
- cfg->info->max.fclk,
- hclk_divs,
- table, table_size);
-
- s3c_freq_dbg("%s: returning %d\n", __func__, ret);
-
- return ret;
-}
-
-static struct s3c_cpufreq_info s3c2440_cpufreq_info = {
- .max = {
- .fclk = 400000000,
- .hclk = 133333333,
- .pclk = 66666666,
- },
-
- .locktime_m = 300,
- .locktime_u = 300,
- .locktime_bits = 16,
-
- .name = "s3c244x",
- .calc_iotiming = s3c2410_iotiming_calc,
- .set_iotiming = s3c2410_iotiming_set,
- .get_iotiming = s3c2410_iotiming_get,
- .set_fvco = s3c2410_set_fvco,
-
- .set_refresh = s3c2410_cpufreq_setrefresh,
- .set_divs = s3c2440_cpufreq_setdivs,
- .calc_divs = s3c2440_cpufreq_calcdivs,
- .calc_freqtable = s3c2440_cpufreq_calctable,
-
- .debug_io_show = s3c_cpufreq_debugfs_call(s3c2410_iotiming_debugfs),
-};
-
-static int s3c2440_cpufreq_add(struct device *dev,
- struct subsys_interface *sif)
-{
- xtal = s3c_cpufreq_clk_get(NULL, "xtal");
- hclk = s3c_cpufreq_clk_get(NULL, "hclk");
- fclk = s3c_cpufreq_clk_get(NULL, "fclk");
- armclk = s3c_cpufreq_clk_get(NULL, "armclk");
-
- if (IS_ERR(xtal) || IS_ERR(hclk) || IS_ERR(fclk) || IS_ERR(armclk)) {
- pr_err("%s: failed to get clocks\n", __func__);
- return -ENOENT;
- }
-
- return s3c_cpufreq_register(&s3c2440_cpufreq_info);
-}
-
-static struct subsys_interface s3c2440_cpufreq_interface = {
- .name = "s3c2440_cpufreq",
- .subsys = &s3c2440_subsys,
- .add_dev = s3c2440_cpufreq_add,
-};
-
-static int s3c2440_cpufreq_init(void)
-{
- return subsys_interface_register(&s3c2440_cpufreq_interface);
-}
-
-/* arch_initcall adds the clocks we need, so use subsys_initcall. */
-subsys_initcall(s3c2440_cpufreq_init);
-
-static struct subsys_interface s3c2442_cpufreq_interface = {
- .name = "s3c2442_cpufreq",
- .subsys = &s3c2442_subsys,
- .add_dev = s3c2440_cpufreq_add,
-};
-
-static int s3c2442_cpufreq_init(void)
-{
- return subsys_interface_register(&s3c2442_cpufreq_interface);
-}
-subsys_initcall(s3c2442_cpufreq_init);
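The deleted s3c2440 code searched the legal HCLK divisors (1, 2, 3, 4, 6, 8 — the hardware cannot do /5 or /7) and took the first one bringing HCLK at or below the limit, within 1 kHz. A runnable sketch with the 400/133 MHz limits from the deleted info table:

#include <stdio.h>
#include <stdlib.h>

static int within_khz(long a, long b)
{
	return labs(a - b) <= 1000;
}

int main(void)
{
	unsigned long fclk = 400000000, max_hclk = 133333333, hclk = 0;
	unsigned int hdiv;

	for (hdiv = 1; hdiv < 9; hdiv++) {
		if (hdiv == 5 || hdiv == 7)
			hdiv++;				/* skip unsupported divisors */
		hclk = fclk / hdiv;
		if (hclk <= max_hclk || within_khz(hclk, max_hclk))
			break;
	}

	if (hdiv > 8)
		return 1;				/* no usable divider */

	printf("hdiv=%u -> hclk=%lu\n", hdiv, hclk);	/* prints hdiv=3 */
	return 0;
}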
diff --git a/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c b/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c
deleted file mode 100644
index 93971dfe7c75..000000000000
--- a/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c
+++ /dev/null
@@ -1,163 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2009 Simtec Electronics
- * http://armlinux.simtec.co.uk/
- * Ben Dooks <[email protected]>
- *
- * S3C24XX CPU Frequency scaling - debugfs status support
-*/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/init.h>
-#include <linux/export.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/cpufreq.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/err.h>
-
-#include <linux/soc/samsung/s3c-cpufreq-core.h>
-
-static struct dentry *dbgfs_root;
-static struct dentry *dbgfs_file_io;
-static struct dentry *dbgfs_file_info;
-static struct dentry *dbgfs_file_board;
-
-#define print_ns(x) ((x) / 10), ((x) % 10)
-
-static void show_max(struct seq_file *seq, struct s3c_freq *f)
-{
- seq_printf(seq, "MAX: F=%lu, H=%lu, P=%lu, A=%lu\n",
- f->fclk, f->hclk, f->pclk, f->armclk);
-}
-
-static int board_show(struct seq_file *seq, void *p)
-{
- struct s3c_cpufreq_config *cfg;
- struct s3c_cpufreq_board *brd;
-
- cfg = s3c_cpufreq_getconfig();
- if (!cfg) {
- seq_printf(seq, "no configuration registered\n");
- return 0;
- }
-
- brd = cfg->board;
- if (!brd) {
- seq_printf(seq, "no board definition set?\n");
- return 0;
- }
-
- seq_printf(seq, "SDRAM refresh %u ns\n", brd->refresh);
- seq_printf(seq, "auto_io=%u\n", brd->auto_io);
- seq_printf(seq, "need_io=%u\n", brd->need_io);
-
- show_max(seq, &brd->max);
-
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(board);
-
-static int info_show(struct seq_file *seq, void *p)
-{
- struct s3c_cpufreq_config *cfg;
-
- cfg = s3c_cpufreq_getconfig();
- if (!cfg) {
- seq_printf(seq, "no configuration registered\n");
- return 0;
- }
-
- seq_printf(seq, " FCLK %ld Hz\n", cfg->freq.fclk);
- seq_printf(seq, " HCLK %ld Hz (%lu.%lu ns)\n",
- cfg->freq.hclk, print_ns(cfg->freq.hclk_tns));
- seq_printf(seq, " PCLK %ld Hz\n", cfg->freq.hclk);
- seq_printf(seq, "ARMCLK %ld Hz\n", cfg->freq.armclk);
- seq_printf(seq, "\n");
-
- show_max(seq, &cfg->max);
-
- seq_printf(seq, "Divisors: P=%d, H=%d, A=%d, dvs=%s\n",
- cfg->divs.h_divisor, cfg->divs.p_divisor,
- cfg->divs.arm_divisor, cfg->divs.dvs ? "on" : "off");
- seq_printf(seq, "\n");
-
- seq_printf(seq, "lock_pll=%u\n", cfg->lock_pll);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(info);
-
-static int io_show(struct seq_file *seq, void *p)
-{
- void (*show_bank)(struct seq_file *, struct s3c_cpufreq_config *, union s3c_iobank *);
- struct s3c_cpufreq_config *cfg;
- struct s3c_iotimings *iot;
- union s3c_iobank *iob;
- int bank;
-
- cfg = s3c_cpufreq_getconfig();
- if (!cfg) {
- seq_printf(seq, "no configuration registered\n");
- return 0;
- }
-
- show_bank = cfg->info->debug_io_show;
- if (!show_bank) {
- seq_printf(seq, "no code to show bank timing\n");
- return 0;
- }
-
- iot = s3c_cpufreq_getiotimings();
- if (!iot) {
- seq_printf(seq, "no io timings registered\n");
- return 0;
- }
-
- seq_printf(seq, "hclk period is %lu.%lu ns\n", print_ns(cfg->freq.hclk_tns));
-
- for (bank = 0; bank < MAX_BANKS; bank++) {
- iob = &iot->bank[bank];
-
- seq_printf(seq, "bank %d: ", bank);
-
- if (!iob->io_2410) {
- seq_printf(seq, "nothing set\n");
- continue;
- }
-
- show_bank(seq, cfg, iob);
- }
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(io);
-
-static int __init s3c_freq_debugfs_init(void)
-{
- dbgfs_root = debugfs_create_dir("s3c-cpufreq", NULL);
- if (IS_ERR(dbgfs_root)) {
- pr_err("%s: error creating debugfs root\n", __func__);
- return PTR_ERR(dbgfs_root);
- }
-
- dbgfs_file_io = debugfs_create_file("io-timing", S_IRUGO, dbgfs_root,
- NULL, &io_fops);
-
- dbgfs_file_info = debugfs_create_file("info", S_IRUGO, dbgfs_root,
- NULL, &info_fops);
-
- dbgfs_file_board = debugfs_create_file("board", S_IRUGO, dbgfs_root,
- NULL, &board_fops);
-
- return 0;
-}
-
-late_initcall(s3c_freq_debugfs_init);
-
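The deleted debugfs code prints periods in tenths of nanoseconds: the core s3c24xx driver stores hclk_tns = 1e9 / (hclk / 10), and print_ns() splits that back into whole and fractional nanoseconds for "%lu.%lu ns". A tiny sketch of the round trip:

#include <stdio.h>

#define print_ns(x) ((x) / 10), ((x) % 10)

int main(void)
{
	unsigned long hclk = 133333333;				/* 133.33 MHz */
	unsigned long hclk_tns = 1000000000UL / (hclk / 10);	/* tenths of ns */

	printf("hclk period is %lu.%lu ns\n", print_ns(hclk_tns));	/* 7.5 ns */
	return 0;
}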
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
deleted file mode 100644
index 7380c32b238e..000000000000
--- a/drivers/cpufreq/s3c24xx-cpufreq.c
+++ /dev/null
@@ -1,648 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2006-2008 Simtec Electronics
- * http://armlinux.simtec.co.uk/
- * Ben Dooks <[email protected]>
- *
- * S3C24XX CPU Frequency scaling
-*/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/cpufreq.h>
-#include <linux/cpu.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/device.h>
-#include <linux/sysfs.h>
-#include <linux/slab.h>
-#include <linux/soc/samsung/s3c-cpufreq-core.h>
-#include <linux/soc/samsung/s3c-pm.h>
-
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-
-/* note, cpufreq support deals in kHz, no Hz */
-static struct cpufreq_driver s3c24xx_driver;
-static struct s3c_cpufreq_config cpu_cur;
-static struct s3c_iotimings s3c24xx_iotiming;
-static struct cpufreq_frequency_table *pll_reg;
-static unsigned int last_target = ~0;
-static unsigned int ftab_size;
-static struct cpufreq_frequency_table *ftab;
-
-static struct clk *_clk_mpll;
-static struct clk *_clk_xtal;
-static struct clk *clk_fclk;
-static struct clk *clk_hclk;
-static struct clk *clk_pclk;
-static struct clk *clk_arm;
-
-#ifdef CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS
-struct s3c_cpufreq_config *s3c_cpufreq_getconfig(void)
-{
- return &cpu_cur;
-}
-
-struct s3c_iotimings *s3c_cpufreq_getiotimings(void)
-{
- return &s3c24xx_iotiming;
-}
-#endif /* CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS */
-
-static void s3c_cpufreq_getcur(struct s3c_cpufreq_config *cfg)
-{
- unsigned long fclk, pclk, hclk, armclk;
-
- cfg->freq.fclk = fclk = clk_get_rate(clk_fclk);
- cfg->freq.hclk = hclk = clk_get_rate(clk_hclk);
- cfg->freq.pclk = pclk = clk_get_rate(clk_pclk);
- cfg->freq.armclk = armclk = clk_get_rate(clk_arm);
-
- cfg->pll.driver_data = s3c24xx_read_mpllcon();
- cfg->pll.frequency = fclk;
-
- cfg->freq.hclk_tns = 1000000000 / (cfg->freq.hclk / 10);
-
- cfg->divs.h_divisor = fclk / hclk;
- cfg->divs.p_divisor = fclk / pclk;
-}
-
-static inline void s3c_cpufreq_calc(struct s3c_cpufreq_config *cfg)
-{
- unsigned long pll = cfg->pll.frequency;
-
- cfg->freq.fclk = pll;
- cfg->freq.hclk = pll / cfg->divs.h_divisor;
- cfg->freq.pclk = pll / cfg->divs.p_divisor;
-
- /* convert hclk into 10ths of nanoseconds for io calcs */
- cfg->freq.hclk_tns = 1000000000 / (cfg->freq.hclk / 10);
-}
-
-static inline int closer(unsigned int target, unsigned int n, unsigned int c)
-{
- int diff_cur = abs(target - c);
- int diff_new = abs(target - n);
-
- return (diff_new < diff_cur);
-}
-
-static void s3c_cpufreq_show(const char *pfx,
- struct s3c_cpufreq_config *cfg)
-{
- s3c_freq_dbg("%s: Fvco=%u, F=%lu, A=%lu, H=%lu (%u), P=%lu (%u)\n",
- pfx, cfg->pll.frequency, cfg->freq.fclk, cfg->freq.armclk,
- cfg->freq.hclk, cfg->divs.h_divisor,
- cfg->freq.pclk, cfg->divs.p_divisor);
-}
-
-/* functions to wrapper the driver info calls to do the cpu specific work */
-
-static void s3c_cpufreq_setio(struct s3c_cpufreq_config *cfg)
-{
- if (cfg->info->set_iotiming)
- (cfg->info->set_iotiming)(cfg, &s3c24xx_iotiming);
-}
-
-static int s3c_cpufreq_calcio(struct s3c_cpufreq_config *cfg)
-{
- if (cfg->info->calc_iotiming)
- return (cfg->info->calc_iotiming)(cfg, &s3c24xx_iotiming);
-
- return 0;
-}
-
-static void s3c_cpufreq_setrefresh(struct s3c_cpufreq_config *cfg)
-{
- (cfg->info->set_refresh)(cfg);
-}
-
-static void s3c_cpufreq_setdivs(struct s3c_cpufreq_config *cfg)
-{
- (cfg->info->set_divs)(cfg);
-}
-
-static int s3c_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg)
-{
- return (cfg->info->calc_divs)(cfg);
-}
-
-static void s3c_cpufreq_setfvco(struct s3c_cpufreq_config *cfg)
-{
- cfg->mpll = _clk_mpll;
- (cfg->info->set_fvco)(cfg);
-}
-
-static inline void s3c_cpufreq_updateclk(struct clk *clk,
- unsigned int freq)
-{
- clk_set_rate(clk, freq);
-}
-
-static int s3c_cpufreq_settarget(struct cpufreq_policy *policy,
- unsigned int target_freq,
- struct cpufreq_frequency_table *pll)
-{
- struct s3c_cpufreq_freqs freqs;
- struct s3c_cpufreq_config cpu_new;
- unsigned long flags;
-
- cpu_new = cpu_cur; /* copy new from current */
-
- s3c_cpufreq_show("cur", &cpu_cur);
-
- /* TODO - check for DMA currently outstanding */
-
- cpu_new.pll = pll ? *pll : cpu_cur.pll;
-
- if (pll)
- freqs.pll_changing = 1;
-
- /* update our frequencies */
-
- cpu_new.freq.armclk = target_freq;
- cpu_new.freq.fclk = cpu_new.pll.frequency;
-
- if (s3c_cpufreq_calcdivs(&cpu_new) < 0) {
- pr_err("no divisors for %d\n", target_freq);
- goto err_notpossible;
- }
-
- s3c_freq_dbg("%s: got divs\n", __func__);
-
- s3c_cpufreq_calc(&cpu_new);
-
- s3c_freq_dbg("%s: calculated frequencies for new\n", __func__);
-
- if (cpu_new.freq.hclk != cpu_cur.freq.hclk) {
- if (s3c_cpufreq_calcio(&cpu_new) < 0) {
- pr_err("%s: no IO timings\n", __func__);
- goto err_notpossible;
- }
- }
-
- s3c_cpufreq_show("new", &cpu_new);
-
- /* setup our cpufreq parameters */
-
- freqs.old = cpu_cur.freq;
- freqs.new = cpu_new.freq;
-
- freqs.freqs.old = cpu_cur.freq.armclk / 1000;
- freqs.freqs.new = cpu_new.freq.armclk / 1000;
-
- /* update f/h/p clock settings before we issue the change
- * notification, so that drivers do not need to do anything
- * special if they want to recalculate on CPUFREQ_PRECHANGE. */
-
- s3c_cpufreq_updateclk(_clk_mpll, cpu_new.pll.frequency);
- s3c_cpufreq_updateclk(clk_fclk, cpu_new.freq.fclk);
- s3c_cpufreq_updateclk(clk_hclk, cpu_new.freq.hclk);
- s3c_cpufreq_updateclk(clk_pclk, cpu_new.freq.pclk);
-
- /* start the frequency change */
- cpufreq_freq_transition_begin(policy, &freqs.freqs);
-
- /* If hclk is staying the same, then we do not need to
- * re-write the IO or the refresh timings whilst we are changing
- * speed. */
-
- local_irq_save(flags);
-
- /* is our memory clock slowing down? */
- if (cpu_new.freq.hclk < cpu_cur.freq.hclk) {
- s3c_cpufreq_setrefresh(&cpu_new);
- s3c_cpufreq_setio(&cpu_new);
- }
-
- if (cpu_new.freq.fclk == cpu_cur.freq.fclk) {
- /* not changing PLL, just set the divisors */
-
- s3c_cpufreq_setdivs(&cpu_new);
- } else {
- if (cpu_new.freq.fclk < cpu_cur.freq.fclk) {
- /* slow the cpu down, then set divisors */
-
- s3c_cpufreq_setfvco(&cpu_new);
- s3c_cpufreq_setdivs(&cpu_new);
- } else {
- /* set the divisors, then speed up */
-
- s3c_cpufreq_setdivs(&cpu_new);
- s3c_cpufreq_setfvco(&cpu_new);
- }
- }
-
- /* did our memory clock speed up */
- if (cpu_new.freq.hclk > cpu_cur.freq.hclk) {
- s3c_cpufreq_setrefresh(&cpu_new);
- s3c_cpufreq_setio(&cpu_new);
- }
-
- /* update our current settings */
- cpu_cur = cpu_new;
-
- local_irq_restore(flags);
-
- /* notify everyone we've done this */
- cpufreq_freq_transition_end(policy, &freqs.freqs, 0);
-
- s3c_freq_dbg("%s: finished\n", __func__);
- return 0;
-
- err_notpossible:
- pr_err("no compatible settings for %d\n", target_freq);
- return -EINVAL;
-}
-
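The divisor/PLL ordering above keeps every intermediate state within spec: when speeding up, the (larger) divisors go in before the PLL is raised; when slowing down, the PLL is dropped before the (smaller) divisors are applied. A toy sketch with made-up numbers, only to illustrate the ordering rule:

#include <stdio.h>

/* Toy model of the ordering rule in s3c_cpufreq_settarget(): the derived
 * bus clock (pll / divisor) must never overshoot its limit (100 here)
 * while the two settings are written one after the other.
 */
static unsigned int pll = 200, divisor = 2;	/* bus = pll / divisor = 100 */

static void set_pll(unsigned int p) { pll = p; printf("pll=%u bus=%u\n", pll, pll / divisor); }
static void set_divisor(unsigned int d) { divisor = d; printf("div=%u bus=%u\n", divisor, pll / divisor); }

int main(void)
{
	/* speeding up to pll=400: raise the divisor first, then the PLL */
	set_divisor(4);
	set_pll(400);

	/* slowing down to pll=200: drop the PLL first, then the divisor */
	set_pll(200);
	set_divisor(2);
	return 0;
}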
-/* s3c_cpufreq_target
- *
- * called by the cpufreq core to adjust the frequency that the CPU
- * is currently running at.
- */
-
-static int s3c_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
-{
- struct cpufreq_frequency_table *pll;
- unsigned int index;
-
-	/* avoid repeated calls which cause a needless amount of duplicated
-	 * logging output (and waste CPU time re-doing the calculation) */
- if (target_freq == last_target)
- return 0;
-
- last_target = target_freq;
-
- s3c_freq_dbg("%s: policy %p, target %u, relation %u\n",
- __func__, policy, target_freq, relation);
-
- if (ftab) {
- index = cpufreq_frequency_table_target(policy, target_freq,
- relation);
-
- s3c_freq_dbg("%s: adjust %d to entry %d (%u)\n", __func__,
- target_freq, index, ftab[index].frequency);
- target_freq = ftab[index].frequency;
- }
-
- target_freq *= 1000; /* convert target to Hz */
-
- /* find the settings for our new frequency */
-
- if (!pll_reg || cpu_cur.lock_pll) {
- /* either we've not got any PLL values, or we've locked
- * to the current one. */
- pll = NULL;
- } else {
- struct cpufreq_policy tmp_policy;
-
- /* we keep the cpu pll table in Hz, to ensure we get an
- * accurate value for the PLL output. */
-
- tmp_policy.min = policy->min * 1000;
- tmp_policy.max = policy->max * 1000;
- tmp_policy.cpu = policy->cpu;
- tmp_policy.freq_table = pll_reg;
-
- /* cpufreq_frequency_table_target returns the index
- * of the table entry, not the value of
- * the table entry's index field. */
-
- index = cpufreq_frequency_table_target(&tmp_policy, target_freq,
- relation);
- pll = pll_reg + index;
-
- s3c_freq_dbg("%s: target %u => %u\n",
- __func__, target_freq, pll->frequency);
-
- target_freq = pll->frequency;
- }
-
- return s3c_cpufreq_settarget(policy, target_freq, pll);
-}
-
-struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name)
-{
- struct clk *clk;
-
- clk = clk_get(dev, name);
- if (IS_ERR(clk))
- pr_err("failed to get clock '%s'\n", name);
-
- return clk;
-}
-
-static int s3c_cpufreq_init(struct cpufreq_policy *policy)
-{
- policy->clk = clk_arm;
- policy->cpuinfo.transition_latency = cpu_cur.info->latency;
- policy->freq_table = ftab;
-
- return 0;
-}
-
-static int __init s3c_cpufreq_initclks(void)
-{
- _clk_mpll = s3c_cpufreq_clk_get(NULL, "mpll");
- _clk_xtal = s3c_cpufreq_clk_get(NULL, "xtal");
- clk_fclk = s3c_cpufreq_clk_get(NULL, "fclk");
- clk_hclk = s3c_cpufreq_clk_get(NULL, "hclk");
- clk_pclk = s3c_cpufreq_clk_get(NULL, "pclk");
- clk_arm = s3c_cpufreq_clk_get(NULL, "armclk");
-
- if (IS_ERR(clk_fclk) || IS_ERR(clk_hclk) || IS_ERR(clk_pclk) ||
- IS_ERR(_clk_mpll) || IS_ERR(clk_arm) || IS_ERR(_clk_xtal)) {
- pr_err("%s: could not get clock(s)\n", __func__);
- return -ENOENT;
- }
-
- pr_info("%s: clocks f=%lu,h=%lu,p=%lu,a=%lu\n",
- __func__,
- clk_get_rate(clk_fclk) / 1000,
- clk_get_rate(clk_hclk) / 1000,
- clk_get_rate(clk_pclk) / 1000,
- clk_get_rate(clk_arm) / 1000);
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-static struct cpufreq_frequency_table suspend_pll;
-static unsigned int suspend_freq;
-
-static int s3c_cpufreq_suspend(struct cpufreq_policy *policy)
-{
- suspend_pll.frequency = clk_get_rate(_clk_mpll);
- suspend_pll.driver_data = s3c24xx_read_mpllcon();
- suspend_freq = clk_get_rate(clk_arm);
-
- return 0;
-}
-
-static int s3c_cpufreq_resume(struct cpufreq_policy *policy)
-{
- int ret;
-
- s3c_freq_dbg("%s: resuming with policy %p\n", __func__, policy);
-
- last_target = ~0; /* invalidate last_target setting */
-
- /* whilst we will be called later on, we try and re-set the
- * cpu frequencies as soon as possible so that we do not end
- * up resuming devices and then immediately having to re-set
- * a number of settings once these devices have restarted.
- *
- * as a note, it is expected devices are not used until they
- * have been un-suspended and at that time they should have
- * used the updated clock settings.
- */
-
- ret = s3c_cpufreq_settarget(NULL, suspend_freq, &suspend_pll);
- if (ret) {
- pr_err("%s: failed to reset pll/freq\n", __func__);
- return ret;
- }
-
- return 0;
-}
-#else
-#define s3c_cpufreq_resume NULL
-#define s3c_cpufreq_suspend NULL
-#endif
-
-static struct cpufreq_driver s3c24xx_driver = {
- .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
- .target = s3c_cpufreq_target,
- .get = cpufreq_generic_get,
- .init = s3c_cpufreq_init,
- .suspend = s3c_cpufreq_suspend,
- .resume = s3c_cpufreq_resume,
- .name = "s3c24xx",
-};
-
-
-int s3c_cpufreq_register(struct s3c_cpufreq_info *info)
-{
- if (!info || !info->name) {
- pr_err("%s: failed to pass valid information\n", __func__);
- return -EINVAL;
- }
-
- pr_info("S3C24XX CPU Frequency driver, %s cpu support\n",
- info->name);
-
- /* check our driver info has valid data */
-
- BUG_ON(info->set_refresh == NULL);
- BUG_ON(info->set_divs == NULL);
- BUG_ON(info->calc_divs == NULL);
-
-	/* info->set_fvco is optional, depending on whether the
-	 * PLL (fvco) actually needs to be reprogrammed. */
-
- cpu_cur.info = info;
-
- /* Note, driver registering should probably update locktime */
-
- return 0;
-}
-
-int __init s3c_cpufreq_setboard(struct s3c_cpufreq_board *board)
-{
- struct s3c_cpufreq_board *ours;
-
- if (!board) {
- pr_info("%s: no board data\n", __func__);
- return -EINVAL;
- }
-
- /* Copy the board information so that each board can make this
- * initdata. */
-
- ours = kzalloc(sizeof(*ours), GFP_KERNEL);
- if (!ours)
- return -ENOMEM;
-
- *ours = *board;
- cpu_cur.board = ours;
-
- return 0;
-}
-
-static int __init s3c_cpufreq_auto_io(void)
-{
- int ret;
-
- if (!cpu_cur.info->get_iotiming) {
- pr_err("%s: get_iotiming undefined\n", __func__);
- return -ENOENT;
- }
-
- pr_info("%s: working out IO settings\n", __func__);
-
- ret = (cpu_cur.info->get_iotiming)(&cpu_cur, &s3c24xx_iotiming);
- if (ret)
- pr_err("%s: failed to get timings\n", __func__);
-
- return ret;
-}
-
-/* if one or the other is zero, then return the other, otherwise return the min */
-#define do_min(_a, _b) ((_a) == 0 ? (_b) : (_b) == 0 ? (_a) : min(_a, _b))
-
-/**
- * s3c_cpufreq_freq_min - find the per-clock minimum of two frequency sets.
- * @dst: The destination structure
- * @a: One argument.
- * @b: The other argument.
- *
- * Store in @dst the minimum of each frequency entry in @a and @b; if an
- * entry is zero in one argument it is ignored and the non-zero value is
- * used instead.
- */
-static void s3c_cpufreq_freq_min(struct s3c_freq *dst,
- struct s3c_freq *a, struct s3c_freq *b)
-{
- dst->fclk = do_min(a->fclk, b->fclk);
- dst->hclk = do_min(a->hclk, b->hclk);
- dst->pclk = do_min(a->pclk, b->pclk);
- dst->armclk = do_min(a->armclk, b->armclk);
-}
-
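A quick standalone check of the do_min()/s3c_cpufreq_freq_min() semantics above, using arbitrary example frequencies (MIN() is defined locally here rather than taken from the kernel):

#include <stdio.h>

#define MIN(a, b)	((a) < (b) ? (a) : (b))
/* same semantics as the driver's do_min(): zero means "no limit here" */
#define do_min(_a, _b)	((_a) == 0 ? (_b) : (_b) == 0 ? (_a) : MIN(_a, _b))

int main(void)
{
	printf("%u\n", do_min(0u, 266000u));		/* 266000: zero ignored */
	printf("%u\n", do_min(133000u, 0u));		/* 133000: zero ignored */
	printf("%u\n", do_min(133000u, 266000u));	/* 133000: plain min    */
	return 0;
}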
-static inline u32 calc_locktime(u32 freq, u32 time_us)
-{
- u32 result;
-
- result = freq * time_us;
- result = DIV_ROUND_UP(result, 1000 * 1000);
-
- return result;
-}
-
-static void s3c_cpufreq_update_loctkime(void)
-{
- unsigned int bits = cpu_cur.info->locktime_bits;
- u32 rate = (u32)clk_get_rate(_clk_xtal);
- u32 val;
-
- if (bits == 0) {
- WARN_ON(1);
- return;
- }
-
- val = calc_locktime(rate, cpu_cur.info->locktime_u) << bits;
- val |= calc_locktime(rate, cpu_cur.info->locktime_m);
-
- pr_info("%s: new locktime is 0x%08x\n", __func__, val);
- s3c24xx_write_locktime(val);
-}
-
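As a worked example of the locktime calculation above, with hypothetical numbers (the real locktime_u, locktime_m, xtal rate and bit width come from the SoC's s3c_cpufreq_info):

#include <stdio.h>

/* round-up division, same as the kernel's DIV_ROUND_UP() */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned int calc_locktime(unsigned int freq, unsigned int time_us)
{
	return DIV_ROUND_UP(freq * time_us, 1000 * 1000);
}

int main(void)
{
	/* assumed values: 12 MHz xtal, 150 us / 300 us lock times, 16 bits */
	unsigned int rate = 12000000, bits = 16;
	unsigned int val = (calc_locktime(rate, 150) << bits) |
			    calc_locktime(rate, 300);

	printf("locktime = 0x%08x\n", val);	/* (1800 << 16) | 3600 = 0x07080e10 */
	return 0;
}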
-static int s3c_cpufreq_build_freq(void)
-{
- int size, ret;
-
- kfree(ftab);
-
- size = cpu_cur.info->calc_freqtable(&cpu_cur, NULL, 0);
- size++;
-
- ftab = kcalloc(size, sizeof(*ftab), GFP_KERNEL);
- if (!ftab)
- return -ENOMEM;
-
- ftab_size = size;
-
- ret = cpu_cur.info->calc_freqtable(&cpu_cur, ftab, size);
- s3c_cpufreq_addfreq(ftab, ret, size, CPUFREQ_TABLE_END);
-
- return 0;
-}
-
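The calc_freqtable() usage above is the usual two-pass sizing idiom: call once with a NULL table to learn how many entries are needed, allocate, then call again to fill it. A minimal userspace sketch with a hypothetical producer function:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical producer: writes up to 'size' entries if a table is given,
 * and returns how many entries it wants in total (like calc_freqtable()).
 */
static int fill_freqs(unsigned int *tab, int size)
{
	static const unsigned int freqs[] = { 133000, 266000, 400000 };
	int i, want = 3;

	for (i = 0; tab && i < size && i < want; i++)
		tab[i] = freqs[i];
	return want;
}

int main(void)
{
	int size = fill_freqs(NULL, 0) + 1;	/* +1 for a terminator entry */
	unsigned int *tab = calloc(size, sizeof(*tab));

	if (!tab)
		return 1;
	fill_freqs(tab, size);
	tab[size - 1] = ~0u;			/* terminator, like CPUFREQ_TABLE_END */

	for (int i = 0; tab[i] != ~0u; i++)
		printf("%u kHz\n", tab[i]);
	free(tab);
	return 0;
}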
-static int __init s3c_cpufreq_initcall(void)
-{
- int ret = 0;
-
- if (cpu_cur.info && cpu_cur.board) {
- ret = s3c_cpufreq_initclks();
- if (ret)
- goto out;
-
- /* get current settings */
- s3c_cpufreq_getcur(&cpu_cur);
- s3c_cpufreq_show("cur", &cpu_cur);
-
- if (cpu_cur.board->auto_io) {
- ret = s3c_cpufreq_auto_io();
- if (ret) {
- pr_err("%s: failed to get io timing\n",
- __func__);
- goto out;
- }
- }
-
- if (cpu_cur.board->need_io && !cpu_cur.info->set_iotiming) {
- pr_err("%s: no IO support registered\n", __func__);
- ret = -EINVAL;
- goto out;
- }
-
- if (!cpu_cur.info->need_pll)
- cpu_cur.lock_pll = 1;
-
- s3c_cpufreq_update_loctkime();
-
- s3c_cpufreq_freq_min(&cpu_cur.max, &cpu_cur.board->max,
- &cpu_cur.info->max);
-
- if (cpu_cur.info->calc_freqtable)
- s3c_cpufreq_build_freq();
-
- ret = cpufreq_register_driver(&s3c24xx_driver);
- }
-
- out:
- return ret;
-}
-
-late_initcall(s3c_cpufreq_initcall);
-
-/**
- * s3c_plltab_register - register CPU PLL table.
- * @plls: The list of PLL entries.
- * @plls_no: The size of the PLL entries @plls.
- *
- * Register the given set of PLLs with the system.
- */
-int s3c_plltab_register(struct cpufreq_frequency_table *plls,
- unsigned int plls_no)
-{
- struct cpufreq_frequency_table *vals;
- unsigned int size;
-
- size = sizeof(*vals) * (plls_no + 1);
-
- vals = kzalloc(size, GFP_KERNEL);
- if (vals) {
- memcpy(vals, plls, size);
- pll_reg = vals;
-
- /* write a terminating entry, we don't store it in the
- * table that is stored in the kernel */
- vals += plls_no;
- vals->frequency = CPUFREQ_TABLE_END;
-
- pr_info("%d PLL entries\n", plls_no);
- } else
- pr_err("no memory for PLL tables\n");
-
- return vals ? 0 : -ENOMEM;
-}
diff --git a/drivers/cpufreq/sa1100-cpufreq.c b/drivers/cpufreq/sa1100-cpufreq.c
deleted file mode 100644
index 252b9fc26124..000000000000
--- a/drivers/cpufreq/sa1100-cpufreq.c
+++ /dev/null
@@ -1,206 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * cpu-sa1100.c: clock scaling for the SA1100
- *
- * Copyright (C) 2000 2001, The Delft University of Technology
- *
- * Authors:
- * - Johan Pouwelse ([email protected]): initial version
- * - Erik Mouw ([email protected]):
- * - major rewrite for linux-2.3.99
- * - rewritten for the more generic power management scheme in
- * linux-2.4.5-rmk1
- *
- * This software has been developed while working on the LART
- * computing board (http://www.lartmaker.nl/), which is
- * sponsored by the Mobile Multi-media Communications
- * (http://www.mobimedia.org/) and Ubiquitous Communications
- * (http://www.ubicom.tudelft.nl/) projects.
- *
- * The authors can be reached at:
- *
- * Erik Mouw
- * Information and Communication Theory Group
- * Faculty of Information Technology and Systems
- * Delft University of Technology
- * P.O. Box 5031
- * 2600 GA Delft
- * The Netherlands
- *
- * Theory of operations
- * ====================
- *
- * Clock scaling can be used to lower the power consumption of the CPU
- * core. This will give you a somewhat longer running time.
- *
- * The SA-1100 has a single register to change the core clock speed:
- *
- * PPCR 0x90020014 PLL config
- *
- * However, the DRAM timings are closely related to the core clock
- * speed, so we need to change these, too. The used registers are:
- *
- * MDCNFG 0xA0000000 DRAM config
- * MDCAS0 0xA0000004 Access waveform
- * MDCAS1 0xA0000008 Access waveform
- * MDCAS2 0xA000000C Access waveform
- *
- * Care must be taken to change the DRAM parameters the correct way,
- * because otherwise the DRAM becomes unusable and the kernel will
- * crash.
- *
- * The simple solution to avoid a kernel crash is to put the actual
- * clock change in ROM and jump to that code from the kernel. The main
- * disadvantage is that the ROM has to be modified, which is not
- * possible on all SA-1100 platforms. Another disadvantage is that
- * jumping to ROM makes clock switching unnecessarily complicated.
- *
- * The idea behind this driver is that the memory configuration can be
- * changed while running from DRAM (even with interrupts turned on!)
- * as long as all re-configuration steps yield a valid DRAM
- * configuration. The advantages are clear: it will run on all SA-1100
- * platforms, and the code is very simple.
- *
- * If you really want to understand what is going on in
- * sa1100_update_dram_timings(), you'll have to read sections 8.2,
- * 9.5.7.3, and 10.2 from the "Intel StrongARM SA-1100 Microprocessor
- * Developers Manual" (available for free from Intel).
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/cpufreq.h>
-#include <linux/io.h>
-
-#include <asm/cputype.h>
-
-#include <mach/generic.h>
-#include <mach/hardware.h>
-
-struct sa1100_dram_regs {
- int speed;
- u32 mdcnfg;
- u32 mdcas0;
- u32 mdcas1;
- u32 mdcas2;
-};
-
-
-static struct cpufreq_driver sa1100_driver;
-
-static struct sa1100_dram_regs sa1100_dram_settings[] = {
- /*speed, mdcnfg, mdcas0, mdcas1, mdcas2, clock freq */
- { 59000, 0x00dc88a3, 0xcccccccf, 0xfffffffc, 0xffffffff},/* 59.0 MHz */
- { 73700, 0x011490a3, 0xcccccccf, 0xfffffffc, 0xffffffff},/* 73.7 MHz */
- { 88500, 0x014e90a3, 0xcccccccf, 0xfffffffc, 0xffffffff},/* 88.5 MHz */
- {103200, 0x01889923, 0xcccccccf, 0xfffffffc, 0xffffffff},/* 103.2 MHz */
- {118000, 0x01c29923, 0x9999998f, 0xfffffff9, 0xffffffff},/* 118.0 MHz */
- {132700, 0x01fb2123, 0x9999998f, 0xfffffff9, 0xffffffff},/* 132.7 MHz */
- {147500, 0x02352123, 0x3333330f, 0xfffffff3, 0xffffffff},/* 147.5 MHz */
- {162200, 0x026b29a3, 0x38e38e1f, 0xfff8e38e, 0xffffffff},/* 162.2 MHz */
- {176900, 0x02a329a3, 0x71c71c1f, 0xfff1c71c, 0xffffffff},/* 176.9 MHz */
- {191700, 0x02dd31a3, 0xe38e383f, 0xffe38e38, 0xffffffff},/* 191.7 MHz */
- {206400, 0x03153223, 0xc71c703f, 0xffc71c71, 0xffffffff},/* 206.4 MHz */
- {221200, 0x034fba23, 0xc71c703f, 0xffc71c71, 0xffffffff},/* 221.2 MHz */
- {235900, 0x03853a23, 0xe1e1e07f, 0xe1e1e1e1, 0xffffffe1},/* 235.9 MHz */
- {250700, 0x03bf3aa3, 0xc3c3c07f, 0xc3c3c3c3, 0xffffffc3},/* 250.7 MHz */
- {265400, 0x03f7c2a3, 0xc3c3c07f, 0xc3c3c3c3, 0xffffffc3},/* 265.4 MHz */
- {280200, 0x0431c2a3, 0x878780ff, 0x87878787, 0xffffff87},/* 280.2 MHz */
- { 0, 0, 0, 0, 0 } /* last entry */
-};
-
-static void sa1100_update_dram_timings(int current_speed, int new_speed)
-{
- struct sa1100_dram_regs *settings = sa1100_dram_settings;
-
- /* find speed */
- while (settings->speed != 0) {
- if (new_speed == settings->speed)
- break;
-
- settings++;
- }
-
- if (settings->speed == 0) {
- panic("%s: couldn't find dram setting for speed %d\n",
- __func__, new_speed);
- }
-
- /* No risk, no fun: run with interrupts on! */
- if (new_speed > current_speed) {
- /* We're going FASTER, so first relax the memory
- * timings before changing the core frequency
- */
-
- /* Half the memory access clock */
- MDCNFG |= MDCNFG_CDB2;
-
- /* The order of these statements IS important, keep 8
- * pulses!!
- */
- MDCAS2 = settings->mdcas2;
- MDCAS1 = settings->mdcas1;
- MDCAS0 = settings->mdcas0;
- MDCNFG = settings->mdcnfg;
- } else {
- /* We're going SLOWER: first decrease the core
- * frequency and then tighten the memory settings.
- */
-
- /* Half the memory access clock */
- MDCNFG |= MDCNFG_CDB2;
-
- /* The order of these statements IS important, keep 8
- * pulses!!
- */
- MDCAS0 = settings->mdcas0;
- MDCAS1 = settings->mdcas1;
- MDCAS2 = settings->mdcas2;
- MDCNFG = settings->mdcnfg;
- }
-}
-
-static int sa1100_target(struct cpufreq_policy *policy, unsigned int ppcr)
-{
- unsigned int cur = sa11x0_getspeed(0);
- unsigned int new_freq;
-
- new_freq = sa11x0_freq_table[ppcr].frequency;
-
- if (new_freq > cur)
- sa1100_update_dram_timings(cur, new_freq);
-
- PPCR = ppcr;
-
- if (new_freq < cur)
- sa1100_update_dram_timings(cur, new_freq);
-
- return 0;
-}
-
-static int __init sa1100_cpu_init(struct cpufreq_policy *policy)
-{
- cpufreq_generic_init(policy, sa11x0_freq_table, 0);
- return 0;
-}
-
-static struct cpufreq_driver sa1100_driver __refdata = {
- .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
- CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
- .verify = cpufreq_generic_frequency_table_verify,
- .target_index = sa1100_target,
- .get = sa11x0_getspeed,
- .init = sa1100_cpu_init,
- .name = "sa1100",
-};
-
-static int __init sa1100_dram_init(void)
-{
- if (cpu_is_sa1100())
- return cpufreq_register_driver(&sa1100_driver);
- else
- return -ENODEV;
-}
-
-arch_initcall(sa1100_dram_init);
diff --git a/drivers/cpufreq/sa1110-cpufreq.c b/drivers/cpufreq/sa1110-cpufreq.c
index 1a83c8678a63..bb7f591a8b05 100644
--- a/drivers/cpufreq/sa1110-cpufreq.c
+++ b/drivers/cpufreq/sa1110-cpufreq.c
@@ -344,14 +344,8 @@ static int __init sa1110_clk_init(void)
if (!name[0]) {
if (machine_is_assabet())
name = "TC59SM716-CL3";
- if (machine_is_pt_system3())
- name = "K4S641632D";
- if (machine_is_h3100())
- name = "KM416S4030CT";
if (machine_is_jornada720() || machine_is_h3600())
name = "K4S281632B-1H";
- if (machine_is_nanoengine())
- name = "MT48LC8M16A2TG-75";
}
sdram = sa1110_find_sdram(name);
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
index 7d0d62a06bf3..c6fdf019dbde 100644
--- a/drivers/cpufreq/spear-cpufreq.c
+++ b/drivers/cpufreq/spear-cpufreq.c
@@ -39,7 +39,7 @@ static struct clk *spear1340_cpu_get_possible_parent(unsigned long newfreq)
* In SPEAr1340, cpu clk's parent sys clk can take input from
* following sources
*/
- const char *sys_clk_src[] = {
+ static const char * const sys_clk_src[] = {
"sys_syn_clk",
"pll1_clk",
"pll2_clk",
diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c
index 6c88827f4e62..f98f53bf1011 100644
--- a/drivers/cpufreq/tegra186-cpufreq.c
+++ b/drivers/cpufreq/tegra186-cpufreq.c
@@ -65,8 +65,8 @@ struct tegra186_cpufreq_cluster {
struct tegra186_cpufreq_data {
void __iomem *regs;
- struct tegra186_cpufreq_cluster *clusters;
const struct tegra186_cpufreq_cpu *cpus;
+ struct tegra186_cpufreq_cluster clusters[];
};
static int tegra186_cpufreq_init(struct cpufreq_policy *policy)
@@ -221,15 +221,12 @@ static int tegra186_cpufreq_probe(struct platform_device *pdev)
struct tegra_bpmp *bpmp;
unsigned int i = 0, err;
- data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ data = devm_kzalloc(&pdev->dev,
+ struct_size(data, clusters, TEGRA186_NUM_CLUSTERS),
+ GFP_KERNEL);
if (!data)
return -ENOMEM;
- data->clusters = devm_kcalloc(&pdev->dev, TEGRA186_NUM_CLUSTERS,
- sizeof(*data->clusters), GFP_KERNEL);
- if (!data->clusters)
- return -ENOMEM;
-
data->cpus = tegra186_cpus;
bpmp = tegra_bpmp_get(&pdev->dev);
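The tegra186 change above folds the separately allocated cluster array into the parent structure as a flexible array member sized with struct_size(), so one allocation replaces two. A userspace approximation (the struct_size() macro below is a simplified stand-in for the kernel helper, without its overflow checking):

#include <stdio.h>
#include <stdlib.h>

struct cluster { unsigned int ref_clk_hz; };

struct cpufreq_data {
	void *regs;
	const void *cpus;
	struct cluster clusters[];	/* flexible array member, one allocation */
};

/* simplified stand-in for the kernel's struct_size() */
#define struct_size(p, member, n) \
	(sizeof(*(p)) + sizeof((p)->member[0]) * (n))

int main(void)
{
	struct cpufreq_data *data = calloc(1, struct_size(data, clusters, 2));

	if (!data)
		return 1;
	data->clusters[0].ref_clk_hz = 408000000;	/* made-up value */
	printf("allocation size: %zu bytes\n", struct_size(data, clusters, 2));
	free(data);
	return 0;
}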
diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c
index 4596c3e323aa..5890e25d7f77 100644
--- a/drivers/cpufreq/tegra194-cpufreq.c
+++ b/drivers/cpufreq/tegra194-cpufreq.c
@@ -411,7 +411,8 @@ static int tegra194_cpufreq_set_target(struct cpufreq_policy *policy,
static struct cpufreq_driver tegra194_cpufreq_driver = {
.name = "tegra194",
- .flags = CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .flags = CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+ CPUFREQ_IS_COOLING_DEV,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = tegra194_cpufreq_set_target,
.get = tegra194_get_speed,
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index f64180dd2005..be4209d97cb3 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -39,6 +39,14 @@
#define OMAP34xx_ProdID_SKUID 0x4830A20C
#define OMAP3_SYSCON_BASE (0x48000000 + 0x2000 + 0x270)
+#define AM625_EFUSE_K_MPU_OPP 11
+#define AM625_EFUSE_S_MPU_OPP 19
+#define AM625_EFUSE_T_MPU_OPP 20
+
+#define AM625_SUPPORT_K_MPU_OPP BIT(0)
+#define AM625_SUPPORT_S_MPU_OPP BIT(1)
+#define AM625_SUPPORT_T_MPU_OPP BIT(2)
+
#define VERSION_COUNT 2
struct ti_cpufreq_data;
@@ -104,6 +112,25 @@ static unsigned long omap3_efuse_xlate(struct ti_cpufreq_data *opp_data,
return BIT(efuse);
}
+static unsigned long am625_efuse_xlate(struct ti_cpufreq_data *opp_data,
+ unsigned long efuse)
+{
+ unsigned long calculated_efuse = AM625_SUPPORT_K_MPU_OPP;
+
+ switch (efuse) {
+ case AM625_EFUSE_T_MPU_OPP:
+ calculated_efuse |= AM625_SUPPORT_T_MPU_OPP;
+ fallthrough;
+ case AM625_EFUSE_S_MPU_OPP:
+ calculated_efuse |= AM625_SUPPORT_S_MPU_OPP;
+ fallthrough;
+ case AM625_EFUSE_K_MPU_OPP:
+ calculated_efuse |= AM625_SUPPORT_K_MPU_OPP;
+ }
+
+ return calculated_efuse;
+}
+
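The fallthroughs above are deliberate: each detected OPP grade also enables every lower grade, so with the BIT() values defined above the returned mask works out to:

  efuse 20 (T grade)           -> K | S | T supported (0x7)
  efuse 19 (S grade)           -> K | S supported     (0x3)
  efuse 11 (K grade) or other  -> K supported         (0x1)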
static struct ti_cpufreq_soc_data am3x_soc_data = {
.efuse_xlate = amx3_efuse_xlate,
.efuse_fallback = AM33XX_800M_ARM_MPU_MAX_FREQ,
@@ -198,6 +225,14 @@ static struct ti_cpufreq_soc_data am3517_soc_data = {
.multi_regulator = false,
};
+static struct ti_cpufreq_soc_data am625_soc_data = {
+ .efuse_xlate = am625_efuse_xlate,
+ .efuse_offset = 0x0018,
+ .efuse_mask = 0x07c0,
+ .efuse_shift = 0x6,
+ .rev_offset = 0x0014,
+ .multi_regulator = false,
+};
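With the mask and shift above, the OPP grade is a 5-bit field at bits 6..10 of the syscon word at offset 0x18; the generic ti-cpufreq code applies efuse_mask and efuse_shift before handing the value to efuse_xlate(). A small sketch with a hypothetical raw register value:

#include <stdio.h>

int main(void)
{
	/* hypothetical raw syscon word read at efuse_offset 0x0018 */
	unsigned int reg = 0x1505;
	unsigned int efuse = (reg & 0x07c0) >> 6;	/* efuse_mask, efuse_shift */

	printf("MPU OPP grade = %u\n", efuse);		/* 20, i.e. the T grade on AM625 */
	return 0;
}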
/**
* ti_cpufreq_get_efuse() - Parse and return efuse value present on SoC
@@ -301,6 +336,7 @@ static const struct of_device_id ti_cpufreq_of_match[] = {
{ .compatible = "ti,dra7", .data = &dra7_soc_data },
{ .compatible = "ti,omap34xx", .data = &omap34xx_soc_data, },
{ .compatible = "ti,omap36xx", .data = &omap36xx_soc_data, },
+ { .compatible = "ti,am625", .data = &am625_soc_data, },
/* legacy */
{ .compatible = "ti,omap3430", .data = &omap34xx_soc_data, },
{ .compatible = "ti,omap3630", .data = &omap36xx_soc_data, },