author    Thomas Gleixner <[email protected]>  2021-03-14 16:34:35 +0100
committer Thomas Gleixner <[email protected]>  2021-03-14 16:34:35 +0100
commit    b470ebc9e0e57f53d1db9c49b8a3de4086babd05 (patch)
tree      95c61291ad5f216967a9be36f19774026ffc88cb /arch/mips/kernel/cacheinfo.c
parent    4c7bcb51ae25f79e3733982e5d0cd8ce8640ddfc (diff)
parent    5fbecd2389f48e1415799c63130d0cdce1cf3f60 (diff)
Merge tag 'irqchip-fixes-5.12-1' of git://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms into irq/urgent
Pull irqchip fixes from Marc Zyngier:

- More compatible strings for the Ingenic irqchip (introducing the JZ4760B SoC)
- Select GENERIC_IRQ_MULTI_HANDLER on the ARM ep93xx platform
- Drop all GENERIC_IRQ_MULTI_HANDLER selections from the irqchip Kconfig, now relying on the architecture to get it right
- Drop the debugfs_file field from struct irq_domain, now that debugfs can track things on its own
Diffstat (limited to 'arch/mips/kernel/cacheinfo.c')
-rw-r--r--  arch/mips/kernel/cacheinfo.c | 30
1 file changed, 23 insertions(+), 7 deletions(-)
diff --git a/arch/mips/kernel/cacheinfo.c b/arch/mips/kernel/cacheinfo.c
index 47312c529410..53d8ea7d36e6 100644
--- a/arch/mips/kernel/cacheinfo.c
+++ b/arch/mips/kernel/cacheinfo.c
@@ -35,6 +35,11 @@ static int __init_cache_level(unsigned int cpu)
leaves += (c->icache.waysize) ? 2 : 1;
+ if (c->vcache.waysize) {
+ levels++;
+ leaves++;
+ }
+
if (c->scache.waysize) {
levels++;
leaves++;
@@ -74,25 +79,36 @@ static int __populate_cache_leaves(unsigned int cpu)
struct cpuinfo_mips *c = &current_cpu_data;
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
struct cacheinfo *this_leaf = this_cpu_ci->info_list;
+ int level = 1;
if (c->icache.waysize) {
- /* L1 caches are per core */
+ /* I/D caches are per core */
fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
- populate_cache(dcache, this_leaf, 1, CACHE_TYPE_DATA);
+ populate_cache(dcache, this_leaf, level, CACHE_TYPE_DATA);
fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
- populate_cache(icache, this_leaf, 1, CACHE_TYPE_INST);
+ populate_cache(icache, this_leaf, level, CACHE_TYPE_INST);
+ level++;
} else {
- populate_cache(dcache, this_leaf, 1, CACHE_TYPE_UNIFIED);
+ populate_cache(dcache, this_leaf, level, CACHE_TYPE_UNIFIED);
+ level++;
+ }
+
+ if (c->vcache.waysize) {
+ /* Vcache is per core as well */
+ fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
+ populate_cache(vcache, this_leaf, level, CACHE_TYPE_UNIFIED);
+ level++;
}
if (c->scache.waysize) {
- /* L2 cache is per cluster */
+ /* Scache is per cluster */
fill_cpumask_cluster(cpu, &this_leaf->shared_cpu_map);
- populate_cache(scache, this_leaf, 2, CACHE_TYPE_UNIFIED);
+ populate_cache(scache, this_leaf, level, CACHE_TYPE_UNIFIED);
+ level++;
}
if (c->tcache.waysize)
- populate_cache(tcache, this_leaf, 3, CACHE_TYPE_UNIFIED);
+ populate_cache(tcache, this_leaf, level, CACHE_TYPE_UNIFIED);
this_cpu_ci->cpu_map_populated = true;
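
With this change the cache level is no longer hard-coded per populate_cache() call; a running level counter is advanced after each cache that is actually present, so an optional victim cache (vcache) slots in between the L1 I/D caches and the shared scache without renumbering the later levels. Below is a minimal standalone sketch of that counting pattern; the has_* flags and the report_cache() helper are hypothetical stand-ins for illustration, not part of the kernel API.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for populate_cache(): records one cache leaf. */
static void report_cache(const char *name, int level)
{
	printf("L%d: %s\n", level, name);
}

/*
 * Mirrors the counting pattern of the patched __populate_cache_leaves():
 * every cache that exists consumes the current level and bumps the counter,
 * so optional caches (vcache, scache, tcache) never leave gaps or collide.
 */
static void enumerate_caches(bool has_split_l1, bool has_vcache,
			     bool has_scache, bool has_tcache)
{
	int level = 1;

	if (has_split_l1) {
		report_cache("dcache", level);
		report_cache("icache", level);	/* I and D share one level */
	} else {
		report_cache("unified L1", level);
	}
	level++;

	if (has_vcache)
		report_cache("vcache", level++);
	if (has_scache)
		report_cache("scache", level++);
	if (has_tcache)
		report_cache("tcache", level++);
}

int main(void)
{
	/*
	 * A core with split L1, a victim cache and an L2 prints:
	 * L1 dcache/icache, L2 vcache, L3 scache.
	 */
	enumerate_caches(true, true, true, false);
	return 0;
}

In the old code the scache was always reported as level 2 and the tcache as level 3, which misnumbered both on CPUs that also have a vcache; the counter-based approach keeps the reported levels contiguous regardless of which optional caches exist.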