// SPDX-License-Identifier: GPL-2.0-only
/*
 * LoongArch cacheinfo support
 *
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/cacheinfo.h>
#include <linux/topology.h>
#include <asm/bootinfo.h>
#include <asm/cpu-info.h>

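/*
 * Report the cache topology to the generic cacheinfo code: the number
 * of levels is taken from the deepest probed leaf, and the leaf count
 * comes from the per-CPU cache descriptors filled in at boot.
 */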
int init_cache_level(unsigned int cpu)
{
	int cache_present = current_cpu_data.cache_leaves_present;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);

	this_cpu_ci->num_levels =
		current_cpu_data.cache_leaves[cache_present - 1].level;
	this_cpu_ci->num_leaves = cache_present;

	return 0;
}

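/*
 * Two leaves are shared when neither descriptor is marked CACHE_PRIVATE
 * in the flags stashed in ->priv.
 */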
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	return (!(*(unsigned char *)(this_leaf->priv) & CACHE_PRIVATE)
		&& !(*(unsigned char *)(sib_leaf->priv) & CACHE_PRIVATE));
}

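/*
 * Populate shared_cpu_map for each leaf of this CPU by walking the
 * other online CPUs in the same NUMA node: SMT siblings share every
 * cache level, and leaves not marked private are shared node-wide.
 */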
static void cache_cpumap_setup(unsigned int cpu)
{
	unsigned int index;
	struct cacheinfo *this_leaf, *sib_leaf;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);

	for (index = 0; index < this_cpu_ci->num_leaves; index++) {
		unsigned int i;

		this_leaf = this_cpu_ci->info_list + index;
		/* skip if shared_cpu_map is already populated */
		if (!cpumask_empty(&this_leaf->shared_cpu_map))
			continue;

		cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
		for_each_online_cpu(i) {
			struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

			if (i == cpu || !sib_cpu_ci->info_list ||
				(cpu_to_node(i) != cpu_to_node(cpu)))
				continue;

			sib_leaf = sib_cpu_ci->info_list + index;
			/* SMT cores share all caches */
			if (cpus_are_siblings(i, cpu)) {
				cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
				cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
			}
			/* Node's cores share shared caches */
			if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
				cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
				cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
			}
		}
	}
}

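/*
 * Fill the generic cacheinfo leaves from the probed cache descriptors;
 * the cache size is derived as line size * sets * ways.
 */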
int populate_cache_leaves(unsigned int cpu)
{
	int i, cache_present = current_cpu_data.cache_leaves_present;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf = this_cpu_ci->info_list;
	struct cache_desc *cd, *cdesc = current_cpu_data.cache_leaves;

	for (i = 0; i < cache_present; i++) {
		cd = cdesc + i;

		this_leaf->type = cd->type;
		this_leaf->level = cd->level;
		this_leaf->coherency_line_size = cd->linesz;
		this_leaf->number_of_sets = cd->sets;
		this_leaf->ways_of_associativity = cd->ways;
		this_leaf->size = cd->linesz * cd->sets * cd->ways;
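		/* stash the descriptor flags for cache_leaves_are_shared() */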
		this_leaf->priv = &cd->flags;
		this_leaf++;
	}

	cache_cpumap_setup(cpu);
	this_cpu_ci->cpu_map_populated = true;

	return 0;
}