/*
 * Copyright 2019-2023 NXP
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <string.h>

#include <bl31/interrupt_mgmt.h>
#include <common/runtime_svc.h>
#include <lib/mmio.h>
#include <lib/spinlock.h>
#include <plat/common/platform.h>

#include <dram.h>
#include <gpc.h>

#define IMX_SIP_DDR_DVFS_GET_FREQ_COUNT		0x10
#define IMX_SIP_DDR_DVFS_GET_FREQ_INFO		0x11

struct dram_info dram_info;

/* lock used for DDR DVFS */
spinlock_t dfs_lock;

#if defined(PLAT_imx8mq)
/* OCRAM buffer used to save the dram timing info */
static uint8_t dram_timing_saved[13 * 1024] __aligned(8);
#endif

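/*
 * Note on the DVFS handshake state below (a descriptive summary, not part
 * of the original comments): wfe_done collects one status byte per core so
 * that the core driving the frequency switch can tell when every other
 * online core has parked itself in WFE; wait_ddrc_hwffc_done keeps those
 * cores spinning until the switch completes; dev_fsp tracks which of the
 * two LPDDR4 device frequency-set-points is active, toggling 0/1 per switch.
 */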
static volatile uint32_t wfe_done;
static volatile bool wait_ddrc_hwffc_done = true;
static unsigned int dev_fsp = 0x1;

static uint32_t fsp_init_reg[3][4] = {
	{ DDRC_INIT3(0), DDRC_INIT4(0), DDRC_INIT6(0), DDRC_INIT7(0) },
	{ DDRC_FREQ1_INIT3(0), DDRC_FREQ1_INIT4(0), DDRC_FREQ1_INIT6(0), DDRC_FREQ1_INIT7(0) },
	{ DDRC_FREQ2_INIT3(0), DDRC_FREQ2_INIT4(0), DDRC_FREQ2_INIT6(0), DDRC_FREQ2_INIT7(0) },
};

#if defined(PLAT_imx8mq)
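/*
 * Relocate a pointer that pointed into the original timing blob so that it
 * points at the same offset inside the OCRAM copy. Clearing the old_base
 * bits only recovers the offset if the source blob sits at a sufficiently
 * aligned address (an assumption inherited from how the timing info is
 * placed in DRAM, not something this helper checks).
 */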
static inline struct dram_cfg_param *get_cfg_ptr(void *ptr,
		void *old_base, void *new_base)
{
	uintptr_t offset = (uintptr_t)ptr & ~((uintptr_t)old_base);

	return (struct dram_cfg_param *)(offset + new_base);
}

/* copy the dram timing info from DRAM to OCRAM */
void imx8mq_dram_timing_copy(struct dram_timing_info *from)
{
	struct dram_timing_info *info = (struct dram_timing_info *)dram_timing_saved;

	/* copy the whole 13KB content used for dram timing info */
	memcpy(dram_timing_saved, from, sizeof(dram_timing_saved));

	/* correct the header pointers after the copy into OCRAM */
	info->ddrc_cfg = get_cfg_ptr(info->ddrc_cfg, from, dram_timing_saved);
	info->ddrphy_cfg = get_cfg_ptr(info->ddrphy_cfg, from, dram_timing_saved);
	info->ddrphy_trained_csr = get_cfg_ptr(info->ddrphy_trained_csr, from, dram_timing_saved);
	info->ddrphy_pie = get_cfg_ptr(info->ddrphy_pie, from, dram_timing_saved);
}
#endif

#if defined(PLAT_imx8mp)
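/*
 * Read an LPDDR4 mode register through the DDRC MRR interface. Rough flow,
 * summarized here rather than in the original source: arm the perf-monitor
 * MRR capture, issue the MR read via MRCTRL0/MRCTRL1, poll MRSTAT until the
 * controller is idle, then pull the returned data from the perf-monitor
 * data register; the DERATEEN byte field selects which data byte carries
 * the value.
 */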
static uint32_t lpddr4_mr_read(unsigned int mr_rank, unsigned int mr_addr)
{
	unsigned int tmp, drate_byte;

	tmp = mmio_read_32(DRC_PERF_MON_MRR0_DAT(0));
	mmio_write_32(DRC_PERF_MON_MRR0_DAT(0), tmp | 0x1);
	do {
		tmp = mmio_read_32(DDRC_MRSTAT(0));
	} while (tmp & 0x1);

	mmio_write_32(DDRC_MRCTRL0(0), (mr_rank << 4) | 0x1);
	mmio_write_32(DDRC_MRCTRL1(0), (mr_addr << 8));
	mmio_write_32(DDRC_MRCTRL0(0), (mr_rank << 4) | BIT(31) | 0x1);

	/* Workaround for SNPS STAR 9001549457 */
	do {
		tmp = mmio_read_32(DDRC_MRSTAT(0));
	} while (tmp & 0x1);

	do {
		tmp = mmio_read_32(DRC_PERF_MON_MRR0_DAT(0));
	} while (!(tmp & 0x8));
	tmp = mmio_read_32(DRC_PERF_MON_MRR1_DAT(0));

	drate_byte = (mmio_read_32(DDRC_DERATEEN(0)) >> 4) & 0xff;
	tmp = (tmp >> (drate_byte * 8)) & 0xff;
	mmio_write_32(DRC_PERF_MON_MRR0_DAT(0), 0x4);

	return tmp;
}
#endif

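/*
 * Collect the mode-register values for every frequency setpoint. Each of
 * the DDRC INIT3/INIT4/INIT6/INIT7 registers packs two 16-bit MR values
 * (high and low halfword), so every setpoint yields eight table entries.
 * For LPDDR4, MR12/MR14 are re-read from the device itself, since training
 * may have updated them after the static configuration was written (a
 * summary of the loop below, not an original comment).
 */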
static void get_mr_values(uint32_t (*mr_value)[8])
{
	uint32_t init_val;
	unsigned int i, fsp_index;

	for (fsp_index = 0U; fsp_index < 3U; fsp_index++) {
		for (i = 0U; i < 4U; i++) {
			init_val = mmio_read_32(fsp_init_reg[fsp_index][i]);
			mr_value[fsp_index][2 * i] = init_val >> 16;
			mr_value[fsp_index][2 * i + 1] = init_val & 0xFFFF;
		}

#if defined(PLAT_imx8mp)
		if (dram_info.dram_type == DDRC_LPDDR4) {
			mr_value[fsp_index][5] = lpddr4_mr_read(1, MR12); /* read MR12 from DRAM */
			mr_value[fsp_index][7] = lpddr4_mr_read(1, MR14); /* read MR14 from DRAM */
		}
#endif
	}
}

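/*
 * Save the per-setpoint rank timing registers so the rank-to-rank
 * workaround can restore them later. The FREQ1/FREQ2 copies of
 * DRAMTMG2/DRAMTMG9/RANKCTL live at fixed offsets (0x2000, 0x3000) from
 * the P0 registers, which is what the (i + 1) * 0x1000 stride below
 * computes.
 */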
static void save_rank_setting(void)
{
	uint32_t i, offset;
	uint32_t pstate_num = dram_info.num_fsp;

	/* only support a maximum of 3 setpoints */
	pstate_num = (pstate_num > MAX_FSP_NUM) ? MAX_FSP_NUM : pstate_num;

	for (i = 0U; i < pstate_num; i++) {
		offset = i ? (i + 1) * 0x1000 : 0U;
		dram_info.rank_setting[i][0] = mmio_read_32(DDRC_DRAMTMG2(0) + offset);
		if (dram_info.dram_type != DDRC_LPDDR4) {
			dram_info.rank_setting[i][1] = mmio_read_32(DDRC_DRAMTMG9(0) + offset);
		}
#if !defined(PLAT_imx8mq)
		dram_info.rank_setting[i][2] = mmio_read_32(DDRC_RANKCTL(0) + offset);
#endif
	}
#if defined(PLAT_imx8mq)
	dram_info.rank_setting[0][2] = mmio_read_32(DDRC_RANKCTL(0));
#endif
}

/* Restore the ddrc configs */
void dram_umctl2_init(struct dram_timing_info *timing)
{
	struct dram_cfg_param *ddrc_cfg = timing->ddrc_cfg;
	unsigned int i;

	for (i = 0U; i < timing->ddrc_cfg_num; i++) {
		mmio_write_32(ddrc_cfg->reg, ddrc_cfg->val);
		ddrc_cfg++;
	}

	/* set the default fsp to P0 */
	mmio_write_32(DDRC_MSTR2(0), 0x0);
}


/* Restore the dram PHY config */
void dram_phy_init(struct dram_timing_info *timing)
{
	struct dram_cfg_param *cfg = timing->ddrphy_cfg;
	unsigned int i;

	/* Restore the PHY init config */
	for (i = 0U; i < timing->ddrphy_cfg_num; i++) {
		dwc_ddrphy_apb_wr(cfg->reg, cfg->val);
		cfg++;
	}

	/* Restore the DDR PHY CSRs */
	cfg = timing->ddrphy_trained_csr;
	for (i = 0U; i < timing->ddrphy_trained_csr_num; i++) {
		dwc_ddrphy_apb_wr(cfg->reg, cfg->val);
		cfg++;
	}

	/* Load the PIE image */
	cfg = timing->ddrphy_pie;
	for (i = 0U; i < timing->ddrphy_pie_num; i++) {
		dwc_ddrphy_apb_wr(cfg->reg, cfg->val);
		cfg++;
	}
}

/* EL3 SGI-8 IPI handler for DDR Dynamic frequency scaling */
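/*
 * Secondary-core side of the DVFS handshake (behaviour summarized from the
 * code below): each interrupted core acknowledges the SGI, sets its status
 * byte in wfe_done under dfs_lock, then parks in WFE until the core that is
 * performing the switch clears wait_ddrc_hwffc_done and issues SEV.
 */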
static uint64_t waiting_dvfs(uint32_t id, uint32_t flags,
				void *handle, void *cookie)
{
	uint64_t mpidr = read_mpidr_el1();
	unsigned int cpu_id = MPIDR_AFFLVL0_VAL(mpidr);
	uint32_t irq;

	irq = plat_ic_acknowledge_interrupt();
	if (irq < 1022U) {
		plat_ic_end_of_interrupt(irq);
	}

	/* set the WFE done status */
	spin_lock(&dfs_lock);
	wfe_done |= (1 << (cpu_id * 8));
	dsb();
	spin_unlock(&dfs_lock);

	while (1) {
		/* ddr frequency change done */
		if (!wait_ddrc_hwffc_done) {
			break;
		}

		wfe();
	}

	return 0;
}

void dram_info_init(unsigned long dram_timing_base)
{
	uint32_t ddrc_mstr, current_fsp;
	unsigned int idx = 0;
	uint32_t flags = 0;
	uint32_t rc;
	unsigned int i;

	/* Get the dram type & rank */
	ddrc_mstr = mmio_read_32(DDRC_MSTR(0));

	dram_info.dram_type = ddrc_mstr & DDR_TYPE_MASK;
	dram_info.num_rank = ((ddrc_mstr >> 24) & ACTIVE_RANK_MASK) == 0x3 ?
		DDRC_ACTIVE_TWO_RANK : DDRC_ACTIVE_ONE_RANK;

	/* Get the current fsp info */
	current_fsp = mmio_read_32(DDRC_DFIMISC(0));
	current_fsp = (current_fsp >> 8) & 0xf;
	dram_info.boot_fsp = current_fsp;
	dram_info.current_fsp = current_fsp;

#if defined(PLAT_imx8mq)
	imx8mq_dram_timing_copy((struct dram_timing_info *)dram_timing_base);
	dram_timing_base = (unsigned long)dram_timing_saved;
#endif
	get_mr_values(dram_info.mr_table);

	dram_info.timing_info = (struct dram_timing_info *)dram_timing_base;

	/* get the number of supported fsp */
	for (i = 0U; i < 4U; ++i) {
		if (!dram_info.timing_info->fsp_table[i]) {
			break;
		}
		idx = i;
	}

	/* only support a maximum of 3 setpoints */
	dram_info.num_fsp = (i > MAX_FSP_NUM) ? MAX_FSP_NUM : i;

	/* no valid fsp table, return directly */
	if (i == 0U) {
		return;
	}

	/* save the DRAMTMG2/9 for the rank-to-rank workaround */
	save_rank_setting();

	/* check whether the lowest setpoint rate requires bypass mode */
	if (dram_info.timing_info->fsp_table[idx] < 666) {
		dram_info.bypass_mode = true;
	} else {
		dram_info.bypass_mode = false;
	}

	/* Register the EL3 handler for DDR DVFS */
	set_interrupt_rm_flag(flags, NON_SECURE);
	rc = register_interrupt_type_handler(INTR_TYPE_EL3, waiting_dvfs, flags);
	if (rc != 0) {
		panic();
	}

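	/*
	 * If the boot stages left the DDRC on a setpoint other than P0,
	 * switch back to P0 here so runtime DVFS starts from a known state
	 * (a summary of the intent of the two branches below).
	 */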
	if (dram_info.dram_type == DDRC_LPDDR4 && current_fsp != 0x0) {
		/* flush the L1/L2 cache */
		dcsw_op_all(DCCSW);
		lpddr4_swffc(&dram_info, dev_fsp, 0x0);
		dev_fsp = (~dev_fsp) & 0x1;
	} else if (current_fsp != 0x0) {
		/* flush the L1/L2 cache */
		dcsw_op_all(DCCSW);
		ddr4_swffc(&dram_info, 0x0);
	}
}

/*
 * For each freq return the following info:
 *
 * r1: data rate
 * r2: 1 + dram_core parent index
 * r3: 1 + dram_alt parent index
 * r4: 1 + dram_apb parent index
 *
 * The parent indices can be used by an OS that manages the source clocks to
 * enable them ahead of the switch.
 *
 * A parent value of "0" means "don't care".
 *
 * The current implementation of the freq switch is hardcoded in
 * plat/imx/common/imx8m/clock.c but in theory this can be enhanced to
 * support a wide variety of rates.
 */
int dram_dvfs_get_freq_info(void *handle, u_register_t index)
{
	switch (index) {
	case 0:
		SMC_RET4(handle, dram_info.timing_info->fsp_table[0],
			1, 0, 5);
	case 1:
		if (!dram_info.bypass_mode) {
			SMC_RET4(handle, dram_info.timing_info->fsp_table[1],
				1, 0, 0);
		}
		SMC_RET4(handle, dram_info.timing_info->fsp_table[1],
			2, 2, 4);
	case 2:
		if (!dram_info.bypass_mode) {
			SMC_RET4(handle, dram_info.timing_info->fsp_table[2],
				1, 0, 0);
		}
		SMC_RET4(handle, dram_info.timing_info->fsp_table[2],
			2, 3, 3);
	case 3:
		SMC_RET4(handle, dram_info.timing_info->fsp_table[3],
			1, 0, 0);
	default:
		SMC_RET1(handle, -3);
	}
}

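/*
 * Main SIP handler for DDR DVFS. Beyond the two query sub-commands, x1 is a
 * target setpoint index and x2 is a bitmask of online cores with one status
 * byte per core (matching the wfe_done encoding above). The calling core
 * SGIs the others into WFE, waits until every status byte is set, performs
 * the software frequency change, then wakes everyone with SEV (a summary of
 * the flow below).
 */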
int dram_dvfs_handler(uint32_t smc_fid, void *handle,
	u_register_t x1, u_register_t x2, u_register_t x3)
{
	uint64_t mpidr = read_mpidr_el1();
	unsigned int cpu_id = MPIDR_AFFLVL0_VAL(mpidr);
	unsigned int fsp_index = x1;
	uint32_t online_cores = x2;

	if (x1 == IMX_SIP_DDR_DVFS_GET_FREQ_COUNT) {
		SMC_RET1(handle, dram_info.num_fsp);
	} else if (x1 == IMX_SIP_DDR_DVFS_GET_FREQ_INFO) {
		return dram_dvfs_get_freq_info(handle, x2);
	} else if (x1 < 3U) {
		wait_ddrc_hwffc_done = true;
		dsb();

		/* trigger the SGI IPI to inform the other cores */
		for (int i = 0; i < PLATFORM_CORE_COUNT; i++) {
			if (cpu_id != i && (online_cores & (0x1 << (i * 8)))) {
				plat_ic_raise_el3_sgi(0x8, i);
			}
		}
#if defined(PLAT_imx8mq)
		for (unsigned int i = 0; i < PLATFORM_CORE_COUNT; i++) {
			if (i != cpu_id && (online_cores & (1 << (i * 8)))) {
				imx_gpc_core_wake(1 << i);
			}
		}
#endif
		/* make sure all the other cores are in WFE */
		online_cores &= ~(0x1 << (cpu_id * 8));
		while (1) {
			if (online_cores == wfe_done) {
				break;
			}
		}

		/* flush the L1/L2 cache */
		dcsw_op_all(DCCSW);

		if (dram_info.dram_type == DDRC_LPDDR4) {
			lpddr4_swffc(&dram_info, dev_fsp, fsp_index);
			dev_fsp = (~dev_fsp) & 0x1;
		} else {
			ddr4_swffc(&dram_info, fsp_index);
		}

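		/*
		 * Signal completion: record the new setpoint, clear the
		 * handshake state, and use DSB + SEV so the cores parked in
		 * WFE in waiting_dvfs() resume execution.
		 */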
		dram_info.current_fsp = fsp_index;
		wait_ddrc_hwffc_done = false;
		wfe_done = 0;
		dsb();
		sev();
		isb();
	}

	SMC_RET1(handle, 0);
}