1 /*
2 * Copyright 2023 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23 #include <linux/firmware.h>
24 #include <linux/module.h>
25 #include <linux/pci.h>
26 #include <linux/reboot.h>
27
28 #define SWSMU_CODE_LAYER_L3
29
30 #include "amdgpu.h"
31 #include "amdgpu_smu.h"
32 #include "atomfirmware.h"
33 #include "amdgpu_atomfirmware.h"
34 #include "amdgpu_atombios.h"
35 #include "smu_v14_0.h"
36 #include "soc15_common.h"
37 #include "atom.h"
38 #include "amdgpu_ras.h"
39 #include "smu_cmn.h"
40
41 #include "asic_reg/thm/thm_14_0_2_offset.h"
42 #include "asic_reg/thm/thm_14_0_2_sh_mask.h"
43 #include "asic_reg/mp/mp_14_0_2_offset.h"
44 #include "asic_reg/mp/mp_14_0_2_sh_mask.h"
45
46 #define regMP1_SMN_IH_SW_INT_mp1_14_0_0 0x0341
47 #define regMP1_SMN_IH_SW_INT_mp1_14_0_0_BASE_IDX 0
48 #define regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0 0x0342
49 #define regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0_BASE_IDX 0
50
51 const int decoded_link_speed[5] = {1, 2, 3, 4, 5};
52 const int decoded_link_width[8] = {0, 1, 2, 4, 8, 12, 16, 32};
53 /*
54 * DO NOT use these for err/warn/info/debug messages.
55 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
56 * They are more MGPU friendly.
57 */
58 #undef pr_err
59 #undef pr_warn
60 #undef pr_info
61 #undef pr_debug
62
63 MODULE_FIRMWARE("amdgpu/smu_14_0_2.bin");
64 MODULE_FIRMWARE("amdgpu/smu_14_0_3.bin");
65
66 #define ENABLE_IMU_ARG_GFXOFF_ENABLE 1
67
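/* Request the SMC firmware image for this MP1 IP version, record its
 * version, and (when firmware is loaded through PSP) add it to the
 * firmware list so PSP can load it.
 */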
68 int smu_v14_0_init_microcode(struct smu_context *smu)
69 {
70 struct amdgpu_device *adev = smu->adev;
71 char ucode_prefix[15];
72 int err = 0;
73 const struct smc_firmware_header_v1_0 *hdr;
74 const struct common_firmware_header *header;
75 struct amdgpu_firmware_info *ucode = NULL;
76
77 /* No need to load SMU firmware in IOV mode */
78 if (amdgpu_sriov_vf(adev))
79 return 0;
80
81 amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));
82 err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
83 "amdgpu/%s.bin", ucode_prefix);
84 if (err)
85 goto out;
86
87 hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
88 amdgpu_ucode_print_smc_hdr(&hdr->header);
89 adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
90
91 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
92 ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
93 ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
94 ucode->fw = adev->pm.fw;
95 header = (const struct common_firmware_header *)ucode->fw->data;
96 adev->firmware.fw_size +=
97 ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
98 }
99
100 out:
101 if (err)
102 amdgpu_ucode_release(&adev->pm.fw);
103 return err;
104 }
105
106 void smu_v14_0_fini_microcode(struct smu_context *smu)
107 {
108 struct amdgpu_device *adev = smu->adev;
109
110 amdgpu_ucode_release(&adev->pm.fw);
111 adev->pm.fw_version = 0;
112 }
113
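/* Directly load the SMC firmware: copy the image into MP1 SRAM word by
 * word, toggle the LX3 reset bit, then poll the MP1 firmware flags until
 * the interrupts-enabled bit is set or adev->usec_timeout expires.
 */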
114 int smu_v14_0_load_microcode(struct smu_context *smu)
115 {
116 struct amdgpu_device *adev = smu->adev;
117 const uint32_t *src;
118 const struct smc_firmware_header_v1_0 *hdr;
119 uint32_t addr_start = MP1_SRAM;
120 uint32_t i;
121 uint32_t smc_fw_size;
122 uint32_t mp1_fw_flags;
123
124 hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
125 src = (const uint32_t *)(adev->pm.fw->data +
126 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
127 smc_fw_size = hdr->header.ucode_size_bytes;
128
129 for (i = 1; i < smc_fw_size/4 - 1; i++) {
130 WREG32_PCIE(addr_start, src[i]);
131 addr_start += 4;
132 }
133
134 WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
135 1 & MP1_SMN_PUB_CTRL__LX3_RESET_MASK);
136 WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
137 1 & ~MP1_SMN_PUB_CTRL__LX3_RESET_MASK);
138
139 for (i = 0; i < adev->usec_timeout; i++) {
140 if (smu->is_apu)
141 mp1_fw_flags = RREG32_PCIE(MP1_Public |
142 (smnMP1_FIRMWARE_FLAGS_14_0_0 & 0xffffffff));
143 else
144 mp1_fw_flags = RREG32_PCIE(MP1_Public |
145 (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
146 if ((mp1_fw_flags & MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
147 MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
148 break;
149 udelay(1);
150 }
151
152 if (i == adev->usec_timeout)
153 return -ETIME;
154
155 return 0;
156 }
157
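/* With SCPM enabled and PSP firmware loading, fetch the driver soft
 * pptable from the SMC firmware image and register it as the PPTABLE
 * ucode. Skipped for SR-IOV, for SMU 14.0.2/14.0.3, and when the vbios
 * carries the pptable (pptable_id == 0).
 */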
158 int smu_v14_0_init_pptable_microcode(struct smu_context *smu)
159 {
160 struct amdgpu_device *adev = smu->adev;
161 struct amdgpu_firmware_info *ucode = NULL;
162 uint32_t size = 0, pptable_id = 0;
163 int ret = 0;
164 void *table;
165
166 /* No need to load SMU firmware in IOV mode */
167 if (amdgpu_sriov_vf(adev))
168 return 0;
169
170 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
171 return 0;
172
173 if (!adev->scpm_enabled)
174 return 0;
175
176 if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 2)) ||
177 (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 3)))
178 return 0;
179
180 /* override pptable_id from driver parameter */
181 if (amdgpu_smu_pptable_id >= 0) {
182 pptable_id = amdgpu_smu_pptable_id;
183 dev_info(adev->dev, "override pptable id %d\n", pptable_id);
184 } else {
185 pptable_id = smu->smu_table.boot_values.pp_table_id;
186 }
187
188 /* "pptable_id == 0" means vbios carries the pptable. */
189 if (!pptable_id)
190 return 0;
191
192 ret = smu_v14_0_get_pptable_from_firmware(smu, &table, &size, pptable_id);
193 if (ret)
194 return ret;
195
196 smu->pptable_firmware.data = table;
197 smu->pptable_firmware.size = size;
198
199 ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_PPTABLE];
200 ucode->ucode_id = AMDGPU_UCODE_ID_PPTABLE;
201 ucode->fw = &smu->pptable_firmware;
202 adev->firmware.fw_size +=
203 ALIGN(smu->pptable_firmware.size, PAGE_SIZE);
204
205 return 0;
206 }
207
208 int smu_v14_0_check_fw_status(struct smu_context *smu)
209 {
210 struct amdgpu_device *adev = smu->adev;
211 uint32_t mp1_fw_flags;
212
213 if (smu->is_apu)
214 mp1_fw_flags = RREG32_PCIE(MP1_Public |
215 (smnMP1_FIRMWARE_FLAGS_14_0_0 & 0xffffffff));
216 else
217 mp1_fw_flags = RREG32_PCIE(MP1_Public |
218 (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
219
220 if ((mp1_fw_flags & MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
221 MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
222 return 0;
223
224 return -EIO;
225 }
226
227 int smu_v14_0_check_fw_version(struct smu_context *smu)
228 {
229 struct amdgpu_device *adev = smu->adev;
230 uint32_t if_version = 0xff, smu_version = 0xff;
231 uint8_t smu_program, smu_major, smu_minor, smu_debug;
232 int ret = 0;
233
234 ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
235 if (ret)
236 return ret;
237
238 smu_program = (smu_version >> 24) & 0xff;
239 smu_major = (smu_version >> 16) & 0xff;
240 smu_minor = (smu_version >> 8) & 0xff;
241 smu_debug = (smu_version >> 0) & 0xff;
242 if (smu->is_apu)
243 adev->pm.fw_version = smu_version;
244
245 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
246 case IP_VERSION(14, 0, 0):
247 case IP_VERSION(14, 0, 4):
248 smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0;
249 break;
250 case IP_VERSION(14, 0, 1):
251 smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_1;
252 break;
253 case IP_VERSION(14, 0, 2):
254 case IP_VERSION(14, 0, 3):
255 smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_2;
256 break;
257 default:
258 dev_err(adev->dev, "smu unsupported IP version: 0x%x.\n",
259 amdgpu_ip_version(adev, MP1_HWIP, 0));
260 smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_INV;
261 break;
262 }
263
264 if (adev->pm.fw)
265 dev_dbg(smu->adev->dev, "smu fw reported program %d, version = 0x%08x (%d.%d.%d)\n",
266 smu_program, smu_version, smu_major, smu_minor, smu_debug);
267
268 /*
269 * 1. An if_version mismatch is not critical, as our firmware is designed
270 * to be backward compatible.
271 * 2. New firmware usually brings some optimizations, but those are
272 * visible only when paired with the matching driver.
273 * Considering the above, we just leave the user a warning message instead
274 * of halting driver loading.
275 */
276 if (if_version != smu->smc_driver_if_version) {
277 dev_info(adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
278 "smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n",
279 smu->smc_driver_if_version, if_version,
280 smu_program, smu_version, smu_major, smu_minor, smu_debug);
281 dev_info(adev->dev, "SMU driver if version not matched\n");
282 }
283
284 return ret;
285 }
286
287 static int smu_v14_0_set_pptable_v2_0(struct smu_context *smu, void **table, uint32_t *size)
288 {
289 struct amdgpu_device *adev = smu->adev;
290 uint32_t ppt_offset_bytes;
291 const struct smc_firmware_header_v2_0 *v2;
292
293 v2 = (const struct smc_firmware_header_v2_0 *) adev->pm.fw->data;
294
295 ppt_offset_bytes = le32_to_cpu(v2->ppt_offset_bytes);
296 *size = le32_to_cpu(v2->ppt_size_bytes);
297 *table = (uint8_t *)v2 + ppt_offset_bytes;
298
299 return 0;
300 }
301
302 static int smu_v14_0_set_pptable_v2_1(struct smu_context *smu, void **table,
303 uint32_t *size, uint32_t pptable_id)
304 {
305 struct amdgpu_device *adev = smu->adev;
306 const struct smc_firmware_header_v2_1 *v2_1;
307 struct smc_soft_pptable_entry *entries;
308 uint32_t pptable_count = 0;
309 int i = 0;
310
311 v2_1 = (const struct smc_firmware_header_v2_1 *) adev->pm.fw->data;
312 entries = (struct smc_soft_pptable_entry *)
313 ((uint8_t *)v2_1 + le32_to_cpu(v2_1->pptable_entry_offset));
314 pptable_count = le32_to_cpu(v2_1->pptable_count);
315 for (i = 0; i < pptable_count; i++) {
316 if (le32_to_cpu(entries[i].id) == pptable_id) {
317 *table = ((uint8_t *)v2_1 + le32_to_cpu(entries[i].ppt_offset_bytes));
318 *size = le32_to_cpu(entries[i].ppt_size_bytes);
319 break;
320 }
321 }
322
323 if (i == pptable_count)
324 return -EINVAL;
325
326 return 0;
327 }
328
329 static int smu_v14_0_get_pptable_from_vbios(struct smu_context *smu, void **table, uint32_t *size)
330 {
331 struct amdgpu_device *adev = smu->adev;
332 uint16_t atom_table_size;
333 uint8_t frev, crev;
334 int ret, index;
335
336 dev_info(adev->dev, "use vbios provided pptable\n");
337 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
338 powerplayinfo);
339
340 ret = amdgpu_atombios_get_data_table(adev, index, &atom_table_size, &frev, &crev,
341 (uint8_t **)table);
342 if (ret)
343 return ret;
344
345 if (size)
346 *size = atom_table_size;
347
348 return 0;
349 }
350
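/* Pick a soft pptable embedded in the SMC firmware image, dispatching on
 * the v2.x firmware header minor version (v2.0: single table, v2.1: table
 * selected by pptable_id).
 */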
351 int smu_v14_0_get_pptable_from_firmware(struct smu_context *smu,
352 void **table,
353 uint32_t *size,
354 uint32_t pptable_id)
355 {
356 const struct smc_firmware_header_v1_0 *hdr;
357 struct amdgpu_device *adev = smu->adev;
358 uint16_t version_major, version_minor;
359 int ret;
360
361 hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
362 if (!hdr)
363 return -EINVAL;
364
365 dev_info(adev->dev, "use driver provided pptable %d\n", pptable_id);
366
367 version_major = le16_to_cpu(hdr->header.header_version_major);
368 version_minor = le16_to_cpu(hdr->header.header_version_minor);
369 if (version_major != 2) {
370 dev_err(adev->dev, "Unsupported smu firmware version %d.%d\n",
371 version_major, version_minor);
372 return -EINVAL;
373 }
374
375 switch (version_minor) {
376 case 0:
377 ret = smu_v14_0_set_pptable_v2_0(smu, table, size);
378 break;
379 case 1:
380 ret = smu_v14_0_set_pptable_v2_1(smu, table, size, pptable_id);
381 break;
382 default:
383 ret = -EINVAL;
384 break;
385 }
386
387 return ret;
388 }
389
390 int smu_v14_0_setup_pptable(struct smu_context *smu)
391 {
392 struct amdgpu_device *adev = smu->adev;
393 uint32_t size = 0, pptable_id = 0;
394 void *table;
395 int ret = 0;
396
397 /* override pptable_id from driver parameter */
398 if (amdgpu_smu_pptable_id >= 0) {
399 pptable_id = amdgpu_smu_pptable_id;
400 dev_info(adev->dev, "override pptable id %d\n", pptable_id);
401 } else {
402 pptable_id = smu->smu_table.boot_values.pp_table_id;
403 }
404
405 /* force using vbios pptable in sriov mode */
406 if ((amdgpu_sriov_vf(adev) || !pptable_id) && (amdgpu_emu_mode != 1))
407 ret = smu_v14_0_get_pptable_from_vbios(smu, &table, &size);
408 else
409 ret = smu_v14_0_get_pptable_from_firmware(smu, &table, &size, pptable_id);
410
411 if (ret)
412 return ret;
413
414 if (!smu->smu_table.power_play_table)
415 smu->smu_table.power_play_table = table;
416 if (!smu->smu_table.power_play_table_size)
417 smu->smu_table.power_play_table_size = size;
418
419 return 0;
420 }
421
422 int smu_v14_0_init_smc_tables(struct smu_context *smu)
423 {
424 struct smu_table_context *smu_table = &smu->smu_table;
425 struct smu_table *tables = smu_table->tables;
426 int ret = 0;
427
428 smu_table->driver_pptable =
429 kzalloc(tables[SMU_TABLE_PPTABLE].size, GFP_KERNEL);
430 if (!smu_table->driver_pptable) {
431 ret = -ENOMEM;
432 goto err0_out;
433 }
434
435 smu_table->max_sustainable_clocks =
436 kzalloc(sizeof(struct smu_14_0_max_sustainable_clocks), GFP_KERNEL);
437 if (!smu_table->max_sustainable_clocks) {
438 ret = -ENOMEM;
439 goto err1_out;
440 }
441
442 if (tables[SMU_TABLE_OVERDRIVE].size) {
443 smu_table->overdrive_table =
444 kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
445 if (!smu_table->overdrive_table) {
446 ret = -ENOMEM;
447 goto err2_out;
448 }
449
450 smu_table->boot_overdrive_table =
451 kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
452 if (!smu_table->boot_overdrive_table) {
453 ret = -ENOMEM;
454 goto err3_out;
455 }
456
457 smu_table->user_overdrive_table =
458 kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
459 if (!smu_table->user_overdrive_table) {
460 ret = -ENOMEM;
461 goto err4_out;
462 }
463 }
464
465 smu_table->combo_pptable =
466 kzalloc(tables[SMU_TABLE_COMBO_PPTABLE].size, GFP_KERNEL);
467 if (!smu_table->combo_pptable) {
468 ret = -ENOMEM;
469 goto err5_out;
470 }
471
472 return 0;
473
474 err5_out:
475 kfree(smu_table->user_overdrive_table);
476 err4_out:
477 kfree(smu_table->boot_overdrive_table);
478 err3_out:
479 kfree(smu_table->overdrive_table);
480 err2_out:
481 kfree(smu_table->max_sustainable_clocks);
482 err1_out:
483 kfree(smu_table->driver_pptable);
484 err0_out:
485 return ret;
486 }
487
488 int smu_v14_0_fini_smc_tables(struct smu_context *smu)
489 {
490 struct smu_table_context *smu_table = &smu->smu_table;
491 struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
492
493 kfree(smu_table->gpu_metrics_table);
494 kfree(smu_table->combo_pptable);
495 kfree(smu_table->boot_overdrive_table);
496 kfree(smu_table->overdrive_table);
497 kfree(smu_table->max_sustainable_clocks);
498 kfree(smu_table->driver_pptable);
499 smu_table->gpu_metrics_table = NULL;
500 smu_table->combo_pptable = NULL;
501 smu_table->boot_overdrive_table = NULL;
502 smu_table->overdrive_table = NULL;
503 smu_table->max_sustainable_clocks = NULL;
504 smu_table->driver_pptable = NULL;
505 kfree(smu_table->hardcode_pptable);
506 smu_table->hardcode_pptable = NULL;
507
508 kfree(smu_table->ecc_table);
509 kfree(smu_table->metrics_table);
510 kfree(smu_table->watermarks_table);
511 smu_table->ecc_table = NULL;
512 smu_table->metrics_table = NULL;
513 smu_table->watermarks_table = NULL;
514 smu_table->metrics_time = 0;
515
516 kfree(smu_dpm->dpm_context);
517 kfree(smu_dpm->golden_dpm_context);
518 kfree(smu_dpm->dpm_current_power_state);
519 kfree(smu_dpm->dpm_request_power_state);
520 smu_dpm->dpm_context = NULL;
521 smu_dpm->golden_dpm_context = NULL;
522 smu_dpm->dpm_context_size = 0;
523 smu_dpm->dpm_current_power_state = NULL;
524 smu_dpm->dpm_request_power_state = NULL;
525
526 return 0;
527 }
528
529 int smu_v14_0_init_power(struct smu_context *smu)
530 {
531 struct smu_power_context *smu_power = &smu->smu_power;
532
533 if (smu_power->power_context || smu_power->power_context_size != 0)
534 return -EINVAL;
535
536 smu_power->power_context = kzalloc(sizeof(struct smu_14_0_dpm_context),
537 GFP_KERNEL);
538 if (!smu_power->power_context)
539 return -ENOMEM;
540 smu_power->power_context_size = sizeof(struct smu_14_0_dpm_context);
541
542 return 0;
543 }
544
545 int smu_v14_0_fini_power(struct smu_context *smu)
546 {
547 struct smu_power_context *smu_power = &smu->smu_power;
548
549 if (!smu_power->power_context || smu_power->power_context_size == 0)
550 return -EINVAL;
551
552 kfree(smu_power->power_context);
553 smu_power->power_context = NULL;
554 smu_power->power_context_size = 0;
555
556 return 0;
557 }
558
559 int smu_v14_0_get_vbios_bootup_values(struct smu_context *smu)
560 {
561 int ret, index;
562 uint16_t size;
563 uint8_t frev, crev;
564 struct atom_common_table_header *header;
565 struct atom_firmware_info_v3_4 *v_3_4;
566 struct atom_firmware_info_v3_3 *v_3_3;
567 struct atom_firmware_info_v3_1 *v_3_1;
568 struct atom_smu_info_v3_6 *smu_info_v3_6;
569 struct atom_smu_info_v4_0 *smu_info_v4_0;
570
571 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
572 firmwareinfo);
573
574 ret = amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev,
575 (uint8_t **)&header);
576 if (ret)
577 return ret;
578
579 if (header->format_revision != 3) {
580 dev_err(smu->adev->dev, "unknown atom_firmware_info version for smu14!\n");
581 return -EINVAL;
582 }
583
584 switch (header->content_revision) {
585 case 0:
586 case 1:
587 case 2:
588 v_3_1 = (struct atom_firmware_info_v3_1 *)header;
589 smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
590 smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
591 smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
592 smu->smu_table.boot_values.socclk = 0;
593 smu->smu_table.boot_values.dcefclk = 0;
594 smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
595 smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
596 smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
597 smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
598 smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
599 smu->smu_table.boot_values.pp_table_id = 0;
600 break;
601 case 3:
602 v_3_3 = (struct atom_firmware_info_v3_3 *)header;
603 smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
604 smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
605 smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
606 smu->smu_table.boot_values.socclk = 0;
607 smu->smu_table.boot_values.dcefclk = 0;
608 smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
609 smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
610 smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
611 smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
612 smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
613 smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
614 break;
615 case 4:
616 default:
617 v_3_4 = (struct atom_firmware_info_v3_4 *)header;
618 smu->smu_table.boot_values.revision = v_3_4->firmware_revision;
619 smu->smu_table.boot_values.gfxclk = v_3_4->bootup_sclk_in10khz;
620 smu->smu_table.boot_values.uclk = v_3_4->bootup_mclk_in10khz;
621 smu->smu_table.boot_values.socclk = 0;
622 smu->smu_table.boot_values.dcefclk = 0;
623 smu->smu_table.boot_values.vddc = v_3_4->bootup_vddc_mv;
624 smu->smu_table.boot_values.vddci = v_3_4->bootup_vddci_mv;
625 smu->smu_table.boot_values.mvddc = v_3_4->bootup_mvddc_mv;
626 smu->smu_table.boot_values.vdd_gfx = v_3_4->bootup_vddgfx_mv;
627 smu->smu_table.boot_values.cooling_id = v_3_4->coolingsolution_id;
628 smu->smu_table.boot_values.pp_table_id = v_3_4->pplib_pptable_id;
629 break;
630 }
631
632 smu->smu_table.boot_values.format_revision = header->format_revision;
633 smu->smu_table.boot_values.content_revision = header->content_revision;
634
635 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
636 smu_info);
637 if (!amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev,
638 (uint8_t **)&header)) {
639
640 if ((frev == 3) && (crev == 6)) {
641 smu_info_v3_6 = (struct atom_smu_info_v3_6 *)header;
642
643 smu->smu_table.boot_values.socclk = smu_info_v3_6->bootup_socclk_10khz;
644 smu->smu_table.boot_values.vclk = smu_info_v3_6->bootup_vclk_10khz;
645 smu->smu_table.boot_values.dclk = smu_info_v3_6->bootup_dclk_10khz;
646 smu->smu_table.boot_values.fclk = smu_info_v3_6->bootup_fclk_10khz;
647 } else if ((frev == 3) && (crev == 1)) {
648 return 0;
649 } else if ((frev == 4) && (crev == 0)) {
650 smu_info_v4_0 = (struct atom_smu_info_v4_0 *)header;
651
652 smu->smu_table.boot_values.socclk = smu_info_v4_0->bootup_socclk_10khz;
653 smu->smu_table.boot_values.dcefclk = smu_info_v4_0->bootup_dcefclk_10khz;
654 smu->smu_table.boot_values.vclk = smu_info_v4_0->bootup_vclk0_10khz;
655 smu->smu_table.boot_values.dclk = smu_info_v4_0->bootup_dclk0_10khz;
656 smu->smu_table.boot_values.fclk = smu_info_v4_0->bootup_fclk_10khz;
657 } else {
658 dev_warn(smu->adev->dev, "Unexpected and unhandled version: %d.%d\n",
659 (uint32_t)frev, (uint32_t)crev);
660 }
661 }
662
663 return 0;
664 }
665
666
667 int smu_v14_0_notify_memory_pool_location(struct smu_context *smu)
668 {
669 struct smu_table_context *smu_table = &smu->smu_table;
670 struct smu_table *memory_pool = &smu_table->memory_pool;
671 int ret = 0;
672 uint64_t address;
673 uint32_t address_low, address_high;
674
675 if (memory_pool->size == 0 || memory_pool->cpu_addr == NULL)
676 return ret;
677
678 address = memory_pool->mc_address;
679 address_high = (uint32_t)upper_32_bits(address);
680 address_low = (uint32_t)lower_32_bits(address);
681
682 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
683 address_high, NULL);
684 if (ret)
685 return ret;
686 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
687 address_low, NULL);
688 if (ret)
689 return ret;
690 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
691 (uint32_t)memory_pool->size, NULL);
692 if (ret)
693 return ret;
694
695 return ret;
696 }
697
698 int smu_v14_0_set_driver_table_location(struct smu_context *smu)
699 {
700 struct smu_table *driver_table = &smu->smu_table.driver_table;
701 int ret = 0;
702
703 if (driver_table->mc_address) {
704 ret = smu_cmn_send_smc_msg_with_param(smu,
705 SMU_MSG_SetDriverDramAddrHigh,
706 upper_32_bits(driver_table->mc_address),
707 NULL);
708 if (!ret)
709 ret = smu_cmn_send_smc_msg_with_param(smu,
710 SMU_MSG_SetDriverDramAddrLow,
711 lower_32_bits(driver_table->mc_address),
712 NULL);
713 }
714
715 return ret;
716 }
717
718 int smu_v14_0_set_tool_table_location(struct smu_context *smu)
719 {
720 int ret = 0;
721 struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];
722
723 if (tool_table->mc_address) {
724 ret = smu_cmn_send_smc_msg_with_param(smu,
725 SMU_MSG_SetToolsDramAddrHigh,
726 upper_32_bits(tool_table->mc_address),
727 NULL);
728 if (!ret)
729 ret = smu_cmn_send_smc_msg_with_param(smu,
730 SMU_MSG_SetToolsDramAddrLow,
731 lower_32_bits(tool_table->mc_address),
732 NULL);
733 }
734
735 return ret;
736 }
737
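/* Split the 64-bit allowed-feature bitmap into two 32-bit words and hand
 * them to the SMU via the SetAllowedFeaturesMaskHigh/Low messages.
 */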
738 int smu_v14_0_set_allowed_mask(struct smu_context *smu)
739 {
740 struct smu_feature *feature = &smu->smu_feature;
741 int ret = 0;
742 uint32_t feature_mask[2];
743
744 if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) ||
745 feature->feature_num < 64)
746 return -EINVAL;
747
748 bitmap_to_arr32(feature_mask, feature->allowed, 64);
749
750 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
751 feature_mask[1], NULL);
752 if (ret)
753 return ret;
754
755 return smu_cmn_send_smc_msg_with_param(smu,
756 SMU_MSG_SetAllowedFeaturesMaskLow,
757 feature_mask[0],
758 NULL);
759 }
760
761 int smu_v14_0_gfx_off_control(struct smu_context *smu, bool enable)
762 {
763 int ret = 0;
764 struct amdgpu_device *adev = smu->adev;
765
766 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
767 case IP_VERSION(14, 0, 0):
768 case IP_VERSION(14, 0, 1):
769 case IP_VERSION(14, 0, 2):
770 case IP_VERSION(14, 0, 3):
771 case IP_VERSION(14, 0, 4):
772 if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
773 return 0;
774 if (enable)
775 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
776 else
777 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
778 break;
779 default:
780 break;
781 }
782
783 return ret;
784 }
785
786 int smu_v14_0_system_features_control(struct smu_context *smu,
787 bool en)
788 {
789 return smu_cmn_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
790 SMU_MSG_DisableAllSmuFeatures), NULL);
791 }
792
793 int smu_v14_0_notify_display_change(struct smu_context *smu)
794 {
795 int ret = 0;
796
797 if (!smu->pm_enabled)
798 return ret;
799
800 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
801 smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
802 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL);
803
804 return ret;
805 }
806
807 int smu_v14_0_get_current_power_limit(struct smu_context *smu,
808 uint32_t *power_limit)
809 {
810 int power_src;
811 int ret = 0;
812
813 if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT))
814 return -EINVAL;
815
816 power_src = smu_cmn_to_asic_specific_index(smu,
817 CMN2ASIC_MAPPING_PWR,
818 smu->adev->pm.ac_power ?
819 SMU_POWER_SOURCE_AC :
820 SMU_POWER_SOURCE_DC);
821 if (power_src < 0)
822 return -EINVAL;
823
824 ret = smu_cmn_send_smc_msg_with_param(smu,
825 SMU_MSG_GetPptLimit,
826 power_src << 16,
827 power_limit);
828 if (ret)
829 dev_err(smu->adev->dev, "[%s] get PPT limit failed!", __func__);
830
831 return ret;
832 }
833
834 int smu_v14_0_set_power_limit(struct smu_context *smu,
835 enum smu_ppt_limit_type limit_type,
836 uint32_t limit)
837 {
838 int ret = 0;
839
840 if (limit_type != SMU_DEFAULT_PPT_LIMIT)
841 return -EINVAL;
842
843 if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
844 dev_err(smu->adev->dev, "Setting new power limit is not supported!\n");
845 return -EOPNOTSUPP;
846 }
847
848 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, limit, NULL);
849 if (ret) {
850 dev_err(smu->adev->dev, "[%s] Set power limit Failed!\n", __func__);
851 return ret;
852 }
853
854 smu->current_power_limit = limit;
855
856 return 0;
857 }
858
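/* Program the THM thermal-interrupt thresholds and the MP1 software
 * interrupt mask according to the requested IRQ state. APUs and dGPUs use
 * different MP1 register offsets.
 */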
859 static int smu_v14_0_set_irq_state(struct amdgpu_device *adev,
860 struct amdgpu_irq_src *source,
861 unsigned type,
862 enum amdgpu_interrupt_state state)
863 {
864 struct smu_context *smu = adev->powerplay.pp_handle;
865 uint32_t low, high;
866 uint32_t val = 0;
867
868 switch (state) {
869 case AMDGPU_IRQ_STATE_DISABLE:
870 /* For THM irqs */
871 val = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL);
872 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 1);
873 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 1);
874 WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, val);
875
876 WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_ENA, 0);
877
878 /* For MP1 SW irqs */
879 if (smu->is_apu) {
880 val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0);
881 val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1);
882 WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0, val);
883 } else {
884 val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
885 val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1);
886 WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);
887 }
888
889 break;
890 case AMDGPU_IRQ_STATE_ENABLE:
891 /* For THM irqs */
892 low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP,
893 smu->thermal_range.min / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES);
894 high = min(SMU_THERMAL_MAXIMUM_ALERT_TEMP,
895 smu->thermal_range.software_shutdown_temp);
896 val = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL);
897 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
898 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
899 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0);
900 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0);
901 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high & 0xff));
902 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low & 0xff));
903 val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
904 WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, val);
905
906 val = (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT);
907 val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT);
908 val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT);
909 WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_ENA, val);
910
911 /* For MP1 SW irqs */
912 if (smu->is_apu) {
913 val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_mp1_14_0_0);
914 val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE);
915 val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0);
916 WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_mp1_14_0_0, val);
917
918 val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0);
919 val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0);
920 WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0, val);
921 } else {
922 val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT);
923 val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE);
924 val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0);
925 WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT, val);
926
927 val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
928 val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0);
929 WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);
930 }
931
932 break;
933 default:
934 break;
935 }
936
937 return 0;
938 }
939
940 #define THM_11_0__SRCID__THM_DIG_THERM_L2H 0 /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */
941 #define THM_11_0__SRCID__THM_DIG_THERM_H2L 1 /* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */
942
943 static int smu_v14_0_irq_process(struct amdgpu_device *adev,
944 struct amdgpu_irq_src *source,
945 struct amdgpu_iv_entry *entry)
946 {
947 struct smu_context *smu = adev->powerplay.pp_handle;
948 uint32_t client_id = entry->client_id;
949 uint32_t src_id = entry->src_id;
950
951 if (client_id == SOC15_IH_CLIENTID_THM) {
952 switch (src_id) {
953 case THM_11_0__SRCID__THM_DIG_THERM_L2H:
954 schedule_delayed_work(&smu->swctf_delayed_work,
955 msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY));
956 break;
957 case THM_11_0__SRCID__THM_DIG_THERM_H2L:
958 dev_emerg(adev->dev, "ERROR: GPU under temperature range detected\n");
959 break;
960 default:
961 dev_emerg(adev->dev, "ERROR: unknown THM interrupt src id (%d)\n",
962 src_id);
963 break;
964 }
965 }
966
967 return 0;
968 }
969
970 static const struct amdgpu_irq_src_funcs smu_v14_0_irq_funcs = {
971 .set = smu_v14_0_set_irq_state,
972 .process = smu_v14_0_irq_process,
973 };
974
975 int smu_v14_0_register_irq_handler(struct smu_context *smu)
976 {
977 struct amdgpu_device *adev = smu->adev;
978 struct amdgpu_irq_src *irq_src = &smu->irq_source;
979 int ret = 0;
980
981 if (amdgpu_sriov_vf(adev))
982 return 0;
983
984 irq_src->num_types = 1;
985 irq_src->funcs = &smu_v14_0_irq_funcs;
986
987 ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
988 THM_11_0__SRCID__THM_DIG_THERM_L2H,
989 irq_src);
990 if (ret)
991 return ret;
992
993 ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
994 THM_11_0__SRCID__THM_DIG_THERM_H2L,
995 irq_src);
996 if (ret)
997 return ret;
998
999 ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
1000 SMU_IH_INTERRUPT_ID_TO_DRIVER,
1001 irq_src);
1002 if (ret)
1003 return ret;
1004
1005 return ret;
1006 }
1007
1008 static int smu_v14_0_wait_for_reset_complete(struct smu_context *smu,
1009 uint64_t event_arg)
1010 {
1011 int ret = 0;
1012
1013 dev_dbg(smu->adev->dev, "waiting for smu reset complete\n");
1014 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GfxDriverResetRecovery, NULL);
1015
1016 return ret;
1017 }
1018
1019 int smu_v14_0_wait_for_event(struct smu_context *smu, enum smu_event_type event,
1020 uint64_t event_arg)
1021 {
1022 int ret = -EINVAL;
1023
1024 switch (event) {
1025 case SMU_EVENT_RESET_COMPLETE:
1026 ret = smu_v14_0_wait_for_reset_complete(smu, event_arg);
1027 break;
1028 default:
1029 break;
1030 }
1031
1032 return ret;
1033 }
1034
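/* Query the ultimate (hardware) min/max frequency of a clock domain. If
 * DPM is disabled for that clock, fall back to the vbios bootup value,
 * which is stored in 10 kHz units and divided by 100 to report MHz.
 */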
1035 int smu_v14_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
1036 uint32_t *min, uint32_t *max)
1037 {
1038 int ret = 0, clk_id = 0;
1039 uint32_t param = 0;
1040 uint32_t clock_limit;
1041
1042 if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) {
1043 switch (clk_type) {
1044 case SMU_MCLK:
1045 case SMU_UCLK:
1046 clock_limit = smu->smu_table.boot_values.uclk;
1047 break;
1048 case SMU_GFXCLK:
1049 case SMU_SCLK:
1050 clock_limit = smu->smu_table.boot_values.gfxclk;
1051 break;
1052 case SMU_SOCCLK:
1053 clock_limit = smu->smu_table.boot_values.socclk;
1054 break;
1055 default:
1056 clock_limit = 0;
1057 break;
1058 }
1059
1060 /* boot values are in 10 kHz units, so divide by 100 to report MHz */
1061 if (min)
1062 *min = clock_limit / 100;
1063 if (max)
1064 *max = clock_limit / 100;
1065
1066 return 0;
1067 }
1068
1069 clk_id = smu_cmn_to_asic_specific_index(smu,
1070 CMN2ASIC_MAPPING_CLK,
1071 clk_type);
1072 if (clk_id < 0) {
1073 ret = -EINVAL;
1074 goto failed;
1075 }
1076 param = (clk_id & 0xffff) << 16;
1077
1078 if (max) {
1079 if (smu->adev->pm.ac_power)
1080 ret = smu_cmn_send_smc_msg_with_param(smu,
1081 SMU_MSG_GetMaxDpmFreq,
1082 param,
1083 max);
1084 else
1085 ret = smu_cmn_send_smc_msg_with_param(smu,
1086 SMU_MSG_GetDcModeMaxDpmFreq,
1087 param,
1088 max);
1089 if (ret)
1090 goto failed;
1091 }
1092
1093 if (min) {
1094 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param, min);
1095 if (ret)
1096 goto failed;
1097 }
1098
1099 failed:
1100 return ret;
1101 }
1102
1103 int smu_v14_0_set_soft_freq_limited_range(struct smu_context *smu,
1104 enum smu_clk_type clk_type,
1105 uint32_t min,
1106 uint32_t max,
1107 bool automatic)
1108 {
1109 int ret = 0, clk_id = 0;
1110 uint32_t param;
1111
1112 if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
1113 return 0;
1114
1115 clk_id = smu_cmn_to_asic_specific_index(smu,
1116 CMN2ASIC_MAPPING_CLK,
1117 clk_type);
1118 if (clk_id < 0)
1119 return clk_id;
1120
1121 if (max > 0) {
1122 if (automatic)
1123 param = (uint32_t)((clk_id << 16) | 0xffff);
1124 else
1125 param = (uint32_t)((clk_id << 16) | (max & 0xffff));
1126 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
1127 param, NULL);
1128 if (ret)
1129 goto out;
1130 }
1131
1132 if (min > 0) {
1133 if (automatic)
1134 param = (uint32_t)((clk_id << 16) | 0);
1135 else
1136 param = (uint32_t)((clk_id << 16) | (min & 0xffff));
1137 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
1138 param, NULL);
1139 if (ret)
1140 goto out;
1141 }
1142
1143 out:
1144 return ret;
1145 }
1146
1147 int smu_v14_0_set_hard_freq_limited_range(struct smu_context *smu,
1148 enum smu_clk_type clk_type,
1149 uint32_t min,
1150 uint32_t max)
1151 {
1152 int ret = 0, clk_id = 0;
1153 uint32_t param;
1154
1155 if (min <= 0 && max <= 0)
1156 return -EINVAL;
1157
1158 if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
1159 return 0;
1160
1161 clk_id = smu_cmn_to_asic_specific_index(smu,
1162 CMN2ASIC_MAPPING_CLK,
1163 clk_type);
1164 if (clk_id < 0)
1165 return clk_id;
1166
1167 if (max > 0) {
1168 param = (uint32_t)((clk_id << 16) | (max & 0xffff));
1169 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
1170 param, NULL);
1171 if (ret)
1172 return ret;
1173 }
1174
1175 if (min > 0) {
1176 param = (uint32_t)((clk_id << 16) | (min & 0xffff));
1177 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
1178 param, NULL);
1179 if (ret)
1180 return ret;
1181 }
1182
1183 return ret;
1184 }
1185
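/* Translate the requested forced DPM level into per-clock soft min/max
 * ranges and apply them, recording the resulting ranges in pstate_table.
 */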
1186 int smu_v14_0_set_performance_level(struct smu_context *smu,
1187 enum amd_dpm_forced_level level)
1188 {
1189 struct smu_14_0_dpm_context *dpm_context =
1190 smu->smu_dpm.dpm_context;
1191 struct smu_14_0_dpm_table *gfx_table =
1192 &dpm_context->dpm_tables.gfx_table;
1193 struct smu_14_0_dpm_table *mem_table =
1194 &dpm_context->dpm_tables.uclk_table;
1195 struct smu_14_0_dpm_table *soc_table =
1196 &dpm_context->dpm_tables.soc_table;
1197 struct smu_14_0_dpm_table *vclk_table =
1198 &dpm_context->dpm_tables.vclk_table;
1199 struct smu_14_0_dpm_table *dclk_table =
1200 &dpm_context->dpm_tables.dclk_table;
1201 struct smu_14_0_dpm_table *fclk_table =
1202 &dpm_context->dpm_tables.fclk_table;
1203 struct smu_umd_pstate_table *pstate_table =
1204 &smu->pstate_table;
1205 struct amdgpu_device *adev = smu->adev;
1206 uint32_t sclk_min = 0, sclk_max = 0;
1207 uint32_t mclk_min = 0, mclk_max = 0;
1208 uint32_t socclk_min = 0, socclk_max = 0;
1209 uint32_t vclk_min = 0, vclk_max = 0;
1210 uint32_t dclk_min = 0, dclk_max = 0;
1211 uint32_t fclk_min = 0, fclk_max = 0;
1212 int ret = 0, i;
1213 bool auto_level = false;
1214
1215 switch (level) {
1216 case AMD_DPM_FORCED_LEVEL_HIGH:
1217 sclk_min = sclk_max = gfx_table->max;
1218 mclk_min = mclk_max = mem_table->max;
1219 socclk_min = socclk_max = soc_table->max;
1220 vclk_min = vclk_max = vclk_table->max;
1221 dclk_min = dclk_max = dclk_table->max;
1222 fclk_min = fclk_max = fclk_table->max;
1223 break;
1224 case AMD_DPM_FORCED_LEVEL_LOW:
1225 sclk_min = sclk_max = gfx_table->min;
1226 mclk_min = mclk_max = mem_table->min;
1227 socclk_min = socclk_max = soc_table->min;
1228 vclk_min = vclk_max = vclk_table->min;
1229 dclk_min = dclk_max = dclk_table->min;
1230 fclk_min = fclk_max = fclk_table->min;
1231 break;
1232 case AMD_DPM_FORCED_LEVEL_AUTO:
1233 sclk_min = gfx_table->min;
1234 sclk_max = gfx_table->max;
1235 mclk_min = mem_table->min;
1236 mclk_max = mem_table->max;
1237 socclk_min = soc_table->min;
1238 socclk_max = soc_table->max;
1239 vclk_min = vclk_table->min;
1240 vclk_max = vclk_table->max;
1241 dclk_min = dclk_table->min;
1242 dclk_max = dclk_table->max;
1243 fclk_min = fclk_table->min;
1244 fclk_max = fclk_table->max;
1245 auto_level = true;
1246 break;
1247 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
1248 sclk_min = sclk_max = pstate_table->gfxclk_pstate.standard;
1249 mclk_min = mclk_max = pstate_table->uclk_pstate.standard;
1250 socclk_min = socclk_max = pstate_table->socclk_pstate.standard;
1251 vclk_min = vclk_max = pstate_table->vclk_pstate.standard;
1252 dclk_min = dclk_max = pstate_table->dclk_pstate.standard;
1253 fclk_min = fclk_max = pstate_table->fclk_pstate.standard;
1254 break;
1255 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
1256 sclk_min = sclk_max = pstate_table->gfxclk_pstate.min;
1257 break;
1258 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
1259 mclk_min = mclk_max = pstate_table->uclk_pstate.min;
1260 break;
1261 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
1262 sclk_min = sclk_max = pstate_table->gfxclk_pstate.peak;
1263 mclk_min = mclk_max = pstate_table->uclk_pstate.peak;
1264 socclk_min = socclk_max = pstate_table->socclk_pstate.peak;
1265 vclk_min = vclk_max = pstate_table->vclk_pstate.peak;
1266 dclk_min = dclk_max = pstate_table->dclk_pstate.peak;
1267 fclk_min = fclk_max = pstate_table->fclk_pstate.peak;
1268 break;
1269 case AMD_DPM_FORCED_LEVEL_MANUAL:
1270 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
1271 return 0;
1272 default:
1273 dev_err(adev->dev, "Invalid performance level %d\n", level);
1274 return -EINVAL;
1275 }
1276
1277 if (sclk_min && sclk_max) {
1278 ret = smu_v14_0_set_soft_freq_limited_range(smu,
1279 SMU_GFXCLK,
1280 sclk_min,
1281 sclk_max,
1282 auto_level);
1283 if (ret)
1284 return ret;
1285
1286 pstate_table->gfxclk_pstate.curr.min = sclk_min;
1287 pstate_table->gfxclk_pstate.curr.max = sclk_max;
1288 }
1289
1290 if (mclk_min && mclk_max) {
1291 ret = smu_v14_0_set_soft_freq_limited_range(smu,
1292 SMU_MCLK,
1293 mclk_min,
1294 mclk_max,
1295 auto_level);
1296 if (ret)
1297 return ret;
1298
1299 pstate_table->uclk_pstate.curr.min = mclk_min;
1300 pstate_table->uclk_pstate.curr.max = mclk_max;
1301 }
1302
1303 if (socclk_min && socclk_max) {
1304 ret = smu_v14_0_set_soft_freq_limited_range(smu,
1305 SMU_SOCCLK,
1306 socclk_min,
1307 socclk_max,
1308 auto_level);
1309 if (ret)
1310 return ret;
1311
1312 pstate_table->socclk_pstate.curr.min = socclk_min;
1313 pstate_table->socclk_pstate.curr.max = socclk_max;
1314 }
1315
1316 if (vclk_min && vclk_max) {
1317 for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
1318 if (adev->vcn.harvest_config & (1 << i))
1319 continue;
1320 ret = smu_v14_0_set_soft_freq_limited_range(smu,
1321 i ? SMU_VCLK1 : SMU_VCLK,
1322 vclk_min,
1323 vclk_max,
1324 auto_level);
1325 if (ret)
1326 return ret;
1327 }
1328 pstate_table->vclk_pstate.curr.min = vclk_min;
1329 pstate_table->vclk_pstate.curr.max = vclk_max;
1330 }
1331
1332 if (dclk_min && dclk_max) {
1333 for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
1334 if (adev->vcn.harvest_config & (1 << i))
1335 continue;
1336 ret = smu_v14_0_set_soft_freq_limited_range(smu,
1337 i ? SMU_DCLK1 : SMU_DCLK,
1338 dclk_min,
1339 dclk_max,
1340 auto_level);
1341 if (ret)
1342 return ret;
1343 }
1344 pstate_table->dclk_pstate.curr.min = dclk_min;
1345 pstate_table->dclk_pstate.curr.max = dclk_max;
1346 }
1347
1348 if (fclk_min && fclk_max) {
1349 ret = smu_v14_0_set_soft_freq_limited_range(smu,
1350 SMU_FCLK,
1351 fclk_min,
1352 fclk_max,
1353 auto_level);
1354 if (ret)
1355 return ret;
1356
1357 pstate_table->fclk_pstate.curr.min = fclk_min;
1358 pstate_table->fclk_pstate.curr.max = fclk_max;
1359 }
1360
1361 return ret;
1362 }
1363
1364 int smu_v14_0_set_power_source(struct smu_context *smu,
1365 enum smu_power_src_type power_src)
1366 {
1367 int pwr_source;
1368
1369 pwr_source = smu_cmn_to_asic_specific_index(smu,
1370 CMN2ASIC_MAPPING_PWR,
1371 (uint32_t)power_src);
1372 if (pwr_source < 0)
1373 return -EINVAL;
1374
1375 return smu_cmn_send_smc_msg_with_param(smu,
1376 SMU_MSG_NotifyPowerSource,
1377 pwr_source,
1378 NULL);
1379 }
1380
1381 static int smu_v14_0_get_dpm_freq_by_index(struct smu_context *smu,
1382 enum smu_clk_type clk_type,
1383 uint16_t level,
1384 uint32_t *value)
1385 {
1386 int ret = 0, clk_id = 0;
1387 uint32_t param;
1388
1389 if (!value)
1390 return -EINVAL;
1391
1392 if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
1393 return 0;
1394
1395 clk_id = smu_cmn_to_asic_specific_index(smu,
1396 CMN2ASIC_MAPPING_CLK,
1397 clk_type);
1398 if (clk_id < 0)
1399 return clk_id;
1400
1401 param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
1402
1403 ret = smu_cmn_send_smc_msg_with_param(smu,
1404 SMU_MSG_GetDpmFreqByIndex,
1405 param,
1406 value);
1407 if (ret)
1408 return ret;
1409
1410 *value = *value & 0x7fffffff;
1411
1412 return ret;
1413 }
1414
1415 static int smu_v14_0_get_dpm_level_count(struct smu_context *smu,
1416 enum smu_clk_type clk_type,
1417 uint32_t *value)
1418 {
1419 int ret;
1420
1421 ret = smu_v14_0_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
1422
1423 return ret;
1424 }
1425
1426 static int smu_v14_0_get_fine_grained_status(struct smu_context *smu,
1427 enum smu_clk_type clk_type,
1428 bool *is_fine_grained_dpm)
1429 {
1430 int ret = 0, clk_id = 0;
1431 uint32_t param;
1432 uint32_t value;
1433
1434 if (!is_fine_grained_dpm)
1435 return -EINVAL;
1436
1437 if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
1438 return 0;
1439
1440 clk_id = smu_cmn_to_asic_specific_index(smu,
1441 CMN2ASIC_MAPPING_CLK,
1442 clk_type);
1443 if (clk_id < 0)
1444 return clk_id;
1445
1446 param = (uint32_t)(((clk_id & 0xffff) << 16) | 0xff);
1447
1448 ret = smu_cmn_send_smc_msg_with_param(smu,
1449 SMU_MSG_GetDpmFreqByIndex,
1450 param,
1451 &value);
1452 if (ret)
1453 return ret;
1454
1455 /*
1456 * BIT31: 1 - fine-grained DPM, 0 - discrete DPM
1457 * Fine-grained DPM is not supported for now.
1458 */
1459 *is_fine_grained_dpm = value & 0x80000000;
1460
1461 return 0;
1462 }
1463
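/* Populate a single DPM table: query the level count and fine-grained
 * status, then read back the frequency of every level to fill in the
 * table entries and its min/max.
 */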
1464 int smu_v14_0_set_single_dpm_table(struct smu_context *smu,
1465 enum smu_clk_type clk_type,
1466 struct smu_14_0_dpm_table *single_dpm_table)
1467 {
1468 int ret = 0;
1469 uint32_t clk;
1470 int i;
1471
1472 ret = smu_v14_0_get_dpm_level_count(smu,
1473 clk_type,
1474 &single_dpm_table->count);
1475 if (ret) {
1476 dev_err(smu->adev->dev, "[%s] failed to get dpm levels!\n", __func__);
1477 return ret;
1478 }
1479
1480 ret = smu_v14_0_get_fine_grained_status(smu,
1481 clk_type,
1482 &single_dpm_table->is_fine_grained);
1483 if (ret) {
1484 dev_err(smu->adev->dev, "[%s] failed to get fine grained status!\n", __func__);
1485 return ret;
1486 }
1487
1488 for (i = 0; i < single_dpm_table->count; i++) {
1489 ret = smu_v14_0_get_dpm_freq_by_index(smu,
1490 clk_type,
1491 i,
1492 &clk);
1493 if (ret) {
1494 dev_err(smu->adev->dev, "[%s] failed to get dpm freq by index!\n", __func__);
1495 return ret;
1496 }
1497
1498 single_dpm_table->dpm_levels[i].value = clk;
1499 single_dpm_table->dpm_levels[i].enabled = true;
1500
1501 if (i == 0)
1502 single_dpm_table->min = clk;
1503 else if (i == single_dpm_table->count - 1)
1504 single_dpm_table->max = clk;
1505 }
1506
1507 return 0;
1508 }
1509
1510 int smu_v14_0_set_vcn_enable(struct smu_context *smu,
1511 bool enable,
1512 int inst)
1513 {
1514 struct amdgpu_device *adev = smu->adev;
1515 int ret = 0;
1516
1517 if (adev->vcn.harvest_config & (1 << inst))
1518 return ret;
1519
1520 if (smu->is_apu) {
1521 if (inst == 0)
1522 ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
1523 SMU_MSG_PowerUpVcn0 : SMU_MSG_PowerDownVcn0,
1524 inst << 16U, NULL);
1525 else if (inst == 1)
1526 ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
1527 SMU_MSG_PowerUpVcn1 : SMU_MSG_PowerDownVcn1,
1528 inst << 16U, NULL);
1529 } else {
1530 ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
1531 SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
1532 inst << 16U, NULL);
1533 }
1534
1535 return ret;
1536 }
1537
1538 int smu_v14_0_set_jpeg_enable(struct smu_context *smu,
1539 bool enable)
1540 {
1541 struct amdgpu_device *adev = smu->adev;
1542 int i, ret = 0;
1543
1544 for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
1545 if (adev->jpeg.harvest_config & (1 << i))
1546 continue;
1547
1548 if (smu->is_apu) {
1549 if (i == 0)
1550 ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
1551 SMU_MSG_PowerUpJpeg0 : SMU_MSG_PowerDownJpeg0,
1552 i << 16U, NULL);
1553 else if (i == 1 && amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
1554 ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
1555 SMU_MSG_PowerUpJpeg1 : SMU_MSG_PowerDownJpeg1,
1556 i << 16U, NULL);
1557 } else {
1558 ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
1559 SMU_MSG_PowerUpJpeg : SMU_MSG_PowerDownJpeg,
1560 i << 16U, NULL);
1561 }
1562
1563 if (ret)
1564 return ret;
1565 }
1566
1567 return ret;
1568 }
1569
1570 int smu_v14_0_run_btc(struct smu_context *smu)
1571 {
1572 int res;
1573
1574 res = smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);
1575 if (res)
1576 dev_err(smu->adev->dev, "RunDcBtc failed!\n");
1577
1578 return res;
1579 }
1580
1581 int smu_v14_0_gpo_control(struct smu_context *smu,
1582 bool enablement)
1583 {
1584 int res;
1585
1586 res = smu_cmn_send_smc_msg_with_param(smu,
1587 SMU_MSG_AllowGpo,
1588 enablement ? 1 : 0,
1589 NULL);
1590 if (res)
1591 dev_err(smu->adev->dev, "SetGpoAllow %d failed!\n", enablement);
1592
1593 return res;
1594 }
1595
1596 int smu_v14_0_deep_sleep_control(struct smu_context *smu,
1597 bool enablement)
1598 {
1599 struct amdgpu_device *adev = smu->adev;
1600 int ret = 0;
1601
1602 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_GFXCLK_BIT)) {
1603 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_GFXCLK_BIT, enablement);
1604 if (ret) {
1605 dev_err(adev->dev, "Failed to %s GFXCLK DS!\n", enablement ? "enable" : "disable");
1606 return ret;
1607 }
1608 }
1609
1610 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_UCLK_BIT)) {
1611 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_UCLK_BIT, enablement);
1612 if (ret) {
1613 dev_err(adev->dev, "Failed to %s UCLK DS!\n", enablement ? "enable" : "disable");
1614 return ret;
1615 }
1616 }
1617
1618 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_FCLK_BIT)) {
1619 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_FCLK_BIT, enablement);
1620 if (ret) {
1621 dev_err(adev->dev, "Failed to %s FCLK DS!\n", enablement ? "enable" : "disable");
1622 return ret;
1623 }
1624 }
1625
1626 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_SOCCLK_BIT)) {
1627 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_SOCCLK_BIT, enablement);
1628 if (ret) {
1629 dev_err(adev->dev, "Failed to %s SOCCLK DS!\n", enablement ? "enable" : "disable");
1630 return ret;
1631 }
1632 }
1633
1634 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_LCLK_BIT)) {
1635 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_LCLK_BIT, enablement);
1636 if (ret) {
1637 dev_err(adev->dev, "Failed to %s LCLK DS!\n", enablement ? "enable" : "disable");
1638 return ret;
1639 }
1640 }
1641
1642 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_VCN_BIT)) {
1643 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_VCN_BIT, enablement);
1644 if (ret) {
1645 dev_err(adev->dev, "Failed to %s VCN DS!\n", enablement ? "enable" : "disable");
1646 return ret;
1647 }
1648 }
1649
1650 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_MP0CLK_BIT)) {
1651 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_MP0CLK_BIT, enablement);
1652 if (ret) {
1653 dev_err(adev->dev, "Failed to %s MP0/MPIOCLK DS!\n", enablement ? "enable" : "disable");
1654 return ret;
1655 }
1656 }
1657
1658 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_MP1CLK_BIT)) {
1659 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_MP1CLK_BIT, enablement);
1660 if (ret) {
1661 dev_err(adev->dev, "Failed to %s MP1CLK DS!\n", enablement ? "enable" : "disable");
1662 return ret;
1663 }
1664 }
1665
1666 return ret;
1667 }
1668
1669 int smu_v14_0_gfx_ulv_control(struct smu_context *smu,
1670 bool enablement)
1671 {
1672 int ret = 0;
1673
1674 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_GFX_ULV_BIT))
1675 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_GFX_ULV_BIT, enablement);
1676
1677 return ret;
1678 }
1679
1680 int smu_v14_0_baco_set_armd3_sequence(struct smu_context *smu,
1681 enum smu_baco_seq baco_seq)
1682 {
1683 struct smu_baco_context *smu_baco = &smu->smu_baco;
1684 int ret;
1685
1686 ret = smu_cmn_send_smc_msg_with_param(smu,
1687 SMU_MSG_ArmD3,
1688 baco_seq,
1689 NULL);
1690 if (ret)
1691 return ret;
1692
1693 if (baco_seq == BACO_SEQ_BAMACO ||
1694 baco_seq == BACO_SEQ_BACO)
1695 smu_baco->state = SMU_BACO_STATE_ENTER;
1696 else
1697 smu_baco->state = SMU_BACO_STATE_EXIT;
1698
1699 return 0;
1700 }
1701
1702 int smu_v14_0_get_bamaco_support(struct smu_context *smu)
1703 {
1704 struct smu_baco_context *smu_baco = &smu->smu_baco;
1705 int bamaco_support = 0;
1706
1707 if (amdgpu_sriov_vf(smu->adev) ||
1708 !smu_baco->platform_support)
1709 return 0;
1710
1711 if (smu_baco->maco_support)
1712 bamaco_support |= MACO_SUPPORT;
1713
1714 /* return true if ASIC is in BACO state already */
1715 if (smu_v14_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER)
1716 return (bamaco_support |= BACO_SUPPORT);
1717
1718 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
1719 !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
1720 return 0;
1721
1722 return (bamaco_support |= BACO_SUPPORT);
1723 }
1724
1725 enum smu_baco_state smu_v14_0_baco_get_state(struct smu_context *smu)
1726 {
1727 struct smu_baco_context *smu_baco = &smu->smu_baco;
1728
1729 return smu_baco->state;
1730 }
1731
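/* Enter or exit BACO/BAMACO via the EnterBaco/ExitBaco messages. On exit,
 * vbios scratch registers 6 and 7 are cleared so the ASIC can be
 * reinitialized cleanly.
 */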
1732 int smu_v14_0_baco_set_state(struct smu_context *smu,
1733 enum smu_baco_state state)
1734 {
1735 struct smu_baco_context *smu_baco = &smu->smu_baco;
1736 struct amdgpu_device *adev = smu->adev;
1737 int ret = 0;
1738
1739 if (smu_v14_0_baco_get_state(smu) == state)
1740 return 0;
1741
1742 if (state == SMU_BACO_STATE_ENTER) {
1743 ret = smu_cmn_send_smc_msg_with_param(smu,
1744 SMU_MSG_EnterBaco,
1745 (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO) ?
1746 BACO_SEQ_BAMACO : BACO_SEQ_BACO,
1747 NULL);
1748 } else {
1749 ret = smu_cmn_send_smc_msg(smu,
1750 SMU_MSG_ExitBaco,
1751 NULL);
1752 if (ret)
1753 return ret;
1754
1755 /* clear vbios scratch 6 and 7 for coming asic reinit */
1756 WREG32(adev->bios_scratch_reg_offset + 6, 0);
1757 WREG32(adev->bios_scratch_reg_offset + 7, 0);
1758 }
1759
1760 if (!ret)
1761 smu_baco->state = state;
1762
1763 return ret;
1764 }
1765
1766 int smu_v14_0_baco_enter(struct smu_context *smu)
1767 {
1768 int ret = 0;
1769
1770 ret = smu_v14_0_baco_set_state(smu,
1771 SMU_BACO_STATE_ENTER);
1772 if (ret)
1773 return ret;
1774
1775 msleep(10);
1776
1777 return ret;
1778 }
1779
1780 int smu_v14_0_baco_exit(struct smu_context *smu)
1781 {
1782 return smu_v14_0_baco_set_state(smu,
1783 SMU_BACO_STATE_EXIT);
1784 }
1785
1786 int smu_v14_0_set_gfx_power_up_by_imu(struct smu_context *smu)
1787 {
1788 uint16_t index;
1789 struct amdgpu_device *adev = smu->adev;
1790
1791 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1792 return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableGfxImu,
1793 ENABLE_IMU_ARG_GFXOFF_ENABLE, NULL);
1794 }
1795
1796 index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
1797 SMU_MSG_EnableGfxImu);
1798 return smu_cmn_send_msg_without_waiting(smu, index, ENABLE_IMU_ARG_GFXOFF_ENABLE);
1799 }
1800
1801 int smu_v14_0_set_default_dpm_tables(struct smu_context *smu)
1802 {
1803 struct smu_table_context *smu_table = &smu->smu_table;
1804
1805 return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0,
1806 smu_table->clocks_table, false);
1807 }
1808
1809 int smu_v14_0_od_edit_dpm_table(struct smu_context *smu,
1810 enum PP_OD_DPM_TABLE_COMMAND type,
1811 long input[], uint32_t size)
1812 {
1813 struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
1814 int ret = 0;
1815
1816 /* Only allowed in manual mode */
1817 if (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
1818 return -EINVAL;
1819
1820 switch (type) {
1821 case PP_OD_EDIT_SCLK_VDDC_TABLE:
1822 if (size != 2) {
1823 dev_err(smu->adev->dev, "Input parameter number not correct\n");
1824 return -EINVAL;
1825 }
1826
1827 if (input[0] == 0) {
1828 if (input[1] < smu->gfx_default_hard_min_freq) {
1829 dev_warn(smu->adev->dev,
1830 "Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
1831 input[1], smu->gfx_default_hard_min_freq);
1832 return -EINVAL;
1833 }
1834 smu->gfx_actual_hard_min_freq = input[1];
1835 } else if (input[0] == 1) {
1836 if (input[1] > smu->gfx_default_soft_max_freq) {
1837 dev_warn(smu->adev->dev,
1838 "Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
1839 input[1], smu->gfx_default_soft_max_freq);
1840 return -EINVAL;
1841 }
1842 smu->gfx_actual_soft_max_freq = input[1];
1843 } else {
1844 return -EINVAL;
1845 }
1846 break;
1847 case PP_OD_RESTORE_DEFAULT_TABLE:
1848 if (size != 0) {
1849 dev_err(smu->adev->dev, "Input parameter number not correct\n");
1850 return -EINVAL;
1851 }
1852 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
1853 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
1854 break;
1855 case PP_OD_COMMIT_DPM_TABLE:
1856 if (size != 0) {
1857 dev_err(smu->adev->dev, "Input parameter number not correct\n");
1858 return -EINVAL;
1859 }
1860 if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) {
1861 dev_err(smu->adev->dev,
1862 "The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
1863 smu->gfx_actual_hard_min_freq,
1864 smu->gfx_actual_soft_max_freq);
1865 return -EINVAL;
1866 }
1867
1868 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
1869 smu->gfx_actual_hard_min_freq,
1870 NULL);
1871 if (ret) {
1872 dev_err(smu->adev->dev, "Set hard min sclk failed!");
1873 return ret;
1874 }
1875
1876 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
1877 smu->gfx_actual_soft_max_freq,
1878 NULL);
1879 if (ret) {
1880 dev_err(smu->adev->dev, "Set soft max sclk failed!");
1881 return ret;
1882 }
1883 break;
1884 default:
1885 return -ENOSYS;
1886 }
1887
1888 return ret;
1889 }
1890
1891 static int smu_v14_0_allow_ih_interrupt(struct smu_context *smu)
1892 {
1893 return smu_cmn_send_smc_msg(smu,
1894 SMU_MSG_AllowIHHostInterrupt,
1895 NULL);
1896 }
1897
1898 int smu_v14_0_enable_thermal_alert(struct smu_context *smu)
1899 {
1900 int ret = 0;
1901
1902 if (!smu->irq_source.num_types)
1903 return 0;
1904
1905 ret = amdgpu_irq_get(smu->adev, &smu->irq_source, 0);
1906 if (ret)
1907 return ret;
1908
1909 return smu_v14_0_allow_ih_interrupt(smu);
1910 }
1911
1912 int smu_v14_0_disable_thermal_alert(struct smu_context *smu)
1913 {
1914 if (!smu->irq_source.num_types)
1915 return 0;
1916
1917 return amdgpu_irq_put(smu->adev, &smu->irq_source, 0);
1918 }
1919