/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * SPDX-License-Identifier: MIT
 */

/* The GPU load is measured as follows.
 *
 * There is a thread which samples the GRBM_STATUS register at a certain
 * frequency, and either the "busy" or the "idle" counter is incremented
 * depending on whether the GUI_ACTIVE bit is set.
 *
 * The user can then sample the counters twice and calculate the average
 * GPU load between the two samples.
 */
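
/* A minimal sketch of the two-sample pattern, using the public helpers
 * defined at the bottom of this file:
 *
 *    uint64_t begin = si_begin_counter(sscreen, SI_QUERY_GPU_LOAD);
 *    ...let the GPU do some work...
 *    unsigned load = si_end_counter(sscreen, SI_QUERY_GPU_LOAD, begin);
 *
 * "load" is the average busy percentage (0-100) over the interval.
 */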

#include "si_pipe.h"
#include "si_query.h"
#include "util/os_time.h"

/* For good accuracy at 1000 fps or lower. This will be inaccurate for higher
 * fps (there are too few samples per frame). */
#define SAMPLES_PER_SEC 10000
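/* At 10000 samples/sec the sampling period is 1000000 / 10000 = 100 us,
 * i.e. about 10 samples per frame at 1000 fps and about 167 at 60 fps. */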

#define GRBM_STATUS           0x8010
#define TA_BUSY(x)            (((x) >> 14) & 0x1)
#define GDS_BUSY(x)           (((x) >> 15) & 0x1)
#define VGT_BUSY(x)           (((x) >> 17) & 0x1)
#define IA_BUSY(x)            (((x) >> 19) & 0x1)
#define SX_BUSY(x)            (((x) >> 20) & 0x1)
#define WD_BUSY(x)            (((x) >> 21) & 0x1)
#define SPI_BUSY(x)           (((x) >> 22) & 0x1)
#define BCI_BUSY(x)           (((x) >> 23) & 0x1)
#define SC_BUSY(x)            (((x) >> 24) & 0x1)
#define PA_BUSY(x)            (((x) >> 25) & 0x1)
#define DB_BUSY(x)            (((x) >> 26) & 0x1)
#define CP_BUSY(x)            (((x) >> 29) & 0x1)
#define CB_BUSY(x)            (((x) >> 30) & 0x1)
#define GUI_ACTIVE(x)         (((x) >> 31) & 0x1)

#define SRBM_STATUS2          0x0e4c
#define SDMA_BUSY(x)          (((x) >> 5) & 0x1)

#define CP_STAT               0x8680
#define PFP_BUSY(x)           (((x) >> 15) & 0x1)
#define MEQ_BUSY(x)           (((x) >> 16) & 0x1)
#define ME_BUSY(x)            (((x) >> 17) & 0x1)
#define SURFACE_SYNC_BUSY(x)  (((x) >> 21) & 0x1)
#define DMA_BUSY(x)           (((x) >> 22) & 0x1)
#define SCRATCH_RAM_BUSY(x)   (((x) >> 24) & 0x1)

#define IDENTITY(x) x

#define UPDATE_COUNTER(field, mask)                 \
   do {                                             \
      if (mask(value))                              \
         p_atomic_inc(&counters->named.field.busy); \
      else                                          \
         p_atomic_inc(&counters->named.field.idle); \
   } while (0)
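
/* For example, UPDATE_COUNTER(ta, TA_BUSY) bumps counters->named.ta.busy
 * when bit 14 of the sampled register value is set, and
 * counters->named.ta.idle otherwise. */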

static void si_update_mmio_counters(struct si_screen *sscreen, union si_mmio_counters *counters)
{
   uint32_t value = 0;
   bool gui_busy, sdma_busy = false;

   /* GRBM_STATUS */
   sscreen->ws->read_registers(sscreen->ws, GRBM_STATUS, 1, &value);

   UPDATE_COUNTER(ta, TA_BUSY);
   if (sscreen->info.gfx_level < GFX12)
      UPDATE_COUNTER(gds, GDS_BUSY);
   UPDATE_COUNTER(vgt, VGT_BUSY);
   UPDATE_COUNTER(ia, IA_BUSY);
   UPDATE_COUNTER(sx, SX_BUSY);
   UPDATE_COUNTER(wd, WD_BUSY);
   UPDATE_COUNTER(spi, SPI_BUSY);
   UPDATE_COUNTER(bci, BCI_BUSY);
   UPDATE_COUNTER(sc, SC_BUSY);
   UPDATE_COUNTER(pa, PA_BUSY);
   UPDATE_COUNTER(db, DB_BUSY);
   UPDATE_COUNTER(cp, CP_BUSY);
   UPDATE_COUNTER(cb, CB_BUSY);
   UPDATE_COUNTER(gui, GUI_ACTIVE);
   gui_busy = GUI_ACTIVE(value);

   if (sscreen->info.gfx_level == GFX7 || sscreen->info.gfx_level == GFX8) {
      /* SRBM_STATUS2 */
      sscreen->ws->read_registers(sscreen->ws, SRBM_STATUS2, 1, &value);

      UPDATE_COUNTER(sdma, SDMA_BUSY);
      sdma_busy = SDMA_BUSY(value);
   }

   if (sscreen->info.gfx_level >= GFX8) {
      /* CP_STAT */
      sscreen->ws->read_registers(sscreen->ws, CP_STAT, 1, &value);

      UPDATE_COUNTER(pfp, PFP_BUSY);
      UPDATE_COUNTER(meq, MEQ_BUSY);
      UPDATE_COUNTER(me, ME_BUSY);
      UPDATE_COUNTER(surf_sync, SURFACE_SYNC_BUSY);
      UPDATE_COUNTER(cp_dma, DMA_BUSY);
      UPDATE_COUNTER(scratch_ram, SCRATCH_RAM_BUSY);
   }

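   /* Feed the combined busy bit back through UPDATE_COUNTER: IDENTITY(value)
    * just reads "value", which now holds gui_busy || sdma_busy. */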
   value = gui_busy || sdma_busy;
   UPDATE_COUNTER(gpu, IDENTITY);
}

#undef UPDATE_COUNTER

static int si_gpu_load_thread(void *param)
{
   struct si_screen *sscreen = (struct si_screen *)param;
   const int period_us = 1000000 / SAMPLES_PER_SEC;
   int sleep_us = period_us;
   int64_t cur_time, last_time = os_time_get();

   while (!p_atomic_read(&sscreen->gpu_load_stop_thread)) {
      if (sleep_us)
         os_time_sleep(sleep_us);

      /* Adjust the sleep time to converge on the expected sampling
       * frequency: sleep a little less if the last iteration overshot
       * the period, a little more if it undershot. */
      cur_time = os_time_get();

      if (os_time_timeout(last_time, last_time + period_us, cur_time))
         sleep_us = MAX2(sleep_us - 1, 1);
      else
         sleep_us += 1;

      /*printf("Hz: %.1f\n", 1000000.0 / (cur_time - last_time));*/
      last_time = cur_time;

      /* Update the counters. */
      si_update_mmio_counters(sscreen, &sscreen->mmio_counters);
   }
   p_atomic_dec(&sscreen->gpu_load_stop_thread);
   return 0;
}

void si_gpu_load_kill_thread(struct si_screen *sscreen)
{
   if (!sscreen->gpu_load_thread_created)
      return;

   p_atomic_inc(&sscreen->gpu_load_stop_thread);
   thrd_join(sscreen->gpu_load_thread, NULL);
   sscreen->gpu_load_thread_created = false;
}

static uint64_t si_read_mmio_counter(struct si_screen *sscreen, unsigned busy_index)
{
   /* Start the thread if needed. */
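   /* Double-checked locking: the flag is tested outside the mutex so the
    * common (already running) case stays lock-free, then re-tested inside. */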
   if (!sscreen->gpu_load_thread_created) {
      simple_mtx_lock(&sscreen->gpu_load_mutex);
      /* Check again inside the mutex. */
      if (!sscreen->gpu_load_thread_created) {
         if (thrd_success == u_thread_create(&sscreen->gpu_load_thread, si_gpu_load_thread, sscreen)) {
            sscreen->gpu_load_thread_created = true;
         }
      }
      simple_mtx_unlock(&sscreen->gpu_load_mutex);
   }

   unsigned busy = p_atomic_read(&sscreen->mmio_counters.array[busy_index]);
   unsigned idle = p_atomic_read(&sscreen->mmio_counters.array[busy_index + 1]);

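   /* Pack both counters into one 64-bit value: busy in the low 32 bits,
    * idle in the high 32 bits. si_end_mmio_counter unpacks them below. */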
   return busy | ((uint64_t)idle << 32);
}

static unsigned si_end_mmio_counter(struct si_screen *sscreen, uint64_t begin, unsigned busy_index)
{
   uint64_t end = si_read_mmio_counter(sscreen, busy_index);
   unsigned busy = (end & 0xffffffff) - (begin & 0xffffffff);
   unsigned idle = (end >> 32) - (begin >> 32);

   /* Calculate the % of time the busy counter was being incremented.
    *
    * If no counters were incremented, return the current counter status
    * instead. This covers the case where the load is queried faster than
    * the counters are updated.
    */
   if (idle || busy) {
      return busy * 100 / (busy + idle);
   } else {
      union si_mmio_counters counters;

      memset(&counters, 0, sizeof(counters));
      si_update_mmio_counters(sscreen, &counters);
      return counters.array[busy_index] ? 100 : 0;
   }
}

#define BUSY_INDEX(sscreen, field) \
   (&sscreen->mmio_counters.named.field.busy - sscreen->mmio_counters.array)
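
/* This works because union si_mmio_counters exposes the named busy/idle
 * pairs as a flat uint32_t array: the pointer difference above is the array
 * index of the "busy" slot, and index + 1 is the matching "idle" slot
 * (see si_read_mmio_counter). */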

static unsigned busy_index_from_type(struct si_screen *sscreen, unsigned type)
{
   switch (type) {
   case SI_QUERY_GPU_LOAD:
      return BUSY_INDEX(sscreen, gpu);
   case SI_QUERY_GPU_SHADERS_BUSY:
      return BUSY_INDEX(sscreen, spi);
   case SI_QUERY_GPU_TA_BUSY:
      return BUSY_INDEX(sscreen, ta);
   case SI_QUERY_GPU_GDS_BUSY:
      return BUSY_INDEX(sscreen, gds);
   case SI_QUERY_GPU_VGT_BUSY:
      return BUSY_INDEX(sscreen, vgt);
   case SI_QUERY_GPU_IA_BUSY:
      return BUSY_INDEX(sscreen, ia);
   case SI_QUERY_GPU_SX_BUSY:
      return BUSY_INDEX(sscreen, sx);
   case SI_QUERY_GPU_WD_BUSY:
      return BUSY_INDEX(sscreen, wd);
   case SI_QUERY_GPU_BCI_BUSY:
      return BUSY_INDEX(sscreen, bci);
   case SI_QUERY_GPU_SC_BUSY:
      return BUSY_INDEX(sscreen, sc);
   case SI_QUERY_GPU_PA_BUSY:
      return BUSY_INDEX(sscreen, pa);
   case SI_QUERY_GPU_DB_BUSY:
      return BUSY_INDEX(sscreen, db);
   case SI_QUERY_GPU_CP_BUSY:
      return BUSY_INDEX(sscreen, cp);
   case SI_QUERY_GPU_CB_BUSY:
      return BUSY_INDEX(sscreen, cb);
   case SI_QUERY_GPU_SDMA_BUSY:
      return BUSY_INDEX(sscreen, sdma);
   case SI_QUERY_GPU_PFP_BUSY:
      return BUSY_INDEX(sscreen, pfp);
   case SI_QUERY_GPU_MEQ_BUSY:
      return BUSY_INDEX(sscreen, meq);
   case SI_QUERY_GPU_ME_BUSY:
      return BUSY_INDEX(sscreen, me);
   case SI_QUERY_GPU_SURF_SYNC_BUSY:
      return BUSY_INDEX(sscreen, surf_sync);
   case SI_QUERY_GPU_CP_DMA_BUSY:
      return BUSY_INDEX(sscreen, cp_dma);
   case SI_QUERY_GPU_SCRATCH_RAM_BUSY:
      return BUSY_INDEX(sscreen, scratch_ram);
   default:
      unreachable("invalid query type");
   }
}

uint64_t si_begin_counter(struct si_screen *sscreen, unsigned type)
{
   unsigned busy_index = busy_index_from_type(sscreen, type);
   return si_read_mmio_counter(sscreen, busy_index);
}

unsigned si_end_counter(struct si_screen *sscreen, unsigned type, uint64_t begin)
{
   unsigned busy_index = busy_index_from_type(sscreen, type);
   return si_end_mmio_counter(sscreen, begin, busy_index);
}