/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 * Authors: Marek Olšák <[email protected]>
 * SPDX-License-Identifier: MIT
 */

/* The GPU load is measured as follows.
 *
 * There is a thread which samples the GRBM_STATUS register at a certain
 * frequency and the "busy" or "idle" counter is incremented based on
 * whether the GUI_ACTIVE bit is set or not.
 *
 * Then, the user can sample the counters twice and calculate the average
 * GPU load between the two samples.
 */
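
/* A worked example (illustrative numbers, not driver code): if two
 * snapshots of one counter pair taken a second apart differ by
 * busy = 900 and idle = 100 increments, the average load over that
 * second was busy * 100 / (busy + idle) = 900 * 100 / 1000 = 90%,
 * which is exactly what r600_end_mmio_counter() computes below.
 */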

#include "r600_pipe_common.h"
#include "r600_query.h"
#include "util/os_time.h"

/* For good accuracy at 1000 fps or lower. This will be inaccurate for higher
 * fps (there are too few samples per frame). */
#define SAMPLES_PER_SEC 10000
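/* E.g. at 1000 fps each frame still gets about 10000 / 1000 = 10 samples;
 * at 5000 fps only 2 samples land in each frame, so per-frame load numbers
 * become noisy. */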

#define GRBM_STATUS		0x8010
#define TA_BUSY(x)		(((x) >> 14) & 0x1)
#define GDS_BUSY(x)		(((x) >> 15) & 0x1)
#define VGT_BUSY(x)		(((x) >> 17) & 0x1)
#define IA_BUSY(x)		(((x) >> 19) & 0x1)
#define SX_BUSY(x)		(((x) >> 20) & 0x1)
#define WD_BUSY(x)		(((x) >> 21) & 0x1)
#define SPI_BUSY(x)		(((x) >> 22) & 0x1)
#define BCI_BUSY(x)		(((x) >> 23) & 0x1)
#define SC_BUSY(x)		(((x) >> 24) & 0x1)
#define PA_BUSY(x)		(((x) >> 25) & 0x1)
#define DB_BUSY(x)		(((x) >> 26) & 0x1)
#define CP_BUSY(x)		(((x) >> 29) & 0x1)
#define CB_BUSY(x)		(((x) >> 30) & 0x1)
#define GUI_ACTIVE(x)		(((x) >> 31) & 0x1)

#define SRBM_STATUS2		0x0e4c
#define SDMA_BUSY(x)		(((x) >> 5) & 0x1)

#define CP_STAT                 0x8680
#define PFP_BUSY(x)		(((x) >> 15) & 0x1)
#define MEQ_BUSY(x)		(((x) >> 16) & 0x1)
#define ME_BUSY(x)		(((x) >> 17) & 0x1)
#define SURFACE_SYNC_BUSY(x)	(((x) >> 21) & 0x1)
#define DMA_BUSY(x)		(((x) >> 22) & 0x1)
#define SCRATCH_RAM_BUSY(x)	(((x) >> 24) & 0x1)

#define IDENTITY(x) x

#define UPDATE_COUNTER(field, mask)					\
	do {								\
		if (mask(value))					\
			p_atomic_inc(&counters->named.field.busy);	\
		else							\
			p_atomic_inc(&counters->named.field.idle);	\
	} while (0)
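
/* For example, UPDATE_COUNTER(ta, TA_BUSY) expands (modulo the do/while(0)
 * wrapper) to:
 *
 *	if (TA_BUSY(value))
 *		p_atomic_inc(&counters->named.ta.busy);
 *	else
 *		p_atomic_inc(&counters->named.ta.idle);
 *
 * so each sample increments exactly one of the two counters per block;
 * "value" and "counters" are picked up from the enclosing scope.
 */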

static void r600_update_mmio_counters(struct r600_common_screen *rscreen,
				      union r600_mmio_counters *counters)
{
	uint32_t value = 0;
	bool gui_busy, sdma_busy = false;
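	/* Note: sdma_busy stays false because this file never samples
	 * SRBM_STATUS2, so the SDMA_BUSY bit defined above goes unused and
	 * the aggregate "gpu" counter below reduces to GUI_ACTIVE. */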

	/* GRBM_STATUS */
	rscreen->ws->read_registers(rscreen->ws, GRBM_STATUS, 1, &value);

	UPDATE_COUNTER(ta, TA_BUSY);
	UPDATE_COUNTER(gds, GDS_BUSY);
	UPDATE_COUNTER(vgt, VGT_BUSY);
	UPDATE_COUNTER(ia, IA_BUSY);
	UPDATE_COUNTER(sx, SX_BUSY);
	UPDATE_COUNTER(wd, WD_BUSY);
	UPDATE_COUNTER(spi, SPI_BUSY);
	UPDATE_COUNTER(bci, BCI_BUSY);
	UPDATE_COUNTER(sc, SC_BUSY);
	UPDATE_COUNTER(pa, PA_BUSY);
	UPDATE_COUNTER(db, DB_BUSY);
	UPDATE_COUNTER(cp, CP_BUSY);
	UPDATE_COUNTER(cb, CB_BUSY);
	UPDATE_COUNTER(gui, GUI_ACTIVE);
	gui_busy = GUI_ACTIVE(value);
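
	/* Reuse "value" as the input of UPDATE_COUNTER one last time:
	 * IDENTITY(value) expands to plain "value", so this updates the
	 * aggregate "gpu" counter from the combined busy flag. */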
	value = gui_busy || sdma_busy;
	UPDATE_COUNTER(gpu, IDENTITY);
}

#undef UPDATE_COUNTER

static int
r600_gpu_load_thread(void *param)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)param;
	const int period_us = 1000000 / SAMPLES_PER_SEC;
	int sleep_us = period_us;
	int64_t cur_time, last_time = os_time_get();

	while (!p_atomic_read(&rscreen->gpu_load_stop_thread)) {
		if (sleep_us)
			os_time_sleep(sleep_us);

		/* Make sure we sleep the ideal amount of time to match
		 * the expected frequency. */
		cur_time = os_time_get();
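
		/* Feedback loop: if the full period already elapsed, we
		 * slept too long, so shorten the next sleep (keeping it at
		 * least 1 us); otherwise lengthen it. This converges on
		 * SAMPLES_PER_SEC iterations per second. */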
		if (os_time_timeout(last_time, last_time + period_us,
				    cur_time))
			sleep_us = MAX2(sleep_us - 1, 1);
		else
			sleep_us += 1;

		/*printf("Hz: %.1f\n", 1000000.0 / (cur_time - last_time));*/
		last_time = cur_time;

		/* Update the counters. */
		r600_update_mmio_counters(rscreen, &rscreen->mmio_counters);
	}
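	/* Balance the p_atomic_inc() in r600_gpu_load_kill_thread() so the
	 * stop flag reads zero again if the thread is later restarted. */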
	p_atomic_dec(&rscreen->gpu_load_stop_thread);
	return 0;
}

void r600_gpu_load_kill_thread(struct r600_common_screen *rscreen)
{
	if (!rscreen->gpu_load_thread_created)
		return;

	p_atomic_inc(&rscreen->gpu_load_stop_thread);
	thrd_join(rscreen->gpu_load_thread, NULL);
	rscreen->gpu_load_thread_created = false;
}

static uint64_t r600_read_mmio_counter(struct r600_common_screen *rscreen,
				       unsigned busy_index)
{
	/* Start the thread if needed. */
	if (!rscreen->gpu_load_thread_created) {
		mtx_lock(&rscreen->gpu_load_mutex);
		/* Check again inside the mutex. */
		if (!rscreen->gpu_load_thread_created) {
			int ret = u_thread_create(&rscreen->gpu_load_thread, r600_gpu_load_thread, rscreen);
			if (ret == thrd_success) {
				rscreen->gpu_load_thread_created = true;
			}
		}
		mtx_unlock(&rscreen->gpu_load_mutex);
	}
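
	/* Pack one snapshot into a single 64-bit value: the busy counter in
	 * the low 32 bits, the idle counter (stored at busy_index + 1) in
	 * the high 32 bits. The two reads are not atomic together, which is
	 * acceptable for an approximate load metric. */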
	unsigned busy = p_atomic_read(&rscreen->mmio_counters.array[busy_index]);
	unsigned idle = p_atomic_read(&rscreen->mmio_counters.array[busy_index + 1]);

	return busy | ((uint64_t)idle << 32);
}

static unsigned r600_end_mmio_counter(struct r600_common_screen *rscreen,
				      uint64_t begin, unsigned busy_index)
{
	uint64_t end = r600_read_mmio_counter(rscreen, busy_index);
	unsigned busy = (end & 0xffffffff) - (begin & 0xffffffff);
	unsigned idle = (end >> 32) - (begin >> 32);

	/* Calculate the % of time the busy counter was being incremented.
	 *
	 * If neither counter was incremented, return the current counter
	 * status instead. This handles the case where the load is queried
	 * faster than the counters are updated.
	 */
	if (idle || busy) {
		return busy*100 / (busy + idle);
	} else {
		union r600_mmio_counters counters;

		memset(&counters, 0, sizeof(counters));
		r600_update_mmio_counters(rscreen, &counters);
		return counters.array[busy_index] ? 100 : 0;
	}
}

#define BUSY_INDEX(rscreen, field) (&rscreen->mmio_counters.named.field.busy - \
				    rscreen->mmio_counters.array)
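
/* The pointer subtraction above works because r600_mmio_counters is a union:
 * the named {busy, idle} pairs overlay the flat counter array, so it yields
 * the array index of the busy slot, with the matching idle slot at the next
 * index (see busy_index + 1 in r600_read_mmio_counter). */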

static unsigned busy_index_from_type(struct r600_common_screen *rscreen,
				     unsigned type)
{
	switch (type) {
	case R600_QUERY_GPU_LOAD:
		return BUSY_INDEX(rscreen, gpu);
	case R600_QUERY_GPU_SHADERS_BUSY:
		return BUSY_INDEX(rscreen, spi);
	case R600_QUERY_GPU_TA_BUSY:
		return BUSY_INDEX(rscreen, ta);
	case R600_QUERY_GPU_GDS_BUSY:
		return BUSY_INDEX(rscreen, gds);
	case R600_QUERY_GPU_VGT_BUSY:
		return BUSY_INDEX(rscreen, vgt);
	case R600_QUERY_GPU_IA_BUSY:
		return BUSY_INDEX(rscreen, ia);
	case R600_QUERY_GPU_SX_BUSY:
		return BUSY_INDEX(rscreen, sx);
	case R600_QUERY_GPU_WD_BUSY:
		return BUSY_INDEX(rscreen, wd);
	case R600_QUERY_GPU_BCI_BUSY:
		return BUSY_INDEX(rscreen, bci);
	case R600_QUERY_GPU_SC_BUSY:
		return BUSY_INDEX(rscreen, sc);
	case R600_QUERY_GPU_PA_BUSY:
		return BUSY_INDEX(rscreen, pa);
	case R600_QUERY_GPU_DB_BUSY:
		return BUSY_INDEX(rscreen, db);
	case R600_QUERY_GPU_CP_BUSY:
		return BUSY_INDEX(rscreen, cp);
	case R600_QUERY_GPU_CB_BUSY:
		return BUSY_INDEX(rscreen, cb);
	case R600_QUERY_GPU_SDMA_BUSY:
		return BUSY_INDEX(rscreen, sdma);
	case R600_QUERY_GPU_PFP_BUSY:
		return BUSY_INDEX(rscreen, pfp);
	case R600_QUERY_GPU_MEQ_BUSY:
		return BUSY_INDEX(rscreen, meq);
	case R600_QUERY_GPU_ME_BUSY:
		return BUSY_INDEX(rscreen, me);
	case R600_QUERY_GPU_SURF_SYNC_BUSY:
		return BUSY_INDEX(rscreen, surf_sync);
	case R600_QUERY_GPU_CP_DMA_BUSY:
		return BUSY_INDEX(rscreen, cp_dma);
	case R600_QUERY_GPU_SCRATCH_RAM_BUSY:
		return BUSY_INDEX(rscreen, scratch_ram);
	default:
		unreachable("invalid query type");
	}
}

uint64_t r600_begin_counter(struct r600_common_screen *rscreen, unsigned type)
{
	unsigned busy_index = busy_index_from_type(rscreen, type);
	return r600_read_mmio_counter(rscreen, busy_index);
}

unsigned r600_end_counter(struct r600_common_screen *rscreen, unsigned type,
			  uint64_t begin)
{
	unsigned busy_index = busy_index_from_type(rscreen, type);
	return r600_end_mmio_counter(rscreen, begin, busy_index);
}
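
/* Usage sketch (illustrative, not driver code; the real callers are the
 * software queries driven through r600_query.h):
 *
 *	uint64_t begin = r600_begin_counter(rscreen, R600_QUERY_GPU_LOAD);
 *	// ... let some work run ...
 *	unsigned load = r600_end_counter(rscreen, R600_QUERY_GPU_LOAD, begin);
 *
 * "load" is the average busy percentage (0-100) over the interval.
 */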