xref: /aosp_15_r20/external/mesa3d/src/intel/perf/intel_perf_query.c (revision 6104692788411f58d303aa86923a9ff6ecaded22)
1 /*
2  * Copyright © 2019 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #include <unistd.h>
25 #include <poll.h>
26 
27 #include "common/intel_gem.h"
28 
29 #include "dev/intel_debug.h"
30 #include "dev/intel_device_info.h"
31 
32 #include "perf/intel_perf.h"
33 #include "perf/intel_perf_mdapi.h"
34 #include "perf/intel_perf_private.h"
35 #include "perf/intel_perf_query.h"
36 #include "perf/intel_perf_regs.h"
37 
38 #include "util/compiler.h"
39 #include "util/u_math.h"
40 
41 #define FILE_DEBUG_FLAG DEBUG_PERFMON
42 
43 #define MI_RPC_BO_SIZE                (4096)
44 #define MI_FREQ_OFFSET_BYTES          (256)
45 #define MI_PERF_COUNTERS_OFFSET_BYTES (260)
46 
47 #define MAP_READ  (1 << 0)
48 #define MAP_WRITE (1 << 1)
49 
50 /**
51  * Periodic OA samples are read() into these buffer structures via the
52  * i915 perf kernel interface and appended to the
53  * perf_ctx->sample_buffers linked list. When we process the
54  * results of an OA metrics query we need to consider all the periodic
55  * samples between the Begin and End MI_REPORT_PERF_COUNT command
56  * markers.
57  *
58  * 'Periodic' is a simplification as there are other automatic reports
59  * written by the hardware also buffered here.
60  *
61  * Considering three queries, A, B and C:
62  *
63  *  Time ---->
64  *                ________________A_________________
65  *                |                                |
66  *                | ________B_________ _____C___________
67  *                | |                | |           |   |
68  *
69  * And an illustration of sample buffers read over this time frame:
70  * [HEAD ][     ][     ][     ][     ][     ][     ][     ][TAIL ]
71  *
72  * These nodes may hold samples for query A:
73  * [     ][     ][  A  ][  A  ][  A  ][  A  ][  A  ][     ][     ]
74  *
75  * These nodes may hold samples for query B:
76  * [     ][     ][  B  ][  B  ][  B  ][     ][     ][     ][     ]
77  *
78  * These nodes may hold samples for query C:
79  * [     ][     ][     ][     ][     ][  C  ][  C  ][  C  ][     ]
80  *
81  * The illustration assumes we have an even distribution of periodic
82  * samples so all nodes have the same size plotted against time:
83  *
84  * Note, to simplify code, the list is never empty.
85  *
86  * With overlapping queries we can see that periodic OA reports may
87  * relate to multiple queries and care needs to be taken to keep
88  * track of sample buffers until there are no queries that might
89  * depend on their contents.
90  *
91  * We use a node ref counting system where a reference ensures that a
92  * node and all following nodes can't be freed/recycled until the
93  * reference drops to zero.
94  *
95  * E.g. with a ref of one here:
96  * [  0  ][  0  ][  1  ][  0  ][  0  ][  0  ][  0  ][  0  ][  0  ]
97  *
98  * These nodes could be freed or recycled ("reaped"):
99  * [  0  ][  0  ]
100  *
101  * These must be preserved until the leading ref drops to zero:
102  *               [  1  ][  0  ][  0  ][  0  ][  0  ][  0  ][  0  ]
103  *
104  * When a query starts we take a reference on the current tail of
105  * the list, knowing that no already-buffered samples can possibly
106  * relate to the newly-started query. A pointer to this node is
107  * also saved in the query object's ->oa.samples_head.
108  *
109  * E.g. starting query A while there are two nodes in .sample_buffers:
110  *                ________________A________
111  *                |
112  *
113  * [  0  ][  1  ]
114  *           ^_______ Add a reference and store pointer to node in
115  *                    A->oa.samples_head
116  *
117  * Moving forward to when the B query starts with no new buffer nodes:
118  * (for reference, i915 perf reads() are only done when queries finish)
119  *                ________________A_______
120  *                | ________B___
121  *                | |
122  *
123  * [  0  ][  2  ]
124  *           ^_______ Add a reference and store pointer to
125  *                    node in B->oa.samples_head
126  *
127  * Once a query is finished, after an OA query has become 'Ready',
128  * once the End OA report has landed and after we have processed
129  * all the intermediate periodic samples then we drop the
130  * ->oa.samples_head reference we took at the start.
131  *
132  * So when the B query has finished we have:
133  *                ________________A________
134  *                | ______B___________
135  *                | |                |
136  * [  0  ][  1  ][  0  ][  0  ][  0  ]
137  *           ^_______ Drop B->oa.samples_head reference
138  *
139  * We still can't free these due to the A->oa.samples_head ref:
140  *        [  1  ][  0  ][  0  ][  0  ]
141  *
142  * When the A query finishes: (note there's a new ref for C's samples_head)
143  *                ________________A_________________
144  *                |                                |
145  *                |                    _____C_________
146  *                |                    |           |
147  * [  0  ][  0  ][  0  ][  0  ][  1  ][  0  ][  0  ]
148  *           ^_______ Drop A->oa.samples_head reference
149  *
150  * And we can now reap these nodes up to the C->oa.samples_head:
151  * [  X  ][  X  ][  X  ][  X  ]
152  *                  keeping -> [  1  ][  0  ][  0  ]
153  *
154  * We reap old sample buffers each time we finish processing an OA
155  * query by iterating the sample_buffers list from the head until we
156  * find a referenced node and stop.
157  *
158  * Reaped buffers move to a perfquery.free_sample_buffers list and
159  * when we come to read() we first look to recycle a buffer from the
160  * free_sample_buffers list before allocating a new buffer.
161  */
162 struct oa_sample_buf {
163    struct exec_node link;
164    int refcount;
165    int len;
166    uint32_t last_timestamp;
167    uint8_t buf[];
168 };
169 
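/* Size of the raw data area of an oa_sample_buf: room for roughly ten OA
 * sample records per read() of the perf stream (oa_sample_size being the
 * per-record size for the current platform).
 */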
170 #define oa_sample_buf_buf_length(perf) (perf->oa_sample_size * 10)
171 
172 /**
173  * gen representation of a performance query object.
174  *
175  * NB: We want to keep this structure relatively lean considering that
176  * applications may expect to allocate enough objects to be able to
177  * query around all draw calls in a frame.
178  */
179 struct intel_perf_query_object
180 {
181    const struct intel_perf_query_info *queryinfo;
182 
183    /* See query->kind to know which state below is in use... */
184    union {
185       struct {
186 
187          /**
188           * BO containing OA counter snapshots at query Begin/End time.
189           */
190          void *bo;
191 
192          /**
193           * Mapped address of @bo
194           */
195          void *map;
196 
197          /**
198           * The MI_REPORT_PERF_COUNT command lets us specify a unique
199           * ID that will be reflected in the resulting OA report
200           * that's written by the GPU. This is the ID we're expecting
201           * in the begin report and the end report should be
202           * @begin_report_id + 1.
203           */
204          int begin_report_id;
205 
206          /**
207           * Reference the head of the brw->perfquery.sample_buffers
208           * list at the time that the query started (so we only need
209           * to look at nodes after this point when looking for samples
210           * related to this query)
211           *
212           * (See struct brw_oa_sample_buf description for more details)
213           */
214          struct exec_node *samples_head;
215 
216          /**
217           * false while in the unaccumulated_elements list, and set to
218           * true when the final, end MI_RPC snapshot has been
219           * accumulated.
220           */
221          bool results_accumulated;
222 
223          /**
224           * Accumulated OA results between begin and end of the query.
225           */
226          struct intel_perf_query_result result;
227       } oa;
228 
229       struct {
230          /**
231           * BO containing starting and ending snapshots for the
232           * statistics counters.
233           */
234          void *bo;
235       } pipeline_stats;
236    };
237 };
238 
239 struct intel_perf_context {
240    struct intel_perf_config *perf;
241 
242    void * mem_ctx; /* ralloc context */
243    void * ctx;  /* driver context (eg, brw_context) */
244    void * bufmgr;
245    const struct intel_device_info *devinfo;
246 
247    uint32_t hw_ctx;
248    int drm_fd;
249 
250    /* The i915 perf stream we open to setup + enable the OA counters */
251    int oa_stream_fd;
252 
253    /* An i915 perf stream fd gives exclusive access to the OA unit that will
254     * report counter snapshots for a specific counter set/profile in a
255     * specific layout/format so we can only start OA queries that are
256     * compatible with the currently open fd...
257     */
258    int current_oa_metrics_set_id;
259    int current_oa_format;
260 
261    /* List of buffers containing OA reports */
262    struct exec_list sample_buffers;
263 
264    /* Cached list of empty sample buffers */
265    struct exec_list free_sample_buffers;
266 
267    int n_active_oa_queries;
268    int n_active_pipeline_stats_queries;
269 
270    /* The number of queries depending on running OA counters which
271     * extends beyond brw_end_perf_query() since we need to wait until
272     * the last MI_RPC command has been parsed by the GPU.
273     *
274     * Accurate accounting is important here as emitting an
275     * MI_REPORT_PERF_COUNT command while the OA unit is disabled will
276     * effectively hang the gpu.
277     */
278    int n_oa_users;
279 
280    /* To help catch a spurious problem with the hardware or perf
281     * forwarding samples, we emit each MI_REPORT_PERF_COUNT command
282     * with a unique ID that we can explicitly check for...
283     */
284    int next_query_start_report_id;
285 
286    /**
287     * An array of queries whose results haven't yet been assembled
288     * based on the data in buffer objects.
289     *
290     * These may be active, or have already ended.  However, the
291     * results have not been requested.
292     */
293    struct intel_perf_query_object **unaccumulated;
294    int unaccumulated_elements;
295    int unaccumulated_array_size;
296 
297    /* The total number of query objects so we can relinquish
298     * our exclusive access to perf if the application deletes
299     * all of its objects. (NB: We only disable perf while
300     * there are no active queries)
301     */
302    int n_query_instances;
303 
304    int period_exponent;
305 };
306 
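/**
 * Take a reference on the OA unit for an in-flight query.
 *
 * The first user enables the already-open i915 perf stream; the last one
 * disables it again in dec_n_users(), so MI_RPC commands are never left
 * outstanding while the OA unit is off.
 */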
307 static bool
308 inc_n_users(struct intel_perf_context *perf_ctx)
309 {
310    if (perf_ctx->n_oa_users == 0 &&
311        intel_perf_stream_set_state(perf_ctx->perf, perf_ctx->oa_stream_fd, true) < 0)
312    {
313       return false;
314    }
315    ++perf_ctx->n_oa_users;
316 
317    return true;
318 }
319 
320 static void
321 dec_n_users(struct intel_perf_context *perf_ctx)
322 {
323    /* Disabling the i915 perf stream will effectively disable the OA
324     * counters.  Note it's important to be sure there are no outstanding
325     * MI_RPC commands at this point since they could stall the CS
326     * indefinitely once OACONTROL is disabled.
327     */
328    --perf_ctx->n_oa_users;
329    if (perf_ctx->n_oa_users == 0 &&
330        intel_perf_stream_set_state(perf_ctx->perf, perf_ctx->oa_stream_fd, false) < 0)
331    {
332       DBG("WARNING: Error disabling gen perf stream: %m\n");
333    }
334 }
335 
336 void
337 intel_perf_close(struct intel_perf_context *perfquery,
338                  const struct intel_perf_query_info *query)
339 {
340    if (perfquery->oa_stream_fd != -1) {
341       close(perfquery->oa_stream_fd);
342       perfquery->oa_stream_fd = -1;
343    }
344    if (query && query->kind == INTEL_PERF_QUERY_TYPE_RAW) {
345       struct intel_perf_query_info *raw_query =
346          (struct intel_perf_query_info *) query;
347       raw_query->oa_metrics_set_id = 0;
348    }
349 }
350 
351 bool
352 intel_perf_open(struct intel_perf_context *perf_ctx,
353                 int metrics_set_id,
354                 uint64_t report_format,
355                 int period_exponent,
356                 int drm_fd,
357                 uint32_t ctx_id,
358                 bool enable)
359 {
360    int fd = intel_perf_stream_open(perf_ctx->perf, drm_fd, ctx_id,
361                                    metrics_set_id, period_exponent, false,
362                                    enable);
363    if (fd == -1) {
364       DBG("Error opening gen perf OA stream: %m\n");
365       return false;
366    }
367 
368    perf_ctx->oa_stream_fd = fd;
369 
370    perf_ctx->current_oa_metrics_set_id = metrics_set_id;
371    perf_ctx->current_oa_format = report_format;
372 
373    if (enable)
374       ++perf_ctx->n_oa_users;
375 
376    return true;
377 }
378 
379 static uint64_t
380 get_metric_id(struct intel_perf_config *perf,
381               const struct intel_perf_query_info *query)
382 {
383    /* These queries are known never to change, their config ID has been
384     * loaded upon the first query creation. No need to look them up again.
385     */
386    if (query->kind == INTEL_PERF_QUERY_TYPE_OA)
387       return query->oa_metrics_set_id;
388 
389    assert(query->kind == INTEL_PERF_QUERY_TYPE_RAW);
390 
391    /* Raw queries can be reprogrammed by an external application/library.
392     * When a raw query is used for the first time its ID is set to a value !=
393     * 0. When it stops being used the ID returns to 0. No need to reload the
394     * ID when it's already loaded.
395     */
396    if (query->oa_metrics_set_id != 0) {
397       DBG("Raw query '%s' guid=%s using cached ID: %"PRIu64"\n",
398           query->name, query->guid, query->oa_metrics_set_id);
399       return query->oa_metrics_set_id;
400    }
401 
402    struct intel_perf_query_info *raw_query = (struct intel_perf_query_info *)query;
403    if (!intel_perf_load_metric_id(perf, query->guid,
404                                 &raw_query->oa_metrics_set_id)) {
405       DBG("Unable to read query guid=%s ID, falling back to test config\n", query->guid);
406       raw_query->oa_metrics_set_id = perf->fallback_raw_oa_metric;
407    } else {
408       DBG("Raw query '%s' guid=%s loaded ID: %"PRIu64"\n",
409           query->name, query->guid, query->oa_metrics_set_id);
410    }
411    return query->oa_metrics_set_id;
412 }
413 
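/**
 * Return an empty sample buffer, recycling one from free_sample_buffers
 * when possible, otherwise allocating a new one out of the perf config's
 * ralloc context.
 */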
414 static struct oa_sample_buf *
415 get_free_sample_buf(struct intel_perf_context *perf_ctx)
416 {
417    struct exec_node *node = exec_list_pop_head(&perf_ctx->free_sample_buffers);
418    struct oa_sample_buf *buf;
419 
420    if (node)
421       buf = exec_node_data(struct oa_sample_buf, node, link);
422    else {
423       buf = ralloc_size(perf_ctx->perf, sizeof(*buf) + oa_sample_buf_buf_length(perf_ctx->perf));
424 
425       exec_node_init(&buf->link);
426       buf->refcount = 0;
427    }
428    buf->len = 0;
429 
430    return buf;
431 }
432 
433 static void
434 reap_old_sample_buffers(struct intel_perf_context *perf_ctx)
435 {
436    struct exec_node *tail_node =
437       exec_list_get_tail(&perf_ctx->sample_buffers);
438    struct oa_sample_buf *tail_buf =
439       exec_node_data(struct oa_sample_buf, tail_node, link);
440 
441    /* Remove all old, unreferenced sample buffers walking forward from
442     * the head of the list, except always leave at least one node in
443     * the list so we always have a node to reference when we Begin
444     * a new query.
445     */
446    foreach_list_typed_safe(struct oa_sample_buf, buf, link,
447                            &perf_ctx->sample_buffers)
448    {
449       if (buf->refcount == 0 && buf != tail_buf) {
450          exec_node_remove(&buf->link);
451          exec_list_push_head(&perf_ctx->free_sample_buffers, &buf->link);
452       } else
453          return;
454    }
455 }
456 
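/**
 * Release every buffer cached on the free_sample_buffers list. Buffers
 * still linked on sample_buffers (including the permanent head node) are
 * left untouched.
 */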
457 static void
458 free_sample_bufs(struct intel_perf_context *perf_ctx)
459 {
460    foreach_list_typed_safe(struct oa_sample_buf, buf, link,
461                            &perf_ctx->free_sample_buffers)
462       ralloc_free(buf);
463 
464    exec_list_make_empty(&perf_ctx->free_sample_buffers);
465 }
466 
467 
468 struct intel_perf_query_object *
469 intel_perf_new_query(struct intel_perf_context *perf_ctx, unsigned query_index)
470 {
471    const struct intel_perf_query_info *query =
472       &perf_ctx->perf->queries[query_index];
473 
474    switch (query->kind) {
475    case INTEL_PERF_QUERY_TYPE_OA:
476    case INTEL_PERF_QUERY_TYPE_RAW:
477       if (perf_ctx->period_exponent == 0)
478          return NULL;
479       break;
480    case INTEL_PERF_QUERY_TYPE_PIPELINE:
481       break;
482    }
483 
484    struct intel_perf_query_object *obj =
485       calloc(1, sizeof(struct intel_perf_query_object));
486 
487    if (!obj)
488       return NULL;
489 
490    obj->queryinfo = query;
491 
492    perf_ctx->n_query_instances++;
493    return obj;
494 }
495 
496 int
497 intel_perf_active_queries(struct intel_perf_context *perf_ctx,
498                           const struct intel_perf_query_info *query)
499 {
500    assert(perf_ctx->n_active_oa_queries == 0 || perf_ctx->n_active_pipeline_stats_queries == 0);
501 
502    switch (query->kind) {
503    case INTEL_PERF_QUERY_TYPE_OA:
504    case INTEL_PERF_QUERY_TYPE_RAW:
505       return perf_ctx->n_active_oa_queries;
506       break;
507 
508    case INTEL_PERF_QUERY_TYPE_PIPELINE:
509       return perf_ctx->n_active_pipeline_stats_queries;
510       break;
511 
512    default:
513       unreachable("Unknown query type");
514       break;
515    }
516 }
517 
518 const struct intel_perf_query_info*
519 intel_perf_query_info(const struct intel_perf_query_object *query)
520 {
521    return query->queryinfo;
522 }
523 
524 struct intel_perf_context *
525 intel_perf_new_context(void *parent)
526 {
527    struct intel_perf_context *ctx = rzalloc(parent, struct intel_perf_context);
528    if (! ctx)
529       fprintf(stderr, "%s: failed to alloc context\n", __func__);
530    return ctx;
531 }
532 
533 struct intel_perf_config *
534 intel_perf_config(struct intel_perf_context *ctx)
535 {
536    return ctx->perf;
537 }
538 
539 void intel_perf_free_context(struct intel_perf_context *perf_ctx)
540 {
541    ralloc_free(perf_ctx);
542 }
543 
544 void
545 intel_perf_init_context(struct intel_perf_context *perf_ctx,
546                         struct intel_perf_config *perf_cfg,
547                         void * mem_ctx, /* ralloc context */
548                         void * ctx,  /* driver context (eg, brw_context) */
549                         void * bufmgr,  /* eg brw_bufmgr */
550                         const struct intel_device_info *devinfo,
551                         uint32_t hw_ctx,
552                         int drm_fd)
553 {
554    perf_ctx->perf = perf_cfg;
555    perf_ctx->mem_ctx = mem_ctx;
556    perf_ctx->ctx = ctx;
557    perf_ctx->bufmgr = bufmgr;
558    perf_ctx->drm_fd = drm_fd;
559    perf_ctx->hw_ctx = hw_ctx;
560    perf_ctx->devinfo = devinfo;
561 
562    perf_ctx->unaccumulated =
563       ralloc_array(mem_ctx, struct intel_perf_query_object *, 2);
564    perf_ctx->unaccumulated_elements = 0;
565    perf_ctx->unaccumulated_array_size = 2;
566 
567    exec_list_make_empty(&perf_ctx->sample_buffers);
568    exec_list_make_empty(&perf_ctx->free_sample_buffers);
569 
570    /* It's convenient to guarantee that this linked list of sample
571     * buffers is never empty, so we add an empty head node; then when
572     * we Begin an OA query we can always take a reference on a buffer
573     * in this list.
574     */
575    struct oa_sample_buf *buf = get_free_sample_buf(perf_ctx);
576    exec_list_push_head(&perf_ctx->sample_buffers, &buf->link);
577 
578    perf_ctx->oa_stream_fd = -1;
579    perf_ctx->next_query_start_report_id = 1000;
580 
581    /* The period_exponent gives a sampling period as follows:
582     *   sample_period = timestamp_period * 2^(period_exponent + 1)
583     *
584     * The timestamp increments every 80ns (HSW), ~52ns (GFX9LP) or
585     * ~83ns (GFX8/9).
586     *
587     * The counter overflow period is derived from the EuActive counter
588     * which reads a counter that increments by the number of clock
589     * cycles multiplied by the number of EUs. It can be calculated as:
590     *
591     * 2^(number of bits in A counter) / (n_eus * max_intel_freq * 2)
592     *
593     * (E.g. 40 EUs @ 1GHz = ~53ms)
594     *
595     * We select a sampling period shorter than that overflow period to
596     * ensure we cannot see more than 1 counter overflow, otherwise we
597     * could lose information.
598     */
599 
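   /* Worked example of the overflow formula above (illustrative numbers):
    * with a 32-bit A counter and 40 EUs, dropping the ~1GHz frequency to
    * get nanoseconds gives 2^32 / (40 * 2) ~= 5.4e7 ns ~= 53ms, matching
    * the figure quoted above. The 40-bit counters on Gfx8+ extend that by
    * a factor of 2^8, to roughly 13.7 seconds.
    */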
600    int a_counter_in_bits = 32;
601    if (devinfo->ver >= 8)
602       a_counter_in_bits = 40;
603 
604    uint64_t overflow_period = pow(2, a_counter_in_bits) / (perf_cfg->sys_vars.n_eus *
605        /* drop 1GHz freq to have units in nanoseconds */
606        2);
607 
608    DBG("A counter overflow period: %"PRIu64"ns, %"PRIu64"ms (n_eus=%"PRIu64")\n",
609        overflow_period, overflow_period / 1000000ul, perf_cfg->sys_vars.n_eus);
610 
611    int period_exponent = 0;
612    uint64_t prev_sample_period, next_sample_period;
613    for (int e = 0; e < 30; e++) {
614       prev_sample_period = 1000000000ull * pow(2, e + 1) / devinfo->timestamp_frequency;
615       next_sample_period = 1000000000ull * pow(2, e + 2) / devinfo->timestamp_frequency;
616 
617       /* Take the previous sampling period, lower than the overflow
618        * period.
619        */
620       if (prev_sample_period < overflow_period &&
621           next_sample_period > overflow_period)
622          period_exponent = e + 1;
623    }
624 
625    perf_ctx->period_exponent = period_exponent;
626 
627    if (period_exponent == 0) {
628       DBG("WARNING: unable to find a sampling exponent\n");
629    } else {
630       DBG("OA sampling exponent: %i ~= %"PRIu64"ms\n", period_exponent,
631             prev_sample_period / 1000000ul);
632    }
633 }
634 
635 /**
636  * Add a query to the global list of "unaccumulated queries."
637  *
638  * Queries are tracked here until all the associated OA reports have
639  * been accumulated via accumulate_oa_reports() after the end
640  * MI_REPORT_PERF_COUNT has landed in query->oa.bo.
641  */
642 static void
643 add_to_unaccumulated_query_list(struct intel_perf_context *perf_ctx,
644                                 struct intel_perf_query_object *obj)
645 {
646    if (perf_ctx->unaccumulated_elements >=
647        perf_ctx->unaccumulated_array_size)
648    {
649       perf_ctx->unaccumulated_array_size *= 1.5;
650       perf_ctx->unaccumulated =
651          reralloc(perf_ctx->mem_ctx, perf_ctx->unaccumulated,
652                   struct intel_perf_query_object *,
653                   perf_ctx->unaccumulated_array_size);
654    }
655 
656    perf_ctx->unaccumulated[perf_ctx->unaccumulated_elements++] = obj;
657 }
658 
659 /**
660  * Emit MI_STORE_REGISTER_MEM commands to capture all of the
661  * pipeline statistics for the performance query object.
662  */
663 static void
664 snapshot_statistics_registers(struct intel_perf_context *ctx,
665                               struct intel_perf_query_object *obj,
666                               uint32_t offset_in_bytes)
667 {
668    struct intel_perf_config *perf = ctx->perf;
669    const struct intel_perf_query_info *query = obj->queryinfo;
670    const int n_counters = query->n_counters;
671 
672    for (int i = 0; i < n_counters; i++) {
673       const struct intel_perf_query_counter *counter = &query->counters[i];
674 
675       assert(counter->data_type == INTEL_PERF_COUNTER_DATA_TYPE_UINT64);
676 
677       perf->vtbl.store_register_mem(ctx->ctx, obj->pipeline_stats.bo,
678                                     counter->pipeline_stat.reg, 8,
679                                     offset_in_bytes + counter->offset);
680    }
681 }
682 
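/**
 * Emit a begin or end snapshot for an OA query by walking the platform's
 * query field layout: MI_RPC fields are captured with
 * MI_REPORT_PERF_COUNT and register fields with MI_STORE_REGISTER_MEM,
 * each landing at its layout offset within query->oa.bo (the end
 * snapshot is placed after the begin snapshot, aligned to the layout's
 * alignment).
 */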
683 static void
684 snapshot_query_layout(struct intel_perf_context *perf_ctx,
685                       struct intel_perf_query_object *query,
686                       bool end_snapshot)
687 {
688    struct intel_perf_config *perf_cfg = perf_ctx->perf;
689    const struct intel_perf_query_field_layout *layout = &perf_cfg->query_layout;
690    uint32_t offset = end_snapshot ? align(layout->size, layout->alignment) : 0;
691 
692    for (uint32_t f = 0; f < layout->n_fields; f++) {
693       const struct intel_perf_query_field *field =
694          &layout->fields[end_snapshot ? f : (layout->n_fields - 1 - f)];
695 
696       switch (field->type) {
697       case INTEL_PERF_QUERY_FIELD_TYPE_MI_RPC:
698          perf_cfg->vtbl.emit_mi_report_perf_count(perf_ctx->ctx, query->oa.bo,
699                                                   offset + field->location,
700                                                   query->oa.begin_report_id +
701                                                   (end_snapshot ? 1 : 0));
702          break;
703       case INTEL_PERF_QUERY_FIELD_TYPE_SRM_PERFCNT:
704       case INTEL_PERF_QUERY_FIELD_TYPE_SRM_RPSTAT:
705       case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_A:
706       case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_B:
707       case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_C:
708       case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_PEC:
709          perf_cfg->vtbl.store_register_mem(perf_ctx->ctx, query->oa.bo,
710                                            field->mmio_offset, field->size,
711                                            offset + field->location);
712          break;
713       default:
714          unreachable("Invalid field type");
715       }
716    }
717 }
718 
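/* Rough driver-side calling sequence for an OA query (illustrative sketch
 * only; real drivers add their own batch and BO plumbing):
 *
 *    struct intel_perf_query_object *q =
 *       intel_perf_new_query(perf_ctx, query_index);
 *    intel_perf_begin_query(perf_ctx, q);
 *    ... emit the draw calls being measured ...
 *    intel_perf_end_query(perf_ctx, q);
 *    intel_perf_wait_query(perf_ctx, q, batch);
 *       (or poll intel_perf_is_query_ready(perf_ctx, q, batch))
 *    ... read back the counter values, then ...
 *    intel_perf_delete_query(perf_ctx, q);
 */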
719 bool
720 intel_perf_begin_query(struct intel_perf_context *perf_ctx,
721                        struct intel_perf_query_object *query)
722 {
723    struct intel_perf_config *perf_cfg = perf_ctx->perf;
724    const struct intel_perf_query_info *queryinfo = query->queryinfo;
725 
726    /* XXX: We have to consider that the command parser unit that parses batch
727     * buffer commands and is used to capture begin/end counter snapshots isn't
728     * implicitly synchronized with what's currently running across other GPU
729     * units (such as the EUs running shaders) that the performance counters are
730     * associated with.
731     *
732     * The intention of performance queries is to measure the work associated
733     * with commands between the begin/end delimiters and so for that to be the
734     * case we need to explicitly synchronize the parsing of commands to capture
735     * Begin/End counter snapshots with what's running across other parts of the
736     * GPU.
737     *
738     * When the command parser reaches a Begin marker it effectively needs to
739     * drain everything currently running on the GPU until the hardware is idle
740     * before capturing the first snapshot of counters - otherwise the results
741     * would also be measuring the effects of earlier commands.
742     *
743     * When the command parser reaches an End marker it needs to stall until
744     * everything currently running on the GPU has finished before capturing the
745     * end snapshot - otherwise the results won't be a complete representation
746     * of the work.
747     *
748     * To achieve this, we stall the pipeline at pixel scoreboard (prevent any
749     * additional work from being processed by the pipeline until all pixels
750     * of the previous draw have been completed).
751     *
752     * N.B. The final results are based on deltas of counters between (inside)
753     * Begin/End markers so even though the total wall clock time of the
754     * workload is stretched by larger pipeline bubbles the bubbles themselves
755     * are generally invisible to the query results. Whether that's a good or a
756     * bad thing depends on the use case. For a lower real-time impact while
757     * capturing metrics, periodic sampling may be a better choice than
758     * INTEL_performance_query.
759     *
760     *
761     * This is our Begin synchronization point to drain current work on the
762     * GPU before we capture our first counter snapshot...
763     */
764    perf_cfg->vtbl.emit_stall_at_pixel_scoreboard(perf_ctx->ctx);
765 
766    switch (queryinfo->kind) {
767    case INTEL_PERF_QUERY_TYPE_OA:
768    case INTEL_PERF_QUERY_TYPE_RAW: {
769 
770       /* Opening an i915 perf stream implies exclusive access to the OA unit
771        * which will generate counter reports for a specific counter set with a
772        * specific layout/format so we can't begin any OA based queries that
773        * require a different counter set or format unless we get an opportunity
774        * to close the stream and open a new one...
775        */
776       uint64_t metric_id = get_metric_id(perf_ctx->perf, queryinfo);
777 
778       if (perf_ctx->oa_stream_fd != -1 &&
779           perf_ctx->current_oa_metrics_set_id != metric_id) {
780 
781          if (perf_ctx->n_oa_users != 0) {
782             DBG("WARNING: Begin failed already using perf config=%i/%"PRIu64"\n",
783                 perf_ctx->current_oa_metrics_set_id, metric_id);
784             return false;
785          } else
786             intel_perf_close(perf_ctx, queryinfo);
787       }
788 
789       /* If the OA counters aren't already on, enable them. */
790       if (perf_ctx->oa_stream_fd == -1) {
791          assert(perf_ctx->period_exponent != 0);
792 
793          if (!intel_perf_open(perf_ctx, metric_id, queryinfo->oa_format,
794                             perf_ctx->period_exponent, perf_ctx->drm_fd,
795                             perf_ctx->hw_ctx, false))
796             return false;
797       } else {
798          assert(perf_ctx->current_oa_metrics_set_id == metric_id &&
799                 perf_ctx->current_oa_format == queryinfo->oa_format);
800       }
801 
802       if (!inc_n_users(perf_ctx)) {
803          DBG("WARNING: Error enabling i915 perf stream: %m\n");
804          return false;
805       }
806 
807       if (query->oa.bo) {
808          perf_cfg->vtbl.bo_unreference(query->oa.bo);
809          query->oa.bo = NULL;
810       }
811 
812       query->oa.bo = perf_cfg->vtbl.bo_alloc(perf_ctx->bufmgr,
813                                              "perf. query OA MI_RPC bo",
814                                              MI_RPC_BO_SIZE);
815 #if MESA_DEBUG
816       /* Pre-filling the BO helps debug whether writes landed. */
817       void *map = perf_cfg->vtbl.bo_map(perf_ctx->ctx, query->oa.bo, MAP_WRITE);
818       memset(map, 0x80, MI_RPC_BO_SIZE);
819       perf_cfg->vtbl.bo_unmap(query->oa.bo);
820 #endif
821 
822       query->oa.begin_report_id = perf_ctx->next_query_start_report_id;
823       perf_ctx->next_query_start_report_id += 2;
824 
825       snapshot_query_layout(perf_ctx, query, false /* end_snapshot */);
826 
827       ++perf_ctx->n_active_oa_queries;
828 
829       /* No already-buffered samples can possibly be associated with this query
830        * so create a marker within the list of sample buffers enabling us to
831        * easily ignore earlier samples when processing this query after
832        * completion.
833        */
834       assert(!exec_list_is_empty(&perf_ctx->sample_buffers));
835       query->oa.samples_head = exec_list_get_tail(&perf_ctx->sample_buffers);
836 
837       struct oa_sample_buf *buf =
838          exec_node_data(struct oa_sample_buf, query->oa.samples_head, link);
839 
840       /* This reference will ensure that future/following sample
841        * buffers (that may relate to this query) can't be freed until
842        * this drops to zero.
843        */
844       buf->refcount++;
845 
846       intel_perf_query_result_clear(&query->oa.result);
847       query->oa.results_accumulated = false;
848 
849       add_to_unaccumulated_query_list(perf_ctx, query);
850       break;
851    }
852 
853    case INTEL_PERF_QUERY_TYPE_PIPELINE:
854       if (query->pipeline_stats.bo) {
855          perf_cfg->vtbl.bo_unreference(query->pipeline_stats.bo);
856          query->pipeline_stats.bo = NULL;
857       }
858 
859       query->pipeline_stats.bo =
860          perf_cfg->vtbl.bo_alloc(perf_ctx->bufmgr,
861                                  "perf. query pipeline stats bo",
862                                  STATS_BO_SIZE);
863 
864       /* Take starting snapshots. */
865       snapshot_statistics_registers(perf_ctx, query, 0);
866 
867       ++perf_ctx->n_active_pipeline_stats_queries;
868       break;
869 
870    default:
871       unreachable("Unknown query type");
872       break;
873    }
874 
875    return true;
876 }
877 
878 void
879 intel_perf_end_query(struct intel_perf_context *perf_ctx,
880                      struct intel_perf_query_object *query)
881 {
882    struct intel_perf_config *perf_cfg = perf_ctx->perf;
883 
884    /* Ensure that the work associated with the queried commands will have
885     * finished before taking our query end counter readings.
886     *
887     * For more details see comment in brw_begin_perf_query for
888     * corresponding flush.
889     */
890    perf_cfg->vtbl.emit_stall_at_pixel_scoreboard(perf_ctx->ctx);
891 
892    switch (query->queryinfo->kind) {
893    case INTEL_PERF_QUERY_TYPE_OA:
894    case INTEL_PERF_QUERY_TYPE_RAW:
895 
896       /* NB: It's possible that the query will have already been marked
897        * as 'accumulated' if an error was seen while reading samples
898        * from perf. In this case we mustn't try to emit a closing
899        * MI_RPC command in case the OA unit has already been disabled
900        */
901       if (!query->oa.results_accumulated)
902          snapshot_query_layout(perf_ctx, query, true /* end_snapshot */);
903 
904       --perf_ctx->n_active_oa_queries;
905 
906       /* NB: even though the query has now ended, it can't be accumulated
907        * until the end MI_REPORT_PERF_COUNT snapshot has been written
908        * to query->oa.bo
909        */
910       break;
911 
912    case INTEL_PERF_QUERY_TYPE_PIPELINE:
913       snapshot_statistics_registers(perf_ctx, query,
914                                     STATS_BO_END_OFFSET_BYTES);
915       --perf_ctx->n_active_pipeline_stats_queries;
916       break;
917 
918    default:
919       unreachable("Unknown query type");
920       break;
921    }
922 }
923 
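/**
 * Non-blocking poll() of the open i915 perf stream fd; returns true when
 * periodic OA samples are ready to be read.
 */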
924 bool intel_perf_oa_stream_ready(struct intel_perf_context *perf_ctx)
925 {
926    struct pollfd pfd;
927 
928    pfd.fd = perf_ctx->oa_stream_fd;
929    pfd.events = POLLIN;
930    pfd.revents = 0;
931 
932    if (poll(&pfd, 1, 0) < 0) {
933       DBG("Error polling OA stream\n");
934       return false;
935    }
936 
937    if (!(pfd.revents & POLLIN))
938       return false;
939 
940    return true;
941 }
942 
943 ssize_t
944 intel_perf_read_oa_stream(struct intel_perf_context *perf_ctx,
945                           void* buf,
946                           size_t nbytes)
947 {
948    return intel_perf_stream_read_samples(perf_ctx->perf, perf_ctx->oa_stream_fd,
949                                          buf, nbytes);
950 }
951 
952 enum OaReadStatus {
953    OA_READ_STATUS_ERROR,
954    OA_READ_STATUS_UNFINISHED,
955    OA_READ_STATUS_FINISHED,
956 };
957 
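/**
 * Pull periodic OA reports from the kernel into sample buffers until a
 * report at or beyond @end_timestamp has been buffered. Broadly: returns
 * FINISHED once enough data has been read, UNFINISHED when the kernel
 * has nothing more to give yet, and ERROR when a read fails before any
 * sample could be consumed.
 */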
958 static enum OaReadStatus
959 read_oa_samples_until(struct intel_perf_context *perf_ctx,
960                       uint32_t start_timestamp,
961                       uint32_t end_timestamp)
962 {
963    struct exec_node *tail_node =
964       exec_list_get_tail(&perf_ctx->sample_buffers);
965    struct oa_sample_buf *tail_buf =
966       exec_node_data(struct oa_sample_buf, tail_node, link);
967    uint32_t last_timestamp =
968       tail_buf->len == 0 ? start_timestamp : tail_buf->last_timestamp;
969    bool sample_read = false;
970 
971    while (1) {
972       struct oa_sample_buf *buf = get_free_sample_buf(perf_ctx);
973       uint32_t offset;
974       int len;
975 
976       len = intel_perf_stream_read_samples(perf_ctx->perf,
977                                            perf_ctx->oa_stream_fd,
978                                            buf->buf,
979                                            oa_sample_buf_buf_length(perf_ctx->perf));
980 
981       if (len <= 0) {
982          exec_list_push_tail(&perf_ctx->free_sample_buffers, &buf->link);
983 
984          if (len == 0) {
985             if (sample_read)
986                return OA_READ_STATUS_FINISHED;
987 
988             DBG("Spurious EOF reading i915 perf samples\n");
989             return OA_READ_STATUS_ERROR;
990          }
991 
992          if (len != -EAGAIN) {
993             if (sample_read)
994                return OA_READ_STATUS_FINISHED;
995 
996             DBG("Error reading i915 perf samples: %m\n");
997             return OA_READ_STATUS_ERROR;
998          }
999 
1000          if ((last_timestamp - start_timestamp) >= INT32_MAX)
1001             return OA_READ_STATUS_UNFINISHED;
1002 
1003          if ((last_timestamp - start_timestamp) <
1004               (end_timestamp - start_timestamp))
1005             return OA_READ_STATUS_UNFINISHED;
1006 
1007          return OA_READ_STATUS_FINISHED;
1008       }
1009 
1010       buf->len = len;
1011       exec_list_push_tail(&perf_ctx->sample_buffers, &buf->link);
1012 
1013       /* Go through the reports and update the last timestamp. */
1014       offset = 0;
1015       while (offset < buf->len) {
1016          const struct intel_perf_record_header *header =
1017             (const struct intel_perf_record_header *) &buf->buf[offset];
1018          uint32_t *report = (uint32_t *) (header + 1);
1019 
1020          if (header->type == INTEL_PERF_RECORD_TYPE_SAMPLE)
1021             last_timestamp = report[1];
1022 
1023          offset += header->size;
1024          sample_read = true;
1025       }
1026 
1027       buf->last_timestamp = last_timestamp;
1028    }
1029 
1030    unreachable("not reached");
1031    return OA_READ_STATUS_ERROR;
1032 }
1033 
1034 /**
1035  * Try to read all the reports until either the delimiting timestamp
1036  * or an error arises.
1037  */
1038 static bool
1039 read_oa_samples_for_query(struct intel_perf_context *perf_ctx,
1040                           struct intel_perf_query_object *query,
1041                           void *current_batch)
1042 {
1043    uint32_t *start;
1044    uint32_t *last;
1045    uint32_t *end;
1046    struct intel_perf_config *perf_cfg = perf_ctx->perf;
1047 
1048    /* We need the MI_REPORT_PERF_COUNT to land before we can start
1049     * accumulating. */
1050    assert(!perf_cfg->vtbl.batch_references(current_batch, query->oa.bo) &&
1051           !perf_cfg->vtbl.bo_busy(query->oa.bo));
1052 
1053    /* Map the BO once here and let accumulate_oa_reports() unmap
1054     * it. */
1055    if (query->oa.map == NULL)
1056       query->oa.map = perf_cfg->vtbl.bo_map(perf_ctx->ctx, query->oa.bo, MAP_READ);
1057 
1058    start = last = query->oa.map;
1059    end = query->oa.map + perf_ctx->perf->query_layout.size;
1060 
1061    if (start[0] != query->oa.begin_report_id) {
1062       DBG("Spurious start report id=%"PRIu32"\n", start[0]);
1063       return true;
1064    }
1065    if (end[0] != (query->oa.begin_report_id + 1)) {
1066       DBG("Spurious end report id=%"PRIu32"\n", end[0]);
1067       return true;
1068    }
1069 
1070    /* Read the reports until the end timestamp. */
1071    switch (read_oa_samples_until(perf_ctx, start[1], end[1])) {
1072    case OA_READ_STATUS_ERROR:
1073       FALLTHROUGH; /* Let accumulate_oa_reports() deal with the error. */
1074    case OA_READ_STATUS_FINISHED:
1075       return true;
1076    case OA_READ_STATUS_UNFINISHED:
1077       return false;
1078    }
1079 
1080    unreachable("invalid read status");
1081    return false;
1082 }
1083 
1084 void
1085 intel_perf_wait_query(struct intel_perf_context *perf_ctx,
1086                       struct intel_perf_query_object *query,
1087                       void *current_batch)
1088 {
1089    struct intel_perf_config *perf_cfg = perf_ctx->perf;
1090    struct brw_bo *bo = NULL;
1091 
1092    switch (query->queryinfo->kind) {
1093    case INTEL_PERF_QUERY_TYPE_OA:
1094    case INTEL_PERF_QUERY_TYPE_RAW:
1095       bo = query->oa.bo;
1096       break;
1097 
1098    case INTEL_PERF_QUERY_TYPE_PIPELINE:
1099       bo = query->pipeline_stats.bo;
1100       break;
1101 
1102    default:
1103       unreachable("Unknown query type");
1104       break;
1105    }
1106 
1107    if (bo == NULL)
1108       return;
1109 
1110    /* If the current batch references our results bo then we need to
1111     * flush first...
1112     */
1113    if (perf_cfg->vtbl.batch_references(current_batch, bo))
1114       perf_cfg->vtbl.batchbuffer_flush(perf_ctx->ctx, __FILE__, __LINE__);
1115 
1116    perf_cfg->vtbl.bo_wait_rendering(bo);
1117 }
1118 
1119 bool
1120 intel_perf_is_query_ready(struct intel_perf_context *perf_ctx,
1121                           struct intel_perf_query_object *query,
1122                           void *current_batch)
1123 {
1124    struct intel_perf_config *perf_cfg = perf_ctx->perf;
1125 
1126    switch (query->queryinfo->kind) {
1127    case INTEL_PERF_QUERY_TYPE_OA:
1128    case INTEL_PERF_QUERY_TYPE_RAW:
1129       return (query->oa.results_accumulated ||
1130               (query->oa.bo &&
1131                !perf_cfg->vtbl.batch_references(current_batch, query->oa.bo) &&
1132                !perf_cfg->vtbl.bo_busy(query->oa.bo)));
1133 
1134    case INTEL_PERF_QUERY_TYPE_PIPELINE:
1135       return (query->pipeline_stats.bo &&
1136               !perf_cfg->vtbl.batch_references(current_batch, query->pipeline_stats.bo) &&
1137               !perf_cfg->vtbl.bo_busy(query->pipeline_stats.bo));
1138 
1139    default:
1140       unreachable("Unknown query type");
1141       break;
1142    }
1143 
1144    return false;
1145 }
1146 
1147 /**
1148  * Remove a query from the global list of unaccumulated queries
1149  * after successfully accumulating the OA reports associated with the
1150  * query in accumulate_oa_reports() or when discarding unwanted query
1151  * results.
1152  */
1153 static void
1154 drop_from_unaccumulated_query_list(struct intel_perf_context *perf_ctx,
1155                                    struct intel_perf_query_object *query)
1156 {
1157    for (int i = 0; i < perf_ctx->unaccumulated_elements; i++) {
1158       if (perf_ctx->unaccumulated[i] == query) {
1159          int last_elt = --perf_ctx->unaccumulated_elements;
1160 
1161          if (i == last_elt)
1162             perf_ctx->unaccumulated[i] = NULL;
1163          else {
1164             perf_ctx->unaccumulated[i] =
1165                perf_ctx->unaccumulated[last_elt];
1166          }
1167 
1168          break;
1169       }
1170    }
1171 
1172    /* Drop our samples_head reference so that associated periodic
1173     * sample data buffers can potentially be reaped if they aren't
1174     * referenced by any other queries...
1175     */
1176 
1177    struct oa_sample_buf *buf =
1178       exec_node_data(struct oa_sample_buf, query->oa.samples_head, link);
1179 
1180    assert(buf->refcount > 0);
1181    buf->refcount--;
1182 
1183    query->oa.samples_head = NULL;
1184 
1185    reap_old_sample_buffers(perf_ctx);
1186 }
1187 
1188 /* In general if we see anything spurious while accumulating results,
1189  * we don't try to continue accumulating the current query hoping
1190  * for the best; we scrap anything outstanding, and then hope for the
1191  * best with new queries.
1192  */
1193 static void
1194 discard_all_queries(struct intel_perf_context *perf_ctx)
1195 {
1196    while (perf_ctx->unaccumulated_elements) {
1197       struct intel_perf_query_object *query = perf_ctx->unaccumulated[0];
1198 
1199       query->oa.results_accumulated = true;
1200       drop_from_unaccumulated_query_list(perf_ctx, query);
1201 
1202       dec_n_users(perf_ctx);
1203    }
1204 }
1205 
1206 /* Looks for the validity bit of context ID (dword 2) of an OA report. */
1207 static bool
1208 oa_report_ctx_id_valid(const struct intel_device_info *devinfo,
1209                        const uint32_t *report)
1210 {
1211    assert(devinfo->ver >= 8);
1212    if (devinfo->ver == 8)
1213       return (report[0] & (1 << 25)) != 0;
1214    return (report[0] & (1 << 16)) != 0;
1215 }
1216 
1217 /**
1218  * Accumulate raw OA counter values based on deltas between pairs of
1219  * OA reports.
1220  *
1221  * Accumulation starts from the first report captured via
1222  * MI_REPORT_PERF_COUNT (MI_RPC) by brw_begin_perf_query() until the
1223  * last MI_RPC report requested by brw_end_perf_query(). Between these
1224  * two reports there may also be some number of periodically sampled OA
1225  * reports collected via the i915 perf interface - depending on the
1226  * duration of the query.
1227  *
1228  * These periodic snapshots help to ensure we handle counter overflow
1229  * correctly by being frequent enough to ensure we don't miss multiple
1230  * overflows of a counter between snapshots. For Gfx8+ the i915 perf
1231  * snapshots provide the extra context-switch reports that let us
1232  * subtract out the progress of counters associated with other
1233  * contexts running on the system.
1234  */
1235 static void
1236 accumulate_oa_reports(struct intel_perf_context *perf_ctx,
1237                       struct intel_perf_query_object *query)
1238 {
1239    const struct intel_device_info *devinfo = perf_ctx->devinfo;
1240    uint32_t *start;
1241    uint32_t *last;
1242    uint32_t *end;
1243    struct exec_node *first_samples_node;
1244    bool last_report_ctx_match = true;
1245    int out_duration = 0;
1246 
1247    assert(query->oa.map != NULL);
1248 
1249    start = last = query->oa.map;
1250    end = query->oa.map + perf_ctx->perf->query_layout.size;
1251 
1252    if (start[0] != query->oa.begin_report_id) {
1253       DBG("Spurious start report id=%"PRIu32"\n", start[0]);
1254       goto error;
1255    }
1256    if (end[0] != (query->oa.begin_report_id + 1)) {
1257       DBG("Spurious end report id=%"PRIu32"\n", end[0]);
1258       goto error;
1259    }
1260 
1261    /* On Gfx12+ OA reports are sourced from per context counters, so we don't
1262     * ever have to look at the global OA buffer. Yey \o/
1263     */
1264    if (perf_ctx->devinfo->ver >= 12) {
1265       last = start;
1266       goto end;
1267    }
1268 
1269    /* See if we have any periodic reports to accumulate too... */
1270 
1271    /* N.B. The oa.samples_head was set when the query began and
1272     * pointed to the tail of the perf_ctx->sample_buffers list at
1273     * the time the query started. Since the buffer existed before the
1274     * first MI_REPORT_PERF_COUNT command was emitted we therefore know
1275     * that no data in this particular node's buffer can possibly be
1276     * associated with the query - so skip ahead one...
1277     */
1278    first_samples_node = query->oa.samples_head->next;
1279 
1280    foreach_list_typed_from(struct oa_sample_buf, buf, link,
1281                            &perf_ctx->sample_buffers,
1282                            first_samples_node)
1283    {
1284       int offset = 0;
1285 
1286       while (offset < buf->len) {
1287          const struct intel_perf_record_header *header =
1288             (const struct intel_perf_record_header *)(buf->buf + offset);
1289 
1290          assert(header->size != 0);
1291          assert(header->size <= buf->len);
1292 
1293          offset += header->size;
1294 
1295          switch (header->type) {
1296          case INTEL_PERF_RECORD_TYPE_SAMPLE: {
1297             uint32_t *report = (uint32_t *)(header + 1);
1298             bool report_ctx_match = true;
1299             bool add = true;
1300 
1301             /* Ignore reports that come before the start marker.
1302              * (Note: takes care to allow overflow of 32bit timestamps)
1303              */
1304             if (intel_device_info_timebase_scale(devinfo,
1305                                                report[1] - start[1]) > 5000000000) {
1306                continue;
1307             }
1308 
1309             /* Ignore reports that come after the end marker.
1310              * (Note: takes care to allow overflow of 32bit timestamps)
1311              */
1312             if (intel_device_info_timebase_scale(devinfo,
1313                                                report[1] - end[1]) <= 5000000000) {
1314                goto end;
1315             }
1316 
1317             /* For Gfx8+ since the counters continue while other
1318              * contexts are running we need to discount any unrelated
1319              * deltas. The hardware automatically generates a report
1320              * on context switch which gives us a new reference point
1321              * to continue adding deltas from.
1322              *
1323              * For Haswell we can rely on the HW to stop the progress
1324              * of OA counters while any other context is active.
1325              */
1326             if (devinfo->ver >= 8) {
1327                /* Consider that the current report matches our context only if
1328                 * the report says the report ID is valid.
1329                 */
1330                report_ctx_match = oa_report_ctx_id_valid(devinfo, report) &&
1331                   report[2] == start[2];
1332                if (report_ctx_match)
1333                   out_duration = 0;
1334                else
1335                   out_duration++;
1336 
1337                /* Only add the delta between <last, report> if the last report
1338                 * was clearly identified as our context, or if we have at most
1339                 * 1 report without a matching ID.
1340                 *
1341                 * The OA unit will sometimes label reports with an invalid
1342                 * context ID when i915 rewrites the execlist submit register
1343                 * with the same context as the one currently running. This
1344                 * happens when i915 wants to notify the HW of a ringbuffer tail
1345                 * register update. We have to consider this report as part of
1346                 * our context as the 3d pipeline behind the OACS unit is still
1347                 * processing the operations started at the previous execlist
1348                 * submission.
1349                 */
1350                add = last_report_ctx_match && out_duration < 2;
1351             }
1352 
1353             if (add) {
1354                intel_perf_query_result_accumulate(&query->oa.result,
1355                                                 query->queryinfo,
1356                                                 last, report);
1357             } else {
1358                /* We're not adding the delta because we've identified it's not
1359                 * for the context we filter for. We can consider that the
1360                 * query was split.
1361                 */
1362                query->oa.result.query_disjoint = true;
1363             }
1364 
1365             last = report;
1366             last_report_ctx_match = report_ctx_match;
1367 
1368             break;
1369          }
1370 
1371          case INTEL_PERF_RECORD_TYPE_OA_BUFFER_LOST:
1372              DBG("perf: OA error: all reports lost\n");
1373              goto error;
1374          case INTEL_PERF_RECORD_TYPE_OA_REPORT_LOST:
1375              DBG("perf: OA report lost\n");
1376              break;
1377          }
1378       }
1379    }
1380 
1381 end:
1382 
1383    intel_perf_query_result_accumulate(&query->oa.result, query->queryinfo,
1384                                     last, end);
1385 
1386    query->oa.results_accumulated = true;
1387    drop_from_unaccumulated_query_list(perf_ctx, query);
1388    dec_n_users(perf_ctx);
1389 
1390    return;
1391 
1392 error:
1393 
1394    discard_all_queries(perf_ctx);
1395 }
1396 
1397 void
1398 intel_perf_delete_query(struct intel_perf_context *perf_ctx,
1399                         struct intel_perf_query_object *query)
1400 {
1401    struct intel_perf_config *perf_cfg = perf_ctx->perf;
1402 
1403    /* We can assume that the frontend waits for a query to complete
1404     * before ever calling into here, so we don't have to worry about
1405     * deleting an in-flight query object.
1406     */
1407    switch (query->queryinfo->kind) {
1408    case INTEL_PERF_QUERY_TYPE_OA:
1409    case INTEL_PERF_QUERY_TYPE_RAW:
      if (query->oa.bo) {
         if (!query->oa.results_accumulated) {
            drop_from_unaccumulated_query_list(perf_ctx, query);
            dec_n_users(perf_ctx);
         }

         perf_cfg->vtbl.bo_unreference(query->oa.bo);
         query->oa.bo = NULL;
      }

      query->oa.results_accumulated = false;
      break;

   case INTEL_PERF_QUERY_TYPE_PIPELINE:
      if (query->pipeline_stats.bo) {
         perf_cfg->vtbl.bo_unreference(query->pipeline_stats.bo);
         query->pipeline_stats.bo = NULL;
      }
      break;

   default:
      unreachable("Unknown query type");
      break;
   }

   /* Once the last query instance is deleted, the INTEL_performance_query
    * extension is no longer in use, so it's a good time to free our cache
    * of sample buffers and close any current i915-perf stream.
    */
   if (--perf_ctx->n_query_instances == 0) {
      free_sample_bufs(perf_ctx);
      intel_perf_close(perf_ctx, query->queryinfo);
   }

   free(query);
}

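/* Write the value of each counter of an OA query into the client-provided
 * buffer, at the offset advertised for that counter, and return the number
 * of bytes written (the end of the furthest counter written). Note that
 * data_size is not used for bounds checking here; callers are expected to
 * provide a buffer sized for the query's full data layout.
 */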
static int
get_oa_counter_data(struct intel_perf_context *perf_ctx,
                    struct intel_perf_query_object *query,
                    size_t data_size,
                    uint8_t *data)
{
   struct intel_perf_config *perf_cfg = perf_ctx->perf;
   const struct intel_perf_query_info *queryinfo = query->queryinfo;
   int n_counters = queryinfo->n_counters;
   int written = 0;

   for (int i = 0; i < n_counters; i++) {
      const struct intel_perf_query_counter *counter = &queryinfo->counters[i];
      uint64_t *out_uint64;
      float *out_float;
      size_t counter_size = intel_perf_query_counter_get_size(counter);

      if (counter_size) {
         switch (counter->data_type) {
         case INTEL_PERF_COUNTER_DATA_TYPE_UINT64:
            out_uint64 = (uint64_t *)(data + counter->offset);
            *out_uint64 =
               counter->oa_counter_read_uint64(perf_cfg, queryinfo,
                                               &query->oa.result);
            break;
         case INTEL_PERF_COUNTER_DATA_TYPE_FLOAT:
            out_float = (float *)(data + counter->offset);
            *out_float =
               counter->oa_counter_read_float(perf_cfg, queryinfo,
                                              &query->oa.result);
            break;
         default:
            /* So far we aren't using uint32, double or bool32... */
            unreachable("unexpected counter data type");
         }

         if (counter->offset + counter_size > written)
            written = counter->offset + counter_size;
      }
   }

   return written;
}

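/* Read back a pipeline statistics query: the BO holds a begin snapshot of
 * each statistics register at offset 0 and an end snapshot at
 * STATS_BO_END_OFFSET_BYTES. For each counter we compute the end - begin
 * delta, apply the counter's numerator/denominator scaling when they
 * differ, and pack the values as consecutive uint64_t into the client
 * buffer, returning the number of bytes written.
 */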
static int
get_pipeline_stats_data(struct intel_perf_context *perf_ctx,
                        struct intel_perf_query_object *query,
                        size_t data_size,
                        uint8_t *data)
{
   struct intel_perf_config *perf_cfg = perf_ctx->perf;
   const struct intel_perf_query_info *queryinfo = query->queryinfo;
   int n_counters = queryinfo->n_counters;
   uint8_t *p = data;

   uint64_t *start = perf_cfg->vtbl.bo_map(perf_ctx->ctx, query->pipeline_stats.bo, MAP_READ);
   uint64_t *end = start + (STATS_BO_END_OFFSET_BYTES / sizeof(uint64_t));

   for (int i = 0; i < n_counters; i++) {
      const struct intel_perf_query_counter *counter = &queryinfo->counters[i];
      uint64_t value = end[i] - start[i];

      if (counter->pipeline_stat.numerator !=
          counter->pipeline_stat.denominator) {
         value *= counter->pipeline_stat.numerator;
         value /= counter->pipeline_stat.denominator;
      }

      *((uint64_t *)p) = value;
      p += 8;
   }

   perf_cfg->vtbl.bo_unmap(query->pipeline_stats.bo);

   return p - data;
}

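/* Copy the results of a completed query into the client buffer.
 *
 * For OA and RAW queries this first drains any outstanding periodic OA
 * samples and accumulates them (discarding deltas that belong to other
 * contexts), then serializes the result either per counter (OA) or in the
 * MDAPI layout (RAW). Pipeline statistics queries are read straight from
 * their begin/end BO snapshots.
 */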
void
intel_perf_get_query_data(struct intel_perf_context *perf_ctx,
                          struct intel_perf_query_object *query,
                          void *current_batch,
                          int data_size,
                          unsigned *data,
                          unsigned *bytes_written)
{
   struct intel_perf_config *perf_cfg = perf_ctx->perf;
   int written = 0;

   switch (query->queryinfo->kind) {
   case INTEL_PERF_QUERY_TYPE_OA:
   case INTEL_PERF_QUERY_TYPE_RAW:
      if (!query->oa.results_accumulated) {
         /* Due to the rate at which the i915-perf driver samples the OA
          * buffer, there can be a 5ms delay between Mesa seeing the query
          * complete and i915 making all the OA buffer reports available to
          * us. We need to wait for all the reports to come in before we can
          * do the post-processing that removes unrelated deltas.
          * There is an i915-perf series to address this issue, but it has
          * not been merged upstream yet.
          */
         while (!read_oa_samples_for_query(perf_ctx, query, current_batch))
            ;

         uint32_t *begin_report = query->oa.map;
         uint32_t *end_report = query->oa.map + perf_cfg->query_layout.size;
         intel_perf_query_result_accumulate_fields(&query->oa.result,
                                                   query->queryinfo,
                                                   begin_report,
                                                   end_report,
                                                   true /* no_oa_accumulate */);
         accumulate_oa_reports(perf_ctx, query);
         assert(query->oa.results_accumulated);

         perf_cfg->vtbl.bo_unmap(query->oa.bo);
         query->oa.map = NULL;
      }
      if (query->queryinfo->kind == INTEL_PERF_QUERY_TYPE_OA) {
         written = get_oa_counter_data(perf_ctx, query, data_size, (uint8_t *)data);
      } else {
         const struct intel_device_info *devinfo = perf_ctx->devinfo;

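         /* RAW queries return the accumulated results in the MDAPI
          * structure layout rather than in the per-counter layout used
          * above.
          */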
         written = intel_perf_query_result_write_mdapi((uint8_t *)data, data_size,
                                                       devinfo, query->queryinfo,
                                                       &query->oa.result);
      }
      break;

   case INTEL_PERF_QUERY_TYPE_PIPELINE:
      written = get_pipeline_stats_data(perf_ctx, query, data_size, (uint8_t *)data);
      break;

   default:
      unreachable("Unknown query type");
      break;
   }

   if (bytes_written)
      *bytes_written = written;
}

void
intel_perf_dump_query_count(struct intel_perf_context *perf_ctx)
{
   DBG("Queries: (Open queries = %d, OA users = %d)\n",
       perf_ctx->n_active_oa_queries, perf_ctx->n_oa_users);
}

void
intel_perf_dump_query(struct intel_perf_context *ctx,
                      struct intel_perf_query_object *obj,
                      void *current_batch)
{
   switch (obj->queryinfo->kind) {
   case INTEL_PERF_QUERY_TYPE_OA:
   case INTEL_PERF_QUERY_TYPE_RAW:
      DBG("BO: %-4s OA data: %-10s %-15s\n",
          obj->oa.bo ? "yes," : "no,",
          intel_perf_is_query_ready(ctx, obj, current_batch) ? "ready," : "not ready,",
          obj->oa.results_accumulated ? "accumulated" : "not accumulated");
      break;
   case INTEL_PERF_QUERY_TYPE_PIPELINE:
      DBG("BO: %-4s\n",
          obj->pipeline_stats.bo ? "yes" : "no");
      break;
   default:
      unreachable("Unknown query type");
      break;
   }
}