/*
 * Copyright © 2012 Rob Clark <[email protected]>
 * SPDX-License-Identifier: MIT
 */

/*
 * Helper lib to track gpu buffer contents/addresses, and to map between
 * gpu and host addresses while decoding cmdstreams/crashdumps.
 */

#include <assert.h>
#include <stdlib.h>
#include <string.h>

#include "util/rb_tree.h"
#include "buffers.h"

struct buffer {
   struct rb_node node;
   void *hostptr;
   unsigned int len;
   uint64_t gpuaddr;

   /* For 'once' mode, for buffers containing cmdstream, keep track per
    * offset into the buffer of which modes it has already been dumped in:
    */
   struct {
      unsigned offset;
      unsigned dumped_mask;
   } offsets[256];
   unsigned noffsets;
};

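/* All known buffers, kept in an rb-tree sorted by gpuaddr so address
 * lookups can bisect rather than scan:
 */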
static struct rb_tree buffers;

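/* Insertion comparator for the rb-tree: orders buffers by start gpuaddr: */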
static int
buffer_insert_cmp(const struct rb_node *n1, const struct rb_node *n2)
{
   const struct buffer *buf1 = (const struct buffer *)n1;
   const struct buffer *buf2 = (const struct buffer *)n2;
   /* Note that gpuaddr comparisons can overflow an int: */
   if (buf1->gpuaddr > buf2->gpuaddr)
      return 1;
   else if (buf1->gpuaddr < buf2->gpuaddr)
      return -1;
   return 0;
}

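/* Search comparator: matches a node when the searched-for gpu address
 * falls within the buffer's [gpuaddr, gpuaddr + len) range:
 */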
static int
buffer_search_cmp(const struct rb_node *node, const void *addrptr)
{
   const struct buffer *buf = (const struct buffer *)node;
   uint64_t gpuaddr = *(uint64_t *)addrptr;
   if (buf->gpuaddr + buf->len <= gpuaddr)
      return -1;
   else if (buf->gpuaddr > gpuaddr)
      return 1;
   return 0;
}

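/* Look up the buffer containing the given gpu address, or NULL: */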
static struct buffer *
get_buffer(uint64_t gpuaddr)
{
   if (gpuaddr == 0)
      return NULL;
   return (struct buffer *)rb_tree_search(&buffers, &gpuaddr,
                                          buffer_search_cmp);
}

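/* Check whether a host pointer falls within a buffer's mapped contents: */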
static int
buffer_contains_hostptr(struct buffer *buf, void *hostptr)
{
   return (buf->hostptr <= hostptr) && (hostptr < (buf->hostptr + buf->len));
}

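/* Translate a host pointer back to its gpu address, or 0 if it does not
 * fall within any known buffer (this walks all buffers, since the tree is
 * sorted by gpu address, not host address):
 */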
uint64_t
gpuaddr(void *hostptr)
{
   rb_tree_foreach (struct buffer, buf, &buffers, node) {
      if (buffer_contains_hostptr(buf, hostptr))
         return buf->gpuaddr + (hostptr - buf->hostptr);
   }
   return 0;
}

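/* Return the start gpu address of the buffer containing gpuaddr, or 0 if
 * unknown:
 */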
uint64_t
gpubaseaddr(uint64_t gpuaddr)
{
   struct buffer *buf = get_buffer(gpuaddr);
   if (buf)
      return buf->gpuaddr;
   else
      return 0;
}

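/* Translate a gpu address to the corresponding host pointer, or NULL if
 * unknown:
 */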
void *
hostptr(uint64_t gpuaddr)
{
   struct buffer *buf = get_buffer(gpuaddr);
   if (buf)
      return buf->hostptr + (gpuaddr - buf->gpuaddr);
   else
      return NULL;
}

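/* Return the number of bytes from gpuaddr to the end of its containing
 * buffer, or 0 if unknown:
 */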
unsigned
hostlen(uint64_t gpuaddr)
{
   struct buffer *buf = get_buffer(gpuaddr);
   if (buf)
      return buf->len + buf->gpuaddr - gpuaddr;
   else
      return 0;
}

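/* For 'once' mode: returns true if this gpu address has already been
 * dumped for all of the modes in enable_mask; otherwise records those
 * modes as dumped and returns false, so only the first caller for a given
 * offset/mask combination actually dumps:
 */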
bool
has_dumped(uint64_t gpuaddr, unsigned enable_mask)
{
   if (!gpuaddr)
      return false;

   struct buffer *b = get_buffer(gpuaddr);
   if (!b)
      return false;

   assert(gpuaddr >= b->gpuaddr);
   unsigned offset = gpuaddr - b->gpuaddr;

   unsigned n = 0;
   while (n < b->noffsets) {
      if (offset == b->offsets[n].offset)
         break;
      n++;
   }

   /* If needed, allocate a new offset entry: */
   if (n == b->noffsets) {
      b->noffsets++;
      assert(b->noffsets < ARRAY_SIZE(b->offsets));
      b->offsets[n].dumped_mask = 0;
      b->offsets[n].offset = offset;
   }

   if ((b->offsets[n].dumped_mask & enable_mask) == enable_mask)
      return true;

   b->offsets[n].dumped_mask |= enable_mask;

   return false;
}

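/* Forget all tracked buffers, freeing both the buffer contents and the
 * tracking state:
 */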
void
reset_buffers(void)
{
   rb_tree_foreach_safe (struct buffer, buf, &buffers, node) {
      rb_tree_remove(&buffers, &buf->node);
      free(buf->hostptr);
      free(buf);
   }
}

/**
 * Record buffer contents; takes ownership of hostptr (freed in
 * reset_buffers()).
 */
void
add_buffer(uint64_t gpuaddr, unsigned int len, void *hostptr)
{
   struct buffer *buf = get_buffer(gpuaddr);

   if (!buf) {
      buf = calloc(1, sizeof(struct buffer));
      buf->gpuaddr = gpuaddr;
      rb_tree_insert(&buffers, &buf->node, buffer_insert_cmp);
   }

   /* We can end up in scenarios where we capture parts of a buffer that
    * has been suballocated from twice, once as a dumped buffer and once
    * as a cmd... possibly the kernel should get more clever about this,
    * but we need to tolerate it:
    */
   if (buf->gpuaddr != gpuaddr) {
      assert(gpuaddr > buf->gpuaddr);
      assert((gpuaddr + len) <= (buf->gpuaddr + buf->len));

      void *ptr = ((uint8_t *)buf->hostptr) + (gpuaddr - buf->gpuaddr);
      assert(!memcmp(ptr, hostptr, len));

      return;
   }

   buf->hostptr = hostptr;
   buf->len = len;
}
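
/* A rough usage sketch (the addresses, sizes, and captured_contents
 * pointer below are hypothetical, not part of this file): a decoder
 * registers each captured buffer once, translates addresses while
 * parsing, and resets between captures:
 *
 *    void *copy = malloc(0x1000);
 *    memcpy(copy, captured_contents, 0x1000);
 *    add_buffer(0x60000000, 0x1000, copy);      // takes ownership of copy
 *
 *    uint32_t *dwords = hostptr(0x60000100);    // host view of a gpu address
 *    unsigned remaining = hostlen(0x60000100);  // bytes to end of buffer
 *
 *    reset_buffers();                           // frees copy and all state
 */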
195