/*
 * Copyright 2016 Patrick Rudolph <[email protected]>
 * SPDX-License-Identifier: MIT
 */

#include "nine_queue.h"
#include "util/u_thread.h"
#include "util/macros.h"
#include "nine_helpers.h"

#define NINE_CMD_BUF_INSTR (256)

#define NINE_CMD_BUFS (32)
#define NINE_CMD_BUFS_MASK (NINE_CMD_BUFS - 1)

#define NINE_QUEUE_SIZE (8192 * 16 + 128)

#define DBG_CHANNEL DBG_DEVICE

/*
 * Single producer - single consumer pool queue
 *
 * Producer:
 * Calls nine_queue_alloc to get a slice of memory in the current cmdbuf.
 * Calls nine_queue_flush to flush the queue on request.
 * The queue is flushed automatically when the current cmdbuf has
 * insufficient space or already contains NINE_CMD_BUF_INSTR instructions.
 *
 * nine_queue_flush blocks until the next cmdbuf is free; nine_queue_alloc
 * only blocks when it has to trigger such an automatic flush.
 *
 * nine_queue_alloc returns NULL if the requested size exceeds
 * NINE_QUEUE_SIZE.
 *
 * Consumer:
 * Calls nine_queue_wait_flush to wait for a cmdbuf.
 * After waiting for a cmdbuf it calls nine_queue_get until NULL is returned.
 *
 * nine_queue_wait_flush blocks, while nine_queue_get doesn't.
 *
 * Constraints:
 * Only a single consumer and a single producer are supported.
 *
 */
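
/*
 * Minimal usage sketch (illustration only): the producer and consumer are
 * two threads wired up by the caller; my_cmd, MY_OPCODE and process() are
 * hypothetical, only the nine_queue_* calls below are real.
 *
 *     struct nine_queue_pool *pool = nine_queue_create();
 *
 *     // producer thread
 *     struct my_cmd *cmd = nine_queue_alloc(pool, sizeof(struct my_cmd));
 *     cmd->opcode = MY_OPCODE;     // fill in the allocated slice
 *     nine_queue_flush(pool);      // hand the cmdbuf to the consumer
 *
 *     // consumer thread
 *     nine_queue_wait_flush(pool); // blocks until a cmdbuf was flushed
 *     while ((cmd = nine_queue_get(pool)))
 *         process(cmd);            // consume one instruction slice
 *
 *     nine_queue_delete(pool);
 */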

struct nine_cmdbuf {
    unsigned instr_size[NINE_CMD_BUF_INSTR];
    unsigned num_instr;
    unsigned offset;
    void *mem_pool;
    BOOL full;
};

struct nine_queue_pool {
    struct nine_cmdbuf pool[NINE_CMD_BUFS];
    unsigned head;
    unsigned tail;
    unsigned cur_instr;
    BOOL worker_wait;
    cnd_t event_pop;
    cnd_t event_push;
    mtx_t mutex_pop;
    mtx_t mutex_push;
};

/* Consumer functions: */
void
nine_queue_wait_flush(struct nine_queue_pool* ctx)
{
    struct nine_cmdbuf *cmdbuf = &ctx->pool[ctx->tail];

    /* wait for cmdbuf full */
    mtx_lock(&ctx->mutex_push);
    while (!cmdbuf->full)
    {
        DBG("waiting for full cmdbuf\n");
        cnd_wait(&ctx->event_push, &ctx->mutex_push);
    }
    DBG("got cmdbuf=%p\n", cmdbuf);
    mtx_unlock(&ctx->mutex_push);

    cmdbuf->offset = 0;
    ctx->cur_instr = 0;
}

/* Gets a pointer to the next memory slice.
 * Does not block.
 * Returns NULL on empty cmdbuf. */
void *
nine_queue_get(struct nine_queue_pool* ctx)
{
    struct nine_cmdbuf *cmdbuf = &ctx->pool[ctx->tail];
    unsigned offset;

    /* At this point there's always a cmdbuf. */

    if (ctx->cur_instr == cmdbuf->num_instr) {
        /* signal waiting producer */
        mtx_lock(&ctx->mutex_pop);
        DBG("freeing cmdbuf=%p\n", cmdbuf);
        cmdbuf->full = 0;
        cnd_signal(&ctx->event_pop);
        mtx_unlock(&ctx->mutex_pop);

        ctx->tail = (ctx->tail + 1) & NINE_CMD_BUFS_MASK;

        return NULL;
    }

    /* At this point there's always a cmdbuf with an instruction to process. */
    offset = cmdbuf->offset;
    cmdbuf->offset += cmdbuf->instr_size[ctx->cur_instr];
    ctx->cur_instr ++;

    return cmdbuf->mem_pool + offset;
}
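
/*
 * Consumer-side sketch (illustration only, pool is a struct nine_queue_pool *
 * obtained from nine_queue_create): one nine_queue_wait_flush per cmdbuf,
 * then nine_queue_get until it returns NULL, which hands the cmdbuf back to
 * the producer. handle_instr() is a hypothetical callback.
 *
 *     for (;;) {
 *         void *instr;
 *         nine_queue_wait_flush(pool);
 *         while ((instr = nine_queue_get(pool)))
 *             handle_instr(instr);
 *     }
 */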

/* Producer functions: */

/* Flushes the queue.
 * Moves the current cmdbuf to the worker thread.
 * Blocks until the next cmdbuf is free. */
void
nine_queue_flush(struct nine_queue_pool* ctx)
{
    struct nine_cmdbuf *cmdbuf = &ctx->pool[ctx->head];

    DBG("flushing cmdbuf=%p instr=%d size=%d\n",
           cmdbuf, cmdbuf->num_instr, cmdbuf->offset);

    /* Nothing to flush */
    if (!cmdbuf->num_instr)
        return;

    /* signal waiting worker */
    mtx_lock(&ctx->mutex_push);
    cmdbuf->full = 1;
    cnd_signal(&ctx->event_push);
    mtx_unlock(&ctx->mutex_push);

    ctx->head = (ctx->head + 1) & NINE_CMD_BUFS_MASK;

    cmdbuf = &ctx->pool[ctx->head];

    /* wait for queue empty */
    mtx_lock(&ctx->mutex_pop);
    while (cmdbuf->full)
    {
        DBG("waiting for empty cmdbuf\n");
        cnd_wait(&ctx->event_pop, &ctx->mutex_pop);
    }
    DBG("got empty cmdbuf=%p\n", cmdbuf);
    mtx_unlock(&ctx->mutex_pop);
    cmdbuf->offset = 0;
    cmdbuf->num_instr = 0;
}

/* Gets a pointer to a slice of memory of size @space.
 * Blocks if the queue is full.
 * Returns NULL if @space > NINE_QUEUE_SIZE. */
void *
nine_queue_alloc(struct nine_queue_pool* ctx, unsigned space)
{
    unsigned offset;
    struct nine_cmdbuf *cmdbuf = &ctx->pool[ctx->head];

    if (space > NINE_QUEUE_SIZE)
        return NULL;

    /* at this point there's always a free cmdbuf available */

    if ((cmdbuf->offset + space > NINE_QUEUE_SIZE) ||
        (cmdbuf->num_instr == NINE_CMD_BUF_INSTR)) {

        nine_queue_flush(ctx);

        cmdbuf = &ctx->pool[ctx->head];
    }

    DBG("cmdbuf=%p space=%d\n", cmdbuf, space);

    /* at this point there's always a free cmdbuf with sufficient space available */

    offset = cmdbuf->offset;
    cmdbuf->offset += space;
    cmdbuf->instr_size[cmdbuf->num_instr] = space;
    cmdbuf->num_instr ++;

    return cmdbuf->mem_pool + offset;
}
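
/*
 * Producer-side sketch (illustration only, pool as above): several
 * allocations can land in the same cmdbuf; an explicit nine_queue_flush
 * hands whatever has accumulated to the consumer. struct cmd_foo and
 * struct cmd_bar are hypothetical payloads.
 *
 *     struct cmd_foo *foo = nine_queue_alloc(pool, sizeof(*foo));
 *     struct cmd_bar *bar = nine_queue_alloc(pool, sizeof(*bar));
 *     // ... fill in foo and bar ...
 *     nine_queue_flush(pool);
 */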

/* Returns the current queue flush state.
 * TRUE: no flushed cmdbuf is pending.
 * FALSE: one or more flushed cmdbufs are waiting to be consumed. */
bool
nine_queue_no_flushed_work(struct nine_queue_pool* ctx)
{
    return (ctx->tail == ctx->head);
}

/* Returns the current queue empty state.
 * TRUE: no instructions queued.
 * FALSE: one or more instructions queued. */
bool
nine_queue_isempty(struct nine_queue_pool* ctx)
{
    struct nine_cmdbuf *cmdbuf = &ctx->pool[ctx->head];

    return (ctx->tail == ctx->head) && !cmdbuf->num_instr;
}

struct nine_queue_pool*
nine_queue_create(void)
{
    unsigned i;
    struct nine_queue_pool *ctx;

    ctx = CALLOC_STRUCT(nine_queue_pool);
    if (!ctx)
        goto failed;

    for (i = 0; i < NINE_CMD_BUFS; i++) {
        ctx->pool[i].mem_pool = MALLOC(NINE_QUEUE_SIZE);
        if (!ctx->pool[i].mem_pool)
            goto failed;
    }

    cnd_init(&ctx->event_pop);
    (void) mtx_init(&ctx->mutex_pop, mtx_plain);

    cnd_init(&ctx->event_push);
    (void) mtx_init(&ctx->mutex_push, mtx_plain);

    /* Block until first cmdbuf has been flushed. */
    ctx->worker_wait = true;

    return ctx;
failed:
    if (ctx) {
        for (i = 0; i < NINE_CMD_BUFS; i++) {
            if (ctx->pool[i].mem_pool)
                FREE(ctx->pool[i].mem_pool);
        }
        FREE(ctx);
    }
    return NULL;
}

void
nine_queue_delete(struct nine_queue_pool *ctx)
{
    unsigned i;

    mtx_destroy(&ctx->mutex_pop);
    cnd_destroy(&ctx->event_pop);

    mtx_destroy(&ctx->mutex_push);
    cnd_destroy(&ctx->event_push);

    for (i = 0; i < NINE_CMD_BUFS; i++)
        FREE(ctx->pool[i].mem_pool);

    FREE(ctx);
}
263