1 /**************************************************************************
2 *
3 * Copyright 2007 VMware, Inc.
4 * Copyright 2010 VMware, Inc.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL THE AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 **************************************************************************/
28
29 /* Authors:
30 * Keith Whitwell, Qicheng Christopher Li, Brian Paul
31 */
32
33 #include "draw/draw_context.h"
34 #include "pipe/p_defines.h"
35 #include "util/u_memory.h"
36 #include "util/os_time.h"
37 #include "lp_context.h"
38 #include "lp_flush.h"
39 #include "lp_fence.h"
40 #include "lp_query.h"
41 #include "lp_screen.h"
42 #include "lp_state.h"
43 #include "lp_rast.h"
44
45
/* Downcast the opaque gallium query handle to llvmpipe's subclass. */
static struct llvmpipe_query *
llvmpipe_query(struct pipe_query *p)
{
   return (struct llvmpipe_query *) p;
}
51
52
53 static struct pipe_query *
llvmpipe_create_query(struct pipe_context * pipe,unsigned type,unsigned index)54 llvmpipe_create_query(struct pipe_context *pipe,
55 unsigned type,
56 unsigned index)
57 {
58 assert(type < PIPE_QUERY_TYPES);
59
60 struct llvmpipe_query *pq = CALLOC_STRUCT(llvmpipe_query);
61 if (pq) {
62 pq->type = type;
63 pq->index = index;
64 }
65
66 return (struct pipe_query *) pq;
67 }
68
69
70 static void
llvmpipe_destroy_query(struct pipe_context * pipe,struct pipe_query * q)71 llvmpipe_destroy_query(struct pipe_context *pipe, struct pipe_query *q)
72 {
73 struct llvmpipe_query *pq = llvmpipe_query(q);
74
75 /* Ideally we would refcount queries & not get destroyed until the
76 * last scene had finished with us.
77 */
78 if (pq->fence) {
79 if (!lp_fence_issued(pq->fence))
80 llvmpipe_flush(pipe, NULL, __func__);
81
82 if (!lp_fence_signalled(pq->fence))
83 lp_fence_wait(pq->fence);
84
85 lp_fence_reference(&pq->fence, NULL);
86 }
87
88 FREE(pq);
89 }
90
91
/**
 * Fetch the result of a query, combining the per-rasterizer-thread
 * counters into a single value.
 *
 * \param wait    if true, block until the scene owning the query finishes;
 *                if false and the result is not yet ready, return false.
 * \param result  receives the combined result; result->u64 is always
 *                zeroed first so narrow (1/4-byte) results have clean
 *                upper bytes.
 * \return true when a result was written, false when !wait and pending.
 */
static bool
llvmpipe_get_query_result(struct pipe_context *pipe,
                          struct pipe_query *q,
                          bool wait,
                          union pipe_query_result *result)
{
   const struct llvmpipe_screen *screen = llvmpipe_screen(pipe->screen);
   const unsigned num_threads = MAX2(1, screen->num_threads);
   struct llvmpipe_query *pq = llvmpipe_query(q);

   if (pq->fence) {
      /* only have a fence if there was a scene */
      if (!lp_fence_signalled(pq->fence)) {
         if (!lp_fence_issued(pq->fence))
            llvmpipe_flush(pipe, NULL, __func__);

         if (!wait)
            return false;

         lp_fence_wait(pq->fence);
      }
   }

   /* Always initialize the first 64-bit result word to zero since some
    * callers don't consider whether the result is actually a 1-byte or 4-byte
    * quantity.
    */
   result->u64 = 0;

   /* Combine the per-thread results */
   switch (pq->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
   {
      /* Sum the per-thread fragment counts. */
      uint64_t sum = 0;
      for (unsigned i = 0; i < num_threads; i++) {
         sum += pq->end[i];
      }
      result->u64 = sum;
   }
   break;
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
      /* Predicate form: true if any thread counted any fragment. */
      result->b = false;
      for (unsigned i = 0; i < num_threads; i++) {
         /* safer (still not guaranteed) when there's an overflow */
         if (pq->end[i] > 0) {
            result->b = true;
            break;
         }
      }
      break;
   case PIPE_QUERY_TIMESTAMP:
   {
      /* Report the latest timestamp recorded by any thread. */
      uint64_t max_time = 0;
      for (unsigned i = 0; i < num_threads; i++) {
         max_time = MAX2(max_time, pq->end[i]);
      }
      result->u64 = max_time;
   }
   break;
   case PIPE_QUERY_TIME_ELAPSED:
   {
      /* Elapsed = latest end - earliest start across threads; zero
       * entries mean the thread never touched the query and are skipped.
       */
      uint64_t start = UINT64_MAX, end = 0;
      for (unsigned i = 0; i < num_threads; i++) {
         if (pq->start[i]) {
            start = MIN2(start, pq->start[i]);
         }
         if (pq->end[i]) {
            end = MAX2(end, pq->end[i]);
         }
      }
      result->u64 = end - start;
   }
   break;
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
      /* os_get_time_nano returns nanoseconds, hence a 1 GHz frequency. */
      result->timestamp_disjoint.frequency = UINT64_C(1000000000);
      result->timestamp_disjoint.disjoint = false;
      break;
   case PIPE_QUERY_GPU_FINISHED:
      /* We waited on the fence above (or there was no scene). */
      result->b = true;
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
      result->u64 = pq->num_primitives_generated[0];
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      result->u64 = pq->num_primitives_written[0];
      break;
   case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
      /* Overflow on any vertex stream: generated exceeded written. */
      result->b = false;
      for (unsigned s = 0; s < PIPE_MAX_VERTEX_STREAMS; s++) {
         if (pq->num_primitives_generated[s] > pq->num_primitives_written[s]) {
            result->b = true;
            break;
         }
      }
      break;
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      result->b = pq->num_primitives_generated[0] > pq->num_primitives_written[0];
      break;
   case PIPE_QUERY_SO_STATISTICS:
      result->so_statistics.num_primitives_written = pq->num_primitives_written[0];
      result->so_statistics.primitives_storage_needed = pq->num_primitives_generated[0];
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
   {
      /* only ps_invocations are per-bin/thread */
      uint64_t sum = 0;
      for (unsigned i = 0; i < num_threads; i++) {
         sum += pq->end[i];
      }
      pq->stats.ps_invocations = sum;
      result->pipeline_statistics = pq->stats;
   }
   break;
   default:
      assert(0);
      break;
   }

   return true;
}
214
215
/**
 * Write a query result directly into a buffer resource (CPU-side copy,
 * since llvmpipe resources are host memory).
 *
 * \param flags        PIPE_QUERY_WAIT forces a fence wait; PIPE_QUERY_PARTIAL
 *                     allows writing a value even when the fence hasn't
 *                     signalled.
 * \param result_type  width/signedness of the value(s) written at offset.
 * \param index        -1 means "write availability (0/1)" instead of the
 *                     result; for pipeline statistics it selects the counter.
 * \param offset       byte offset into \p resource where results go.
 */
static void
llvmpipe_get_query_result_resource(struct pipe_context *pipe,
                                   struct pipe_query *q,
                                   enum pipe_query_flags flags,
                                   enum pipe_query_value_type result_type,
                                   int index,
                                   struct pipe_resource *resource,
                                   unsigned offset)
{
   const struct llvmpipe_screen *screen = llvmpipe_screen(pipe->screen);
   const unsigned num_threads = MAX2(1, screen->num_threads);
   const struct llvmpipe_query *pq = llvmpipe_query(q);
   const struct llvmpipe_resource *lpr = llvmpipe_resource(resource);
   uint64_t ready;

   if (pq->fence) {
      /* only have a fence if there was a scene */
      if (!lp_fence_signalled(pq->fence)) {
         if (!lp_fence_issued(pq->fence))
            llvmpipe_flush(pipe, NULL, __func__);

         if (flags & PIPE_QUERY_WAIT)
            lp_fence_wait(pq->fence);
      }
      ready = lp_fence_signalled(pq->fence);
   } else {
      /* No scene ever used the query, so it's trivially "available". */
      ready = 1;
   }

   uint64_t value = 0, value2 = 0;
   unsigned num_values = 1;
   if (index == -1) {
      /* Caller asked for the availability boolean, not the result. */
      value = ready;
   } else {
      /* don't write a value if fence hasn't signalled and partial isn't set */
      if (!ready && !(flags & PIPE_QUERY_PARTIAL))
         return;

      /* Combine the per-thread counters, mirroring
       * llvmpipe_get_query_result() above.
       */
      switch (pq->type) {
      case PIPE_QUERY_OCCLUSION_COUNTER:
         for (unsigned i = 0; i < num_threads; i++) {
            value += pq->end[i];
         }
         break;
      case PIPE_QUERY_OCCLUSION_PREDICATE:
      case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
         for (unsigned i = 0; i < num_threads; i++) {
            /* safer (still not guaranteed) when there's an overflow */
            value = value || pq->end[i];
         }
         break;
      case PIPE_QUERY_PRIMITIVES_GENERATED:
         value = pq->num_primitives_generated[0];
         break;
      case PIPE_QUERY_PRIMITIVES_EMITTED:
         value = pq->num_primitives_written[0];
         break;
      case PIPE_QUERY_TIMESTAMP:
         /* Latest timestamp recorded by any thread. */
         for (unsigned i = 0; i < num_threads; i++) {
            if (pq->end[i] > value) {
               value = pq->end[i];
            }
         }
         break;
      case PIPE_QUERY_TIME_ELAPSED: {
         /* Latest end - earliest start; zero entries are untouched slots. */
         uint64_t start = (uint64_t)-1, end = 0;
         for (unsigned i = 0; i < num_threads; i++) {
            if (pq->start[i] && pq->start[i] < start)
               start = pq->start[i];
            if (pq->end[i] && pq->end[i] > end)
               end = pq->end[i];
         }
         value = end - start;
         break;
      }
      case PIPE_QUERY_SO_STATISTICS:
         /* Two-word result: written count then storage needed. */
         value = pq->num_primitives_written[0];
         value2 = pq->num_primitives_generated[0];
         num_values = 2;
         break;
      case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
         value = 0;
         for (unsigned s = 0; s < PIPE_MAX_VERTEX_STREAMS; s++)
            value |= (pq->num_primitives_generated[s] > pq->num_primitives_written[s]);
         break;
      case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
         value = (pq->num_primitives_generated[0] > pq->num_primitives_written[0]);
         break;
      case PIPE_QUERY_PIPELINE_STATISTICS:
         /* index selects which statistics counter to write. */
         switch ((enum pipe_statistics_query_index)index) {
         case PIPE_STAT_QUERY_IA_VERTICES:
            value = pq->stats.ia_vertices;
            break;
         case PIPE_STAT_QUERY_IA_PRIMITIVES:
            value = pq->stats.ia_primitives;
            break;
         case PIPE_STAT_QUERY_VS_INVOCATIONS:
            value = pq->stats.vs_invocations;
            break;
         case PIPE_STAT_QUERY_GS_INVOCATIONS:
            value = pq->stats.gs_invocations;
            break;
         case PIPE_STAT_QUERY_GS_PRIMITIVES:
            value = pq->stats.gs_primitives;
            break;
         case PIPE_STAT_QUERY_C_INVOCATIONS:
            value = pq->stats.c_invocations;
            break;
         case PIPE_STAT_QUERY_C_PRIMITIVES:
            value = pq->stats.c_primitives;
            break;
         case PIPE_STAT_QUERY_PS_INVOCATIONS:
            /* ps_invocations are kept per-bin/thread; sum them here. */
            value = 0;
            for (unsigned i = 0; i < num_threads; i++) {
               value += pq->end[i];
            }
            break;
         case PIPE_STAT_QUERY_HS_INVOCATIONS:
            value = pq->stats.hs_invocations;
            break;
         case PIPE_STAT_QUERY_DS_INVOCATIONS:
            value = pq->stats.ds_invocations;
            break;
         case PIPE_STAT_QUERY_CS_INVOCATIONS:
            value = pq->stats.cs_invocations;
            break;
         case PIPE_STAT_QUERY_TS_INVOCATIONS:
            value = pq->stats.ts_invocations;
            break;
         case PIPE_STAT_QUERY_MS_INVOCATIONS:
            value = pq->stats.ms_invocations;
            break;
         }
         break;
      default:
         fprintf(stderr, "Unknown query type %d\n", pq->type);
         break;
      }
   }

   /* llvmpipe resources live in host memory, so write directly. */
   uint8_t *dst = (uint8_t *) lpr->data + offset;

   /* Write 1 or 2 result values */
   for (unsigned i = 0; i < num_values; i++) {
      if (i == 1) {
         value = value2;
         /* advance dst pointer by 4 or 8 bytes for the second word */
         dst += (result_type == PIPE_QUERY_TYPE_I64 ||
                 result_type == PIPE_QUERY_TYPE_U64) ? 8 : 4;
      }
      switch (result_type) {
      case PIPE_QUERY_TYPE_I32: {
         /* mask to avoid writing a negative-looking value */
         int32_t *iptr = (int32_t *)dst;
         *iptr = (int32_t) (value & INT32_MAX);
         break;
      }
      case PIPE_QUERY_TYPE_U32: {
         uint32_t *uptr = (uint32_t *)dst;
         *uptr = (uint32_t) (value & UINT32_MAX);
         break;
      }
      case PIPE_QUERY_TYPE_I64: {
         int64_t *iptr = (int64_t *)dst;
         *iptr = (int64_t)value;
         break;
      }
      case PIPE_QUERY_TYPE_U64: {
         uint64_t *uptr = (uint64_t *)dst;
         *uptr = (uint64_t)value;
         break;
      }
      }
   }
}
390
391
/**
 * Begin a query: reset the per-thread counters and snapshot the current
 * running totals for counter-style queries.  llvmpipe_end_query()
 * subtracts these snapshots to obtain per-query deltas.
 */
static bool
llvmpipe_begin_query(struct pipe_context *pipe, struct pipe_query *q)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
   struct llvmpipe_query *pq = llvmpipe_query(q);

   /* Check if the query is already in the scene. If so, we need to
    * flush the scene now. Real apps shouldn't re-use a query in a
    * frame of rendering.
    */
   if (pq->fence && !lp_fence_issued(pq->fence)) {
      llvmpipe_finish(pipe, __func__);
   }

   /* Reset per-thread start/end counters before binning begins. */
   memset(pq->start, 0, sizeof(pq->start));
   memset(pq->end, 0, sizeof(pq->end));
   lp_setup_begin_query(llvmpipe->setup, pq);

   switch (pq->type) {
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      /* Snapshot the running stream-out total for this stream. */
      pq->num_primitives_written[0] = llvmpipe->so_stats[pq->index].num_primitives_written;
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
      pq->num_primitives_generated[0] = llvmpipe->so_stats[pq->index].primitives_storage_needed;
      llvmpipe->active_primgen_queries++;
      break;
   case PIPE_QUERY_SO_STATISTICS:
      pq->num_primitives_written[0] = llvmpipe->so_stats[pq->index].num_primitives_written;
      pq->num_primitives_generated[0] = llvmpipe->so_stats[pq->index].primitives_storage_needed;
      break;
   case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
      /* "Any" predicate tracks every vertex stream. */
      for (unsigned s = 0; s < PIPE_MAX_VERTEX_STREAMS; s++) {
         pq->num_primitives_written[s] = llvmpipe->so_stats[s].num_primitives_written;
         pq->num_primitives_generated[s] = llvmpipe->so_stats[s].primitives_storage_needed;
      }
      break;
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      pq->num_primitives_written[0] = llvmpipe->so_stats[pq->index].num_primitives_written;
      pq->num_primitives_generated[0] = llvmpipe->so_stats[pq->index].primitives_storage_needed;
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      /* reset our cache */
      if (llvmpipe->active_statistics_queries == 0) {
         memset(&llvmpipe->pipeline_statistics, 0,
                sizeof(llvmpipe->pipeline_statistics));
      }
      /* Snapshot current totals; end_query computes the delta. */
      memcpy(&pq->stats, &llvmpipe->pipeline_statistics, sizeof(pq->stats));
      llvmpipe->active_statistics_queries++;
      break;
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
      /* Occlusion counting is baked into the fragment shader, so the
       * shader must be regenerated while a query is active.
       */
      llvmpipe->active_occlusion_queries++;
      llvmpipe->dirty |= LP_NEW_OCCLUSION_QUERY;
      break;
   default:
      break;
   }
   return true;
}
452
453
/**
 * End a query: convert the snapshots taken in llvmpipe_begin_query()
 * into deltas (current running total minus snapshot) and drop the
 * active-query bookkeeping.
 */
static bool
llvmpipe_end_query(struct pipe_context *pipe, struct pipe_query *q)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
   struct llvmpipe_query *pq = llvmpipe_query(q);

   lp_setup_end_query(llvmpipe->setup, pq);

   switch (pq->type) {

   case PIPE_QUERY_PRIMITIVES_EMITTED:
      /* delta = running total - snapshot taken at begin_query */
      pq->num_primitives_written[0] =
         llvmpipe->so_stats[pq->index].num_primitives_written - pq->num_primitives_written[0];
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
      assert(llvmpipe->active_primgen_queries);
      llvmpipe->active_primgen_queries--;
      pq->num_primitives_generated[0] =
         llvmpipe->so_stats[pq->index].primitives_storage_needed - pq->num_primitives_generated[0];
      break;
   case PIPE_QUERY_SO_STATISTICS:
      pq->num_primitives_written[0] =
         llvmpipe->so_stats[pq->index].num_primitives_written - pq->num_primitives_written[0];
      pq->num_primitives_generated[0] =
         llvmpipe->so_stats[pq->index].primitives_storage_needed - pq->num_primitives_generated[0];
      break;
   case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
      /* Deltas for every vertex stream. */
      for (unsigned s = 0; s < PIPE_MAX_VERTEX_STREAMS; s++) {
         pq->num_primitives_written[s] =
            llvmpipe->so_stats[s].num_primitives_written - pq->num_primitives_written[s];
         pq->num_primitives_generated[s] =
            llvmpipe->so_stats[s].primitives_storage_needed - pq->num_primitives_generated[s];
      }
      break;
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      pq->num_primitives_written[0] =
         llvmpipe->so_stats[pq->index].num_primitives_written - pq->num_primitives_written[0];
      pq->num_primitives_generated[0] =
         llvmpipe->so_stats[pq->index].primitives_storage_needed - pq->num_primitives_generated[0];
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      /* Per-counter deltas against the begin_query snapshot.  Note that
       * ps_invocations is recomputed from the per-thread counters at
       * result-fetch time.
       */
      pq->stats.ia_vertices =
         llvmpipe->pipeline_statistics.ia_vertices - pq->stats.ia_vertices;
      pq->stats.ia_primitives =
         llvmpipe->pipeline_statistics.ia_primitives - pq->stats.ia_primitives;
      pq->stats.vs_invocations =
         llvmpipe->pipeline_statistics.vs_invocations - pq->stats.vs_invocations;
      pq->stats.gs_invocations =
         llvmpipe->pipeline_statistics.gs_invocations - pq->stats.gs_invocations;
      pq->stats.gs_primitives =
         llvmpipe->pipeline_statistics.gs_primitives - pq->stats.gs_primitives;
      pq->stats.c_invocations =
         llvmpipe->pipeline_statistics.c_invocations - pq->stats.c_invocations;
      pq->stats.c_primitives =
         llvmpipe->pipeline_statistics.c_primitives - pq->stats.c_primitives;
      pq->stats.ps_invocations =
         llvmpipe->pipeline_statistics.ps_invocations - pq->stats.ps_invocations;
      pq->stats.cs_invocations =
         llvmpipe->pipeline_statistics.cs_invocations - pq->stats.cs_invocations;
      pq->stats.hs_invocations =
         llvmpipe->pipeline_statistics.hs_invocations - pq->stats.hs_invocations;
      pq->stats.ds_invocations =
         llvmpipe->pipeline_statistics.ds_invocations - pq->stats.ds_invocations;
      pq->stats.ts_invocations =
         llvmpipe->pipeline_statistics.ts_invocations - pq->stats.ts_invocations;
      pq->stats.ms_invocations =
         llvmpipe->pipeline_statistics.ms_invocations - pq->stats.ms_invocations;
      llvmpipe->active_statistics_queries--;
      break;
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
      /* Shader must be regenerated without the occlusion counting code. */
      assert(llvmpipe->active_occlusion_queries);
      llvmpipe->active_occlusion_queries--;
      llvmpipe->dirty |= LP_NEW_OCCLUSION_QUERY;
      break;
   default:
      break;
   }

   return true;
}
536
537
538 bool
llvmpipe_check_render_cond(struct llvmpipe_context * lp)539 llvmpipe_check_render_cond(struct llvmpipe_context *lp)
540 {
541 struct pipe_context *pipe = &lp->pipe;
542
543 if (lp->render_cond_buffer) {
544 uint32_t data = *(uint32_t *)((char *)lp->render_cond_buffer->data
545 + lp->render_cond_offset);
546 return (!data) == lp->render_cond_cond;
547 }
548 if (!lp->render_cond_query)
549 return true; /* no query predicate, draw normally */
550
551 bool wait = (lp->render_cond_mode == PIPE_RENDER_COND_WAIT ||
552 lp->render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT);
553
554 uint64_t result;
555 bool b = pipe->get_query_result(pipe, lp->render_cond_query, wait,
556 (void*)&result);
557 if (b)
558 return ((!result) == lp->render_cond_cond);
559 else
560 return true;
561 }
562
563
564 static void
llvmpipe_set_active_query_state(struct pipe_context * pipe,bool enable)565 llvmpipe_set_active_query_state(struct pipe_context *pipe, bool enable)
566 {
567 struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
568
569 llvmpipe->queries_disabled = !enable;
570 /* for OQs we need to regenerate the fragment shader */
571 llvmpipe->dirty |= LP_NEW_OCCLUSION_QUERY;
572 }
573
574
/**
 * Plug the query-related entry points into the context's vtable.
 */
void
llvmpipe_init_query_funcs(struct llvmpipe_context *llvmpipe)
{
   llvmpipe->pipe.create_query = llvmpipe_create_query;
   llvmpipe->pipe.destroy_query = llvmpipe_destroy_query;
   llvmpipe->pipe.begin_query = llvmpipe_begin_query;
   llvmpipe->pipe.end_query = llvmpipe_end_query;
   llvmpipe->pipe.get_query_result = llvmpipe_get_query_result;
   llvmpipe->pipe.get_query_result_resource = llvmpipe_get_query_result_resource;
   llvmpipe->pipe.set_active_query_state = llvmpipe_set_active_query_state;
}
586
587
588