/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * Authors:
 *      Marek Olšák
 * SPDX-License-Identifier: MIT
 */

#include "r600_cs.h"
#include "evergreen_compute.h"
#include "compute_memory_pool.h"
#include "util/macros.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include <inttypes.h>
#include <stdio.h>

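/* Return true if the buffer is referenced by the current GFX or DMA
 * command stream. */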
bool r600_rings_is_buffer_referenced(struct r600_common_context *ctx,
				     struct pb_buffer_lean *buf,
				     unsigned usage)
{
	if (ctx->ws->cs_is_buffer_referenced(&ctx->gfx.cs, buf, usage)) {
		return true;
	}
	if (radeon_emitted(&ctx->dma.cs, 0) &&
	    ctx->ws->cs_is_buffer_referenced(&ctx->dma.cs, buf, usage)) {
		return true;
	}
	return false;
}

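/* Map a buffer for CPU access, flushing and waiting for any GPU work that
 * references it unless an unsynchronized mapping was requested. Returns NULL
 * if PIPE_MAP_DONTBLOCK is set and the mapping would have to wait. */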
void *r600_buffer_map_sync_with_rings(struct r600_common_context *ctx,
                                      struct r600_resource *resource,
                                      unsigned usage)
{
	unsigned rusage = RADEON_USAGE_READWRITE;
	bool busy = false;

	assert(!(resource->flags & RADEON_FLAG_SPARSE));

	if (usage & PIPE_MAP_UNSYNCHRONIZED) {
		return ctx->ws->buffer_map(ctx->ws, resource->buf, NULL, usage);
	}

	if (!(usage & PIPE_MAP_WRITE)) {
		/* have to wait for the last write */
		rusage = RADEON_USAGE_WRITE;
	}

	if (radeon_emitted(&ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
	    ctx->ws->cs_is_buffer_referenced(&ctx->gfx.cs,
					     resource->buf, rusage)) {
		if (usage & PIPE_MAP_DONTBLOCK) {
			ctx->gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
			return NULL;
		} else {
			ctx->gfx.flush(ctx, 0, NULL);
			busy = true;
		}
	}
	if (radeon_emitted(&ctx->dma.cs, 0) &&
	    ctx->ws->cs_is_buffer_referenced(&ctx->dma.cs,
					     resource->buf, rusage)) {
		if (usage & PIPE_MAP_DONTBLOCK) {
			ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
			return NULL;
		} else {
			ctx->dma.flush(ctx, 0, NULL);
			busy = true;
		}
	}

	if (busy || !ctx->ws->buffer_wait(ctx->ws, resource->buf, 0, rusage)) {
		if (usage & PIPE_MAP_DONTBLOCK) {
			return NULL;
		} else {
			/* We will be waiting for the GPU. Wait for any offloaded
			 * CS flush to complete to avoid busy-waiting in the winsys. */
			ctx->ws->cs_sync_flush(&ctx->gfx.cs);
			if (ctx->dma.cs.priv)
				ctx->ws->cs_sync_flush(&ctx->dma.cs);
		}
	}

	/* Setting the CS to NULL will prevent doing checks we have done already. */
	return ctx->ws->buffer_map(ctx->ws, resource->buf, NULL, usage);
}

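/* Initialize the allocation parameters (size, alignment, domains, flags)
 * of a resource from its pipe_resource usage and bind flags. */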
void r600_init_resource_fields(struct r600_common_screen *rscreen,
			       struct r600_resource *res,
			       uint64_t size, unsigned alignment)
{
	struct r600_texture *rtex = container_of(res, struct r600_texture, resource);

	res->bo_size = size;
	res->bo_alignment = alignment;
	res->flags = 0;
	res->texture_handle_allocated = false;
	res->image_handle_allocated = false;

	switch (res->b.b.usage) {
	case PIPE_USAGE_STREAM:
		res->flags = RADEON_FLAG_GTT_WC;
		FALLTHROUGH;
	case PIPE_USAGE_STAGING:
		/* Transfers are likely to occur more often with these
		 * resources. */
		res->domains = RADEON_DOMAIN_GTT;
		break;
	case PIPE_USAGE_DYNAMIC:
	case PIPE_USAGE_DEFAULT:
	case PIPE_USAGE_IMMUTABLE:
	default:
		/* Not listing GTT here improves performance in some
		 * apps. */
		res->domains = RADEON_DOMAIN_VRAM;
		res->flags |= RADEON_FLAG_GTT_WC;
		break;
	}

	/* Tiled textures are unmappable. Always put them in VRAM. */
	if ((res->b.b.target != PIPE_BUFFER && !rtex->surface.is_linear) ||
	    res->flags & R600_RESOURCE_FLAG_UNMAPPABLE) {
		res->domains = RADEON_DOMAIN_VRAM;
		res->flags |= RADEON_FLAG_NO_CPU_ACCESS |
			 RADEON_FLAG_GTT_WC;
	}

	/* Displayable and shareable surfaces are not suballocated. */
	if (res->b.b.bind & (PIPE_BIND_SHARED | PIPE_BIND_SCANOUT))
		res->flags |= RADEON_FLAG_NO_SUBALLOC; /* shareable */
	else
		res->flags |= RADEON_FLAG_NO_INTERPROCESS_SHARING;

	if (rscreen->debug_flags & DBG_NO_WC)
		res->flags &= ~RADEON_FLAG_GTT_WC;

	/* Set expected VRAM and GART usage for the buffer. */
	res->vram_usage = 0;
	res->gart_usage = 0;

	if (res->domains & RADEON_DOMAIN_VRAM)
		res->vram_usage = size;
	else if (res->domains & RADEON_DOMAIN_GTT)
		res->gart_usage = size;
}

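/* (Re)allocate the winsys buffer backing a resource using the fields set up
 * by r600_init_resource_fields. */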
bool r600_alloc_resource(struct r600_common_screen *rscreen,
			 struct r600_resource *res)
{
	struct pb_buffer_lean *old_buf, *new_buf;

	/* Allocate a new resource. */
	new_buf = rscreen->ws->buffer_create(rscreen->ws, res->bo_size,
					     res->bo_alignment,
					     res->domains, res->flags);
	if (!new_buf) {
		return false;
	}

	/* Replace the pointer such that if res->buf wasn't NULL, it won't be
	 * NULL. This should prevent crashes with multiple contexts using
	 * the same buffer where one of the contexts invalidates it while
	 * the others are using it. */
	old_buf = res->buf;
	res->buf = new_buf; /* should be atomic */

	if (rscreen->info.r600_has_virtual_memory)
		res->gpu_address = rscreen->ws->buffer_get_virtual_address(res->buf);
	else
		res->gpu_address = 0;

	radeon_bo_reference(rscreen->ws, &old_buf, NULL);

	util_range_set_empty(&res->valid_buffer_range);

	/* Print debug information. */
	if (rscreen->debug_flags & DBG_VM && res->b.b.target == PIPE_BUFFER) {
		fprintf(stderr, "VM start=0x%"PRIX64"  end=0x%"PRIX64" | Buffer %"PRIu64" bytes\n",
			res->gpu_address, res->gpu_address + res->buf->size,
			res->buf->size);
	}
	return true;
}

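/* Destroy a buffer resource and release its winsys buffer. */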
void r600_buffer_destroy(struct pipe_screen *screen, struct pipe_resource *buf)
{
	struct r600_screen *rscreen = (struct r600_screen*)screen;
	struct r600_resource *rbuffer = r600_resource(buf);

	threaded_resource_deinit(buf);
	util_range_destroy(&rbuffer->valid_buffer_range);
	pipe_resource_reference((struct pipe_resource**)&rbuffer->immed_buffer, NULL);
	radeon_bo_reference(rscreen->b.ws, &rbuffer->buf, NULL);
	FREE(rbuffer);
}

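/* Discard the current buffer contents: reallocate the storage if mapping it
 * would stall on the GPU, otherwise just reset the valid range. Returns false
 * for buffers that can't be reallocated (shared, sparse, user pointers). */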
static bool
r600_invalidate_buffer(struct r600_common_context *rctx,
		       struct r600_resource *rbuffer)
{
	/* Shared buffers can't be reallocated. */
	if (rbuffer->b.is_shared)
		return false;

	/* Sparse buffers can't be reallocated. */
	if (rbuffer->flags & RADEON_FLAG_SPARSE)
		return false;

	/* In AMD_pinned_memory, the user pointer association only gets
	 * broken when the buffer is explicitly re-allocated.
	 */
	if (rbuffer->b.is_user_ptr)
		return false;

	/* Check if mapping this buffer would cause waiting for the GPU. */
	if (r600_rings_is_buffer_referenced(rctx, rbuffer->buf, RADEON_USAGE_READWRITE) ||
	    !rctx->ws->buffer_wait(rctx->ws, rbuffer->buf, 0, RADEON_USAGE_READWRITE)) {
		rctx->invalidate_buffer(&rctx->b, &rbuffer->b.b);
	} else {
		util_range_set_empty(&rbuffer->valid_buffer_range);
	}

	return true;
}

/* Replace the storage of dst with src. */
void r600_replace_buffer_storage(struct pipe_context *ctx,
				 struct pipe_resource *dst,
				 struct pipe_resource *src)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_resource *rdst = r600_resource(dst);
	struct r600_resource *rsrc = r600_resource(src);
	uint64_t old_gpu_address = rdst->gpu_address;

	radeon_bo_reference(rctx->ws, &rdst->buf, rsrc->buf);
	rdst->gpu_address = rsrc->gpu_address;
	rdst->b.b.bind = rsrc->b.b.bind;
	rdst->flags = rsrc->flags;

	assert(rdst->vram_usage == rsrc->vram_usage);
	assert(rdst->gart_usage == rsrc->gart_usage);
	assert(rdst->bo_size == rsrc->bo_size);
	assert(rdst->bo_alignment == rsrc->bo_alignment);
	assert(rdst->domains == rsrc->domains);

	rctx->rebind_buffer(ctx, dst, old_gpu_address);
}

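/* Invalidate the contents of a resource; currently only buffers are handled. */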
void r600_invalidate_resource(struct pipe_context *ctx,
			      struct pipe_resource *resource)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_resource *rbuffer = r600_resource(resource);

	/* We currently only do anything here for buffers */
	if (resource->target == PIPE_BUFFER)
		(void)r600_invalidate_buffer(rctx, rbuffer);
}

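/* Allocate a transfer object for a buffer mapping and return the mapped
 * pointer unchanged. */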
static void *r600_buffer_get_transfer(struct pipe_context *ctx,
				      struct pipe_resource *resource,
				      unsigned usage,
				      const struct pipe_box *box,
				      struct pipe_transfer **ptransfer,
				      void *data, struct r600_resource *staging,
				      unsigned offset)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_transfer *transfer;

	if (usage & TC_TRANSFER_MAP_THREADED_UNSYNC)
		transfer = slab_zalloc(&rctx->pool_transfers_unsync);
	else
		transfer = slab_zalloc(&rctx->pool_transfers);

	pipe_resource_reference(&transfer->b.b.resource, resource);
	transfer->b.b.usage = usage;
	transfer->b.b.box = *box;
	transfer->b.b.offset = offset;
	transfer->staging = staging;
	*ptransfer = &transfer->b.b;
	return data;
}

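/* Return true if this buffer copy can be done on the GPU (CP DMA, or the DMA
 * ring / streamout for dword-aligned copies). */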
static bool r600_can_dma_copy_buffer(struct r600_common_context *rctx,
				     unsigned dstx, unsigned srcx, unsigned size)
{
	bool dword_aligned = !(dstx % 4) && !(srcx % 4) && !(size % 4);

	return rctx->screen->has_cp_dma ||
	       (dword_aligned && (rctx->dma.cs.priv ||
				  rctx->screen->has_streamout));
}

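/* Map a buffer transfer. Depending on the usage flags, this maps the buffer
 * directly (possibly unsynchronized), discards its storage, or goes through a
 * staging buffer for write uploads and for reads of uncached memory. */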
void *r600_buffer_transfer_map(struct pipe_context *ctx,
                               struct pipe_resource *resource,
                               unsigned level,
                               unsigned usage,
                               const struct pipe_box *box,
                               struct pipe_transfer **ptransfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_common_screen *rscreen = (struct r600_common_screen*)ctx->screen;
	struct r600_resource *rbuffer = r600_resource(resource);
	uint8_t *data;

	if (r600_resource(resource)->compute_global_bo) {
		if ((data = r600_compute_global_transfer_map(ctx, resource, level, usage, box, ptransfer)))
			return data;
	}

	assert(box->x + box->width <= resource->width0);

	/* From GL_AMD_pinned_memory issues:
	 *
	 *     4) Is glMapBuffer on a shared buffer guaranteed to return the
	 *        same system address which was specified at creation time?
	 *
	 *        RESOLVED: NO. The GL implementation might return a different
	 *        virtual mapping of that memory, although the same physical
	 *        page will be used.
	 *
	 * So don't ever use staging buffers.
	 */
	if (rbuffer->b.is_user_ptr)
		usage |= PIPE_MAP_PERSISTENT;

	/* See if the buffer range being mapped has never been initialized,
	 * in which case it can be mapped unsynchronized. */
	if (!(usage & (PIPE_MAP_UNSYNCHRONIZED |
		       TC_TRANSFER_MAP_NO_INFER_UNSYNCHRONIZED)) &&
	    usage & PIPE_MAP_WRITE &&
	    !rbuffer->b.is_shared &&
	    !util_ranges_intersect(&rbuffer->valid_buffer_range, box->x, box->x + box->width)) {
		usage |= PIPE_MAP_UNSYNCHRONIZED;
	}

	/* If discarding the entire range, discard the whole resource instead. */
	if (usage & PIPE_MAP_DISCARD_RANGE &&
	    box->x == 0 && box->width == resource->width0) {
		usage |= PIPE_MAP_DISCARD_WHOLE_RESOURCE;
	}

	if (usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE &&
	    !(usage & (PIPE_MAP_UNSYNCHRONIZED |
		       TC_TRANSFER_MAP_NO_INVALIDATE))) {
		assert(usage & PIPE_MAP_WRITE);

		if (r600_invalidate_buffer(rctx, rbuffer)) {
			/* At this point, the buffer is always idle. */
			usage |= PIPE_MAP_UNSYNCHRONIZED;
		} else {
			/* Fall back to a temporary buffer. */
			usage |= PIPE_MAP_DISCARD_RANGE;
		}
	}

	if ((usage & PIPE_MAP_DISCARD_RANGE) &&
	    !(rscreen->debug_flags & DBG_NO_DISCARD_RANGE) &&
	    ((!(usage & (PIPE_MAP_UNSYNCHRONIZED |
			 PIPE_MAP_PERSISTENT)) &&
	      r600_can_dma_copy_buffer(rctx, box->x, 0, box->width)) ||
	     (rbuffer->flags & RADEON_FLAG_SPARSE))) {
		assert(usage & PIPE_MAP_WRITE);

		/* Check if mapping this buffer would cause waiting for the GPU.
		 */
		if (rbuffer->flags & RADEON_FLAG_SPARSE ||
		    r600_rings_is_buffer_referenced(rctx, rbuffer->buf, RADEON_USAGE_READWRITE) ||
		    !rctx->ws->buffer_wait(rctx->ws, rbuffer->buf, 0, RADEON_USAGE_READWRITE)) {
			/* Do a wait-free write-only transfer using a temporary buffer. */
			unsigned offset;
			struct r600_resource *staging = NULL;

			u_upload_alloc(ctx->stream_uploader, 0,
				       box->width + (box->x % R600_MAP_BUFFER_ALIGNMENT),
				       rctx->screen->info.tcc_cache_line_size,
				       &offset, (struct pipe_resource**)&staging,
				       (void**)&data);

			if (staging) {
				data += box->x % R600_MAP_BUFFER_ALIGNMENT;
				return r600_buffer_get_transfer(ctx, resource, usage, box,
								ptransfer, data, staging, offset);
			} else if (rbuffer->flags & RADEON_FLAG_SPARSE) {
				return NULL;
			}
		} else {
			/* At this point, the buffer is always idle (we checked it above). */
			usage |= PIPE_MAP_UNSYNCHRONIZED;
		}
	}
	/* Use a staging buffer in cached GTT for reads. */
	else if (((usage & PIPE_MAP_READ) &&
		  !(usage & PIPE_MAP_PERSISTENT) &&
		  (rbuffer->domains & RADEON_DOMAIN_VRAM ||
		   rbuffer->flags & RADEON_FLAG_GTT_WC) &&
		  r600_can_dma_copy_buffer(rctx, 0, box->x, box->width)) ||
		 (rbuffer->flags & RADEON_FLAG_SPARSE)) {
		struct r600_resource *staging;

		assert(!(usage & TC_TRANSFER_MAP_THREADED_UNSYNC));
		staging = (struct r600_resource*) pipe_buffer_create(
				ctx->screen, 0, PIPE_USAGE_STAGING,
				box->width + (box->x % R600_MAP_BUFFER_ALIGNMENT));
		if (staging) {
			/* Copy the VRAM buffer to the staging buffer. */
			rctx->dma_copy(ctx, &staging->b.b, 0,
				       box->x % R600_MAP_BUFFER_ALIGNMENT,
				       0, 0, resource, 0, box);

			data = r600_buffer_map_sync_with_rings(rctx, staging,
							       usage & ~PIPE_MAP_UNSYNCHRONIZED);
			if (!data) {
				r600_resource_reference(&staging, NULL);
				return NULL;
			}
			data += box->x % R600_MAP_BUFFER_ALIGNMENT;

			return r600_buffer_get_transfer(ctx, resource, usage, box,
							ptransfer, data, staging, 0);
		} else if (rbuffer->flags & RADEON_FLAG_SPARSE) {
			return NULL;
		}
	}

	data = r600_buffer_map_sync_with_rings(rctx, rbuffer, usage);
	if (!data) {
		return NULL;
	}
	data += box->x;

	return r600_buffer_get_transfer(ctx, resource, usage, box,
					ptransfer, data, NULL, 0);
}

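/* Copy the staging buffer (if any) back into the real buffer and mark the
 * flushed range as initialized. */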
static void r600_buffer_do_flush_region(struct pipe_context *ctx,
					struct pipe_transfer *transfer,
					const struct pipe_box *box)
{
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
	struct r600_resource *rbuffer = r600_resource(transfer->resource);

	if (rtransfer->staging) {
		struct pipe_resource *dst, *src;
		unsigned soffset;
		struct pipe_box dma_box;

		dst = transfer->resource;
		src = &rtransfer->staging->b.b;
		soffset = rtransfer->b.b.offset + box->x % R600_MAP_BUFFER_ALIGNMENT;

		u_box_1d(soffset, box->width, &dma_box);

		/* Copy the staging buffer into the original one. */
		ctx->resource_copy_region(ctx, dst, 0, box->x, 0, 0, src, 0, &dma_box);
	}

	util_range_add(&rbuffer->b.b, &rbuffer->valid_buffer_range, box->x,
		       box->x + box->width);
}

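/* Flush an explicitly-written region of a mapped buffer. */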
void r600_buffer_flush_region(struct pipe_context *ctx,
			      struct pipe_transfer *transfer,
			      const struct pipe_box *rel_box)
{
	unsigned required_usage = PIPE_MAP_WRITE |
				  PIPE_MAP_FLUSH_EXPLICIT;

	if (r600_resource(transfer->resource)->compute_global_bo)
		return;

	if ((transfer->usage & required_usage) == required_usage) {
		struct pipe_box box;

		u_box_1d(transfer->box.x + rel_box->x, rel_box->width, &box);
		r600_buffer_do_flush_region(ctx, transfer, &box);
	}
}

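/* Unmap a buffer transfer, flushing pending writes and freeing the transfer. */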
void r600_buffer_transfer_unmap(struct pipe_context *ctx,
				struct pipe_transfer *transfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
	struct r600_resource *rtransferr = r600_resource(transfer->resource);

	if (rtransferr->compute_global_bo && !rtransferr->b.is_user_ptr) {
		r600_compute_global_transfer_unmap(ctx, transfer);
		return;
	}

	if (transfer->usage & PIPE_MAP_WRITE &&
	    !(transfer->usage & PIPE_MAP_FLUSH_EXPLICIT))
		r600_buffer_do_flush_region(ctx, transfer, &transfer->box);

	r600_resource_reference(&rtransfer->staging, NULL);
	assert(rtransfer->b.staging == NULL); /* for threaded context only */
	pipe_resource_reference(&transfer->resource, NULL);

	/* Don't use pool_transfers_unsync. We are always in the driver
	 * thread. */
	slab_free(&rctx->pool_transfers, transfer);
}

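/* Write a range of data into a buffer through a transfer map. */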
void r600_buffer_subdata(struct pipe_context *ctx,
			 struct pipe_resource *buffer,
			 unsigned usage, unsigned offset,
			 unsigned size, const void *data)
{
	struct pipe_transfer *transfer = NULL;
	struct pipe_box box;
	uint8_t *map = NULL;

	usage |= PIPE_MAP_WRITE;

	if (!(usage & PIPE_MAP_DIRECTLY))
		usage |= PIPE_MAP_DISCARD_RANGE;

	u_box_1d(offset, size, &box);
	map = r600_buffer_transfer_map(ctx, buffer, 0, usage, &box, &transfer);
	if (!map)
		return;

	memcpy(map, data, size);
	r600_buffer_transfer_unmap(ctx, transfer);
}

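/* Allocate and initialize the CPU-side r600_resource structure for a buffer;
 * no GPU memory is allocated here. */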
static struct r600_resource *
r600_alloc_buffer_struct(struct pipe_screen *screen,
			 const struct pipe_resource *templ)
{
	struct r600_resource *rbuffer;

	rbuffer = MALLOC_STRUCT(r600_resource);

	rbuffer->b.b = *templ;
	rbuffer->b.b.next = NULL;
	pipe_reference_init(&rbuffer->b.b.reference, 1);
	rbuffer->b.b.screen = screen;

	threaded_resource_init(&rbuffer->b.b, false);

	rbuffer->buf = NULL;
	rbuffer->bind_history = 0;
	rbuffer->immed_buffer = NULL;
	rbuffer->compute_global_bo = false;
	util_range_init(&rbuffer->valid_buffer_range);
	return rbuffer;
}

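/* Create a buffer resource and allocate its GPU storage. */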
struct pipe_resource *r600_buffer_create(struct pipe_screen *screen,
					 const struct pipe_resource *templ,
					 unsigned alignment)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct r600_resource *rbuffer = r600_alloc_buffer_struct(screen, templ);

	r600_init_resource_fields(rscreen, rbuffer, templ->width0, alignment);

	if (templ->flags & PIPE_RESOURCE_FLAG_SPARSE)
		rbuffer->flags |= RADEON_FLAG_SPARSE;

	if (!r600_alloc_resource(rscreen, rbuffer)) {
		FREE(rbuffer);
		return NULL;
	}
	return &rbuffer->b.b;
}

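/* Convenience wrapper around r600_buffer_create for internal buffers that
 * need an explicit alignment. */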
struct pipe_resource *r600_aligned_buffer_create(struct pipe_screen *screen,
						 unsigned flags,
						 unsigned usage,
						 unsigned size,
						 unsigned alignment)
{
	struct pipe_resource buffer;

	memset(&buffer, 0, sizeof buffer);
	buffer.target = PIPE_BUFFER;
	buffer.format = PIPE_FORMAT_R8_UNORM;
	buffer.bind = 0;
	buffer.usage = usage;
	buffer.flags = flags;
	buffer.width0 = size;
	buffer.height0 = 1;
	buffer.depth0 = 1;
	buffer.array_size = 1;
	return r600_buffer_create(screen, &buffer, alignment);
}

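/* Wrap a user memory pointer in a buffer resource (GL_AMD_pinned_memory). */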
struct pipe_resource *
r600_buffer_from_user_memory(struct pipe_screen *screen,
			     const struct pipe_resource *templ,
			     void *user_memory)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct radeon_winsys *ws = rscreen->ws;
	struct r600_resource *rbuffer;

	if ((templ->bind & PIPE_BIND_GLOBAL) &&
	    (templ->bind & PIPE_BIND_COMPUTE_RESOURCE)) {
		rbuffer = r600_resource(r600_compute_global_buffer_create(screen, templ));
		((struct r600_resource_global *)rbuffer)->chunk->real_buffer = rbuffer;
	} else {
		rbuffer = r600_alloc_buffer_struct(screen, templ);
	}

	rbuffer->domains = RADEON_DOMAIN_GTT;
	rbuffer->flags = 0;
	rbuffer->b.is_user_ptr = true;
	util_range_add(&rbuffer->b.b, &rbuffer->valid_buffer_range, 0, templ->width0);
	util_range_add(&rbuffer->b.b, &rbuffer->b.valid_buffer_range, 0, templ->width0);

	/* Convert a user pointer to a buffer. */
	rbuffer->buf = ws->buffer_from_ptr(ws, user_memory, templ->width0, 0);
	if (!rbuffer->buf) {
		FREE(rbuffer);
		return NULL;
	}

	if (rscreen->info.r600_has_virtual_memory)
		rbuffer->gpu_address =
			ws->buffer_get_virtual_address(rbuffer->buf);
	else
		rbuffer->gpu_address = 0;

	rbuffer->vram_usage = 0;
	rbuffer->gart_usage = templ->width0;

	return &rbuffer->b.b;
}