/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 *
 */

#include "util/format/u_format.h"
#include "util/u_inlines.h"
#include "util/u_surface.h"

#include "nv_m2mf.xml.h"
#include "nv_object.xml.h"
#include "nv30/nv30_screen.h"
#include "nv30/nv30_context.h"
#include "nv30/nv30_resource.h"
#include "nv30/nv30_transfer.h"
#include "nv30/nv30_winsys.h"

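/* Return the byte offset of the given mip level / layer within the miptree's
 * buffer object. Cube maps keep a full mip chain per face (faces are
 * layer_size apart); for other targets, layers within a level are
 * zslice_size apart. */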
static inline unsigned
layer_offset(struct pipe_resource *pt, unsigned level, unsigned layer)
{
   struct nv30_miptree *mt = nv30_miptree(pt);
   struct nv30_miptree_level *lvl = &mt->level[level];

   if (pt->target == PIPE_TEXTURE_CUBE)
      return (layer * mt->layer_size) + lvl->offset;

   return lvl->offset + (layer * lvl->zslice_size);
}

bool
nv30_miptree_get_handle(struct pipe_screen *pscreen,
                        struct pipe_context *context,
                        struct pipe_resource *pt,
                        struct winsys_handle *handle,
                        unsigned usage)
{
   if (pt->target == PIPE_BUFFER)
      return false;

   struct nv30_miptree *mt = nv30_miptree(pt);
   unsigned stride;

   if (!mt || !mt->base.bo)
      return false;

   stride = mt->level[0].pitch;

   return nouveau_screen_bo_get_handle(pscreen, mt->base.bo, stride, handle);
}

void
nv30_miptree_destroy(struct pipe_screen *pscreen, struct pipe_resource *pt)
{
   struct nv30_miptree *mt = nv30_miptree(pt);

   nouveau_bo_ref(NULL, &mt->base.bo);
   FREE(mt);
}

struct nv30_transfer {
   struct pipe_transfer base;
   struct nv30_rect img;
   struct nv30_rect tmp;
   unsigned nblocksx;
   unsigned nblocksy;
};

static inline struct nv30_transfer *
nv30_transfer(struct pipe_transfer *ptx)
{
   return (struct nv30_transfer *)ptx;
}

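/* Describe a 2D region of one level/slice of a miptree as an nv30_rect, in
 * units of format blocks, ready to be handed to nv30_transfer_rect(). */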
static inline void
define_rect(struct pipe_resource *pt, unsigned level, unsigned z,
            unsigned x, unsigned y, unsigned w, unsigned h,
            struct nv30_rect *rect)
{
   struct nv30_miptree *mt = nv30_miptree(pt);
   struct nv30_miptree_level *lvl = &mt->level[level];

   rect->w = u_minify(pt->width0, level) << mt->ms_x;
   rect->w = util_format_get_nblocksx(pt->format, rect->w);
   rect->h = u_minify(pt->height0, level) << mt->ms_y;
   rect->h = util_format_get_nblocksy(pt->format, rect->h);
   rect->d = 1;
   rect->z = 0;
   if (mt->swizzled) {
      if (pt->target == PIPE_TEXTURE_3D) {
         rect->d = u_minify(pt->depth0, level);
         rect->z = z; z = 0;
      }
      rect->pitch = 0;
   } else {
      rect->pitch = lvl->pitch;
   }

   rect->bo = mt->base.bo;
   rect->domain = NOUVEAU_BO_VRAM;
   rect->offset = layer_offset(pt, level, z);
   rect->cpp = util_format_get_blocksize(pt->format);

   rect->x0 = util_format_get_nblocksx(pt->format, x) << mt->ms_x;
   rect->y0 = util_format_get_nblocksy(pt->format, y) << mt->ms_y;
   rect->x1 = rect->x0 + (util_format_get_nblocksx(pt->format, w) << mt->ms_x);
   rect->y1 = rect->y0 + (util_format_get_nblocksy(pt->format, h) << mt->ms_y);

   /* XXX There's some indication that swizzled formats > 4 bytes are treated
    * differently. However that only applies to RGBA16_FLOAT, RGBA32_FLOAT,
    * and the DXT* formats. The former aren't properly supported yet, and the
    * latter avoid swizzled layouts.

   if (mt->swizzled && rect->cpp > 4) {
      unsigned scale = rect->cpp / 4;
      rect->w *= scale;
      rect->x0 *= scale;
      rect->x1 *= scale;
      rect->cpp = 4;
   }
   */
}

void
nv30_resource_copy_region(struct pipe_context *pipe,
                          struct pipe_resource *dstres, unsigned dst_level,
                          unsigned dstx, unsigned dsty, unsigned dstz,
                          struct pipe_resource *srcres, unsigned src_level,
                          const struct pipe_box *src_box)
{
   struct nv30_context *nv30 = nv30_context(pipe);
   struct nv30_rect src, dst;

   if (dstres->target == PIPE_BUFFER && srcres->target == PIPE_BUFFER) {
      nouveau_copy_buffer(&nv30->base,
                          nv04_resource(dstres), dstx,
                          nv04_resource(srcres), src_box->x, src_box->width);
      return;
   }

   define_rect(srcres, src_level, src_box->z, src_box->x, src_box->y,
               src_box->width, src_box->height, &src);
   define_rect(dstres, dst_level, dstz, dstx, dsty,
               src_box->width, src_box->height, &dst);

   nv30_transfer_rect(nv30, NEAREST, &src, &dst);
}

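/* Resolve a multisampled color surface to a single-sampled one by scaling it
 * down with BILINEAR transfers, processed tile by tile. */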
static void
nv30_resource_resolve(struct nv30_context *nv30,
                      const struct pipe_blit_info *info)
{
   struct nv30_miptree *src_mt = nv30_miptree(info->src.resource);
   struct nv30_rect src, dst;
   unsigned x, x0, x1, y, y1, w, h;

   define_rect(info->src.resource, 0, info->src.box.z, info->src.box.x,
      info->src.box.y, info->src.box.width, info->src.box.height, &src);
   define_rect(info->dst.resource, 0, info->dst.box.z, info->dst.box.x,
      info->dst.box.y, info->dst.box.width, info->dst.box.height, &dst);

   x0 = src.x0;
   x1 = src.x1;
   y1 = src.y1;

   /* On nv3x we must use sifm which is restricted to 1024x1024 tiles */
   for (y = src.y0; y < y1; y += h) {
      h = y1 - y;
      if (h > 1024)
         h = 1024;

      src.y0 = 0;
      src.y1 = h;
      src.h = h;

      dst.y1 = dst.y0 + (h >> src_mt->ms_y);
      dst.h = h >> src_mt->ms_y;

      for (x = x0; x < x1; x += w) {
         w = x1 - x;
         if (w > 1024)
            w = 1024;

         src.offset = y * src.pitch + x * src.cpp;
         src.x0 = 0;
         src.x1 = w;
         src.w = w;

         dst.offset = (y >> src_mt->ms_y) * dst.pitch +
                      (x >> src_mt->ms_x) * dst.cpp;
         dst.x1 = dst.x0 + (w >> src_mt->ms_x);
         dst.w = w >> src_mt->ms_x;

         nv30_transfer_rect(nv30, BILINEAR, &src, &dst);
      }
   }
}

void
nv30_blit(struct pipe_context *pipe,
          const struct pipe_blit_info *blit_info)
{
   struct nv30_context *nv30 = nv30_context(pipe);
   struct pipe_blit_info info = *blit_info;

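   /* Color-only multisample resolves go through the dedicated scaled-blit
    * path; everything else tries a plain region copy first and then falls
    * back to the generic u_blitter path. */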
   if (info.src.resource->nr_samples > 1 &&
       info.dst.resource->nr_samples <= 1 &&
       !util_format_is_depth_or_stencil(info.src.resource->format) &&
       !util_format_is_pure_integer(info.src.resource->format)) {
      nv30_resource_resolve(nv30, blit_info);
      return;
   }

   if (util_try_blit_via_copy_region(pipe, &info, nv30->render_cond_query != NULL)) {
      return; /* done */
   }

   if (info.mask & PIPE_MASK_S) {
      debug_printf("nv30: cannot blit stencil, skipping\n");
      info.mask &= ~PIPE_MASK_S;
   }

   if (!util_blitter_is_blit_supported(nv30->blitter, &info)) {
      debug_printf("nv30: blit unsupported %s -> %s\n",
                   util_format_short_name(info.src.resource->format),
                   util_format_short_name(info.dst.resource->format));
      return;
   }

   /* XXX turn off occlusion queries */

   util_blitter_save_vertex_buffers(nv30->blitter, nv30->vtxbuf, nv30->num_vtxbufs);
   util_blitter_save_vertex_elements(nv30->blitter, nv30->vertex);
   util_blitter_save_vertex_shader(nv30->blitter, nv30->vertprog.program);
   util_blitter_save_rasterizer(nv30->blitter, nv30->rast);
   util_blitter_save_viewport(nv30->blitter, &nv30->viewport);
   util_blitter_save_scissor(nv30->blitter, &nv30->scissor);
   util_blitter_save_fragment_shader(nv30->blitter, nv30->fragprog.program);
   util_blitter_save_blend(nv30->blitter, nv30->blend);
   util_blitter_save_depth_stencil_alpha(nv30->blitter,
                                         nv30->zsa);
   util_blitter_save_stencil_ref(nv30->blitter, &nv30->stencil_ref);
   util_blitter_save_sample_mask(nv30->blitter, nv30->sample_mask, 0);
   util_blitter_save_framebuffer(nv30->blitter, &nv30->framebuffer);
   util_blitter_save_fragment_sampler_states(nv30->blitter,
                                             nv30->fragprog.num_samplers,
                                             (void**)nv30->fragprog.samplers);
   util_blitter_save_fragment_sampler_views(nv30->blitter,
                                            nv30->fragprog.num_textures, nv30->fragprog.textures);
   util_blitter_save_render_condition(nv30->blitter, nv30->render_cond_query,
                                      nv30->render_cond_cond, nv30->render_cond_mode);
   util_blitter_blit(nv30->blitter, &info, NULL);
}

void
nv30_flush_resource(struct pipe_context *pipe,
                    struct pipe_resource *resource)
{
}

void *
nv30_miptree_transfer_map(struct pipe_context *pipe, struct pipe_resource *pt,
                          unsigned level, unsigned usage,
                          const struct pipe_box *box,
                          struct pipe_transfer **ptransfer)
{
   struct nv30_context *nv30 = nv30_context(pipe);
   struct nouveau_device *dev = nv30->screen->base.device;
   struct nv30_miptree *mt = nv30_miptree(pt);
   struct nv30_transfer *tx;
   unsigned access = 0;
   int ret;

   tx = CALLOC_STRUCT(nv30_transfer);
   if (!tx)
      return NULL;
   pipe_resource_reference(&tx->base.resource, pt);
   tx->base.level = level;
   tx->base.usage = usage;
   tx->base.box = *box;
   tx->base.stride = align(util_format_get_nblocksx(pt->format, box->width) *
                           util_format_get_blocksize(pt->format), 64);
   tx->base.layer_stride = util_format_get_nblocksy(pt->format, box->height) *
                           tx->base.stride;

   tx->nblocksx = util_format_get_nblocksx(pt->format, box->width);
   tx->nblocksy = util_format_get_nblocksy(pt->format, box->height);

   define_rect(pt, level, box->z, box->x, box->y,
               box->width, box->height, &tx->img);

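   /* Transfers go through a linear staging buffer in GART; the blit engine
    * copies between it and the (possibly swizzled) miptree in VRAM. */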
   ret = nouveau_bo_new(dev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP, 0,
                        tx->base.layer_stride * tx->base.box.depth, NULL,
                        &tx->tmp.bo);
   if (ret) {
      pipe_resource_reference(&tx->base.resource, NULL);
      FREE(tx);
      return NULL;
   }

   tx->tmp.domain = NOUVEAU_BO_GART;
   tx->tmp.offset = 0;
   tx->tmp.pitch = tx->base.stride;
   tx->tmp.cpp = tx->img.cpp;
   tx->tmp.w = tx->nblocksx;
   tx->tmp.h = tx->nblocksy;
   tx->tmp.d = 1;
   tx->tmp.x0 = 0;
   tx->tmp.y0 = 0;
   tx->tmp.x1 = tx->tmp.w;
   tx->tmp.y1 = tx->tmp.h;
   tx->tmp.z = 0;

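   /* For reads, pre-fill the staging buffer by blitting each depth slice of
    * the requested box out of the miptree. */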
   if (usage & PIPE_MAP_READ) {
      bool is_3d = mt->base.base.target == PIPE_TEXTURE_3D;
      unsigned offset = tx->img.offset;
      unsigned z = tx->img.z;
      unsigned i;
      for (i = 0; i < box->depth; ++i) {
         nv30_transfer_rect(nv30, NEAREST, &tx->img, &tx->tmp);
         if (is_3d && mt->swizzled)
            tx->img.z++;
         else if (is_3d)
            tx->img.offset += mt->level[level].zslice_size;
         else
            tx->img.offset += mt->layer_size;
         tx->tmp.offset += tx->base.layer_stride;
      }
      tx->img.z = z;
      tx->img.offset = offset;
      tx->tmp.offset = 0;
   }

   if (tx->tmp.bo->map) {
      *ptransfer = &tx->base;
      return tx->tmp.bo->map;
   }

   if (usage & PIPE_MAP_READ)
      access |= NOUVEAU_BO_RD;
   if (usage & PIPE_MAP_WRITE)
      access |= NOUVEAU_BO_WR;

   ret = BO_MAP(nv30->base.screen, tx->tmp.bo, access, nv30->base.client);
   if (ret) {
      pipe_resource_reference(&tx->base.resource, NULL);
      FREE(tx);
      return NULL;
   }

   *ptransfer = &tx->base;
   return tx->tmp.bo->map;
}

void
nv30_miptree_transfer_unmap(struct pipe_context *pipe,
                            struct pipe_transfer *ptx)
{
   struct nv30_context *nv30 = nv30_context(pipe);
   struct nv30_transfer *tx = nv30_transfer(ptx);
   struct nv30_miptree *mt = nv30_miptree(tx->base.resource);
   unsigned i;

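   /* For writes, copy each staged slice back into the miptree before the
    * staging buffer is released. */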
   if (ptx->usage & PIPE_MAP_WRITE) {
      bool is_3d = mt->base.base.target == PIPE_TEXTURE_3D;
      for (i = 0; i < tx->base.box.depth; ++i) {
         nv30_transfer_rect(nv30, NEAREST, &tx->tmp, &tx->img);
         if (is_3d && mt->swizzled)
            tx->img.z++;
         else if (is_3d)
            tx->img.offset += mt->level[tx->base.level].zslice_size;
         else
            tx->img.offset += mt->layer_size;
         tx->tmp.offset += tx->base.layer_stride;
      }

      /* Allow the copies above to finish executing before freeing the source */
      nouveau_fence_work(nv30->base.fence,
                         nouveau_fence_unref_bo, tx->tmp.bo);
   } else {
      nouveau_bo_ref(NULL, &tx->tmp.bo);
   }
   pipe_resource_reference(&ptx->resource, NULL);
   FREE(tx);
}

struct pipe_resource *
nv30_miptree_create(struct pipe_screen *pscreen,
                    const struct pipe_resource *tmpl)
{
   struct nouveau_device *dev = nouveau_screen(pscreen)->device;
   struct nv30_miptree *mt = CALLOC_STRUCT(nv30_miptree);
   struct pipe_resource *pt = &mt->base.base;
   unsigned blocksz, size;
   unsigned w, h, d, l;
   int ret;

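   /* Pick the multisample mode bits and the per-axis coordinate shifts for
    * the requested sample count. */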
   switch (tmpl->nr_samples) {
   case 4:
      mt->ms_mode = 0x00004000;
      mt->ms_x = 1;
      mt->ms_y = 1;
      break;
   case 2:
      mt->ms_mode = 0x00003000;
      mt->ms_x = 1;
      mt->ms_y = 0;
      break;
   default:
      mt->ms_mode = 0x00000000;
      mt->ms_x = 0;
      mt->ms_y = 0;
      break;
   }

   *pt = *tmpl;
   pipe_reference_init(&pt->reference, 1);
   pt->screen = pscreen;

   w = pt->width0 << mt->ms_x;
   h = pt->height0 << mt->ms_y;
   d = (pt->target == PIPE_TEXTURE_3D) ? pt->depth0 : 1;
   blocksz = util_format_get_blocksize(pt->format);

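   /* Rectangle textures, scanout buffers, NPOT sizes and multisampled
    * surfaces get a linear layout with one pitch shared by all levels;
    * everything else may be laid out swizzled. */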
   if ((pt->target == PIPE_TEXTURE_RECT) ||
       (pt->bind & PIPE_BIND_SCANOUT) ||
       !util_is_power_of_two_or_zero(pt->width0) ||
       !util_is_power_of_two_or_zero(pt->height0) ||
       !util_is_power_of_two_or_zero(pt->depth0) ||
       mt->ms_mode) {
      mt->uniform_pitch = util_format_get_nblocksx(pt->format, w) * blocksz;
      mt->uniform_pitch = align(mt->uniform_pitch, 64);
      if (pt->bind & PIPE_BIND_SCANOUT) {
         struct nv30_screen *screen = nv30_screen(pscreen);
         int pitch_align = MAX2(
               screen->eng3d->oclass >= NV40_3D_CLASS ? 1024 : 256,
               /* round_down_pow2(mt->uniform_pitch / 4) */
               1 << (util_last_bit(mt->uniform_pitch / 4) - 1));
         mt->uniform_pitch = align(mt->uniform_pitch, pitch_align);
      }
   }

   if (util_format_is_compressed(pt->format)) {
      // Compressed (DXT) formats are packed tightly. We don't mark them as
      // swizzled, since their layout is largely linear. However we do end up
      // omitting the LINEAR flag when texturing them, as the levels are not
      // uniformly sized (for POT sizes).
   } else if (!mt->uniform_pitch) {
      mt->swizzled = true;
   }

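   /* Walk the mip chain and record each level's offset, pitch and z-slice
    * size; 'size' accumulates the total for one layer. */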
   size = 0;
   for (l = 0; l <= pt->last_level; l++) {
      struct nv30_miptree_level *lvl = &mt->level[l];
      unsigned nbx = util_format_get_nblocksx(pt->format, w);
      unsigned nby = util_format_get_nblocksy(pt->format, h);

      lvl->offset = size;
      lvl->pitch = mt->uniform_pitch;
      if (!lvl->pitch)
         lvl->pitch = nbx * blocksz;

      lvl->zslice_size = lvl->pitch * nby;
      size += lvl->zslice_size * d;

      w = u_minify(w, 1);
      h = u_minify(h, 1);
      d = u_minify(d, 1);
   }

   mt->layer_size = size;
   if (pt->target == PIPE_TEXTURE_CUBE) {
      if (!mt->uniform_pitch)
         mt->layer_size = align(mt->layer_size, 128);
      size = mt->layer_size * 6;
   }

   ret = nouveau_bo_new(dev, NOUVEAU_BO_VRAM, 256, size, NULL, &mt->base.bo);
   if (ret) {
      FREE(mt);
      return NULL;
   }

   mt->base.domain = NOUVEAU_BO_VRAM;
   return &mt->base.base;
}

struct pipe_resource *
nv30_miptree_from_handle(struct pipe_screen *pscreen,
                         const struct pipe_resource *tmpl,
                         struct winsys_handle *handle)
{
   struct nv30_miptree *mt;
   unsigned stride;

   /* only supports 2D, non-mipmapped textures for the moment */
   if ((tmpl->target != PIPE_TEXTURE_2D &&
        tmpl->target != PIPE_TEXTURE_RECT) ||
       tmpl->last_level != 0 ||
       tmpl->depth0 != 1 ||
       tmpl->array_size > 1)
      return NULL;

   mt = CALLOC_STRUCT(nv30_miptree);
   if (!mt)
      return NULL;

   mt->base.bo = nouveau_screen_bo_from_handle(pscreen, handle, &stride);
   if (mt->base.bo == NULL) {
      FREE(mt);
      return NULL;
   }

   mt->base.base = *tmpl;
   pipe_reference_init(&mt->base.base.reference, 1);
   mt->base.base.screen = pscreen;
   mt->uniform_pitch = stride;
   mt->level[0].pitch = mt->uniform_pitch;
   mt->level[0].offset = 0;

   /* no need to adjust bo reference count */
   return &mt->base.base;
}

struct pipe_surface *
nv30_miptree_surface_new(struct pipe_context *pipe,
                         struct pipe_resource *pt,
                         const struct pipe_surface *tmpl)
{
   struct nv30_miptree *mt = nv30_miptree(pt); /* guaranteed */
   struct nv30_surface *ns;
   struct pipe_surface *ps;
   struct nv30_miptree_level *lvl = &mt->level[tmpl->u.tex.level];

   ns = CALLOC_STRUCT(nv30_surface);
   if (!ns)
      return NULL;
   ps = &ns->base;

   pipe_reference_init(&ps->reference, 1);
   pipe_resource_reference(&ps->texture, pt);
   ps->context = pipe;
   ps->format = tmpl->format;
   ps->u.tex.level = tmpl->u.tex.level;
   ps->u.tex.first_layer = tmpl->u.tex.first_layer;
   ps->u.tex.last_layer = tmpl->u.tex.last_layer;

   ns->width = u_minify(pt->width0, ps->u.tex.level);
   ns->height = u_minify(pt->height0, ps->u.tex.level);
   ns->depth = ps->u.tex.last_layer - ps->u.tex.first_layer + 1;
   ns->offset = layer_offset(pt, ps->u.tex.level, ps->u.tex.first_layer);
   if (mt->swizzled)
      ns->pitch = 4096; /* random, just something the hw won't reject.. */
   else
      ns->pitch = lvl->pitch;

   /* comment says these are going to be removed, but they're used by the st */
   ps->width = ns->width;
   ps->height = ns->height;
   return ps;
}

void
nv30_miptree_surface_del(struct pipe_context *pipe, struct pipe_surface *ps)
{
   struct nv30_surface *ns = nv30_surface(ps);

   pipe_resource_reference(&ps->texture, NULL);
   FREE(ns);
}