2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
28 #include <pipe/p_screen.h>
29 #include <util/u_format.h>
30 #include <util/u_format_s3tc.h>
31 #include <util/u_math.h>
32 #include <util/u_inlines.h>
33 #include <util/u_memory.h>
34 #include "state_tracker/drm_driver.h"
35 #include "pipebuffer/pb_buffer.h"
36 #include "r600_pipe.h"
37 #include "r600_resource.h"
38 #include "r600_state_inlines.h"
40 #include "r600_formats.h"
42 /* Copy from a full GPU texture to a transfer's staging one. */
43 static void r600_copy_to_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
45 struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
46 struct pipe_resource *texture = transfer->resource;
48 ctx->resource_copy_region(ctx, rtransfer->staging_texture,
49 0, 0, 0, 0, texture, transfer->level,
54 /* Copy from a transfer's staging texture to a full GPU one. */
55 static void r600_copy_from_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
57 struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
58 struct pipe_resource *texture = transfer->resource;
61 sbox.x = sbox.y = sbox.z = 0;
62 sbox.width = transfer->box.width;
63 sbox.height = transfer->box.height;
64 /* XXX that might be wrong */
66 ctx->resource_copy_region(ctx, texture, transfer->level,
67 transfer->box.x, transfer->box.y, transfer->box.z,
68 rtransfer->staging_texture,
71 ctx->flush(ctx, NULL);
74 unsigned r600_texture_get_offset(struct r600_resource_texture *rtex,
75 unsigned level, unsigned layer)
77 unsigned offset = rtex->offset[level];
79 switch (rtex->resource.b.b.b.target) {
81 case PIPE_TEXTURE_CUBE:
83 return offset + layer * rtex->layer_size[level];
/* Pitch (width) alignment, in blocks, that a texture of the given format
 * must obey under the given array (tiling) mode.  The alignment is derived
 * from the chip's tiling group size and bank count.
 *
 * NOTE(review): this listing is missing lines (see the gaps in the embedded
 * numbering): the array_mode parameter, the p_align declaration, the switch
 * header, the per-case break statements and the final return were all lost.
 */
87 static unsigned r600_get_block_alignment(struct pipe_screen *screen,
88 					 enum pipe_format format,
91 	struct r600_screen* rscreen = (struct r600_screen *)screen;
92 	unsigned pixsize = util_format_get_blocksize(format);
/* 1D tiled: pitch aligned to the micro-tile width (group_bytes/8/pixsize). */
96 	case V_038000_ARRAY_1D_TILED_THIN1:
98 		((rscreen->tiling_info->group_bytes / 8 / pixsize)));
/* 2D tiled: additionally aligned across all banks (macro-tile width). */
100 	case V_038000_ARRAY_2D_TILED_THIN1:
101 	p_align = MAX2(rscreen->tiling_info->num_banks,
102 		(((rscreen->tiling_info->group_bytes / 8 / pixsize)) *
103 		rscreen->tiling_info->num_banks)) * 8;
/* Linear-aligned: at least 64 pixels, or one tiling group worth. */
105 	case V_038000_ARRAY_LINEAR_ALIGNED:
106 	p_align = MAX2(64, rscreen->tiling_info->group_bytes / pixsize);
/* Linear-general (and presumably the default case): one group worth. */
108 	case V_038000_ARRAY_LINEAR_GENERAL:
110 	p_align = rscreen->tiling_info->group_bytes / pixsize;
/* Height alignment, in block rows, required by the given array mode.
 * 2D tiling needs num_channels * 8 rows; the remaining modes' values
 * (presumably 8 for 1D/linear-aligned and 1 for linear-general) were
 * lost in the listing gaps, as were the array_mode parameter, the
 * h_align declaration, breaks and the final return.
 */
116 static unsigned r600_get_height_alignment(struct pipe_screen *screen,
119 	struct r600_screen* rscreen = (struct r600_screen *)screen;
122 	switch (array_mode) {
/* Macro-tile height spans all channels. */
123 	case V_038000_ARRAY_2D_TILED_THIN1:
124 	h_align = rscreen->tiling_info->num_channels * 8;
126 	case V_038000_ARRAY_1D_TILED_THIN1:
127 	case V_038000_ARRAY_LINEAR_ALIGNED:
130 	case V_038000_ARRAY_LINEAR_GENERAL:
/* Base-address alignment, in bytes, for an image with the given format and
 * array mode.  For 2D tiling the base must hold at least one full macro-tile
 * (banks x channels x 8x8 pixels) and one aligned pitch-row; all other
 * modes align to the tiling group size.
 *
 * NOTE(review): listing gaps — b_align declaration, breaks and the final
 * `return b_align;` were lost.
 */
138 static unsigned r600_get_base_alignment(struct pipe_screen *screen,
139 				enum pipe_format format,
142 	struct r600_screen* rscreen = (struct r600_screen *)screen;
143 	unsigned pixsize = util_format_get_blocksize(format);
144 	int p_align = r600_get_block_alignment(screen, format, array_mode);
145 	int h_align = r600_get_height_alignment(screen, array_mode);
148 	switch (array_mode) {
149 	case V_038000_ARRAY_2D_TILED_THIN1:
/* Whole macro-tile, or one fully aligned row of blocks, whichever is larger. */
150 	b_align = MAX2(rscreen->tiling_info->num_banks * rscreen->tiling_info->num_channels * 8 * 8 * pixsize,
151 		p_align * pixsize * h_align);
153 	case V_038000_ARRAY_1D_TILED_THIN1:
154 	case V_038000_ARRAY_LINEAR_ALIGNED:
155 	case V_038000_ARRAY_LINEAR_GENERAL:
157 	b_align = rscreen->tiling_info->group_bytes;
/* Width/height of mipmap level 'level' of a base dimension 'size'.
 *
 * u_minify halves (and clamps to >= 1); non-base levels are additionally
 * rounded up to a power of two, which the r600 hardware layout expects.
 *
 * NOTE(review): braces, the val declaration, the `level > 0` guard and
 * the return were lost in the listing; reconstructed from the numbering.
 */
static unsigned mip_minify(unsigned size, unsigned level)
{
	unsigned val;
	val = u_minify(size, level);
	if (level > 0)
		val = util_next_power_of_two(val);
	return val;
}
/* Number of blocks per row (aligned pitch in blocks) for one mip level.
 * An imported pitch_override (bytes) wins over the computed value.
 *
 * NOTE(review): listing gaps — the level parameter, braces and the final
 * `return nblocksx;` were lost.
 */
172 static unsigned r600_texture_get_nblocksx(struct pipe_screen *screen,
173 				struct r600_resource_texture *rtex,
176 	struct pipe_resource *ptex = &rtex->resource.b.b.b;
177 	unsigned nblocksx, block_align, width;
178 	unsigned blocksize = util_format_get_blocksize(ptex->format);
/* Imported textures carry their own pitch; trust it. */
180 	if (rtex->pitch_override)
181 	return rtex->pitch_override / blocksize;
183 	width = mip_minify(ptex->width0, level);
184 	nblocksx = util_format_get_nblocksx(ptex->format, width);
/* Round the pitch up to the tiling-mode requirement for this level. */
186 	block_align = r600_get_block_alignment(screen, ptex->format,
187 			rtex->array_mode[level]);
188 	nblocksx = align(nblocksx, block_align);
/* Number of block rows (aligned height in blocks) for one mip level,
 * rounded up to the tiling-mode height alignment.
 *
 * NOTE(review): listing gaps — the level parameter, braces and the final
 * `return height;` were lost.
 */
192 static unsigned r600_texture_get_nblocksy(struct pipe_screen *screen,
193 				struct r600_resource_texture *rtex,
196 	struct pipe_resource *ptex = &rtex->resource.b.b.b;
197 	unsigned height, tile_height;
199 	height = mip_minify(ptex->height0, level);
200 	height = util_format_get_nblocksy(ptex->format, height);
201 	tile_height = r600_get_height_alignment(screen,
202 			rtex->array_mode[level]);
203 	height = align(height, tile_height);
/* Record the array (tiling) mode for one mip level.  Linear and 1D modes
 * are stored as requested; 2D tiling is demoted to 1D when the level is
 * smaller than one macro-tile, since a partial macro-tile cannot be
 * addressed.
 *
 * NOTE(review): listing gaps — braces, breaks and an else around the
 * final assignment were lost.
 */
207 static void r600_texture_set_array_mode(struct pipe_screen *screen,
208 		struct r600_resource_texture *rtex,
209 		unsigned level, unsigned array_mode)
211 	struct pipe_resource *ptex = &rtex->resource.b.b.b;
213 	switch (array_mode) {
214 	case V_0280A0_ARRAY_LINEAR_GENERAL:
215 	case V_0280A0_ARRAY_LINEAR_ALIGNED:
216 	case V_0280A0_ARRAY_1D_TILED_THIN1:
218 	rtex->array_mode[level] = array_mode;
220 	case V_0280A0_ARRAY_2D_TILED_THIN1:
222 	unsigned w, h, tile_height, tile_width;
224 	tile_height = r600_get_height_alignment(screen, array_mode);
225 	tile_width = r600_get_block_alignment(screen, ptex->format, array_mode);
227 	w = mip_minify(ptex->width0, level);
228 	h = mip_minify(ptex->height0, level);
/* Level too small for a full macro-tile: fall back to 1D tiling. */
229 	if (w <= tile_width || h <= tile_height)
230 	rtex->array_mode[level] = V_0280A0_ARRAY_1D_TILED_THIN1;
232 	rtex->array_mode[level] = array_mode;
/* Lay out all mip levels of a texture: choose each level's array mode,
 * compute aligned pitch/height, and accumulate per-level offsets, layer
 * sizes and the total buffer size into rtex.
 *
 * NOTE(review): listing gaps — the array_mode parameter, braces, the
 * offset advance (`offset += size;` presumably between lines 279-283) and
 * other glue were lost.  Also, extra_size is read at line 271 below but no
 * initialization is visible in this listing — confirm it is zeroed (likely
 * `extra_size = 0` lost from the declaration at 246).
 */
238 static void r600_setup_miptree(struct pipe_screen *screen,
239 		struct r600_resource_texture *rtex,
242 	struct pipe_resource *ptex = &rtex->resource.b.b.b;
243 	struct radeon *radeon = (struct radeon *)screen->winsys;
244 	enum chip_class chipc = r600_get_family_class(radeon);
245 	unsigned size, layer_size, i, offset;
246 	unsigned nblocksx, nblocksy, extra_size;
248 	for (i = 0, offset = 0; i <= ptex->last_level; i++) {
249 	unsigned blocksize = util_format_get_blocksize(ptex->format);
250 	unsigned base_align = r600_get_base_alignment(screen, ptex->format, array_mode);
252 	r600_texture_set_array_mode(screen, rtex, i, array_mode);
254 	nblocksx = r600_texture_get_nblocksx(screen, rtex, i);
255 	nblocksy = r600_texture_get_nblocksy(screen, rtex, i);
257 	layer_size = nblocksx * nblocksy * blocksize;
/* Cube maps need 8 layers' worth on some paths, 6 otherwise (the
 * condition between these branches was lost in the listing). */
258 	if (ptex->target == PIPE_TEXTURE_CUBE) {
260 	size = layer_size * 8;
262 	size = layer_size * 6;
264 	else if (ptex->target == PIPE_TEXTURE_3D)
265 	size = layer_size * u_minify(ptex->depth0, i);
267 	size = layer_size * ptex->array_size;
269 	/* evergreen stores depth and stencil separately */
270 	if ((chipc >= EVERGREEN) && util_format_is_depth_or_stencil(ptex->format))
271 	extra_size = align(extra_size + (nblocksx * nblocksy * 1), base_align);
273 	/* align base image and start of miptree */
274 	if ((i == 0) || (i == 1))
275 	offset = align(offset, base_align);
276 	rtex->offset[i] = offset;
277 	rtex->layer_size[i] = layer_size;
278 	rtex->pitch_in_blocks[i] = nblocksx; /* CB talks in elements */
279 	rtex->pitch_in_bytes[i] = nblocksx * blocksize;
283 	rtex->size = offset + extra_size;
286 /* Figure out whether u_blitter will fallback to a transfer operation.
287 * If so, don't use a staging resource.
/* Returns TRUE when the screen can both render to and sample from the
 * resource's format, i.e. a staging blit will stay on the GPU.
 * NOTE(review): listing gaps — the bind variable declaration, the
 * compressed-format early return's value, the arguments of the two
 * is_format_supported calls and the final `return TRUE;` were lost. */
289 static boolean permit_hardware_blit(struct pipe_screen *screen,
290 		const struct pipe_resource *res)
/* Depth/stencil formats must be testable as depth targets, colors as RTs. */
294 	if (util_format_is_depth_or_stencil(res->format))
295 	bind = PIPE_BIND_DEPTH_STENCIL;
297 	bind = PIPE_BIND_RENDER_TARGET;
299 	/* hackaround for S3TC */
300 	if (util_format_is_compressed(res->format))
303 	if (!screen->is_format_supported(screen,
310 	if (!screen->is_format_supported(screen,
314 				PIPE_BIND_SAMPLER_VIEW))
320 static boolean r600_texture_get_handle(struct pipe_screen* screen,
321 struct pipe_resource *ptex,
322 struct winsys_handle *whandle)
324 struct r600_resource_texture *rtex = (struct r600_resource_texture*)ptex;
325 struct r600_resource *resource = &rtex->resource;
326 struct radeon *radeon = (struct radeon *)screen->winsys;
328 return r600_bo_get_winsys_handle(radeon, resource->bo,
329 rtex->pitch_in_bytes[0], whandle);
332 static void r600_texture_destroy(struct pipe_screen *screen,
333 struct pipe_resource *ptex)
335 struct r600_resource_texture *rtex = (struct r600_resource_texture*)ptex;
336 struct r600_resource *resource = &rtex->resource;
337 struct radeon *radeon = (struct radeon *)screen->winsys;
339 if (rtex->flushed_depth_texture)
340 pipe_resource_reference((struct pipe_resource **)&rtex->flushed_depth_texture, NULL);
343 r600_bo_reference(radeon, &resource->bo, NULL);
/* Resource vtable plugged into u_resource so the state tracker's generic
 * resource entry points dispatch to the texture implementations below.
 * (The opening/closing braces of the initializer were lost in the listing.)
 */
348 static const struct u_resource_vtbl r600_texture_vtbl =
350 	r600_texture_get_handle,	/* get_handle */
351 	r600_texture_destroy,		/* resource_destroy */
352 	r600_texture_get_transfer,	/* get_transfer */
353 	r600_texture_transfer_destroy,	/* transfer_destroy */
354 	r600_texture_transfer_map,	/* transfer_map */
355 	u_default_transfer_flush_region,/* transfer_flush_region */
356 	r600_texture_transfer_unmap,	/* transfer_unmap */
357 	u_default_transfer_inline_write	/* transfer_inline_write */
/* Common constructor for both locally created and imported textures:
 * allocates the wrapper, copies the template, computes the miptree layout,
 * and (when no bo was handed in) allocates the backing buffer object.
 *
 * NOTE(review): listing gaps — the array_mode and bo parameters, the
 * CALLOC failure check, the depth-flag assignment body, the `if (bo == NULL)`
 * guard around the allocation and its failure path/return were all lost.
 */
360 static struct r600_resource_texture *
361 r600_texture_create_object(struct pipe_screen *screen,
362 			const struct pipe_resource *base,
364 			unsigned pitch_in_bytes_override,
365 			unsigned max_buffer_size,
368 	struct r600_resource_texture *rtex;
369 	struct r600_resource *resource;
370 	struct radeon *radeon = (struct radeon *)screen->winsys;
372 	rtex = CALLOC_STRUCT(r600_resource_texture);
376 	resource = &rtex->resource;
377 	resource->b.b.b = *base;
378 	resource->b.b.vtbl = &r600_texture_vtbl;
379 	pipe_reference_init(&resource->b.b.b.reference, 1);
380 	resource->b.b.b.screen = screen;
382 	rtex->pitch_override = pitch_in_bytes_override;
383 	/* only mark depth textures the HW can hit as depth textures */
384 	if (util_format_is_depth_or_stencil(base->format) && permit_hardware_blit(screen, base))
387 	r600_setup_miptree(screen, rtex, array_mode);
389 	resource->size = rtex->size;
392 	struct pipe_resource *ptex = &rtex->resource.b.b.b;
393 	int base_align = r600_get_base_alignment(screen, ptex->format, array_mode);
/* Allocate VRAM for the whole miptree at the required base alignment. */
395 	resource->bo = r600_bo(radeon, rtex->size, base_align, base->bind, base->usage);
/* pipe_screen::resource_create for textures.  Picks an array (tiling) mode
 * based on the R600_TILING env option, kernel DRM minor version, and
 * whether the blitter can operate on the format, then defers to
 * r600_texture_create_object.
 *
 * NOTE(review): listing gaps — braces, the else branches, and the trailing
 * arguments (0, 0, NULL presumably) of the create_object call were lost.
 */
404 struct pipe_resource *r600_texture_create(struct pipe_screen *screen,
405 		const struct pipe_resource *templ)
407 	unsigned array_mode = 0;
408 	static int force_tiling = -1;
410 	/* Would like some magic "get_bool_option_once" routine.
/* Evaluate the env option only on first call (cached in a static). */
412 	if (force_tiling == -1) {
414 	/* reenable when 2D tiling is fixed better */
415 	struct r600_screen *rscreen = (struct r600_screen *)screen;
/* 2D tiling defaults on only with a new enough kernel (minor >= 9). */
416 	if (r600_get_minor_version(rscreen->radeon) >= 9)
417 	force_tiling = debug_get_bool_option("R600_TILING", TRUE);
419 	force_tiling = debug_get_bool_option("R600_TILING", FALSE);
/* Never tile transfer staging resources or scanout surfaces. */
422 	if (force_tiling && permit_hardware_blit(screen, templ)) {
423 	if (!(templ->flags & R600_RESOURCE_FLAG_TRANSFER) &&
424 	!(templ->bind & PIPE_BIND_SCANOUT)) {
425 	array_mode = V_038000_ARRAY_2D_TILED_THIN1;
429 	if (!(templ->flags & R600_RESOURCE_FLAG_TRANSFER) &&
430 	util_format_is_compressed(templ->format))
431 	array_mode = V_038000_ARRAY_1D_TILED_THIN1;
433 	return (struct pipe_resource *)r600_texture_create_object(screen, templ, array_mode,
/* pipe_context::create_surface — wrap one (level, layer) of a texture in a
 * pipe_surface for use as a render target / depth target.
 *
 * NOTE(review): listing gaps — braces, the CALLOC failure check, and the
 * second argument of the aligned_height computation (presumably rtex and
 * level) were lost.  Only single-layer surfaces are supported (assert).
 */
438 static struct pipe_surface *r600_create_surface(struct pipe_context *pipe,
439 		struct pipe_resource *texture,
440 		const struct pipe_surface *surf_tmpl)
442 	struct r600_resource_texture *rtex = (struct r600_resource_texture*)texture;
443 	struct r600_surface *surface = CALLOC_STRUCT(r600_surface);
444 	unsigned level = surf_tmpl->u.tex.level;
446 	assert(surf_tmpl->u.tex.first_layer == surf_tmpl->u.tex.last_layer);
450 	/* offset = r600_texture_get_offset(rtex, level, surf_tmpl->u.tex.first_layer);*/
451 	pipe_reference_init(&surface->base.reference, 1);
452 	pipe_resource_reference(&surface->base.texture, texture);
453 	surface->base.context = pipe;
454 	surface->base.format = surf_tmpl->format;
455 	surface->base.width = mip_minify(texture->width0, level);
456 	surface->base.height = mip_minify(texture->height0, level);
457 	surface->base.usage = surf_tmpl->usage;
458 	surface->base.texture = texture;
459 	surface->base.u.tex.first_layer = surf_tmpl->u.tex.first_layer;
460 	surface->base.u.tex.last_layer = surf_tmpl->u.tex.last_layer;
461 	surface->base.u.tex.level = level;
/* CB needs the tiling-aligned height, not the logical height. */
463 	surface->aligned_height = r600_texture_get_nblocksy(pipe->screen,
465 	return &surface->base;
468 static void r600_surface_destroy(struct pipe_context *pipe,
469 struct pipe_surface *surface)
471 pipe_resource_reference(&surface->texture, NULL);
/* pipe_screen::resource_from_handle — wrap an externally shared buffer
 * (e.g. from a DRI2 flink/prime handle) as a texture.  Only simple 2D,
 * single-level, single-layer images are accepted; the winsys reports the
 * array mode the kernel recorded for the bo.
 *
 * NOTE(review): listing gaps — the NULL returns for the unsupported-template
 * and failed-bo cases, and the trailing arguments (stride/size/bo) of the
 * create_object call, were lost.
 */
476 struct pipe_resource *r600_texture_from_handle(struct pipe_screen *screen,
477 		const struct pipe_resource *templ,
478 		struct winsys_handle *whandle)
480 	struct radeon *rw = (struct radeon*)screen->winsys;
481 	struct r600_bo *bo = NULL;
482 	unsigned array_mode = 0;
484 	/* Support only 2D textures without mipmaps */
485 	if ((templ->target != PIPE_TEXTURE_2D && templ->target != PIPE_TEXTURE_RECT) ||
486 	templ->depth0 != 1 || templ->last_level != 0)
489 	bo = r600_bo_handle(rw, whandle->handle, &array_mode);
494 	return (struct pipe_resource *)r600_texture_create_object(screen, templ, array_mode,
/* Lazily create (and optionally populate) rtex->flushed_depth_texture, a
 * linear shadow copy of a depth texture that the CPU / sampler paths can
 * use.  When just_create is false the depth data is decompressed into it.
 *
 * NOTE(review): listing gaps — the early-out jump when the flushed texture
 * already exists, resource.depth0/bind initialization, the error return
 * value, the just_create early return and the final return were lost.
 */
500 int r600_texture_depth_flush(struct pipe_context *ctx,
501 		struct pipe_resource *texture, boolean just_create)
503 	struct r600_resource_texture *rtex = (struct r600_resource_texture*)texture;
504 	struct pipe_resource resource;
506 	if (rtex->flushed_depth_texture)
/* Shadow copy mirrors the source's geometry but is a plain 2D staging
 * resource (R600_RESOURCE_FLAG_TRANSFER keeps it linear). */
509 	resource.target = PIPE_TEXTURE_2D;
510 	resource.format = texture->format;
511 	resource.width0 = texture->width0;
512 	resource.height0 = texture->height0;
514 	resource.array_size = 1;
515 	resource.last_level = texture->last_level;
516 	resource.nr_samples = 0;
517 	resource.usage = PIPE_USAGE_DYNAMIC;
519 	resource.flags = R600_RESOURCE_FLAG_TRANSFER;
521 	resource.bind |= PIPE_BIND_DEPTH_STENCIL;
523 	rtex->flushed_depth_texture = (struct r600_resource_texture *)ctx->screen->resource_create(ctx->screen, &resource);
524 	if (rtex->flushed_depth_texture == NULL) {
525 	R600_ERR("failed to create temporary texture to hold untiled copy\n");
/* Mark it so transfer/destroy paths don't recurse on it. */
529 	((struct r600_resource_texture *)rtex->flushed_depth_texture)->is_flushing_texture = TRUE;
534 	/* XXX: only do this if the depth texture has actually changed:
536 	r600_blit_uncompress_depth(ctx, rtex);
540 /* Needs adjustment for pixelformat:
542 static INLINE unsigned u_box_volume( const struct pipe_box *box )
544 return box->width * box->depth * box->height;
/* pipe_context::get_transfer for textures.  Decides between three mapping
 * strategies: (a) depth textures go through a flushed (decompressed) shadow
 * copy, (b) tiled or slow-to-read textures go through a linear staging
 * texture filled/emptied by blits, (c) everything else is mapped directly.
 *
 * NOTE(review): this listing has many gaps — the level/usage parameters,
 * the `int r` declaration, the `if (rtex->depth ...)` branch header before
 * line 599, resource.depth0/bind init, several closing braces, FREE(trans)
 * on the error paths and assorted returns were lost.
 */
547 struct pipe_transfer* r600_texture_get_transfer(struct pipe_context *ctx,
548 		struct pipe_resource *texture,
551 		const struct pipe_box *box)
553 	struct r600_resource_texture *rtex = (struct r600_resource_texture*)texture;
554 	struct pipe_resource resource;
555 	struct r600_transfer *trans;
557 	boolean use_staging_texture = FALSE;
559 	/* We cannot map a tiled texture directly because the data is
560 	* in a different order, therefore we do detiling using a blit.
562 	* Also, use a temporary in GTT memory for read transfers, as
563 	* the CPU is much happier reading out of cached system memory
564 	* than uncached VRAM.
566 	if (R600_TEX_IS_TILED(rtex, level))
567 	use_staging_texture = TRUE;
/* 1024 pixels: heuristic threshold below which a direct VRAM read is
 * cheaper than a blit round-trip. */
569 	if ((usage & PIPE_TRANSFER_READ) && u_box_volume(box) > 1024)
570 	use_staging_texture = TRUE;
572 	/* XXX: Use a staging texture for uploads if the underlying BO
573 	* is busy. No interface for checking that currently? so do
574 	* it eagerly whenever the transfer doesn't require a readback
577 	if ((usage & PIPE_TRANSFER_WRITE) &&
578 	!(usage & (PIPE_TRANSFER_READ |
579 	PIPE_TRANSFER_DONTBLOCK |
580 	PIPE_TRANSFER_UNSYNCHRONIZED)))
581 	use_staging_texture = TRUE;
/* Staging requires the blitter; transfer resources are already linear. */
583 	if (!permit_hardware_blit(ctx->screen, texture) ||
584 	(texture->flags & R600_RESOURCE_FLAG_TRANSFER))
585 	use_staging_texture = FALSE;
587 	trans = CALLOC_STRUCT(r600_transfer);
590 	pipe_resource_reference(&trans->transfer.resource, texture);
591 	trans->transfer.level = level;
592 	trans->transfer.usage = usage;
593 	trans->transfer.box = *box;
/* --- depth path: map the flushed shadow copy instead --- */
595 	/* XXX: only readback the rectangle which is being mapped?
597 	/* XXX: when discard is true, no need to read back from depth texture
599 	r = r600_texture_depth_flush(ctx, texture, FALSE);
601 	R600_ERR("failed to create temporary texture to hold untiled copy\n");
602 	pipe_resource_reference(&trans->transfer.resource, NULL);
606 	trans->transfer.stride = rtex->flushed_depth_texture->pitch_in_bytes[level];
607 	trans->offset = r600_texture_get_offset(rtex->flushed_depth_texture, level, box->z);
608 	return &trans->transfer;
/* --- staging path: blit through a linear temporary --- */
609 	} else if (use_staging_texture) {
610 	resource.target = PIPE_TEXTURE_2D;
611 	resource.format = texture->format;
612 	resource.width0 = box->width;
613 	resource.height0 = box->height;
615 	resource.array_size = 1;
616 	resource.last_level = 0;
617 	resource.nr_samples = 0;
618 	resource.usage = PIPE_USAGE_STAGING;
620 	resource.flags = R600_RESOURCE_FLAG_TRANSFER;
621 	/* For texture reading, the temporary (detiled) texture is used as
622 	* a render target when blitting from a tiled texture. */
623 	if (usage & PIPE_TRANSFER_READ) {
624 	resource.bind |= PIPE_BIND_RENDER_TARGET;
626 	/* For texture writing, the temporary texture is used as a sampler
627 	* when blitting into a tiled texture. */
628 	if (usage & PIPE_TRANSFER_WRITE) {
629 	resource.bind |= PIPE_BIND_SAMPLER_VIEW;
631 	/* Create the temporary texture. */
632 	trans->staging_texture = ctx->screen->resource_create(ctx->screen, &resource);
633 	if (trans->staging_texture == NULL) {
634 	R600_ERR("failed to create temporary texture to hold untiled copy\n");
635 	pipe_resource_reference(&trans->transfer.resource, NULL);
640 	trans->transfer.stride =
641 	((struct r600_resource_texture *)trans->staging_texture)->pitch_in_bytes[0];
642 	if (usage & PIPE_TRANSFER_READ) {
643 	r600_copy_to_staging_texture(ctx, trans);
644 	/* Always referenced in the blit. */
645 	ctx->flush(ctx, NULL);
647 	return &trans->transfer;
/* --- direct path: map the texture's own storage --- */
649 	trans->transfer.stride = rtex->pitch_in_bytes[level];
650 	trans->transfer.layer_stride = rtex->layer_size[level];
651 	trans->offset = r600_texture_get_offset(rtex, level, box->z);
652 	return &trans->transfer;
/* pipe_context::transfer_destroy — write back staging/flushed-depth data
 * for write transfers, then release the staging texture, the resource
 * reference, and (in a lost trailing line, presumably FREE(transfer)) the
 * transfer itself.
 *
 * NOTE(review): listing gaps — function braces and the final free were lost.
 */
655 void r600_texture_transfer_destroy(struct pipe_context *ctx,
656 		struct pipe_transfer *transfer)
658 	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
659 	struct pipe_resource *texture = transfer->resource;
660 	struct r600_resource_texture *rtex = (struct r600_resource_texture*)texture;
/* Staging path: push CPU writes back to the real texture via blit. */
662 	if (rtransfer->staging_texture) {
663 	if (transfer->usage & PIPE_TRANSFER_WRITE) {
664 	r600_copy_from_staging_texture(ctx, rtransfer);
666 	pipe_resource_reference(&rtransfer->staging_texture, NULL);
/* Depth path: recompress edits from the flushed copy into the real one. */
669 	if (rtex->depth && !rtex->is_flushing_texture) {
670 	if ((transfer->usage & PIPE_TRANSFER_WRITE) && rtex->flushed_depth_texture)
671 	r600_blit_push_depth(ctx, rtex);
674 	pipe_resource_reference(&transfer->resource, NULL);
/* pipe_context::transfer_map — pick the bo that actually backs this
 * transfer (staging texture, flushed depth copy, or the texture itself),
 * translate PIPE_TRANSFER_* flags to PB_USAGE_*, map the bo and return a
 * pointer offset to the requested box.
 *
 * NOTE(review): listing gaps — the bo/offset/usage/map declarations, the
 * offset handling in the staging branch, the PB_USAGE bits added for
 * DISCARD/FLUSH_EXPLICIT, the NULL-map error return and the final
 * `return map + offset;` were lost.
 */
678 void* r600_texture_transfer_map(struct pipe_context *ctx,
679 		struct pipe_transfer* transfer)
681 	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
683 	enum pipe_format format = transfer->resource->format;
684 	struct radeon *radeon = (struct radeon *)ctx->screen->winsys;
689 	if (rtransfer->staging_texture) {
690 	bo = ((struct r600_resource *)rtransfer->staging_texture)->bo;
692 	struct r600_resource_texture *rtex = (struct r600_resource_texture*)transfer->resource;
694 	if (rtex->flushed_depth_texture)
695 	bo = ((struct r600_resource *)rtex->flushed_depth_texture)->bo;
697 	bo = ((struct r600_resource *)transfer->resource)->bo;
/* Start of the mapped box: level/layer offset + y rows + x blocks. */
699 	offset = rtransfer->offset +
700 	transfer->box.y / util_format_get_blockheight(format) * transfer->stride +
701 	transfer->box.x / util_format_get_blockwidth(format) * util_format_get_blocksize(format);
704 	if (transfer->usage & PIPE_TRANSFER_WRITE) {
705 	usage |= PB_USAGE_CPU_WRITE;
707 	if (transfer->usage & PIPE_TRANSFER_DISCARD) {
710 	if (transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT) {
714 	if (transfer->usage & PIPE_TRANSFER_READ) {
715 	usage |= PB_USAGE_CPU_READ;
718 	if (transfer->usage & PIPE_TRANSFER_DONTBLOCK) {
719 	usage |= PB_USAGE_DONTBLOCK;
722 	if (transfer->usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
723 	usage |= PB_USAGE_UNSYNCHRONIZED;
726 	map = r600_bo_map(radeon, bo, usage, ctx);
/* pipe_context::transfer_unmap — unmap whichever bo transfer_map mapped
 * (same staging / flushed-depth / direct selection logic).
 *
 * NOTE(review): listing gaps — the bo declaration, braces and the
 * direct-texture else branch header were lost.
 */
734 void r600_texture_transfer_unmap(struct pipe_context *ctx,
735 		struct pipe_transfer* transfer)
737 	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
738 	struct radeon *radeon = (struct radeon *)ctx->screen->winsys;
741 	if (rtransfer->staging_texture) {
742 	bo = ((struct r600_resource *)rtransfer->staging_texture)->bo;
744 	struct r600_resource_texture *rtex = (struct r600_resource_texture*)transfer->resource;
746 	if (rtex->flushed_depth_texture) {
747 	bo = ((struct r600_resource *)rtex->flushed_depth_texture)->bo;
749 	bo = ((struct r600_resource *)transfer->resource)->bo;
752 	r600_bo_unmap(radeon, bo);
755 void r600_init_surface_functions(struct r600_pipe_context *r600)
757 r600->context.create_surface = r600_create_surface;
758 r600->context.surface_destroy = r600_surface_destroy;
/* Compose the format's intrinsic channel swizzle with an optional
 * view-level swizzle and encode the result as the hardware's
 * SQ_TEX_RESOURCE DST_SEL_X..W field bits.
 *
 * NOTE(review): listing gaps — the `result` declaration, the
 * swizzle_shift/swizzle_bit table entries, the `if (swizzle_view)`/else
 * split around the memcpy at 781, per-case breaks and the final return
 * were lost.
 */
761 static unsigned r600_get_swizzle_combined(const unsigned char *swizzle_format,
762 		const unsigned char *swizzle_view)
765 	unsigned char swizzle[4];
767 	const uint32_t swizzle_shift[4] = {
770 	const uint32_t swizzle_bit[4] = {
775 	/* Combine two sets of swizzles. */
776 	for (i = 0; i < 4; i++) {
/* A view component <= W selects a format channel; 0/1 pass through. */
777 	swizzle[i] = swizzle_view[i] <= UTIL_FORMAT_SWIZZLE_W ?
778 	swizzle_format[swizzle_view[i]] : swizzle_view[i];
781 	memcpy(swizzle, swizzle_format, 4);
/* Encode each output component's source selector. */
785 	for (i = 0; i < 4; i++) {
786 	switch (swizzle[i]) {
787 	case UTIL_FORMAT_SWIZZLE_Y:
788 	result |= swizzle_bit[1] << swizzle_shift[i];
790 	case UTIL_FORMAT_SWIZZLE_Z:
791 	result |= swizzle_bit[2] << swizzle_shift[i];
793 	case UTIL_FORMAT_SWIZZLE_W:
794 	result |= swizzle_bit[3] << swizzle_shift[i];
796 	case UTIL_FORMAT_SWIZZLE_0:
797 	result |= V_038010_SQ_SEL_0 << swizzle_shift[i];
799 	case UTIL_FORMAT_SWIZZLE_1:
800 	result |= V_038010_SQ_SEL_1 << swizzle_shift[i];
802 	default: /* UTIL_FORMAT_SWIZZLE_X */
803 	result |= swizzle_bit[0] << swizzle_shift[i];
809 /* texture format translate */
/* Translate a gallium pipe_format into the R600 SQ_TEX_RESOURCE format
 * word plus the word4 (swizzle/num-format/sign) and YUV auxiliary words.
 * Returns the FMT_* value; word4_p/yuv_format_p receive the side words.
 *
 * NOTE(review): the function continues past the end of this listing, and
 * many interior lines are missing (per-case FMT_* results for ZS formats,
 * breaks, goto out_unknown paths, the out_word4 epilogue).  Treat the gaps
 * accordingly; comments below annotate only what is visible.
 */
810 uint32_t r600_translate_texformat(struct pipe_screen *screen,
811 		enum pipe_format format,
812 		const unsigned char *swizzle_view,
813 		uint32_t *word4_p, uint32_t *yuv_format_p)
815 	uint32_t result = 0, word4 = 0, yuv_format = 0;
816 	const struct util_format_description *desc;
817 	boolean uniform = TRUE;
818 	static int r600_enable_s3tc = -1;
/* Per-component FORMAT_COMP sign bits, indexed by channel. */
821 	const uint32_t sign_bit[4] = {
822 	S_038010_FORMAT_COMP_X(V_038010_SQ_FORMAT_COMP_SIGNED),
823 	S_038010_FORMAT_COMP_Y(V_038010_SQ_FORMAT_COMP_SIGNED),
824 	S_038010_FORMAT_COMP_Z(V_038010_SQ_FORMAT_COMP_SIGNED),
825 	S_038010_FORMAT_COMP_W(V_038010_SQ_FORMAT_COMP_SIGNED)
827 	desc = util_format_description(format);
829 	word4 |= r600_get_swizzle_combined(desc->swizzle, swizzle_view);
831 	/* Colorspace (return non-RGB formats directly). */
832 	switch (desc->colorspace) {
833 	/* Depth stencil formats */
834 	case UTIL_FORMAT_COLORSPACE_ZS:
836 	case PIPE_FORMAT_Z16_UNORM:
839 	case PIPE_FORMAT_X24S8_USCALED:
840 	word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
841 	case PIPE_FORMAT_Z24X8_UNORM:
842 	case PIPE_FORMAT_Z24_UNORM_S8_USCALED:
845 	case PIPE_FORMAT_S8X24_USCALED:
846 	word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
847 	case PIPE_FORMAT_X8Z24_UNORM:
848 	case PIPE_FORMAT_S8_USCALED_Z24_UNORM:
851 	case PIPE_FORMAT_S8_USCALED:
853 	word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
859 	case UTIL_FORMAT_COLORSPACE_YUV:
860 	yuv_format |= (1 << 30);
862 	case PIPE_FORMAT_UYVY:
863 	case PIPE_FORMAT_YUYV:
867 	goto out_unknown; /* TODO */
869 	case UTIL_FORMAT_COLORSPACE_SRGB:
870 	word4 |= S_038010_FORCE_DEGAMMA(1);
/* S3TC availability: kernel minor >= 9 enables it, else env opt-in. */
877 	if (r600_enable_s3tc == -1) {
878 	struct r600_screen *rscreen = (struct r600_screen *)screen;
879 	if (r600_get_minor_version(rscreen->radeon) >= 9)
880 	r600_enable_s3tc = 1;
882 	r600_enable_s3tc = debug_get_bool_option("R600_ENABLE_S3TC", FALSE);
885 	if (desc->layout == UTIL_FORMAT_LAYOUT_RGTC) {
886 	if (!r600_enable_s3tc)
/* RGTC/LATC: SNORM variants add sign bits, then share the UNORM cases
 * (the FMT_BC4/FMT_BC5 results were lost in the listing gaps). */
890 	case PIPE_FORMAT_RGTC1_SNORM:
891 	case PIPE_FORMAT_LATC1_SNORM:
892 	word4 |= sign_bit[0];
893 	case PIPE_FORMAT_RGTC1_UNORM:
894 	case PIPE_FORMAT_LATC1_UNORM:
897 	case PIPE_FORMAT_RGTC2_SNORM:
898 	case PIPE_FORMAT_LATC2_SNORM:
899 	word4 |= sign_bit[0] | sign_bit[1];
900 	case PIPE_FORMAT_RGTC2_UNORM:
901 	case PIPE_FORMAT_LATC2_UNORM:
909 	if (desc->layout == UTIL_FORMAT_LAYOUT_S3TC) {
911 	if (!r600_enable_s3tc)
914 	if (!util_format_s3tc_enabled) {
919 	case PIPE_FORMAT_DXT1_RGB:
920 	case PIPE_FORMAT_DXT1_RGBA:
921 	case PIPE_FORMAT_DXT1_SRGB:
922 	case PIPE_FORMAT_DXT1_SRGBA:
925 	case PIPE_FORMAT_DXT3_RGBA:
926 	case PIPE_FORMAT_DXT3_SRGBA:
929 	case PIPE_FORMAT_DXT5_RGBA:
930 	case PIPE_FORMAT_DXT5_SRGBA:
/* Packed-float special cases. */
938 	if (format == PIPE_FORMAT_R9G9B9E5_FLOAT) {
939 	result = FMT_5_9_9_9_SHAREDEXP;
941 	} else if (format == PIPE_FORMAT_R11G11B10_FLOAT) {
942 	result = FMT_10_11_11_FLOAT;
/* Plain RGB path: collect per-channel sign bits. */
947 	for (i = 0; i < desc->nr_channels; i++) {
948 	if (desc->channel[i].type == UTIL_FORMAT_TYPE_SIGNED) {
949 	word4 |= sign_bit[i];
953 	/* R8G8Bx_SNORM - TODO CxV8U8 */
955 	/* See whether the components are of the same size. */
956 	for (i = 1; i < desc->nr_channels; i++) {
957 	uniform = uniform && desc->channel[0].size == desc->channel[i].size;
960 	/* Non-uniform formats. */
962 	switch(desc->nr_channels) {
964 	if (desc->channel[0].size == 5 &&
965 	desc->channel[1].size == 6 &&
966 	desc->channel[2].size == 5) {
972 	if (desc->channel[0].size == 5 &&
973 	desc->channel[1].size == 5 &&
974 	desc->channel[2].size == 5 &&
975 	desc->channel[3].size == 1) {
976 	result = FMT_1_5_5_5;
979 	if (desc->channel[0].size == 10 &&
980 	desc->channel[1].size == 10 &&
981 	desc->channel[2].size == 10 &&
982 	desc->channel[3].size == 2) {
983 	result = FMT_2_10_10_10;
991 	/* Find the first non-VOID channel. */
992 	for (i = 0; i < 4; i++) {
993 	if (desc->channel[i].type != UTIL_FORMAT_TYPE_VOID) {
1001 	/* uniform formats */
1002 	switch (desc->channel[i].type) {
1003 	case UTIL_FORMAT_TYPE_UNSIGNED:
1004 	case UTIL_FORMAT_TYPE_SIGNED:
/* Non-normalized, non-sRGB integer data presumably gets NUM_FORMAT_INT
 * in the lost lines here. */
1005 	if (!desc->channel[i].normalized &&
1006 	desc->colorspace != UTIL_FORMAT_COLORSPACE_SRGB) {
1010 	switch (desc->channel[i].size) {
1012 	switch (desc->nr_channels) {
1017 	result = FMT_4_4_4_4;
1022 	switch (desc->nr_channels) {
1030 	result = FMT_8_8_8_8;
1035 	switch (desc->nr_channels) {
1043 	result = FMT_16_16_16_16;
1048 	switch (desc->nr_channels) {
1056 	result = FMT_32_32_32_32;
1062 	case UTIL_FORMAT_TYPE_FLOAT:
1063 	switch (desc->channel[i].size) {
1065 	switch (desc->nr_channels) {
1067 	result = FMT_16_FLOAT;
1070 	result = FMT_16_16_FLOAT;
1073 	result = FMT_16_16_16_16_FLOAT;
1078 	switch (desc->nr_channels) {
1080 	result = FMT_32_FLOAT;
1083 	result = FMT_32_32_FLOAT;
1086 	result = FMT_32_32_32_32_FLOAT;
1097 	*yuv_format_p = yuv_format;
1100 	/* R600_ERR("Unable to handle texformat %d %s\n", format, util_format_name(format)); */