2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 #include "r600_pipe.h"
24 #include "r600_public.h"
29 #include "pipe/p_shader_tokens.h"
30 #include "util/u_blitter.h"
31 #include "util/u_format_s3tc.h"
32 #include "util/u_memory.h"
33 #include "util/u_simple_shaders.h"
34 #include "util/u_upload_mgr.h"
35 #include "util/u_math.h"
36 #include "vl/vl_decoder.h"
37 #include "vl/vl_video_buffer.h"
38 #include "os/os_time.h"
/* Allocate (or recycle from the free pool) a fence object backed by one
 * 32-bit slot of the screen-wide shared fence buffer, emit a fence write on
 * the gfx ring, and return the fence with one reference held.  The whole
 * operation runs under rscreen->fences.mutex.
 * NOTE(review): several error/early-return lines are missing from this
 * extract (e.g. after the bo/block allocation failures) — confirm against
 * the full file. */
43 static struct r600_fence *r600_create_fence(struct r600_context *rctx)
45 struct r600_screen *rscreen = rctx->screen;
46 struct r600_fence *fence = NULL;
48 pipe_mutex_lock(rscreen->fences.mutex);
/* Lazily create the shared 4 KiB buffer holding all fence slots. */
50 if (!rscreen->fences.bo) {
51 /* Create the shared buffer object */
52 rscreen->fences.bo = (struct r600_resource*)
53 pipe_buffer_create(&rscreen->screen, PIPE_BIND_CUSTOM,
54 PIPE_USAGE_STAGING, 4096);
55 if (!rscreen->fences.bo) {
56 R600_ERR("r600: failed to create bo for fence objects\n");
/* Persistently map the fence buffer so the CPU can poll slot values. */
59 rscreen->fences.data = r600_buffer_mmap_sync_with_rings(rctx, rscreen->fences.bo, PIPE_TRANSFER_READ_WRITE);
/* Prefer reusing a previously freed fence whose slot already signalled
 * (non-zero) over growing the allocation. */
62 if (!LIST_IS_EMPTY(&rscreen->fences.pool)) {
63 struct r600_fence *entry;
65 /* Try to find a freed fence that has been signalled */
66 LIST_FOR_EACH_ENTRY(entry, &rscreen->fences.pool, head) {
67 if (rscreen->fences.data[entry->index] != 0) {
68 LIST_DELINIT(&entry->head);
76 /* Allocate a new fence */
77 struct r600_fence_block *block;
/* Hard cap: the 4096-byte buffer holds at most 1024 32-bit slots. */
80 if ((rscreen->fences.next_index + 1) >= 1024) {
81 R600_ERR("r600: too many concurrent fences\n");
85 index = rscreen->fences.next_index++;
/* Fence structs are carved out of fixed-size blocks; start a new block
 * whenever the index crosses a FENCE_BLOCK_SIZE boundary. */
87 if (!(index % FENCE_BLOCK_SIZE)) {
88 /* Allocate a new block */
89 block = CALLOC_STRUCT(r600_fence_block);
93 LIST_ADD(&block->head, &rscreen->fences.blocks);
95 block = LIST_ENTRY(struct r600_fence_block, rscreen->fences.blocks.next, head);
98 fence = &block->fences[index % FENCE_BLOCK_SIZE];
102 pipe_reference_init(&fence->reference, 1);
/* Arm the slot: 0 = pending; the GPU writes 1 when the CS completes. */
104 rscreen->fences.data[fence->index] = 0;
105 r600_context_emit_fence(rctx, rscreen->fences.bo, fence->index, 1);
107 /* Create a dummy BO so that fence_finish without a timeout can sleep waiting for completion */
108 fence->sleep_bo = (struct r600_resource*)
109 pipe_buffer_create(&rctx->screen->screen, PIPE_BIND_CUSTOM,
110 PIPE_USAGE_STAGING, 1);
111 /* Add the fence as a dummy relocation. */
112 r600_context_bo_reloc(rctx, &rctx->rings.gfx, fence->sleep_bo, RADEON_USAGE_READWRITE);
115 pipe_mutex_unlock(rscreen->fences.mutex);
/* Flush the gfx command stream.  Any active render condition (conditional
 * rendering) is temporarily disabled around the flush and restored after a
 * new CS has been begun, since the condition state lives in the CS. */
119 static void r600_flush(struct pipe_context *ctx, unsigned flags)
121 struct r600_context *rctx = (struct r600_context *)ctx;
122 struct pipe_query *render_cond = NULL;
123 unsigned render_cond_mode = 0;
125 rctx->rings.gfx.flushing = true;
126 /* Disable render condition. */
127 if (rctx->current_render_cond) {
128 render_cond = rctx->current_render_cond;
129 render_cond_mode = rctx->current_render_cond_mode;
130 ctx->render_condition(ctx, NULL, 0);
133 r600_context_flush(rctx, flags);
134 rctx->rings.gfx.flushing = false;
135 r600_begin_new_cs(rctx);
137 /* Re-enable render condition. */
/* NOTE(review): the guard `if (render_cond)` appears to be missing from
 * this extract — confirm against the full file. */
139 ctx->render_condition(ctx, render_cond, render_cond_mode);
/* pipe_context::flush entry point called by the state tracker: translate
 * pipe flush flags to winsys flags, optionally create a fence for the
 * caller, then flush both the DMA and gfx rings. */
143 static void r600_flush_from_st(struct pipe_context *ctx,
144 struct pipe_fence_handle **fence,
145 enum pipe_flush_flags flags)
147 struct r600_context *rctx = (struct r600_context *)ctx;
148 struct r600_fence **rfence = (struct r600_fence**)fence;
/* NOTE(review): the declaration of `fflags` is missing from this extract. */
151 fflags = flags & PIPE_FLUSH_END_OF_FRAME ? RADEON_FLUSH_END_OF_FRAME : 0;
/* Presumably guarded by `if (rfence)` in the full file — verify. */
153 *rfence = r600_create_fence(rctx);
155 /* flush gfx & dma ring, order does not matter as only one can be live */
156 if (rctx->rings.dma.cs) {
157 rctx->rings.dma.flush(rctx, fflags);
159 rctx->rings.gfx.flush(rctx, fflags);
/* Ring-flush callback for the gfx ring: forwards to the full
 * pipe_context flush path (render-condition save/restore etc.). */
static void r600_flush_gfx_ring(void *ctx, unsigned flags)
{
	struct pipe_context *pctx = (struct pipe_context *)ctx;

	r600_flush(pctx, flags);
}
/* Ring-flush callback for the async DMA ring: pad the CS with NOP packets
 * up to a multiple of 8 dwords (hardware alignment requirement), then hand
 * it to the winsys. */
167 static void r600_flush_dma_ring(void *ctx, unsigned flags)
169 struct r600_context *rctx = (struct r600_context *)ctx;
170 struct radeon_winsys_cs *cs = rctx->rings.dma.cs;
171 unsigned padding_dw, i;
177 /* Pad the DMA CS to a multiple of 8 dwords. */
178 padding_dw = 8 - cs->cdw % 8;
/* padding_dw == 8 means cdw is already aligned — nothing to append. */
179 if (padding_dw < 8) {
180 for (i = 0; i < padding_dw; i++) {
181 cs->buf[cs->cdw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
185 rctx->rings.dma.flushing = true;
186 rctx->ws->cs_flush(cs, flags);
187 rctx->rings.dma.flushing = false;
/* Return whether `buf` is referenced, with the given usage, by either the
 * gfx CS or (when present) the DMA CS that has not been flushed yet.
 * NOTE(review): the `return TRUE;`/`return FALSE;` lines are missing from
 * this extract — confirm against the full file. */
190 boolean r600_rings_is_buffer_referenced(struct r600_context *ctx,
191 struct radeon_winsys_cs_handle *buf,
192 enum radeon_bo_usage usage)
194 if (ctx->ws->cs_is_buffer_referenced(ctx->rings.gfx.cs, buf, usage)) {
197 if (ctx->rings.dma.cs) {
198 if (ctx->ws->cs_is_buffer_referenced(ctx->rings.dma.cs, buf, usage)) {
/* Map a buffer for CPU access after synchronizing with both rings:
 * flush any CS that still references the buffer, optionally bail out
 * instead of blocking (PIPE_TRANSFER_DONTBLOCK), and finally wait for the
 * GPU to be done with the buffer before mapping it unsynchronized.
 * NOTE(review): the `unsigned usage` parameter line and several
 * early-return lines are missing from this extract. */
205 void *r600_buffer_mmap_sync_with_rings(struct r600_context *ctx,
206 struct r600_resource *resource,
209 enum radeon_bo_usage rusage = RADEON_USAGE_READWRITE;
211 bool sync_flush = TRUE;
/* Caller asked for no synchronization at all — map directly. */
213 if (usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
214 return ctx->ws->buffer_map(resource->cs_buf, NULL, usage);
/* A read-only mapping only has to wait for pending GPU *writes*. */
217 if (!(usage & PIPE_TRANSFER_WRITE)) {
218 /* have to wait for pending read */
219 rusage = RADEON_USAGE_WRITE;
221 if (usage & PIPE_TRANSFER_DONTBLOCK) {
222 flags |= RADEON_FLUSH_ASYNC;
/* Flush the gfx CS if it still references this buffer. */
225 if (ctx->ws->cs_is_buffer_referenced(ctx->rings.gfx.cs, resource->cs_buf, rusage) && ctx->rings.gfx.cs->cdw) {
226 ctx->rings.gfx.flush(ctx, flags);
227 if (usage & PIPE_TRANSFER_DONTBLOCK) {
/* Same for the DMA CS when the DMA ring exists. */
231 if (ctx->rings.dma.cs) {
232 if (ctx->ws->cs_is_buffer_referenced(ctx->rings.dma.cs, resource->cs_buf, rusage) && ctx->rings.dma.cs->cdw) {
233 ctx->rings.dma.flush(ctx, flags);
234 if (usage & PIPE_TRANSFER_DONTBLOCK) {
/* Non-blocking path: if the GPU is still using the buffer, give up. */
240 if (usage & PIPE_TRANSFER_DONTBLOCK) {
241 if (ctx->ws->buffer_is_busy(resource->buf, rusage)) {
246 /* Try to avoid busy-waiting in radeon_bo_wait. */
247 ctx->ws->cs_sync_flush(ctx->rings.gfx.cs);
248 if (ctx->rings.dma.cs) {
249 ctx->ws->cs_sync_flush(ctx->rings.dma.cs);
/* Block until the GPU is finished with the buffer. */
252 ctx->ws->buffer_wait(resource->buf, rusage);
254 /* at this point everything is synchronized */
255 return ctx->ws->buffer_map(resource->cs_buf, NULL, usage | PIPE_TRANSFER_UNSYNCHRONIZED);
258 static void r600_flush_from_winsys(void *ctx, unsigned flags)
260 struct r600_context *rctx = (struct r600_context *)ctx;
262 rctx->rings.gfx.flush(rctx, flags);
265 static void r600_flush_dma_from_winsys(void *ctx, unsigned flags)
267 struct r600_context *rctx = (struct r600_context *)ctx;
269 rctx->rings.dma.flush(rctx, flags);
/* pipe_context::destroy — tear down everything r600_create_context built:
 * ISA tables, dummy resources and shaders, custom blend/DSA states,
 * framebuffer refs, blitter, uploaders/suballocators, the transfer slab,
 * the start-CS command buffer, and finally both command streams.
 * NOTE(review): the trailing FREE()s / closing lines are missing from this
 * extract. */
272 static void r600_destroy_context(struct pipe_context *context)
274 struct r600_context *rctx = (struct r600_context *)context;
276 r600_isa_destroy(rctx->isa);
278 pipe_resource_reference((struct pipe_resource**)&rctx->dummy_cmask, NULL);
279 pipe_resource_reference((struct pipe_resource**)&rctx->dummy_fmask, NULL);
/* CSO objects are destroyed through the context's own delete callbacks. */
281 if (rctx->dummy_pixel_shader) {
282 rctx->context.delete_fs_state(&rctx->context, rctx->dummy_pixel_shader);
284 if (rctx->custom_dsa_flush) {
285 rctx->context.delete_depth_stencil_alpha_state(&rctx->context, rctx->custom_dsa_flush);
287 if (rctx->custom_blend_resolve) {
288 rctx->context.delete_blend_state(&rctx->context, rctx->custom_blend_resolve);
290 if (rctx->custom_blend_decompress) {
291 rctx->context.delete_blend_state(&rctx->context, rctx->custom_blend_decompress);
293 if (rctx->custom_blend_fmask_decompress) {
294 rctx->context.delete_blend_state(&rctx->context, rctx->custom_blend_fmask_decompress);
296 util_unreference_framebuffer_state(&rctx->framebuffer.state);
298 r600_context_fini(rctx);
301 util_blitter_destroy(rctx->blitter);
303 if (rctx->uploader) {
304 u_upload_destroy(rctx->uploader);
306 if (rctx->allocator_so_filled_size) {
307 u_suballocator_destroy(rctx->allocator_so_filled_size);
309 if (rctx->allocator_fetch_shader) {
310 u_suballocator_destroy(rctx->allocator_fetch_shader);
312 util_slab_destroy(&rctx->pool_transfers);
314 r600_release_command_buffer(&rctx->start_cs_cmd);
/* Destroy the command streams last; earlier teardown may still emit. */
316 if (rctx->rings.gfx.cs) {
317 rctx->ws->cs_destroy(rctx->rings.gfx.cs);
319 if (rctx->rings.dma.cs) {
320 rctx->ws->cs_destroy(rctx->rings.dma.cs);
/* pipe_screen::context_create — allocate and fully initialize an r600
 * rendering context: vtable setup, per-chip-class state functions and
 * custom blend/DSA objects, command streams for the gfx and (optionally)
 * DMA rings, upload managers/suballocators, the ISA info, the blitter,
 * and a dummy pixel shader bound as the initial FS.
 * NOTE(review): the error-checking/`goto fail` lines after most
 * allocations are missing from this extract — confirm against the full
 * file. */
327 static struct pipe_context *r600_create_context(struct pipe_screen *screen, void *priv)
329 struct r600_context *rctx = CALLOC_STRUCT(r600_context);
330 struct r600_screen* rscreen = (struct r600_screen *)screen;
/* Slab allocator for the frequently created/destroyed transfer objects. */
335 util_slab_create(&rctx->pool_transfers,
336 sizeof(struct r600_transfer), 64,
337 UTIL_SLAB_SINGLETHREADED);
339 rctx->context.screen = screen;
340 rctx->context.priv = priv;
341 rctx->context.destroy = r600_destroy_context;
342 rctx->context.flush = r600_flush_from_st;
344 /* Easy accessing of screen/winsys. */
345 rctx->screen = rscreen;
346 rctx->ws = rscreen->ws;
347 rctx->family = rscreen->family;
348 rctx->chip_class = rscreen->chip_class;
349 rctx->keep_tiling_flags = rscreen->info.drm_minor >= 12;
351 LIST_INITHEAD(&rctx->active_nontimer_queries);
352 LIST_INITHEAD(&rctx->dirty);
353 LIST_INITHEAD(&rctx->enable_list);
355 rctx->range = CALLOC(NUM_RANGES, sizeof(struct r600_range));
359 r600_init_blit_functions(rctx);
360 r600_init_query_functions(rctx);
361 r600_init_context_resource_functions(rctx);
362 r600_init_surface_functions(rctx);
/* Generic (software) video decoding entry points. */
365 rctx->context.create_video_decoder = vl_create_decoder;
366 rctx->context.create_video_buffer = vl_video_buffer_create;
368 r600_init_common_state_functions(rctx);
/* Per-generation initialization: r6xx/r7xx vs evergreen+. */
370 switch (rctx->chip_class) {
373 r600_init_state_functions(rctx);
374 r600_init_atom_start_cs(rctx);
375 if (r600_context_init(rctx))
377 rctx->custom_dsa_flush = r600_create_db_flush_dsa(rctx);
378 rctx->custom_blend_resolve = rctx->chip_class == R700 ? r700_create_resolve_blend(rctx)
379 : r600_create_resolve_blend(rctx);
380 rctx->custom_blend_decompress = r600_create_decompress_blend(rctx);
/* These families lack a vertex cache. */
381 rctx->has_vertex_cache = !(rctx->family == CHIP_RV610 ||
382 rctx->family == CHIP_RV620 ||
383 rctx->family == CHIP_RS780 ||
384 rctx->family == CHIP_RS880 ||
385 rctx->family == CHIP_RV710);
389 evergreen_init_state_functions(rctx);
390 evergreen_init_atom_start_cs(rctx);
391 evergreen_init_atom_start_compute_cs(rctx);
392 if (evergreen_context_init(rctx))
394 rctx->custom_dsa_flush = evergreen_create_db_flush_dsa(rctx);
395 rctx->custom_blend_resolve = evergreen_create_resolve_blend(rctx);
396 rctx->custom_blend_decompress = evergreen_create_decompress_blend(rctx);
397 rctx->custom_blend_fmask_decompress = evergreen_create_fmask_decompress_blend(rctx);
398 rctx->has_vertex_cache = !(rctx->family == CHIP_CEDAR ||
399 rctx->family == CHIP_PALM ||
400 rctx->family == CHIP_SUMO ||
401 rctx->family == CHIP_SUMO2 ||
402 rctx->family == CHIP_CAICOS ||
403 rctx->family == CHIP_CAYMAN ||
404 rctx->family == CHIP_ARUBA);
407 R600_ERR("Unsupported chip class %d.\n", rctx->chip_class);
/* Gfx ring is mandatory; DMA ring only when the kernel exposes it. */
411 rctx->rings.gfx.cs = rctx->ws->cs_create(rctx->ws, RING_GFX);
412 rctx->rings.gfx.flush = r600_flush_gfx_ring;
413 rctx->ws->cs_set_flush_callback(rctx->rings.gfx.cs, r600_flush_from_winsys, rctx);
414 rctx->rings.gfx.flushing = false;
416 rctx->rings.dma.cs = NULL;
417 if (rscreen->info.r600_has_dma) {
418 rctx->rings.dma.cs = rctx->ws->cs_create(rctx->ws, RING_DMA);
419 rctx->rings.dma.flush = r600_flush_dma_ring;
420 rctx->ws->cs_set_flush_callback(rctx->rings.dma.cs, r600_flush_dma_from_winsys, rctx);
421 rctx->rings.dma.flushing = false;
424 rctx->uploader = u_upload_create(&rctx->context, 1024 * 1024, 256,
425 PIPE_BIND_INDEX_BUFFER |
426 PIPE_BIND_CONSTANT_BUFFER);
430 rctx->allocator_fetch_shader = u_suballocator_create(&rctx->context, 64 * 1024, 256,
431 0, PIPE_USAGE_STATIC, FALSE);
432 if (!rctx->allocator_fetch_shader)
435 rctx->allocator_so_filled_size = u_suballocator_create(&rctx->context, 4096, 4,
436 0, PIPE_USAGE_STATIC, TRUE);
437 if (!rctx->allocator_so_filled_size)
440 rctx->isa = calloc(1, sizeof(struct r600_isa));
441 if (!rctx->isa || r600_isa_init(rctx, rctx->isa))
444 rctx->blitter = util_blitter_create(&rctx->context);
445 if (rctx->blitter == NULL)
447 util_blitter_set_texture_multisample(rctx->blitter, rscreen->has_msaa);
448 rctx->blitter->draw_rectangle = r600_draw_rectangle;
450 r600_begin_new_cs(rctx);
451 r600_get_backend_mask(rctx); /* this emits commands and must be last */
/* Bind a passthrough FS so the context starts with a valid shader. */
453 rctx->dummy_pixel_shader =
454 util_make_fragment_cloneinput_shader(&rctx->context, 0,
455 TGSI_SEMANTIC_GENERIC,
456 TGSI_INTERPOLATE_CONSTANT);
457 rctx->context.bind_fs_state(&rctx->context, rctx->dummy_pixel_shader);
459 return &rctx->context;
/* Failure path: destroy whatever was initialized. */
462 r600_destroy_context(&rctx->context);
/* pipe_screen::get_vendor — returns the driver vendor string.
 * NOTE(review): the body is missing from this extract. */
469 static const char* r600_get_vendor(struct pipe_screen* pscreen)
/* Map a radeon_family enum value to a human-readable marketing name.
 * NOTE(review): the `switch (family) {` line and closing braces are
 * missing from this extract. */
474 static const char *r600_get_family_name(enum radeon_family family)
477 case CHIP_R600: return "AMD R600";
478 case CHIP_RV610: return "AMD RV610";
479 case CHIP_RV630: return "AMD RV630";
480 case CHIP_RV670: return "AMD RV670";
481 case CHIP_RV620: return "AMD RV620";
482 case CHIP_RV635: return "AMD RV635";
483 case CHIP_RS780: return "AMD RS780";
484 case CHIP_RS880: return "AMD RS880";
485 case CHIP_RV770: return "AMD RV770";
486 case CHIP_RV730: return "AMD RV730";
487 case CHIP_RV710: return "AMD RV710";
488 case CHIP_RV740: return "AMD RV740";
489 case CHIP_CEDAR: return "AMD CEDAR";
490 case CHIP_REDWOOD: return "AMD REDWOOD";
491 case CHIP_JUNIPER: return "AMD JUNIPER";
492 case CHIP_CYPRESS: return "AMD CYPRESS";
493 case CHIP_HEMLOCK: return "AMD HEMLOCK";
494 case CHIP_PALM: return "AMD PALM";
495 case CHIP_SUMO: return "AMD SUMO";
496 case CHIP_SUMO2: return "AMD SUMO2";
497 case CHIP_BARTS: return "AMD BARTS";
498 case CHIP_TURKS: return "AMD TURKS";
499 case CHIP_CAICOS: return "AMD CAICOS";
500 case CHIP_CAYMAN: return "AMD CAYMAN";
501 case CHIP_ARUBA: return "AMD ARUBA";
502 default: return "AMD unknown";
506 static const char* r600_get_name(struct pipe_screen* pscreen)
508 struct r600_screen *rscreen = (struct r600_screen *)pscreen;
510 return r600_get_family_name(rscreen->family);
/* pipe_screen::get_param — report integer capabilities of the device,
 * several of which depend on the chip family or kernel (drm_minor)
 * version.
 * NOTE(review): many `return <value>;` lines for grouped cases are missing
 * from this extract — confirm against the full file. */
513 static int r600_get_param(struct pipe_screen* pscreen, enum pipe_cap param)
515 struct r600_screen *rscreen = (struct r600_screen *)pscreen;
516 enum radeon_family family = rscreen->family;
519 /* Supported features (boolean caps). */
520 case PIPE_CAP_NPOT_TEXTURES:
521 case PIPE_CAP_TWO_SIDED_STENCIL:
522 case PIPE_CAP_ANISOTROPIC_FILTER:
523 case PIPE_CAP_POINT_SPRITE:
524 case PIPE_CAP_OCCLUSION_QUERY:
525 case PIPE_CAP_TEXTURE_SHADOW_MAP:
526 case PIPE_CAP_TEXTURE_MIRROR_CLAMP:
527 case PIPE_CAP_BLEND_EQUATION_SEPARATE:
528 case PIPE_CAP_TEXTURE_SWIZZLE:
529 case PIPE_CAP_DEPTH_CLIP_DISABLE:
530 case PIPE_CAP_SHADER_STENCIL_EXPORT:
531 case PIPE_CAP_VERTEX_ELEMENT_INSTANCE_DIVISOR:
532 case PIPE_CAP_MIXED_COLORBUFFER_FORMATS:
533 case PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT:
534 case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER:
536 case PIPE_CAP_SEAMLESS_CUBE_MAP:
537 case PIPE_CAP_PRIMITIVE_RESTART:
538 case PIPE_CAP_CONDITIONAL_RENDER:
539 case PIPE_CAP_TEXTURE_BARRIER:
540 case PIPE_CAP_VERTEX_COLOR_UNCLAMPED:
541 case PIPE_CAP_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION:
542 case PIPE_CAP_TGSI_INSTANCEID:
543 case PIPE_CAP_VERTEX_BUFFER_OFFSET_4BYTE_ALIGNED_ONLY:
544 case PIPE_CAP_VERTEX_BUFFER_STRIDE_4BYTE_ALIGNED_ONLY:
545 case PIPE_CAP_VERTEX_ELEMENT_SRC_OFFSET_4BYTE_ALIGNED_ONLY:
546 case PIPE_CAP_USER_INDEX_BUFFERS:
547 case PIPE_CAP_USER_CONSTANT_BUFFERS:
548 case PIPE_CAP_COMPUTE:
549 case PIPE_CAP_START_INSTANCE:
550 case PIPE_CAP_MAX_DUAL_SOURCE_RENDER_TARGETS:
551 case PIPE_CAP_TEXTURE_BUFFER_OBJECTS:
554 case PIPE_CAP_MIN_MAP_BUFFER_ALIGNMENT:
555 return R600_MAP_BUFFER_ALIGNMENT;
557 case PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT:
560 case PIPE_CAP_GLSL_FEATURE_LEVEL:
/* MSAA textures supported unless the screen can only sample slot zero. */
563 case PIPE_CAP_TEXTURE_MULTISAMPLE:
564 return rscreen->msaa_texture_support != MSAA_TEXTURE_SAMPLE_ZERO;
566 /* Supported except the original R600. */
567 case PIPE_CAP_INDEP_BLEND_ENABLE:
568 case PIPE_CAP_INDEP_BLEND_FUNC:
569 /* R600 doesn't support per-MRT blends */
570 return family == CHIP_R600 ? 0 : 1;
572 /* Supported on Evergreen. */
573 case PIPE_CAP_SEAMLESS_CUBE_MAP_PER_TEXTURE:
574 case PIPE_CAP_CUBE_MAP_ARRAY:
575 return family >= CHIP_CEDAR ? 1 : 0;
577 /* Unsupported features. */
578 case PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT:
579 case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER:
580 case PIPE_CAP_SCALED_RESOLVE:
581 case PIPE_CAP_TGSI_CAN_COMPACT_VARYINGS:
582 case PIPE_CAP_TGSI_CAN_COMPACT_CONSTANTS:
583 case PIPE_CAP_FRAGMENT_COLOR_CLAMPED:
584 case PIPE_CAP_VERTEX_COLOR_CLAMPED:
585 case PIPE_CAP_USER_VERTEX_BUFFERS:
586 case PIPE_CAP_TEXTURE_BUFFER_OFFSET_ALIGNMENT:
/* Streamout caps depend on kernel support detected at screen creation. */
590 case PIPE_CAP_MAX_STREAM_OUTPUT_BUFFERS:
591 return rscreen->has_streamout ? 4 : 0;
592 case PIPE_CAP_STREAM_OUTPUT_PAUSE_RESUME:
593 return rscreen->has_streamout ? 1 : 0;
594 case PIPE_CAP_MAX_STREAM_OUTPUT_SEPARATE_COMPONENTS:
595 case PIPE_CAP_MAX_STREAM_OUTPUT_INTERLEAVED_COMPONENTS:
/* Texture limits are larger on Evergreen and later. */
599 case PIPE_CAP_MAX_TEXTURE_2D_LEVELS:
600 case PIPE_CAP_MAX_TEXTURE_3D_LEVELS:
601 case PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS:
602 if (family >= CHIP_CEDAR)
606 case PIPE_CAP_MAX_TEXTURE_ARRAY_LAYERS:
607 return rscreen->info.drm_minor >= 9 ?
608 (family >= CHIP_CEDAR ? 16384 : 8192) : 0;
609 case PIPE_CAP_MAX_COMBINED_SAMPLERS:
612 /* Render targets. */
613 case PIPE_CAP_MAX_RENDER_TARGETS:
614 /* XXX some r6xx are buggy and can only do 4 */
617 /* Timer queries, present when the clock frequency is non zero. */
618 case PIPE_CAP_QUERY_TIME_ELAPSED:
619 return rscreen->info.r600_clock_crystal_freq != 0;
620 case PIPE_CAP_QUERY_TIMESTAMP:
621 return rscreen->info.drm_minor >= 20 &&
622 rscreen->info.r600_clock_crystal_freq != 0;
624 case PIPE_CAP_MIN_TEXEL_OFFSET:
627 case PIPE_CAP_MAX_TEXEL_OFFSET:
/* pipe_screen::get_paramf — report float capabilities; line/point width
 * limits differ between pre-Evergreen and Evergreen+ parts.
 * NOTE(review): the `switch (param) {` line and the numeric `return`
 * values are missing from this extract. */
633 static float r600_get_paramf(struct pipe_screen* pscreen,
634 enum pipe_capf param)
636 struct r600_screen *rscreen = (struct r600_screen *)pscreen;
637 enum radeon_family family = rscreen->family;
640 case PIPE_CAPF_MAX_LINE_WIDTH:
641 case PIPE_CAPF_MAX_LINE_WIDTH_AA:
642 case PIPE_CAPF_MAX_POINT_WIDTH:
643 case PIPE_CAPF_MAX_POINT_WIDTH_AA:
644 if (family >= CHIP_CEDAR)
648 case PIPE_CAPF_MAX_TEXTURE_ANISOTROPY:
650 case PIPE_CAPF_MAX_TEXTURE_LOD_BIAS:
652 case PIPE_CAPF_GUARD_BAND_LEFT:
653 case PIPE_CAPF_GUARD_BAND_TOP:
654 case PIPE_CAPF_GUARD_BAND_RIGHT:
655 case PIPE_CAPF_GUARD_BAND_BOTTOM:
/* pipe_screen::get_shader_param — report per-shader-stage limits.
 * Vertex, fragment and compute stages are supported; geometry is not yet.
 * NOTE(review): several `return`/`break` lines are missing from this
 * extract. */
661 static int r600_get_shader_param(struct pipe_screen* pscreen, unsigned shader, enum pipe_shader_cap param)
665 case PIPE_SHADER_FRAGMENT:
666 case PIPE_SHADER_VERTEX:
667 case PIPE_SHADER_COMPUTE:
669 case PIPE_SHADER_GEOMETRY:
670 /* XXX: support and enable geometry programs */
673 /* XXX: support tessellation on Evergreen */
678 case PIPE_SHADER_CAP_MAX_INSTRUCTIONS:
679 case PIPE_SHADER_CAP_MAX_ALU_INSTRUCTIONS:
680 case PIPE_SHADER_CAP_MAX_TEX_INSTRUCTIONS:
681 case PIPE_SHADER_CAP_MAX_TEX_INDIRECTIONS:
683 case PIPE_SHADER_CAP_MAX_CONTROL_FLOW_DEPTH:
685 case PIPE_SHADER_CAP_MAX_INPUTS:
687 case PIPE_SHADER_CAP_MAX_TEMPS:
688 return 256; /* Max native temporaries. */
689 case PIPE_SHADER_CAP_MAX_ADDRS:
690 /* XXX Isn't this equal to TEMPS? */
691 return 1; /* Max native address registers */
692 case PIPE_SHADER_CAP_MAX_CONSTS:
693 return R600_MAX_CONST_BUFFER_SIZE;
694 case PIPE_SHADER_CAP_MAX_CONST_BUFFERS:
695 return R600_MAX_USER_CONST_BUFFERS;
696 case PIPE_SHADER_CAP_MAX_PREDS:
697 return 0; /* nothing uses this */
698 case PIPE_SHADER_CAP_TGSI_CONT_SUPPORTED:
700 case PIPE_SHADER_CAP_TGSI_SQRT_SUPPORTED:
702 case PIPE_SHADER_CAP_INDIRECT_INPUT_ADDR:
703 case PIPE_SHADER_CAP_INDIRECT_OUTPUT_ADDR:
704 case PIPE_SHADER_CAP_INDIRECT_TEMP_ADDR:
705 case PIPE_SHADER_CAP_INDIRECT_CONST_ADDR:
707 case PIPE_SHADER_CAP_SUBROUTINES:
709 case PIPE_SHADER_CAP_INTEGERS:
711 case PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS:
/* Compute shaders come in as LLVM IR; everything else as TGSI. */
713 case PIPE_SHADER_CAP_PREFERRED_IR:
714 if (shader == PIPE_SHADER_COMPUTE) {
715 return PIPE_SHADER_IR_LLVM;
717 return PIPE_SHADER_IR_TGSI;
/* pipe_screen::get_video_param — video decoding capabilities, delegated to
 * the generic vl helpers (this driver uses shader-based decoding).
 * NOTE(review): the `switch (param) {` line and several boolean `return`
 * lines are missing from this extract. */
723 static int r600_get_video_param(struct pipe_screen *screen,
724 enum pipe_video_profile profile,
725 enum pipe_video_cap param)
728 case PIPE_VIDEO_CAP_SUPPORTED:
729 return vl_profile_supported(screen, profile);
730 case PIPE_VIDEO_CAP_NPOT_TEXTURES:
732 case PIPE_VIDEO_CAP_MAX_WIDTH:
733 case PIPE_VIDEO_CAP_MAX_HEIGHT:
734 return vl_video_buffer_max_size(screen);
735 case PIPE_VIDEO_CAP_PREFERED_FORMAT:
736 return PIPE_FORMAT_NV12;
737 case PIPE_VIDEO_CAP_PREFERS_INTERLACED:
739 case PIPE_VIDEO_CAP_SUPPORTS_INTERLACED:
741 case PIPE_VIDEO_CAP_SUPPORTS_PROGRESSIVE:
/* pipe_screen::get_compute_param — fill `ret` (when non-NULL) with the
 * requested compute capability and return the number of bytes written.
 * Callers first invoke it with ret == NULL to learn the required size.
 * NOTE(review): the `if (ret)` guards and some assignments are missing
 * from this extract. */
748 static int r600_get_compute_param(struct pipe_screen *screen,
749 enum pipe_compute_cap param,
752 //TODO: select these params by asic
754 case PIPE_COMPUTE_CAP_IR_TARGET:
756 strcpy(ret, "r600--");
/* "r600--" plus the NUL terminator = 7 bytes. */
758 return 7 * sizeof(char);
760 case PIPE_COMPUTE_CAP_GRID_DIMENSION:
762 uint64_t * grid_dimension = ret;
763 grid_dimension[0] = 3;
765 return 1 * sizeof(uint64_t);
767 case PIPE_COMPUTE_CAP_MAX_GRID_SIZE:
769 uint64_t * grid_size = ret;
770 grid_size[0] = 65535;
771 grid_size[1] = 65535;
774 return 3 * sizeof(uint64_t) ;
776 case PIPE_COMPUTE_CAP_MAX_BLOCK_SIZE:
778 uint64_t * block_size = ret;
783 return 3 * sizeof(uint64_t);
785 case PIPE_COMPUTE_CAP_MAX_THREADS_PER_BLOCK:
787 uint64_t * max_threads_per_block = ret;
788 *max_threads_per_block = 256;
790 return sizeof(uint64_t);
792 case PIPE_COMPUTE_CAP_MAX_GLOBAL_SIZE:
794 uint64_t * max_global_size = ret;
795 /* XXX: This is what the proprietary driver reports, we
796 * may want to use a different value. */
797 *max_global_size = 201326592;
799 return sizeof(uint64_t);
801 case PIPE_COMPUTE_CAP_MAX_INPUT_SIZE:
803 uint64_t * max_input_size = ret;
804 *max_input_size = 1024;
806 return sizeof(uint64_t);
808 case PIPE_COMPUTE_CAP_MAX_LOCAL_SIZE:
810 uint64_t * max_local_size = ret;
811 /* XXX: This is what the proprietary driver reports, we
812 * may want to use a different value. */
813 *max_local_size = 32768;
815 return sizeof(uint64_t);
/* Derived from MAX_GLOBAL_SIZE via a recursive query. */
817 case PIPE_COMPUTE_CAP_MAX_MEM_ALLOC_SIZE:
819 uint64_t max_global_size;
820 uint64_t * max_mem_alloc_size = ret;
821 r600_get_compute_param(screen,
822 PIPE_COMPUTE_CAP_MAX_GLOBAL_SIZE,
824 /* OpenCL requres this value be at least
825 * max(MAX_GLOBAL_SIZE / 4, 128 * 1024 *1024)
826 * I'm really not sure what value to report here, but
827 * MAX_GLOBAL_SIZE / 4 seems resonable.
829 *max_mem_alloc_size = max_global_size / 4;
831 return sizeof(uint64_t);
834 fprintf(stderr, "unknown PIPE_COMPUTE_CAP %d\n", param);
/* pipe_screen::destroy — free the compute memory pool, the fence-block
 * allocations and shared fence buffer, the trace buffer, the fence mutex,
 * and finally the winsys itself.
 * NOTE(review): FREE() of the block entries and of rscreen appear to be
 * missing from this extract. */
839 static void r600_destroy_screen(struct pipe_screen* pscreen)
841 struct r600_screen *rscreen = (struct r600_screen *)pscreen;
846 if (rscreen->global_pool) {
847 compute_memory_pool_delete(rscreen->global_pool);
850 if (rscreen->fences.bo) {
851 struct r600_fence_block *entry, *tmp;
/* SAFE iteration: entries are unlinked (and presumably freed) in-loop. */
853 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, &rscreen->fences.blocks, head) {
854 LIST_DEL(&entry->head);
/* Unmap before dropping the last reference to the fence buffer. */
858 rscreen->ws->buffer_unmap(rscreen->fences.bo->cs_buf);
859 pipe_resource_reference((struct pipe_resource**)&rscreen->fences.bo, NULL);
862 if (rscreen->trace_bo) {
863 rscreen->ws->buffer_unmap(rscreen->trace_bo->cs_buf);
864 pipe_resource_reference((struct pipe_resource**)&rscreen->trace_bo, NULL);
867 pipe_mutex_destroy(rscreen->fences.mutex);
869 rscreen->ws->destroy(rscreen->ws);
/* pipe_screen::fence_reference — retarget *ptr at `fence`, adjusting
 * refcounts.  When the old fence's count drops to zero it is not freed:
 * its sleep BO is released and the fence struct is returned to the
 * screen's reuse pool (under the fence mutex).
 * NOTE(review): the final `*oldf = newf;` assignment is missing from this
 * extract. */
873 static void r600_fence_reference(struct pipe_screen *pscreen,
874 struct pipe_fence_handle **ptr,
875 struct pipe_fence_handle *fence)
877 struct r600_fence **oldf = (struct r600_fence**)ptr;
878 struct r600_fence *newf = (struct r600_fence*)fence;
/* pipe_reference returns true when the old object must be destroyed. */
880 if (pipe_reference(&(*oldf)->reference, &newf->reference)) {
881 struct r600_screen *rscreen = (struct r600_screen *)pscreen;
882 pipe_mutex_lock(rscreen->fences.mutex);
883 pipe_resource_reference((struct pipe_resource**)&(*oldf)->sleep_bo, NULL);
884 LIST_ADDTAIL(&(*oldf)->head, &rscreen->fences.pool);
885 pipe_mutex_unlock(rscreen->fences.mutex);
891 static boolean r600_fence_signalled(struct pipe_screen *pscreen,
892 struct pipe_fence_handle *fence)
894 struct r600_screen *rscreen = (struct r600_screen *)pscreen;
895 struct r600_fence *rfence = (struct r600_fence*)fence;
897 return rscreen->fences.data[rfence->index] != 0;
/* pipe_screen::fence_finish — wait until the fence's slot becomes non-zero
 * or the timeout expires.  With an infinite timeout, sleep on the dummy
 * sleep BO instead of spinning; with a finite timeout, poll and check the
 * elapsed time.
 * NOTE(review): the timeout-to-microseconds conversion and the polling
 * backoff lines are missing from this extract. */
900 static boolean r600_fence_finish(struct pipe_screen *pscreen,
901 struct pipe_fence_handle *fence,
904 struct r600_screen *rscreen = (struct r600_screen *)pscreen;
905 struct r600_fence *rfence = (struct r600_fence*)fence;
906 int64_t start_time = 0;
909 if (timeout != PIPE_TIMEOUT_INFINITE) {
910 start_time = os_time_get();
912 /* Convert to microseconds. */
916 while (rscreen->fences.data[rfence->index] == 0) {
917 /* Special-case infinite timeout - wait for the dummy BO to become idle */
918 if (timeout == PIPE_TIMEOUT_INFINITE) {
919 rscreen->ws->buffer_wait(rfence->sleep_bo->buf, RADEON_USAGE_READWRITE);
923 /* The dummy BO will be busy until the CS including the fence has completed, or
924 * the GPU is reset. Don't bother continuing to spin when the BO is idle. */
925 if (!rscreen->ws->buffer_is_busy(rfence->sleep_bo->buf, RADEON_USAGE_READWRITE))
/* Finite timeout: stop once the elapsed time exceeds the budget. */
935 if (timeout != PIPE_TIMEOUT_INFINITE &&
936 os_time_get() - start_time >= timeout) {
/* Report whether the fence actually signalled (vs. timed out). */
941 return rscreen->fences.data[rfence->index] != 0;
/* Decode the r6xx/r7xx R600_TILING_CONFIG register value into the
 * screen's tiling_info (channels, banks, group bytes).
 * NOTE(review): the individual `case N:`/`break;` labels and the
 * error/success `return` lines are missing from this extract. */
944 static int r600_interpret_tiling(struct r600_screen *rscreen, uint32_t tiling_config)
/* Bits [3:1]: number of memory channels. */
946 switch ((tiling_config & 0xe) >> 1) {
948 rscreen->tiling_info.num_channels = 1;
951 rscreen->tiling_info.num_channels = 2;
954 rscreen->tiling_info.num_channels = 4;
957 rscreen->tiling_info.num_channels = 8;
/* Bits [5:4]: number of banks. */
963 switch ((tiling_config & 0x30) >> 4) {
965 rscreen->tiling_info.num_banks = 4;
968 rscreen->tiling_info.num_banks = 8;
/* Bits [7:6]: tile group size in bytes. */
974 switch ((tiling_config & 0xc0) >> 6) {
976 rscreen->tiling_info.group_bytes = 256;
979 rscreen->tiling_info.group_bytes = 512;
/* Decode the Evergreen-layout TILING_CONFIG register value into the
 * screen's tiling_info; field positions differ from the r6xx/r7xx layout.
 * NOTE(review): the `case N:`/`break;` labels and `return` lines are
 * missing from this extract. */
987 static int evergreen_interpret_tiling(struct r600_screen *rscreen, uint32_t tiling_config)
/* Bits [3:0]: number of memory channels. */
989 switch (tiling_config & 0xf) {
991 rscreen->tiling_info.num_channels = 1;
994 rscreen->tiling_info.num_channels = 2;
997 rscreen->tiling_info.num_channels = 4;
1000 rscreen->tiling_info.num_channels = 8;
/* Bits [7:4]: number of banks (up to 16 on this generation). */
1006 switch ((tiling_config & 0xf0) >> 4) {
1008 rscreen->tiling_info.num_banks = 4;
1011 rscreen->tiling_info.num_banks = 8;
1014 rscreen->tiling_info.num_banks = 16;
/* Bits [11:8]: tile group size in bytes. */
1020 switch ((tiling_config & 0xf00) >> 8) {
1022 rscreen->tiling_info.group_bytes = 256;
1025 rscreen->tiling_info.group_bytes = 512;
/* Initialize the screen's tiling parameters: set per-generation defaults,
 * then decode the kernel-reported tiling_config with the decoder matching
 * the chip generation. */
1033 static int r600_init_tiling(struct r600_screen *rscreen)
1035 uint32_t tiling_config = rscreen->info.r600_tiling_config;
1037 /* set default group bytes, overridden by tiling info ioctl */
1038 if (rscreen->chip_class <= R700) {
1039 rscreen->tiling_info.group_bytes = 256;
1041 rscreen->tiling_info.group_bytes = 512;
/* NOTE(review): a `if (!tiling_config) return 0;` style early-out appears
 * to be missing from this extract — confirm against the full file. */
1047 if (rscreen->chip_class <= R700) {
1048 return r600_interpret_tiling(rscreen, tiling_config);
1050 return evergreen_interpret_tiling(rscreen, tiling_config);
1054 static uint64_t r600_get_timestamp(struct pipe_screen *screen)
1056 struct r600_screen *rscreen = (struct r600_screen*)screen;
1058 return 1000000 * rscreen->ws->query_timestamp(rscreen->ws) /
1059 rscreen->info.r600_clock_crystal_freq;
/* Create the r600 pipe_screen: query device info from the winsys, derive
 * streamout/MSAA/CP-DMA support from the chip class and kernel (drm_minor)
 * version, initialize tiling, wire up the pipe_screen vtable, and set up
 * fence bookkeeping, hyperz policy, the compute memory pool and the
 * optional trace buffer.
 * NOTE(review): several error-path and `break;`/`case` lines are missing
 * from this extract; the closing brace falls outside the visible range. */
1062 struct pipe_screen *r600_screen_create(struct radeon_winsys *ws)
1064 struct r600_screen *rscreen = CALLOC_STRUCT(r600_screen);
1066 if (rscreen == NULL) {
1071 ws->query_info(ws, &rscreen->info);
1073 rscreen->family = rscreen->info.family;
1074 rscreen->chip_class = rscreen->info.chip_class;
1075 if (rscreen->family == CHIP_UNKNOWN) {
1076 fprintf(stderr, "r600: Unknown chipset 0x%04X\n", rscreen->info.pci_id);
1081 /* Figure out streamout kernel support. */
1082 switch (rscreen->chip_class) {
1084 if (rscreen->family < CHIP_RS780) {
1085 rscreen->has_streamout = rscreen->info.drm_minor >= 14;
1087 rscreen->has_streamout = rscreen->info.drm_minor >= 23;
1091 rscreen->has_streamout = rscreen->info.drm_minor >= 17;
1095 rscreen->has_streamout = rscreen->info.drm_minor >= 14;
1098 rscreen->has_streamout = FALSE;
/* MSAA support also depends on generation and kernel version. */
1103 switch (rscreen->chip_class) {
1106 rscreen->has_msaa = rscreen->info.drm_minor >= 22;
1107 rscreen->msaa_texture_support = MSAA_TEXTURE_DECOMPRESSED;
1110 rscreen->has_msaa = rscreen->info.drm_minor >= 19;
1111 rscreen->msaa_texture_support =
1112 rscreen->info.drm_minor >= 24 ? MSAA_TEXTURE_COMPRESSED :
1113 MSAA_TEXTURE_DECOMPRESSED;
1116 rscreen->has_msaa = rscreen->info.drm_minor >= 19;
1117 /* We should be able to read compressed MSAA textures, but it doesn't work. */
1118 rscreen->msaa_texture_support = MSAA_TEXTURE_SAMPLE_ZERO;
1121 rscreen->has_msaa = FALSE;
1122 rscreen->msaa_texture_support = 0;
1126 rscreen->has_cp_dma = rscreen->info.drm_minor >= 27;
1128 if (r600_init_tiling(rscreen)) {
/* pipe_screen vtable. */
1133 rscreen->screen.destroy = r600_destroy_screen;
1134 rscreen->screen.get_name = r600_get_name;
1135 rscreen->screen.get_vendor = r600_get_vendor;
1136 rscreen->screen.get_param = r600_get_param;
1137 rscreen->screen.get_shader_param = r600_get_shader_param;
1138 rscreen->screen.get_paramf = r600_get_paramf;
1139 rscreen->screen.get_video_param = r600_get_video_param;
1140 rscreen->screen.get_compute_param = r600_get_compute_param;
1141 rscreen->screen.get_timestamp = r600_get_timestamp;
/* Format support and DMA blit differ per generation. */
1143 if (rscreen->chip_class >= EVERGREEN) {
1144 rscreen->screen.is_format_supported = evergreen_is_format_supported;
1145 rscreen->dma_blit = &evergreen_dma_blit;
1147 rscreen->screen.is_format_supported = r600_is_format_supported;
1148 rscreen->dma_blit = &r600_dma_blit;
1150 rscreen->screen.is_video_format_supported = vl_video_buffer_is_format_supported;
1151 rscreen->screen.context_create = r600_create_context;
1152 rscreen->screen.fence_reference = r600_fence_reference;
1153 rscreen->screen.fence_signalled = r600_fence_signalled;
1154 rscreen->screen.fence_finish = r600_fence_finish;
1155 r600_init_screen_resource_functions(&rscreen->screen);
1157 util_format_s3tc_init();
/* Fence bookkeeping starts empty; the shared fence BO is created lazily
 * in r600_create_fence. */
1159 rscreen->fences.bo = NULL;
1160 rscreen->fences.data = NULL;
1161 rscreen->fences.next_index = 0;
1162 LIST_INITHEAD(&rscreen->fences.pool);
1163 LIST_INITHEAD(&rscreen->fences.blocks);
1164 pipe_mutex_init(rscreen->fences.mutex);
1166 /* Hyperz is very lockup prone any code that touch related part should be
1167 * carefully tested especialy on r6xx/r7xx Development show that some piglit
1168 * case were triggering lockup quickly such as :
1169 * piglit/bin/depthstencil-render-miplevels 1024 d=s=z24_s8
1171 rscreen->use_hyperz = debug_get_bool_option("R600_HYPERZ", TRUE);
1172 rscreen->use_hyperz = rscreen->info.drm_minor >= 26 ? rscreen->use_hyperz : FALSE;
1174 rscreen->global_pool = compute_memory_pool_new(rscreen);
/* Optional CS tracing buffer, only with a new enough kernel. */
1177 rscreen->cs_count = 0;
1178 if (rscreen->info.drm_minor >= 28) {
1179 rscreen->trace_bo = (struct r600_resource*)pipe_buffer_create(&rscreen->screen,
1183 if (rscreen->trace_bo) {
1184 rscreen->trace_ptr = rscreen->ws->buffer_map(rscreen->trace_bo->cs_buf, NULL,
1185 PIPE_TRANSFER_UNSYNCHRONIZED);
1190 return &rscreen->screen;