1 /**************************************************************************
3 * Copyright 2003 VMware, Inc.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
31 #include "main/glheader.h"
32 #include "main/context.h"
33 #include "main/framebuffer.h"
34 #include "main/renderbuffer.h"
35 #include "main/texobj.h"
36 #include "main/hash.h"
37 #include "main/fbobject.h"
38 #include "main/version.h"
39 #include "swrast/s_renderbuffer.h"
/* driconf XML option table for the i915 driver: performance, quality and
 * debug sections exposed to users via driconf.  NOTE(review): this listing
 * has elided lines (section/option terminators are not shown). */
44 static const __DRIconfigOptionsExtension i915_config_options = {
45    .base = { __DRI_CONFIG_OPTIONS, 1 },
49    DRI_CONF_SECTION_PERFORMANCE
50       DRI_CONF_VBLANK_MODE(DRI_CONF_VBLANK_ALWAYS_SYNC)
51       /* Options correspond to DRI_CONF_BO_REUSE_DISABLED,
52        * DRI_CONF_BO_REUSE_ALL
54       DRI_CONF_OPT_BEGIN_V(bo_reuse, enum, 1, "0:1")
55 	 DRI_CONF_DESC_BEGIN(en, "Buffer object reuse")
56 	    DRI_CONF_ENUM(0, "Disable buffer object reuse")
57 	    DRI_CONF_ENUM(1, "Enable reuse of all sizes of buffer objects")
/* early_z is 945-only and documented as unstable; defaults off. */
61       DRI_CONF_OPT_BEGIN_B(early_z, "false")
62 	 DRI_CONF_DESC(en, "Enable early Z in classic mode (unstable, 945-only).")
66    DRI_CONF_SECTION_QUALITY
67       DRI_CONF_FORCE_S3TC_ENABLE("false")
69    DRI_CONF_SECTION_DEBUG
70      DRI_CONF_NO_RAST("false")
71      DRI_CONF_ALWAYS_FLUSH_BATCH("false")
72      DRI_CONF_ALWAYS_FLUSH_CACHE("false")
73      DRI_CONF_DISABLE_THROTTLING("false")
74      DRI_CONF_FORCE_GLSL_EXTENSIONS_WARN("false")
75      DRI_CONF_DISABLE_GLSL_LINE_CONTINUATIONS("false")
76      DRI_CONF_DISABLE_BLEND_FUNC_EXTENDED("false")
78      DRI_CONF_OPT_BEGIN_B(shader_precompile, "true")
79 	 DRI_CONF_DESC(en, "Perform code generation at shader link time.")
85 #include "intel_batchbuffer.h"
86 #include "intel_buffers.h"
87 #include "intel_bufmgr.h"
88 #include "intel_chipset.h"
89 #include "intel_fbo.h"
90 #include "intel_mipmap_tree.h"
91 #include "intel_screen.h"
92 #include "intel_tex.h"
93 #include "intel_regions.h"
98  * For debugging purposes, this returns a time in seconds.
/* NOTE(review): the function signature and timespec declaration for the
 * time helper are elided in this listing; only the body remains. */
105    clock_gettime(CLOCK_MONOTONIC, &tp);
107    return tp.tv_sec + tp.tv_nsec / 1000000000.0;
/* Dump each bound color draw buffer to a BMP via the libdrm AUB facility.
 * Skips buffers with no miptree.  NOTE(review): the switch's remaining
 * format cases and the call's full argument list are elided here. */
111 aub_dump_bmp(struct gl_context *ctx)
113    struct gl_framebuffer *fb = ctx->DrawBuffer;
115    for (int i = 0; i < fb->_NumColorDrawBuffers; i++) {
116       struct intel_renderbuffer *irb =
117 	 intel_renderbuffer(fb->_ColorDrawBuffers[i]);
119       if (irb && irb->mt) {
120 	 enum aub_dump_bmp_format format;
122 	 switch (irb->Base.Base.Format) {
123 	 case MESA_FORMAT_B8G8R8A8_UNORM:
124 	 case MESA_FORMAT_B8G8R8X8_UNORM:
125 	    format = AUB_DUMP_BMP_FORMAT_ARGB_8888;
/* pitch must be a whole number of pixels for the BMP dump */
131 	 assert(irb->mt->region->pitch % irb->mt->region->cpp == 0);
132 	 drm_intel_gem_bo_aub_dump_bmp(irb->mt->region->bo,
135 				       irb->Base.Base.Width,
136 				       irb->Base.Base.Height,
138 				       irb->mt->region->pitch,
/* __DRItexBufferExtension: GLX_EXT_texture_from_pixmap support hooks. */
144 static const __DRItexBufferExtension intelTexBufferExtension = {
145    .base = { __DRI_TEX_BUFFER, 3 },
147    .setTexBuffer = intelSetTexBuffer,
148    .setTexBuffer2 = intelSetTexBuffer2,
149    .releaseTexBuffer = NULL,
/* DRI2 flush entry point: finish queued vertices, mark the context for
 * throttling, flush any pending batchbuffer, and (under DEBUG_AUB) dump
 * the color buffers.  NOTE(review): body is partially elided here. */
153 intelDRI2Flush(__DRIdrawable *drawable)
155    GET_CURRENT_CONTEXT(ctx);
156    struct intel_context *intel = intel_context(ctx);
160    INTEL_FIREVERTICES(intel);
162    intel->need_throttle = true;
164    if (intel->batch.used)
165       intel_batchbuffer_flush(intel);
167    if (INTEL_DEBUG & DEBUG_AUB) {
/* __DRI2flushExtension: loader-invoked flush/invalidate hooks. */
172 static const struct __DRI2flushExtensionRec intelFlushExtension = {
173     .base = { __DRI2_FLUSH, 3 },
175     .flush = intelDRI2Flush,
176     .invalidate = dri2InvalidateDrawable,
/* Table mapping DRM fourcc codes to per-plane DRI image layouts.  Each
 * plane entry is { buffer_index, width_shift, height_shift, dri_format,
 * cpp }: the shifts give the plane's subsampling relative to the full
 * image (e.g. YUV420 chroma planes are halved in both dimensions). */
179 static struct intel_image_format intel_image_formats[] = {
180    { __DRI_IMAGE_FOURCC_ARGB8888, __DRI_IMAGE_COMPONENTS_RGBA, 1,
181      { { 0, 0, 0, __DRI_IMAGE_FORMAT_ARGB8888, 4 } } },
183    { __DRI_IMAGE_FOURCC_SARGB8888, __DRI_IMAGE_COMPONENTS_RGBA, 1,
184      { { 0, 0, 0, __DRI_IMAGE_FORMAT_SARGB8, 4 } } },
186    { __DRI_IMAGE_FOURCC_XRGB8888, __DRI_IMAGE_COMPONENTS_RGB, 1,
187      { { 0, 0, 0, __DRI_IMAGE_FORMAT_XRGB8888, 4 }, } },
189    { __DRI_IMAGE_FOURCC_YUV410, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
190      { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
191        { 1, 2, 2, __DRI_IMAGE_FORMAT_R8, 1 },
192        { 2, 2, 2, __DRI_IMAGE_FORMAT_R8, 1 } } },
194    { __DRI_IMAGE_FOURCC_YUV411, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
195      { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
196        { 1, 2, 0, __DRI_IMAGE_FORMAT_R8, 1 },
197        { 2, 2, 0, __DRI_IMAGE_FORMAT_R8, 1 } } },
199    { __DRI_IMAGE_FOURCC_YUV420, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
200      { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
201        { 1, 1, 1, __DRI_IMAGE_FORMAT_R8, 1 },
202        { 2, 1, 1, __DRI_IMAGE_FORMAT_R8, 1 } } },
204    { __DRI_IMAGE_FOURCC_YUV422, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
205      { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
206        { 1, 1, 0, __DRI_IMAGE_FORMAT_R8, 1 },
207        { 2, 1, 0, __DRI_IMAGE_FORMAT_R8, 1 } } },
209    { __DRI_IMAGE_FOURCC_YUV444, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
210      { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
211        { 1, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
212        { 2, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 } } },
214    { __DRI_IMAGE_FOURCC_NV12, __DRI_IMAGE_COMPONENTS_Y_UV, 2,
215      { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
216        { 1, 1, 1, __DRI_IMAGE_FORMAT_GR88, 2 } } },
218    { __DRI_IMAGE_FOURCC_NV16, __DRI_IMAGE_COMPONENTS_Y_UV, 2,
219      { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
220        { 1, 1, 0, __DRI_IMAGE_FORMAT_GR88, 2 } } },
222    /* For YUYV buffers, we set up two overlapping DRI images and treat
223     * them as planar buffers in the compositors.  Plane 0 is GR88 and
224     * samples YU or YV pairs and places Y into the R component, while
225     * plane 1 is ARGB and samples YUYV clusters and places pairs and
226     * places U into the G component and V into A.  This lets the
227     * texture sampler interpolate the Y components correctly when
228     * sampling from plane 0, and interpolate U and V correctly when
229     * sampling from plane 1. */
230    { __DRI_IMAGE_FOURCC_YUYV, __DRI_IMAGE_COMPONENTS_Y_XUXV, 2,
231      { { 0, 0, 0, __DRI_IMAGE_FORMAT_GR88, 2 },
232        { 0, 1, 0, __DRI_IMAGE_FORMAT_ARGB8888, 4 } } }
/* Allocate and minimally initialize a __DRIimage for the given DRI image
 * format.  Translates the DRI format to a mesa_format; a non-NONE DRI
 * format that yields MESA_FORMAT_NONE is rejected (elided error path).
 * NOTE(review): allocation-failure handling and return are elided. */
236 intel_allocate_image(int dri_format, void *loaderPrivate)
240     image = calloc(1, sizeof *image);
244     image->dri_format = dri_format;
247     image->format = driImageFormatToGLFormat(dri_format);
248     if (dri_format != __DRI_IMAGE_FORMAT_NONE &&
249         image->format == MESA_FORMAT_NONE) {
254     image->internal_format = _mesa_get_format_base_format(image->format);
255     image->data = loaderPrivate;
261  * Sets up a DRIImage structure to point to our shared image in a region
/* Populate a __DRIimage from a miptree level/slice: size, intra-tile
 * x/y offset (via the region's tile masks) and tile-aligned byte offset,
 * then take a reference on the backing region. */
264 intel_setup_image_from_mipmap_tree(struct intel_context *intel, __DRIimage *image,
265                                    struct intel_mipmap_tree *mt, GLuint level,
268    unsigned int draw_x, draw_y;
269    uint32_t mask_x, mask_y;
271    intel_miptree_check_level_layer(mt, level, zoffset);
273    intel_region_get_tile_masks(mt->region, &mask_x, &mask_y, false);
274    intel_miptree_get_image_offset(mt, level, zoffset, &draw_x, &draw_y);
276    image->width = mt->level[level].width;
277    image->height = mt->level[level].height;
278    image->tile_x = draw_x & mask_x;
279    image->tile_y = draw_y & mask_y;
281    image->offset = intel_region_get_aligned_offset(mt->region,
286    intel_region_reference(&image->region, mt->region);
/* Copy the backing region's dimensions into the image. */
290 intel_setup_image_from_dimensions(__DRIimage *image)
292    image->width  = image->region->width;
293    image->height = image->region->height;
/* __DRIimage createImageFromName: wrap an existing GEM flink name in a
 * new image.  `pitch` is in pixels and is converted to bytes via cpp.
 * NOTE(review): NULL-checks, error cleanup and return are elided. */
299 intel_create_image_from_name(__DRIscreen *screen,
300 			     int width, int height, int format,
301 			     int name, int pitch, void *loaderPrivate)
303     struct intel_screen *intelScreen = screen->driverPrivate;
307     image = intel_allocate_image(format, loaderPrivate);
311     if (image->format == MESA_FORMAT_NONE)
314        cpp = _mesa_get_format_bytes(image->format);
315     image->region = intel_region_alloc_for_handle(intelScreen,
317                                                   pitch * cpp, name, "image");
318     if (image->region == NULL) {
323     intel_setup_image_from_dimensions(image);
/* __DRIimage createImageFromRenderbuffer: wrap a GL renderbuffer's
 * region in a new image; errors with GL_INVALID_OPERATION if the
 * renderbuffer lookup fails.  Marks the rb so rendering is finished
 * before external use (NeedsFinishRenderTexture). */
329 intel_create_image_from_renderbuffer(__DRIcontext *context,
330 				     int renderbuffer, void *loaderPrivate)
333    struct intel_context *intel = context->driverPrivate;
334    struct gl_renderbuffer *rb;
335    struct intel_renderbuffer *irb;
337    rb = _mesa_lookup_renderbuffer(&intel->ctx, renderbuffer);
339       _mesa_error(&intel->ctx,
340 		  GL_INVALID_OPERATION, "glRenderbufferExternalMESA");
344    irb = intel_renderbuffer(rb);
345    image = calloc(1, sizeof *image);
349    image->internal_format = rb->InternalFormat;
350    image->format = rb->Format;
352    image->data = loaderPrivate;
353    intel_region_reference(&image->region, irb->mt->region);
354    intel_setup_image_from_dimensions(image);
355    image->dri_format = driGLFormatToImageFormat(image->format);
357    rb->NeedsFinishRenderTexture = true;
/* __DRIimage createImageFromTexture: wrap one level/slice of a GL
 * texture.  Validates target match, completeness, level range and 3D
 * zoffset, reporting failures through *error.  NOTE(review): `face`
 * computation for cube maps and some early returns are elided. */
362 intel_create_image_from_texture(__DRIcontext *context, int target,
363                                 unsigned texture, int zoffset,
369    struct intel_context *intel = context->driverPrivate;
370    struct gl_texture_object *obj;
371    struct intel_texture_object *iobj;
374    obj = _mesa_lookup_texture(&intel->ctx, texture);
375    if (!obj || obj->Target != target) {
376       *error = __DRI_IMAGE_ERROR_BAD_PARAMETER;
380    if (target == GL_TEXTURE_CUBE_MAP)
383    _mesa_test_texobj_completeness(&intel->ctx, obj);
384    iobj = intel_texture_object(obj);
385    if (!obj->_BaseComplete || (level > 0 && !obj->_MipmapComplete)) {
386       *error = __DRI_IMAGE_ERROR_BAD_PARAMETER;
390    if (level < obj->BaseLevel || level > obj->_MaxLevel) {
391       *error = __DRI_IMAGE_ERROR_BAD_MATCH;
395    if (target == GL_TEXTURE_3D && obj->Image[face][level]->Depth < zoffset) {
396       *error = __DRI_IMAGE_ERROR_BAD_MATCH;
399    image = calloc(1, sizeof *image);
401       *error = __DRI_IMAGE_ERROR_BAD_ALLOC;
405    image->internal_format = obj->Image[face][level]->InternalFormat;
406    image->format = obj->Image[face][level]->TexFormat;
407    image->data = loaderPrivate;
408    intel_setup_image_from_mipmap_tree(intel, image, iobj->mt, level, zoffset);
409    image->dri_format = driGLFormatToImageFormat(image->format);
410    if (image->dri_format == MESA_FORMAT_NONE) {
411       *error = __DRI_IMAGE_ERROR_BAD_PARAMETER;
416    *error = __DRI_IMAGE_ERROR_SUCCESS;
/* __DRIimage destroyImage: drop the region reference (frees the image
 * itself in elided code). */
421 intel_destroy_image(__DRIimage *image)
423     intel_region_release(&image->region);
/* __DRIimage createImage: allocate a fresh image-backed region.  X tiling
 * by default; cursor usage forces 64x64 untiled, LINEAR usage forces
 * untiled.  NOTE(review): error paths and return are elided. */
428 intel_create_image(__DRIscreen *screen,
429 		   int width, int height, int format,
434    struct intel_screen *intelScreen = screen->driverPrivate;
438    tiling = I915_TILING_X;
439    if (use & __DRI_IMAGE_USE_CURSOR) {
440       if (width != 64 || height != 64)
442       tiling = I915_TILING_NONE;
445    if (use & __DRI_IMAGE_USE_LINEAR)
446       tiling = I915_TILING_NONE;
448    image = intel_allocate_image(format, loaderPrivate);
452    cpp = _mesa_get_format_bytes(image->format);
454       intel_region_alloc(intelScreen, tiling, cpp, width, height, true);
455    if (image->region == NULL) {
460    intel_setup_image_from_dimensions(image);
/* __DRIimage queryImage: report image attributes (stride in bytes, GEM
 * handle/name, format, size, planar components, prime FD).  COMPONENTS
 * is only valid for planar images.  NOTE(review): break/return
 * statements between cases are elided in this listing. */
466 intel_query_image(__DRIimage *image, int attrib, int *value)
469    case __DRI_IMAGE_ATTRIB_STRIDE:
470       *value = image->region->pitch;
472    case __DRI_IMAGE_ATTRIB_HANDLE:
473       *value = image->region->bo->handle;
475    case __DRI_IMAGE_ATTRIB_NAME:
476       return intel_region_flink(image->region, (uint32_t *) value);
477    case __DRI_IMAGE_ATTRIB_FORMAT:
478       *value = image->dri_format;
480    case __DRI_IMAGE_ATTRIB_WIDTH:
481       *value = image->region->width;
483    case __DRI_IMAGE_ATTRIB_HEIGHT:
484       *value = image->region->height;
486    case __DRI_IMAGE_ATTRIB_COMPONENTS:
487       if (image->planar_format == NULL)
489       *value = image->planar_format->components;
491    case __DRI_IMAGE_ATTRIB_FD:
492       return !drm_intel_bo_gem_export_to_prime(image->region->bo, value);
/* __DRIimage dupImage: shallow duplicate sharing the same region (new
 * reference), copying all metadata; only loaderPrivate differs. */
499 intel_dup_image(__DRIimage *orig_image, void *loaderPrivate)
503    image = calloc(1, sizeof *image);
507    intel_region_reference(&image->region, orig_image->region);
508    if (image->region == NULL) {
513    image->internal_format = orig_image->internal_format;
514    image->planar_format   = orig_image->planar_format;
515    image->dri_format      = orig_image->dri_format;
516    image->format          = orig_image->format;
517    image->offset          = orig_image->offset;
518    image->width           = orig_image->width;
519    image->height          = orig_image->height;
520    image->tile_x          = orig_image->tile_x;
521    image->tile_y          = orig_image->tile_y;
522    image->data            = loaderPrivate;
524    memcpy(image->strides, orig_image->strides, sizeof(image->strides));
525    memcpy(image->offsets, orig_image->offsets, sizeof(image->offsets));
/* __DRIimage validateUsage: cursor usage requires exactly 64x64. */
531 intel_validate_usage(__DRIimage *image, unsigned int use)
533    if (use & __DRI_IMAGE_USE_CURSOR) {
534       if (image->region->width != 64 || image->region->height != 64)
/* __DRIimage createImageFromNames: build a (possibly planar) image from
 * a single flink name; looks up the fourcc in intel_image_formats and
 * records per-plane strides/offsets.  Only num_names == 1 is accepted. */
542 intel_create_image_from_names(__DRIscreen *screen,
543                               int width, int height, int fourcc,
544                               int *names, int num_names,
545                               int *strides, int *offsets,
548    struct intel_image_format *f = NULL;
552    if (screen == NULL || names == NULL || num_names != 1)
555    for (i = 0; i < ARRAY_SIZE(intel_image_formats); i++) {
556       if (intel_image_formats[i].fourcc == fourcc) {
557          f = &intel_image_formats[i];
564    image = intel_create_image_from_name(screen, width, height,
565                                         __DRI_IMAGE_FORMAT_NONE,
566                                         names[0], strides[0],
572    image->planar_format = f;
573    for (i = 0; i < f->nplanes; i++) {
574       index = f->planes[i].buffer_index;
575       image->offsets[index] = offsets[index];
576       image->strides[index] = strides[index];
/* __DRIimage createImageFromFds: import a dma-buf FD as a (possibly
 * planar) image; fourcc is looked up in intel_image_formats and the
 * region is sized as height * strides[0].  Only num_fds == 1 accepted. */
583 intel_create_image_from_fds(__DRIscreen *screen,
584                             int width, int height, int fourcc,
585                             int *fds, int num_fds, int *strides, int *offsets,
588    struct intel_screen *intelScreen = screen->driverPrivate;
589    struct intel_image_format *f = NULL;
593    if (fds == NULL || num_fds != 1)
596    for (i = 0; i < ARRAY_SIZE(intel_image_formats); i++) {
597       if (intel_image_formats[i].fourcc == fourcc) {
598          f = &intel_image_formats[i];
605    image = intel_allocate_image(__DRI_IMAGE_FORMAT_NONE, loaderPrivate);
609    image->region = intel_region_alloc_for_fd(intelScreen,
610                                              f->planes[0].cpp, width, height, strides[0],
611                                              height * strides[0], fds[0], "image");
612    if (image->region == NULL) {
617    intel_setup_image_from_dimensions(image);
619    image->planar_format = f;
620    for (i = 0; i < f->nplanes; i++) {
621       index = f->planes[i].buffer_index;
622       image->offsets[index] = offsets[index];
623       image->strides[index] = strides[index];
/* __DRIimage fromPlanar: carve one plane out of a planar parent image.
 * Builds a hand-rolled sub-region aliasing the parent's bo (extra bo
 * reference taken), after bounds-checking offset + height*stride against
 * the bo size and verifying tile alignment of the offset. */
631 intel_from_planar(__DRIimage *parent, int plane, void *loaderPrivate)
633     int width, height, offset, stride, dri_format, index;
634     struct intel_image_format *f;
635     uint32_t mask_x, mask_y;
638     if (parent == NULL || parent->planar_format == NULL)
641     f = parent->planar_format;
643     if (plane >= f->nplanes)
/* plane dimensions derive from the parent via the format's shifts */
646     width = parent->region->width >> f->planes[plane].width_shift;
647     height = parent->region->height >> f->planes[plane].height_shift;
648     dri_format = f->planes[plane].dri_format;
649     index = f->planes[plane].buffer_index;
650     offset = parent->offsets[index];
651     stride = parent->strides[index];
653     image = intel_allocate_image(dri_format, loaderPrivate);
657     if (offset + height * stride > parent->region->bo->size) {
658        _mesa_warning(NULL, "intel_create_sub_image: subimage out of bounds");
663     image->region = calloc(sizeof(*image->region), 1);
664     if (image->region == NULL) {
669     image->region->cpp = _mesa_get_format_bytes(image->format);
670     image->region->width = width;
671     image->region->height = height;
672     image->region->pitch = stride;
673     image->region->refcount = 1;
674     image->region->bo = parent->region->bo;
675     drm_intel_bo_reference(image->region->bo);
676     image->region->tiling = parent->region->tiling;
677     image->offset = offset;
678     intel_setup_image_from_dimensions(image);
680     intel_region_get_tile_masks(image->region, &mask_x, &mask_y, false);
683                     "intel_create_sub_image: offset not on tile boundary");
/* __DRIimageExtension v7 dispatch table wiring the image functions above
 * into the loader interface. */
688 static const __DRIimageExtension intelImageExtension = {
689     .base = { __DRI_IMAGE, 7 },
691     .createImageFromName            = intel_create_image_from_name,
692     .createImageFromRenderbuffer    = intel_create_image_from_renderbuffer,
693     .destroyImage                   = intel_destroy_image,
694     .createImage                    = intel_create_image,
695     .queryImage                     = intel_query_image,
696     .dupImage                       = intel_dup_image,
697     .validateUsage                  = intel_validate_usage,
698     .createImageFromNames           = intel_create_image_from_names,
699     .fromPlanar                     = intel_from_planar,
700     .createImageFromTexture         = intel_create_image_from_texture,
701     .createImageFromFds             = intel_create_image_from_fds
/* GLX_MESA_query_renderer integer queries.  VIDEO_MEMORY reports the
 * minimum of system RAM and 75% of the GPU aperture (fragmentation
 * cliff); unknown params fall through to the common helper. */
705 i915_query_renderer_integer(__DRIscreen *psp, int param, unsigned int *value)
707    const struct intel_screen *const intelScreen =
708       (struct intel_screen *) psp->driverPrivate;
711    case __DRI2_RENDERER_VENDOR_ID:
714    case __DRI2_RENDERER_DEVICE_ID:
715       value[0] = intelScreen->deviceID;
717    case __DRI2_RENDERER_ACCELERATED:
720    case __DRI2_RENDERER_VIDEO_MEMORY: {
721       /* Once a batch uses more than 75% of the maximum mappable size, we
722        * assume that there's some fragmentation, and we start doing extra
723        * flushing, etc.  That's the big cliff apps will care about.
726       size_t mappable_size;
728       drm_intel_get_aperture_sizes(psp->fd, &mappable_size, &aper_size);
730       const unsigned gpu_mappable_megabytes =
731          (aper_size / (1024 * 1024)) * 3 / 4;
733       const long system_memory_pages = sysconf(_SC_PHYS_PAGES);
734       const long system_page_size = sysconf(_SC_PAGE_SIZE);
736       if (system_memory_pages <= 0 || system_page_size <= 0)
739       const uint64_t system_memory_bytes = (uint64_t) system_memory_pages
740          * (uint64_t) system_page_size;
742       const unsigned system_memory_megabytes =
743          (unsigned) (system_memory_bytes / (1024 * 1024));
745       value[0] = MIN2(system_memory_megabytes, gpu_mappable_megabytes);
748    case __DRI2_RENDERER_UNIFIED_MEMORY_ARCHITECTURE:
751    case __DRI2_RENDERER_HAS_TEXTURE_3D:
755    return driQueryRendererIntegerCommon(psp, param, value);
/* GLX_MESA_query_renderer string queries (vendor / renderer names). */
762 i915_query_renderer_string(__DRIscreen *psp, int param, const char **value)
764    const struct intel_screen *intelScreen =
765       (struct intel_screen *) psp->driverPrivate;
768    case __DRI2_RENDERER_VENDOR_ID:
769       value[0] = i915_vendor_string;
771    case __DRI2_RENDERER_DEVICE_ID:
772       value[0] = i915_get_renderer_string(intelScreen->deviceID);
/* Renderer-query extension table and the screen-level extension list
 * advertised to the loader. */
781 static const __DRI2rendererQueryExtension intelRendererQueryExtension = {
782    .base = { __DRI2_RENDERER_QUERY, 1 },
784    .queryInteger = i915_query_renderer_integer,
785    .queryString = i915_query_renderer_string
788 static const __DRIextension *intelScreenExtensions[] = {
789     &intelTexBufferExtension.base,
790     &intelFlushExtension.base,
791     &intelImageExtension.base,
792     &intelRendererQueryExtension.base,
793     &dri2ConfigQueryExtension.base,
/* Query a DRM_I915_GETPARAM value from the kernel; warns on ioctl
 * failure.  NOTE(review): gp field setup and success return are elided. */
798 intel_get_param(__DRIscreen *psp, int param, int *value)
801    struct drm_i915_getparam gp;
803    memset(&gp, 0, sizeof(gp));
807    ret = drmCommandWriteRead(psp->fd, DRM_I915_GETPARAM, &gp, sizeof(gp));
810       _mesa_warning(NULL, "drm_i915_getparam: %d", ret);
/* Boolean convenience wrapper: true only if the query succeeded AND the
 * returned value is non-zero. */
818 intel_get_boolean(__DRIscreen *psp, int param)
821    return intel_get_param(psp, param, &value) && value;
/* Screen teardown: destroy the bufmgr, free the option cache and the
 * private screen struct. */
825 intelDestroyScreen(__DRIscreen * sPriv)
827    struct intel_screen *intelScreen = sPriv->driverPrivate;
829    dri_bufmgr_destroy(intelScreen->bufmgr);
830    driDestroyOptionInfo(&intelScreen->optionCache);
833    sPriv->driverPrivate = NULL;
838  * This is called when we need to set up GL rendering to a new X window.
/* Build the window-system framebuffer for a drawable: pick an RGB
 * format from the visual (565 / sRGB / XRGB / ARGB), create front (and
 * optionally back) renderbuffers, attach depth/stencil per the visual's
 * advertised combos (d24s8, d16, or none), then add any software accum
 * buffer.  The fb is stored as the drawable's driverPrivate. */
841 intelCreateBuffer(__DRIscreen * driScrnPriv,
842                   __DRIdrawable * driDrawPriv,
843                   const struct gl_config * mesaVis, GLboolean isPixmap)
845    struct intel_renderbuffer *rb;
846    mesa_format rgbFormat;
847    struct gl_framebuffer *fb;
852    fb = CALLOC_STRUCT(gl_framebuffer);
856    _mesa_initialize_window_framebuffer(fb, mesaVis);
858    if (mesaVis->redBits == 5)
859       rgbFormat = MESA_FORMAT_B5G6R5_UNORM;
860    else if (mesaVis->sRGBCapable)
861       rgbFormat = MESA_FORMAT_B8G8R8A8_SRGB;
862    else if (mesaVis->alphaBits == 0)
863       rgbFormat = MESA_FORMAT_B8G8R8X8_UNORM;
865       rgbFormat = MESA_FORMAT_B8G8R8A8_UNORM;
867    /* setup the hardware-based renderbuffers */
868    rb = intel_create_renderbuffer(rgbFormat);
869    _mesa_add_renderbuffer(fb, BUFFER_FRONT_LEFT, &rb->Base.Base);
871    if (mesaVis->doubleBufferMode) {
872       rb = intel_create_renderbuffer(rgbFormat);
873       _mesa_add_renderbuffer(fb, BUFFER_BACK_LEFT, &rb->Base.Base);
877     * Assert here that the gl_config has an expected depth/stencil bit
878     * combination: one of d24/s8, d16/s0, d0/s0. (See intelInitScreen2(),
879     * which constructs the advertised configs.)
881    if (mesaVis->depthBits == 24) {
882       assert(mesaVis->stencilBits == 8);
885        * Use combined depth/stencil. Note that the renderbuffer is
886        * attached to two attachment points.
888       rb = intel_create_private_renderbuffer(MESA_FORMAT_Z24_UNORM_S8_UINT);
889       _mesa_add_renderbuffer(fb, BUFFER_DEPTH, &rb->Base.Base);
890       _mesa_add_renderbuffer(fb, BUFFER_STENCIL, &rb->Base.Base);
892    else if (mesaVis->depthBits == 16) {
893       assert(mesaVis->stencilBits == 0);
894       rb = intel_create_private_renderbuffer(MESA_FORMAT_Z_UNORM16);
895       _mesa_add_renderbuffer(fb, BUFFER_DEPTH, &rb->Base.Base);
898       assert(mesaVis->depthBits == 0);
899       assert(mesaVis->stencilBits == 0);
902    /* now add any/all software-based renderbuffers we may need */
903    _swrast_add_soft_renderbuffers(fb,
904                                   false, /* never sw color */
905                                   false, /* never sw depth */
906                                   false, /* never sw stencil */
907                                   mesaVis->accumRedBits > 0,
908                                   false, /* never sw alpha */
909                                   false /* never sw aux */ );
910    driDrawPriv->driverPrivate = fb;
/* Drop the drawable's framebuffer reference (freed when refcount hits 0). */
916 intelDestroyBuffer(__DRIdrawable * driDrawPriv)
918     struct gl_framebuffer *fb = driDrawPriv->driverPrivate;
920     _mesa_reference_framebuffer(&fb, NULL);
923 /* There are probably better ways to do this, such as an
924  * init-designated function to register chipids and createcontext
/* Forward declarations for the per-generation context constructors
 * (defined in the i830/i915 sub-drivers). */
928 i830CreateContext(int api,
929                   const struct gl_config *mesaVis,
930                   __DRIcontext *driContextPriv,
931                   unsigned major_version,
932                   unsigned minor_version,
935                   void *sharedContextPrivate);
938 i915CreateContext(int api,
939                   const struct gl_config *mesaVis,
940                   __DRIcontext *driContextPriv,
941                   unsigned major_version,
942                   unsigned minor_version,
945                   void *sharedContextPrivate);
/* Driver CreateContext hook: reject unknown flags (only DEBUG allowed),
 * then dispatch to the gen3 (i915) or gen2 (i830) constructor.  Gen2
 * forces no_vbo.  On failure, any half-built context is destroyed.
 * NOTE(review): the success/return paths are partially elided. */
948 intelCreateContext(gl_api api,
949                    const struct gl_config * mesaVis,
950                    __DRIcontext * driContextPriv,
951                    unsigned major_version,
952                    unsigned minor_version,
956                    void *sharedContextPrivate)
958    bool success = false;
960    __DRIscreen *sPriv = driContextPriv->driScreenPriv;
961    struct intel_screen *intelScreen = sPriv->driverPrivate;
963    if (flags & ~__DRI_CTX_FLAG_DEBUG) {
964       *error = __DRI_CTX_ERROR_UNKNOWN_FLAG;
969       *error = __DRI_CTX_ERROR_UNKNOWN_ATTRIBUTE;
973    if (IS_GEN3(intelScreen->deviceID)) {
974       success = i915CreateContext(api, mesaVis, driContextPriv,
975                                   major_version, minor_version, flags,
976                                   error, sharedContextPrivate);
978       intelScreen->no_vbo = true;
979       success = i830CreateContext(api, mesaVis, driContextPriv,
980                                   major_version, minor_version, flags,
981                                   error, sharedContextPrivate);
987       if (driContextPriv->driverPrivate != NULL)
988          intelDestroyContext(driContextPriv);
/* Initialize the libdrm GEM buffer manager for the screen.  Honors
 * INTEL_NO_HW, enables fenced relocations, and requires the kernel's
 * relaxed-delta support (2.6.39+).  NOTE(review): failure returns and
 * the final success return are elided in this listing. */
994 intel_init_bufmgr(struct intel_screen *intelScreen)
996    __DRIscreen *spriv = intelScreen->driScrnPriv;
998    intelScreen->no_hw = getenv("INTEL_NO_HW") != NULL;
1000    intelScreen->bufmgr = intel_bufmgr_gem_init(spriv->fd, BATCH_SZ);
1001    if (intelScreen->bufmgr == NULL) {
1002       fprintf(stderr, "[%s:%u] Error initializing buffer manager.\n",
1003 	      __func__, __LINE__);
1007    drm_intel_bufmgr_gem_enable_fenced_relocs(intelScreen->bufmgr);
1009    if (!intel_get_boolean(spriv, I915_PARAM_HAS_RELAXED_DELTA)) {
1010       fprintf(stderr, "[%s: %u] Kernel 2.6.39 required.\n", __func__, __LINE__);
/* Probe whether the kernel reports bit-6 swizzling by allocating a small
 * X-tiled bo and reading back its swizzle mode. */
1018 intel_detect_swizzling(struct intel_screen *screen)
1020    drm_intel_bo *buffer;
1021    unsigned long flags = 0;
1022    unsigned long aligned_pitch;
1023    uint32_t tiling = I915_TILING_X;
1024    uint32_t swizzle_mode = 0;
1026    buffer = drm_intel_bo_alloc_tiled(screen->bufmgr, "swizzle test",
1028 				     &tiling, &aligned_pitch, flags);
1032    drm_intel_bo_get_tiling(buffer, &tiling, &swizzle_mode);
1033    drm_intel_bo_unreference(buffer);
1035    if (swizzle_mode == I915_BIT_6_SWIZZLE_NONE)
/* Build the advertised GLX/EGL config list: for RGB565 and ARGB8888,
 * singlesample configs with matching depth/stencil combos (d0s0 plus
 * d16 or d24s8), then a minimal set with an accumulation buffer.
 * NOTE(review): several depth_bits assignments are elided here. */
1041 static __DRIconfig**
1042 intel_screen_make_configs(__DRIscreen *dri_screen)
1044    static const mesa_format formats[] = {
1045       MESA_FORMAT_B5G6R5_UNORM,
1046       MESA_FORMAT_B8G8R8A8_UNORM
1049    /* GLX_SWAP_COPY_OML is not supported due to page flipping. */
1050    static const GLenum back_buffer_modes[] = {
1051       GLX_SWAP_UNDEFINED_OML, GLX_NONE,
1054    static const uint8_t singlesample_samples[1] = {0};
1056    uint8_t depth_bits[4], stencil_bits[4];
1057    __DRIconfig **configs = NULL;
1059    /* Generate singlesample configs without accumulation buffer. */
1060    for (int i = 0; i < ARRAY_SIZE(formats); i++) {
1061       __DRIconfig **new_configs;
1062       int num_depth_stencil_bits = 2;
1064       /* Starting with DRI2 protocol version 1.1 we can request a depth/stencil
1065        * buffer that has a different number of bits per pixel than the color
1069       stencil_bits[0] = 0;
1071       if (formats[i] == MESA_FORMAT_B5G6R5_UNORM) {
1073          stencil_bits[1] = 0;
1076          stencil_bits[1] = 8;
1079       new_configs = driCreateConfigs(formats[i],
1082                                      num_depth_stencil_bits,
1083                                      back_buffer_modes, 2,
1084                                      singlesample_samples, 1,
1086       configs = driConcatConfigs(configs, new_configs);
1089    /* Generate the minimum possible set of configs that include an
1090     * accumulation buffer.
1092    for (int i = 0; i < ARRAY_SIZE(formats); i++) {
1093       __DRIconfig **new_configs;
1095       if (formats[i] == MESA_FORMAT_B5G6R5_UNORM) {
1097          stencil_bits[0] = 0;
1100          stencil_bits[0] = 8;
1103       new_configs = driCreateConfigs(formats[i],
1104                                      depth_bits, stencil_bits, 1,
1105                                      back_buffer_modes, 1,
1106                                      singlesample_samples, 1,
1108       configs = driConcatConfigs(configs, new_configs);
1111    if (configs == NULL) {
1112       fprintf(stderr, "[%s:%u] Error creating FBConfig!\n", __func__,
/* Advertise max GL versions per hardware generation: gen3 gets compat
 * 2.1 / ES2 2.0; gen2 (the other visible case) gets compat 1.3, no ES2.
 * NOTE(review): the case labels themselves are elided in this listing. */
1121 set_max_gl_versions(struct intel_screen *screen)
1123    __DRIscreen *psp = screen->driScrnPriv;
1125    switch (screen->gen) {
1127       psp->max_gl_core_version = 0;
1128       psp->max_gl_es1_version = 11;
1129       psp->max_gl_compat_version = 21;
1130       psp->max_gl_es2_version = 20;
1133       psp->max_gl_core_version = 0;
1134       psp->max_gl_compat_version = 13;
1135       psp->max_gl_es1_version = 11;
1136       psp->max_gl_es2_version = 0;
1139       assert(!"unrecognized intel_screen::gen");
1145  * This is the driver specific part of the createNewScreen entry point.
1146  * Called when using DRI2.
1148  * \return the struct gl_config supported by this driver
/* DRI2/image-loader screen init: requires getBuffersWithFormat unless an
 * image loader is present; allocates the private screen, parses driconf,
 * initializes the bufmgr, detects gen (3 vs 2) and swizzling, sets GL
 * version caps and extensions, and returns the config list. */
1151 __DRIconfig **intelInitScreen2(__DRIscreen *psp)
1153    struct intel_screen *intelScreen;
1155    if (psp->image.loader) {
1156    } else if (psp->dri2.loader->base.version <= 2 ||
1157        psp->dri2.loader->getBuffersWithFormat == NULL) {
1159 	      "\nERROR! DRI2 loader with getBuffersWithFormat() "
1160 	      "support required\n");
1164    /* Allocate the private area */
1165    intelScreen = calloc(1, sizeof *intelScreen);
1167       fprintf(stderr, "\nERROR! Allocating private area failed\n");
1170    /* parse information in __driConfigOptions */
1171    driParseOptionInfo(&intelScreen->optionCache, i915_config_options.xml);
1173    intelScreen->driScrnPriv = psp;
1174    psp->driverPrivate = (void *) intelScreen;
1176    if (!intel_init_bufmgr(intelScreen))
1179    intelScreen->deviceID = drm_intel_bufmgr_gem_get_devid(intelScreen->bufmgr);
1181    if (IS_GEN3(intelScreen->deviceID)) {
1182       intelScreen->gen = 3;
1184       intelScreen->gen = 2;
1187    intelScreen->hw_has_swizzling = intel_detect_swizzling(intelScreen);
1189    set_max_gl_versions(intelScreen);
1191    psp->extensions = intelScreenExtensions;
1193    return (const __DRIconfig**) intel_screen_make_configs(psp);
/* DRI2 buffer wrapper: the public __DRIbuffer plus its backing region.
 * NOTE(review): the __DRIbuffer base member line is elided. */
1196 struct intel_buffer {
1198    struct intel_region *region;
/* DRI2 AllocateBuffer: create an X-tiled color region for a front/back
 * attachment, flink it for the server, and fill in the __DRIbuffer.
 * NOTE(review): the alloc argument list and error path are elided. */
1201 static __DRIbuffer *
1202 intelAllocateBuffer(__DRIscreen *screen,
1203 		    unsigned attachment, unsigned format,
1204 		    int width, int height)
1206    struct intel_buffer *intelBuffer;
1207    struct intel_screen *intelScreen = screen->driverPrivate;
1209    assert(attachment == __DRI_BUFFER_FRONT_LEFT ||
1210           attachment == __DRI_BUFFER_BACK_LEFT);
1212    intelBuffer = calloc(1, sizeof *intelBuffer);
1213    if (intelBuffer == NULL)
1216    /* The front and back buffers are color buffers, which are X tiled. */
1217    intelBuffer->region = intel_region_alloc(intelScreen,
1224    if (intelBuffer->region == NULL) {
1229    intel_region_flink(intelBuffer->region, &intelBuffer->base.name);
1231    intelBuffer->base.attachment = attachment;
1232    intelBuffer->base.cpp = intelBuffer->region->cpp;
1233    intelBuffer->base.pitch = intelBuffer->region->pitch;
1235    return &intelBuffer->base;
/* DRI2 ReleaseBuffer: drop the region and free the wrapper (free elided). */
1239 intelReleaseBuffer(__DRIscreen *screen, __DRIbuffer *buffer)
1241    struct intel_buffer *intelBuffer = (struct intel_buffer *) buffer;
1243    intel_region_release(&intelBuffer->region);
/* Driver API vtable, its __DRIDriverVtableExtension wrapper, and the
 * extension list the loader dlsym()s via __driDriverGetExtensions_i915. */
1248 static const struct __DriverAPIRec i915_driver_api = {
1249    .InitScreen		 = intelInitScreen2,
1250    .DestroyScreen	 = intelDestroyScreen,
1251    .CreateContext	 = intelCreateContext,
1252    .DestroyContext	 = intelDestroyContext,
1253    .CreateBuffer	 = intelCreateBuffer,
1254    .DestroyBuffer	 = intelDestroyBuffer,
1255    .MakeCurrent		 = intelMakeCurrent,
1256    .UnbindContext	 = intelUnbindContext,
1257    .AllocateBuffer	 = intelAllocateBuffer,
1258    .ReleaseBuffer	 = intelReleaseBuffer
1261 static const struct __DRIDriverVtableExtensionRec i915_vtable = {
1262    .base = { __DRI_DRIVER_VTABLE, 1 },
1263    .vtable = &i915_driver_api,
1266 /* This is the table of extensions that the loader will dlsym() for. */
1267 static const __DRIextension *i915_driver_extensions[] = {
1268     &driCoreExtension.base,
1269     &driImageDriverExtension.base,
1270     &driDRI2Extension.base,
1272     &i915_config_options.base,
/* Loader entry point: publish the driver API and extension table. */
1276 PUBLIC const __DRIextension **__driDriverGetExtensions_i915(void)
1278    globalDriverAPI = &i915_driver_api;
1280    return i915_driver_extensions;