1 /**************************************************************************
3 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
32 #include "simple_list.h"
33 #include "extensions.h"
34 #include "framebuffer.h"
38 #include "swrast/swrast.h"
39 #include "swrast_setup/swrast_setup.h"
42 #include "tnl/t_pipeline.h"
43 #include "tnl/t_vertex.h"
45 #include "drivers/common/driverfuncs.h"
47 #include "intel_screen.h"
51 #include "intel_buffers.h"
52 #include "intel_tex.h"
53 #include "intel_span.h"
54 #include "intel_tris.h"
55 #include "intel_ioctl.h"
56 #include "intel_batchbuffer.h"
57 #include "intel_blit.h"
58 #include "intel_pixel.h"
59 #include "intel_regions.h"
60 #include "intel_buffer_objects.h"
61 #include "intel_fbo.h"
62 #include "intel_decode.h"
63 #include "intel_bufmgr_ttm.h"
65 #include "drirenderbuffer.h"
68 #include "xmlpool.h" /* for symbolic values of enum-type options */
/* Bitmask of DEBUG_* flags controlling driver debug output.  Populated at
 * context-init time from the INTEL_DEBUG environment variable (see the
 * driParseDebugString(getenv("INTEL_DEBUG"), debug_control) call below). */
int INTEL_DEBUG = 0;
73 #define need_GL_ARB_multisample
74 #define need_GL_ARB_point_parameters
75 #define need_GL_ARB_texture_compression
76 #define need_GL_ARB_vertex_buffer_object
77 #define need_GL_ARB_vertex_program
78 #define need_GL_ARB_window_pos
79 #define need_GL_EXT_blend_color
80 #define need_GL_EXT_blend_equation_separate
81 #define need_GL_EXT_blend_func_separate
82 #define need_GL_EXT_blend_minmax
83 #define need_GL_EXT_cull_vertex
84 #define need_GL_EXT_fog_coord
85 #define need_GL_EXT_framebuffer_object
86 #define need_GL_EXT_multi_draw_arrays
87 #define need_GL_EXT_secondary_color
88 #define need_GL_NV_vertex_program
89 #include "extension_helper.h"
92 #define DRIVER_DATE "20061102"
/* Process-wide mutex taken by LOCK_HARDWARE/UNLOCK_HARDWARE to serialize
 * hardware access across GL contexts in this process. */
94 _glthread_Mutex lockMutex;
/* One-shot guard: set GL_TRUE the first time intelInitContext initializes
 * lockMutex, so the mutex is only initialized once. */
95 static GLboolean lockMutexInit = GL_FALSE;
/* glGetString() driver hook: returns the vendor string or a renderer
 * string built from the chipset name and DRIVER_DATE.
 * NOTE(review): this extraction is missing lines (case labels, breaks,
 * braces); comments below describe only what is visible. */
98 static const GLubyte *
99 intelGetString(GLcontext * ctx, GLenum name)
/* Static buffer for the composed renderer string; returned to the caller,
 * so the result is only valid until the next call. */
102 static char buffer[128];
/* Presumably the GL_VENDOR case (the switch on 'name' is not visible). */
106 return (GLubyte *) "Tungsten Graphics, Inc";
/* Map the PCI device ID of the screen to a human-readable chipset name. */
110 switch (intel_context(ctx)->intelScreen->deviceID) {
112 chipset = "Intel(R) 845G";
114 case PCI_CHIP_I830_M:
115 chipset = "Intel(R) 830M";
117 case PCI_CHIP_I855_GM:
118 chipset = "Intel(R) 852GM/855GM";
120 case PCI_CHIP_I865_G:
121 chipset = "Intel(R) 865G";
123 case PCI_CHIP_I915_G:
124 chipset = "Intel(R) 915G";
126 case PCI_CHIP_I915_GM:
127 chipset = "Intel(R) 915GM";
129 case PCI_CHIP_I945_G:
130 chipset = "Intel(R) 945G";
132 case PCI_CHIP_I945_GM:
133 chipset = "Intel(R) 945GM";
135 case PCI_CHIP_I945_GME:
136 chipset = "Intel(R) 945GME";
139 chipset = "Intel(R) G33";
142 chipset = "Intel(R) Q35";
145 chipset = "Intel(R) Q33";
/* Fallback for unrecognized device IDs (presumably the default case). */
148 chipset = "Unknown Intel Chipset";
/* Compose "<chipset> <date> ..." into the static buffer; return value of
 * the helper is deliberately ignored. */
152 (void) driGetRendererString(buffer, chipset, DRIVER_DATE, 0);
153 return (GLubyte *) buffer;
162 * Extension strings exported by the intel driver.
/* Table consumed by driInitExtensions() at context creation: each entry
 * pairs an extension name with its function-pointer init table (NULL when
 * the extension needs no new entry points).  The function tables come from
 * extension_helper.h via the need_GL_* defines above. */
164 const struct dri_extension card_extensions[] = {
165 {"GL_ARB_multisample", GL_ARB_multisample_functions},
166 {"GL_ARB_multitexture", NULL},
167 {"GL_ARB_point_parameters", GL_ARB_point_parameters_functions},
168 {"GL_ARB_texture_border_clamp", NULL},
169 {"GL_ARB_texture_compression", GL_ARB_texture_compression_functions},
170 {"GL_ARB_texture_cube_map", NULL},
171 {"GL_ARB_texture_env_add", NULL},
172 {"GL_ARB_texture_env_combine", NULL},
173 {"GL_ARB_texture_env_dot3", NULL},
174 {"GL_ARB_texture_mirrored_repeat", NULL},
175 {"GL_ARB_texture_rectangle", NULL},
176 {"GL_ARB_vertex_buffer_object", GL_ARB_vertex_buffer_object_functions},
177 {"GL_ARB_vertex_program", GL_ARB_vertex_program_functions},
178 {"GL_ARB_window_pos", GL_ARB_window_pos_functions},
179 {"GL_EXT_blend_color", GL_EXT_blend_color_functions},
180 {"GL_EXT_blend_equation_separate",
181 GL_EXT_blend_equation_separate_functions},
182 {"GL_EXT_blend_func_separate", GL_EXT_blend_func_separate_functions},
183 {"GL_EXT_blend_minmax", GL_EXT_blend_minmax_functions},
184 {"GL_EXT_blend_subtract", NULL},
185 {"GL_EXT_cull_vertex", GL_EXT_cull_vertex_functions},
186 {"GL_EXT_fog_coord", GL_EXT_fog_coord_functions},
187 {"GL_EXT_multi_draw_arrays", GL_EXT_multi_draw_arrays_functions},
188 #if 1 /* XXX FBO temporary? */
189 {"GL_EXT_packed_depth_stencil", NULL},
191 {"GL_EXT_secondary_color", GL_EXT_secondary_color_functions},
192 {"GL_EXT_stencil_wrap", NULL},
193 {"GL_EXT_texture_edge_clamp", NULL},
194 {"GL_EXT_texture_env_combine", NULL},
195 {"GL_EXT_texture_env_dot3", NULL},
196 {"GL_EXT_texture_filter_anisotropic", NULL},
197 {"GL_EXT_texture_lod_bias", NULL},
198 {"GL_3DFX_texture_compression_FXT1", NULL},
199 {"GL_APPLE_client_storage", NULL},
200 {"GL_MESA_pack_invert", NULL},
201 {"GL_MESA_ycbcr_texture", NULL},
202 {"GL_NV_blend_square", NULL},
203 {"GL_NV_vertex_program", GL_NV_vertex_program_functions},
204 {"GL_NV_vertex_program1_1", NULL},
205 /* { "GL_SGIS_generate_mipmap", NULL }, */
/* Extensions enabled only when the TTM buffer manager is in use (these
 * depend on kernel-managed buffer objects; see driInitExtensions(ctx,
 * ttm_extensions, ...) below, which is presumably gated on intel->ttm). */
209 const struct dri_extension ttm_extensions[] = {
210 {"GL_EXT_framebuffer_object", GL_EXT_framebuffer_object_functions},
211 {"GL_ARB_pixel_buffer_object", NULL},
/* Hardware rasterization stage, defined in intel_tris.c (or similar). */
215 extern const struct tnl_pipeline_stage _intel_render_stage;
/* Customized TNL pipeline installed in intelInitContext: the standard
 * software transform/lighting stages followed by the driver's own render
 * stage, replacing the default _tnl_render_stage. */
217 static const struct tnl_pipeline_stage *intel_pipeline[] = {
218 &_tnl_vertex_transform_stage,
219 &_tnl_vertex_cull_stage,
220 &_tnl_normal_transform_stage,
221 &_tnl_lighting_stage,
222 &_tnl_fog_coordinate_stage,
224 &_tnl_texture_transform_stage,
225 &_tnl_point_attenuation_stage,
226 &_tnl_vertex_program_stage,
228 &_intel_render_stage, /* ADD: unclipped rastersetup-to-dma */
/* Keyword -> DEBUG_* flag table for parsing the INTEL_DEBUG environment
 * variable (consumed by driParseDebugString in intelInitContext). */
235 static const struct dri_debug_control debug_control[] = {
236 {"tex", DEBUG_TEXTURE},
237 {"state", DEBUG_STATE},
238 {"ioctl", DEBUG_IOCTL},
239 {"blit", DEBUG_BLIT},
240 {"mip", DEBUG_MIPTREE},
241 {"fall", DEBUG_FALLBACKS},
242 {"verb", DEBUG_VERBOSE},
243 {"bat", DEBUG_BATCH},
244 {"pix", DEBUG_PIXEL},
245 {"buf", DEBUG_BUFMGR},
246 {"reg", DEBUG_REGION},
248 {"lock", DEBUG_LOCK},
249 {"sync", DEBUG_SYNC},
/* UpdateState driver hook: propagate GL state-change notification to each
 * helper module (swrast, swsetup, vbo, tnl), then accumulate the dirty
 * bits in NewGLState for the driver's own deferred state validation. */
255 intelInvalidateState(GLcontext * ctx, GLuint new_state)
257 _swrast_InvalidateState(ctx, new_state);
258 _swsetup_InvalidateState(ctx, new_state);
259 _vbo_InvalidateState(ctx, new_state);
260 _tnl_InvalidateState(ctx, new_state);
261 _tnl_invalidate_vertex_state(ctx, new_state);
262 intel_context(ctx)->NewGLState |= new_state;
/* glFlush() driver hook: emit any queued vertices, then flush the batch
 * buffer if it contains unsubmitted commands (map != ptr means commands
 * have been written since the last flush). */
267 intelFlush(GLcontext * ctx)
269 struct intel_context *intel = intel_context(ctx);
274 INTEL_FIREVERTICES(intel);
276 if (intel->batch->map != intel->batch->ptr)
277 intel_batchbuffer_flush(intel->batch);
279 /* XXX: Need to do an MI_FLUSH here.
/* glFinish() driver hook: after flushing (presumably done by a call not
 * visible in this extraction), block on the last batch fence so all
 * previously submitted rendering has completed before returning. */
284 intelFinish(GLcontext * ctx)
286 struct intel_context *intel = intel_context(ctx);
288 if (intel->batch->last_fence) {
289 dri_fence_wait(intel->batch->last_fence);
/* Drop our reference and clear it so a later Finish doesn't wait again. */
290 dri_fence_unreference(intel->batch->last_fence);
291 intel->batch->last_fence = NULL;
295 /** Driver-specific fence emit implementation for the fake memory manager. */
297 intel_fence_emit(void *private)
/* 'private' is the intel_context registered with the fake bufmgr. */
299 struct intel_context *intel = (struct intel_context *)private;
302 /* XXX: Need to emit a flush, if we haven't already (at least with the
303 * current batchbuffer implementation, we have).
/* Emit an IRQ and return its cookie as the fence value (return statement
 * not visible in this extraction). */
306 fence = intelEmitIrqLocked(intel);
311 /** Driver-specific fence wait implementation for the fake memory manager. */
313 intel_fence_wait(void *private, unsigned int cookie)
315 struct intel_context *intel = (struct intel_context *)private;
/* Block until the IRQ identified by 'cookie' has been signalled. */
317 intelWaitIrq(intel, cookie);
/* Choose and initialize a buffer manager for this context: prefer the
 * kernel TTM manager when the DDX/DRM are new enough and INTEL_NO_TTM is
 * not set, otherwise fall back to the classic (fake) manager backed by
 * the static texture aperture.  Returns a GLboolean success flag
 * (presumably -- the return statements are not visible here). */
323 intel_init_bufmgr(struct intel_context *intel)
325 intelScreenPrivate *intelScreen = intel->intelScreen;
/* User escape hatch: INTEL_NO_TTM forces the classic buffer manager. */
326 GLboolean ttm_disable = getenv("INTEL_NO_TTM") != NULL;
328 /* If we've got a new enough DDX that's initializing TTM and giving us
329 * object handles for the shared buffers, use that.
331 intel->ttm = GL_FALSE;
/* Version gates: DDX >= x.9 and DRM minor >= 11, plus a valid shared
 * front-buffer handle (the start of this condition is not visible). */
333 intel->intelScreen->driScrnPriv->ddx_version.minor >= 9 &&
334 intel->intelScreen->drmMinor >= 11 &&
335 intel->intelScreen->front.bo_handle != -1)
337 intel->bufmgr = intel_bufmgr_ttm_init(intel->driFd,
340 DRM_I915_FENCE_TYPE_RW,
342 if (intel->bufmgr != NULL)
343 intel->ttm = GL_TRUE;
345 /* Otherwise, use the classic buffer manager. */
346 if (intel->bufmgr == NULL) {
/* Distinguish "user disabled TTM" from "TTM init failed" in the message. */
348 fprintf(stderr, "TTM buffer manager disabled. Using classic.\n");
350 fprintf(stderr, "Failed to initialize TTM buffer manager. "
351 "Falling back to classic.\n");
/* The classic manager needs the static texture aperture; without it we
 * cannot manage buffers at all. */
354 if (intelScreen->tex.size == 0) {
355 fprintf(stderr, "[%s:%u] Error initializing buffer manager.\n",
360 intel->bufmgr = dri_bufmgr_fake_init(intelScreen->tex.offset,
361 intelScreen->tex.map,
362 intelScreen->tex.size,
/* Populate the dd_function_table with this driver's hooks: start from the
 * Mesa defaults, override the core entry points, route the imaging-subset
 * copies through swrast, then let each driver module install its own. */
372 intelInitDriverFunctions(struct dd_function_table *functions)
374 _mesa_init_driver_functions(functions);
376 functions->Flush = intelFlush;
377 functions->Finish = intelFinish;
378 functions->GetString = intelGetString;
379 functions->UpdateState = intelInvalidateState;
/* Color-table / convolution copies are done in software via swrast. */
380 functions->CopyColorTable = _swrast_CopyColorTable;
381 functions->CopyColorSubTable = _swrast_CopyColorSubTable;
382 functions->CopyConvolutionFilter1D = _swrast_CopyConvolutionFilter1D;
383 functions->CopyConvolutionFilter2D = _swrast_CopyConvolutionFilter2D;
/* Per-module hook installation (texture, pixel, state, buffer funcs). */
385 intelInitTextureFuncs(functions);
386 intelInitPixelFuncs(functions);
387 intelInitStateFuncs(functions);
388 intelInitBufferFuncs(functions);
/* Common context-creation path shared by the i8xx/i9xx drivers: wires the
 * intel_context to its screen/SAREA, initializes the buffer manager, core
 * Mesa context, software helper modules, custom TNL pipeline, extensions
 * and debug options.  Returns GL_TRUE/GL_FALSE (presumably -- the return
 * statements are not visible in this extraction). */
393 intelInitContext(struct intel_context *intel,
394 const __GLcontextModes * mesaVis,
395 __DRIcontextPrivate * driContextPriv,
396 void *sharedContextPrivate,
397 struct dd_function_table *functions)
399 GLcontext *ctx = &intel->ctx;
400 GLcontext *shareCtx = (GLcontext *) sharedContextPrivate;
401 __DRIscreenPrivate *sPriv = driContextPriv->driScreenPriv;
402 intelScreenPrivate *intelScreen = (intelScreenPrivate *) sPriv->private;
/* The driver's private SAREA section lives at an offset inside the shared
 * DRM SAREA page. */
403 drmI830Sarea *saPriv = (drmI830Sarea *)
404 (((GLubyte *) sPriv->pSAREA) + intelScreen->sarea_priv_offset);
407 if (!_mesa_initialize_context(&intel->ctx,
409 functions, (void *) intel))
/* Cross-link driver and screen objects. */
412 driContextPriv->driverPrivate = intel;
413 intel->intelScreen = intelScreen;
414 intel->driScreen = sPriv;
415 intel->sarea = saPriv;
418 intel->hHWContext = driContextPriv->hHWContext;
419 intel->driFd = sPriv->fd;
420 intel->driHwLock = (drmLock *) & sPriv->pSAREA->lock;
422 intel->width = intelScreen->width;
423 intel->height = intelScreen->height;
/* 865G gets a reduced batch size (hardware quirk -- TODO confirm reason). */
425 if (intelScreen->deviceID == PCI_CHIP_I865_G)
426 intel->maxBatchSize = 4096;
428 intel->maxBatchSize = BATCH_SZ;
430 if (!intel_init_bufmgr(intel))
/* First context in the process initializes the global hardware lock
 * mutex.  NOTE(review): this check-then-set is not itself synchronized;
 * presumably context creation is externally serialized. */
433 if (!lockMutexInit) {
434 lockMutexInit = GL_TRUE;
435 _glthread_INIT_MUTEX(lockMutex);
438 driParseConfigFiles(&intel->optionCache, &intelScreen->optionCache,
439 intel->driScreen->myNum, "i915");
441 ctx->Const.MaxTextureMaxAnisotropy = 2.0;
443 /* This doesn't yet catch all non-conformant rendering, but it's a
446 if (getenv("INTEL_STRICT_CONFORMANCE")) {
447 intel->strict_conformance = 1;
/* Hardware line/point limits advertised to the GL core. */
450 ctx->Const.MinLineWidth = 1.0;
451 ctx->Const.MinLineWidthAA = 1.0;
452 ctx->Const.MaxLineWidth = 3.0;
453 ctx->Const.MaxLineWidthAA = 3.0;
454 ctx->Const.LineWidthGranularity = 1.0;
456 ctx->Const.MinPointSize = 1.0;
457 ctx->Const.MinPointSizeAA = 1.0;
458 ctx->Const.MaxPointSize = 255.0;
459 ctx->Const.MaxPointSizeAA = 3.0;
460 ctx->Const.PointSizeGranularity = 1.0;
462 /* reinitialize the context point state.
463 * It depend on constants in __GLcontextRec::Const
465 _mesa_init_point(ctx);
467 ctx->Const.MaxColorAttachments = 4; /* XXX FBO: review this */
469 /* Initialize the software rasterizer and helper modules. */
470 _swrast_CreateContext(ctx);
471 _vbo_CreateContext(ctx);
472 _tnl_CreateContext(ctx);
473 _swsetup_CreateContext(ctx);
475 /* Install the customized pipeline: */
476 _tnl_destroy_pipeline(ctx);
477 _tnl_install_pipeline(ctx, intel_pipeline);
479 /* Configure swrast to match hardware characteristics: */
480 _swrast_allow_pixel_fog(ctx, GL_FALSE);
481 _swrast_allow_vertex_fog(ctx, GL_TRUE);
483 intel->hw_stipple = 1;
485 /* XXX FBO: this doesn't seem to be used anywhere */
486 switch (mesaVis->depthBits) {
487 case 0: /* what to do in this case? */
/* 16-bit depth scale (presumably the case-16 branch). */
489 intel->polygon_offset_scale = 1.0 / 0xffff;
492 intel->polygon_offset_scale = 2.0 / 0xffffff; /* req'd to pass glean */
499 /* Initialize swrast, tnl driver tables: */
500 intelInitSpanFuncs(ctx);
501 intelInitTriFuncs(ctx);
/* ~0 marks the cached render index as invalid so the first state
 * validation rebuilds it. */
504 intel->RenderIndex = ~0;
506 fthrottle_mode = driQueryOptioni(&intel->optionCache, "fthrottle_mode");
507 intel->irqsEmitted = 0;
/* IRQ-based throttling only when the screen actually has IRQs enabled. */
509 intel->do_irqs = (intel->intelScreen->irq_active &&
510 fthrottle_mode == DRI_CONF_FTHROTTLE_IRQS);
512 intel->do_usleeps = (fthrottle_mode == DRI_CONF_FTHROTTLE_USLEEPS);
514 _math_matrix_ctr(&intel->ViewportMatrix);
516 /* Disable imaging extension until convolution is working in
519 driInitExtensions(ctx, card_extensions,
/* TTM-dependent extensions (presumably gated on intel->ttm above). */
524 driInitExtensions(ctx, ttm_extensions, GL_FALSE);
526 intel_recreate_static_regions(intel);
528 intel->batch = intel_batchbuffer_alloc(intel);
529 intel->last_swap_fence = NULL;
530 intel->first_swap_fence = NULL;
532 intel_bufferobj_init(intel);
533 intel_fbo_init(intel);
/* S3TC: enable fully when DXTn (de)compression is available, or
 * decode-only when the user forces it via driconf. */
535 if (intel->ctx.Mesa_DXTn) {
536 _mesa_enable_extension(ctx, "GL_EXT_texture_compression_s3tc");
537 _mesa_enable_extension(ctx, "GL_S3_s3tc");
539 else if (driQueryOptionb(&intel->optionCache, "force_s3tc_enable")) {
540 _mesa_enable_extension(ctx, "GL_EXT_texture_compression_s3tc");
/* ~0 = no primitive in progress. */
543 intel->prim.primitive = ~0;
546 INTEL_DEBUG = driParseDebugString(getenv("INTEL_DEBUG"), debug_control);
547 if (INTEL_DEBUG & DEBUG_BUFMGR)
548 dri_bufmgr_set_debug(intel->bufmgr, GL_TRUE);
/* INTEL_NO_RAST forces the software fallback path for all rendering. */
551 if (getenv("INTEL_NO_RAST")) {
552 fprintf(stderr, "disabling 3D rasterization\n");
553 FALLBACK(intel, INTEL_FALLBACK_USER, 1);
/* Context destruction: flush pending rendering, tear down the hardware
 * back-end and helper modules, wait out outstanding swap fences, destroy
 * the buffer manager and finally free the core Mesa context data. */
560 intelDestroyContext(__DRIcontextPrivate * driContextPriv)
562 struct intel_context *intel =
563 (struct intel_context *) driContextPriv->driverPrivate;
565 assert(intel); /* should never be null */
567 GLboolean release_texture_heaps;
/* Emit any queued vertices before tearing anything down. */
569 INTEL_FIREVERTICES(intel);
/* Chipset-specific destroy hook (i830/i915 vtbl). */
571 intel->vtbl.destroy(intel);
/* We are the last user of the share group iff its refcount is 1. */
573 release_texture_heaps = (intel->ctx.Shared->RefCount == 1);
574 _swsetup_DestroyContext(&intel->ctx);
575 _tnl_DestroyContext(&intel->ctx);
576 _vbo_DestroyContext(&intel->ctx);
578 _swrast_DestroyContext(&intel->ctx);
579 intel->Fallback = 0; /* don't call _swrast_Flush later */
581 intel_batchbuffer_free(intel->batch);
/* Wait on and release both swap fences so no GPU work references freed
 * buffers after the bufmgr is destroyed. */
583 if (intel->last_swap_fence) {
584 dri_fence_wait(intel->last_swap_fence);
585 dri_fence_unreference(intel->last_swap_fence);
586 intel->last_swap_fence = NULL;
588 if (intel->first_swap_fence) {
589 dri_fence_wait(intel->first_swap_fence);
590 dri_fence_unreference(intel->first_swap_fence);
591 intel->first_swap_fence = NULL;
594 dri_bufmgr_destroy(intel->bufmgr);
596 if (release_texture_heaps) {
597 /* This share group is about to go away, free our private
598 * texture object data.
/* NOTE(review): only a debug message here -- actual heap teardown is a
 * known TODO in this code. */
600 if (INTEL_DEBUG & DEBUG_TEXTURE)
601 fprintf(stderr, "do something to free texture heaps\n");
604 /* free the Mesa context */
605 _mesa_free_context_data(&intel->ctx);
/* DRI unbind hook; body not visible in this extraction (presumably a
 * trivial GL_TRUE return -- TODO confirm against the full file). */
610 intelUnbindContext(__DRIcontextPrivate * driContextPriv)
/* DRI MakeCurrent hook: bind the context to the given draw/read
 * drawables, lazily attach hardware regions to the window-system
 * renderbuffers, sync framebuffer sizes, and set up vblank state when the
 * drawable changes.  A NULL context unbinds (final branch). */
616 intelMakeCurrent(__DRIcontextPrivate * driContextPriv,
617 __DRIdrawablePrivate * driDrawPriv,
618 __DRIdrawablePrivate * driReadPriv)
621 if (driContextPriv) {
622 struct intel_context *intel =
623 (struct intel_context *) driContextPriv->driverPrivate;
624 struct intel_framebuffer *intel_fb =
625 (struct intel_framebuffer *) driDrawPriv->driverPrivate;
626 GLframebuffer *readFb = (GLframebuffer *) driReadPriv->driverPrivate;
629 /* XXX FBO temporary fix-ups! */
630 /* if the renderbuffers don't have regions, init them from the context */
632 struct intel_renderbuffer *irbDepth
633 = intel_get_renderbuffer(&intel_fb->Base, BUFFER_DEPTH);
634 struct intel_renderbuffer *irbStencil
635 = intel_get_renderbuffer(&intel_fb->Base, BUFFER_STENCIL);
/* Attach the static front/back/third regions to color renderbuffers that
 * don't yet reference one. */
637 if (intel_fb->color_rb[0] && !intel_fb->color_rb[0]->region) {
638 intel_region_reference(&intel_fb->color_rb[0]->region,
639 intel->front_region);
641 if (intel_fb->color_rb[1] && !intel_fb->color_rb[1]->region) {
642 intel_region_reference(&intel_fb->color_rb[1]->region,
645 if (intel_fb->color_rb[2] && !intel_fb->color_rb[2]->region) {
646 intel_region_reference(&intel_fb->color_rb[2]->region,
647 intel->third_region);
/* Depth and stencil share the combined depth region. */
649 if (irbDepth && !irbDepth->region) {
650 intel_region_reference(&irbDepth->region, intel->depth_region);
652 if (irbStencil && !irbStencil->region) {
653 intel_region_reference(&irbStencil->region, intel->depth_region);
657 /* set GLframebuffer size to match window, if needed */
658 driUpdateFramebufferSize(&intel->ctx, driDrawPriv);
660 if (driReadPriv != driDrawPriv) {
661 driUpdateFramebufferSize(&intel->ctx, driReadPriv);
664 _mesa_make_current(&intel->ctx, &intel_fb->Base, readFb);
666 /* The drawbuffer won't always be updated by _mesa_make_current:
668 if (intel->ctx.DrawBuffer == &intel_fb->Base) {
/* Drawable changed: (re)initialize vblank bookkeeping for it. */
670 if (intel->driDrawable != driDrawPriv) {
671 if (driDrawPriv->swap_interval == (unsigned)-1) {
674 driDrawPriv->vblFlags = (intel->intelScreen->irq_active != 0)
675 ? driGetDefaultVBlankFlags(&intel->optionCache)
676 : VBLANK_FLAG_NO_IRQ;
678 (*dri_interface->getUST) (&intel_fb->swap_ust);
679 driDrawableInitVBlank(driDrawPriv);
680 intel_fb->vbl_waited = driDrawPriv->vblSeq;
/* Three color buffers only when a third (triple-buffer) region exists. */
682 for (i = 0; i < (intel->intelScreen->third.handle ? 3 : 2); i++) {
683 if (intel_fb->color_rb[i])
684 intel_fb->color_rb[i]->vbl_pending = driDrawPriv->vblSeq;
687 intel->driDrawable = driDrawPriv;
688 intelWindowMoved(intel);
691 intel_draw_buffer(&intel->ctx, &intel_fb->Base);
/* driContextPriv == NULL: release the current context. */
695 _mesa_make_current(NULL, NULL, NULL);
/* Slow path taken when the fast DRM_CAS lock acquisition in LOCK_HARDWARE
 * fails: take the heavyweight DRM lock, revalidate drawable cliprects,
 * notify the fake bufmgr if another context touched texture memory, and
 * handle screen resizes by dropping the in-flight batchbuffer. */
702 intelContendedLock(struct intel_context *intel, GLuint flags)
704 __DRIdrawablePrivate *dPriv = intel->driDrawable;
705 __DRIscreenPrivate *sPriv = intel->driScreen;
706 drmI830Sarea *sarea = intel->sarea;
708 drmGetLock(intel->driFd, intel->hHWContext, flags);
710 if (INTEL_DEBUG & DEBUG_LOCK)
711 _mesa_printf("%s - got contended lock\n", __progname);
713 /* If the window moved, may need to set a new cliprect now.
715 * NOTE: This releases and regains the hw lock, so all state
716 * checking must be done *after* this call:
719 DRI_VALIDATE_DRAWABLE_INFO(sPriv, dPriv);
721 /* If the last consumer of the texture memory wasn't us, notify the fake
722 * bufmgr and record the new owner. We should have the memory shared
723 * between contexts of a single fake bufmgr, but this will at least make
724 * things correct for now.
726 if (!intel->ttm && sarea->texAge != intel->hHWContext) {
727 sarea->texAge = intel->hHWContext;
728 dri_bufmgr_fake_contended_lock_take(intel->bufmgr);
729 if (INTEL_DEBUG & DEBUG_BATCH)
730 intel_decode_context_reset();
/* Screen resize detected via the SAREA: flush with cliprects temporarily
 * zeroed so the stale batch is dropped rather than drawn misplaced. */
733 if (sarea->width != intel->width ||
734 sarea->height != intel->height) {
735 int numClipRects = intel->numClipRects;
738 * FIXME: Really only need to do this when drawing to a
739 * common back- or front buffer.
743 * This will essentially drop the outstanding batchbuffer on the floor.
745 intel->numClipRects = 0;
748 _swrast_flush(&intel->ctx);
750 INTEL_FIREVERTICES(intel);
752 if (intel->batch->map != intel->batch->ptr)
753 intel_batchbuffer_flush(intel->batch);
/* Restore the cliprect count saved above. */
755 intel->numClipRects = numClipRects;
757 /* force window update */
758 intel->lastStamp = 0;
760 intel->width = sarea->width;
761 intel->height = sarea->height;
/* Drawable stamp changed while we were unlocked: recompute window state. */
766 if (dPriv && intel->lastStamp != dPriv->lastStamp) {
767 intelWindowMoved(intel);
768 intel->lastStamp = dPriv->lastStamp;
774 /* Lock the hardware and validate our state.
/* Takes the process-wide lockMutex, optionally throttles to vblank for
 * the current draw buffer, then grabs the DRM hardware lock (fast CAS
 * path, falling back to intelContendedLock on contention). */
776 void LOCK_HARDWARE( struct intel_context *intel )
778 __DRIdrawablePrivate *dPriv = intel->driDrawable;
780 struct intel_framebuffer *intel_fb = NULL;
781 struct intel_renderbuffer *intel_rb = NULL;
782 _glthread_LOCK_MUTEX(lockMutex);
/* Recursive locking is a bug -- 'locked' is presumably set after this
 * (the assignment is not visible in this extraction). */
783 assert(!intel->locked);
785 if (intel->driDrawable) {
786 intel_fb = intel->driDrawable->driverPrivate;
/* Pick the renderbuffer we are about to draw to (front or back). */
790 intel_get_renderbuffer(&intel_fb->Base,
791 intel_fb->Base._ColorDrawBufferMask[0] ==
792 BUFFER_BIT_FRONT_LEFT ? BUFFER_FRONT_LEFT :
/* Wait for the pending vblank if IRQs are in use; the (1<<23) window
 * handles 32-bit sequence-counter wraparound. */
796 if (intel_rb && dPriv->vblFlags &&
797 !(dPriv->vblFlags & VBLANK_FLAG_NO_IRQ) &&
798 (intel_fb->vbl_waited - intel_rb->vbl_pending) > (1<<23)) {
801 vbl.request.type = DRM_VBLANK_ABSOLUTE;
803 if ( dPriv->vblFlags & VBLANK_FLAG_SECONDARY ) {
804 vbl.request.type |= DRM_VBLANK_SECONDARY;
807 vbl.request.sequence = intel_rb->vbl_pending;
808 drmWaitVBlank(intel->driFd, &vbl);
809 intel_fb->vbl_waited = vbl.reply.sequence;
/* Fast path: atomically claim the hardware lock; on failure (__ret set)
 * fall through to the contended path. */
812 DRM_CAS(intel->driHwLock, intel->hHWContext,
813 (DRM_LOCK_HELD|intel->hHWContext), __ret);
816 intelContendedLock( intel, 0 );
818 if (INTEL_DEBUG & DEBUG_LOCK)
819 _mesa_printf("%s - locked\n", __progname);
825 /* Unlock the hardware using the global current context
/* Release order mirrors LOCK_HARDWARE: drop the DRM hardware lock first,
 * then the process-wide mutex. */
827 void UNLOCK_HARDWARE( struct intel_context *intel )
831 DRM_UNLOCK(intel->driFd, intel->driHwLock, intel->hHWContext);
833 _glthread_UNLOCK_MUTEX(lockMutex);
835 if (INTEL_DEBUG & DEBUG_LOCK)
836 _mesa_printf("%s - unlocked\n", __progname);