From: Mauro Rossi
Date: Tue, 29 Dec 2015 01:19:44 +0000 (+0100)
Subject: i965/pixel_read: use the blitter for PBO ReadPixels with gen4
X-Git-Url: http://git.osdn.net/view?p=android-x86%2Fexternal-mesa.git;a=commitdiff_plain;h=d15afb9605ed239db944c6b73681df8144e48599

i965/pixel_read: use the blitter for PBO ReadPixels with gen4

After meta was disabled for gen4, the blitter path is re-enabled as a
gen4 optimization.  Function do_blit_readpixels() had already been
restored by a dedicated earlier commit.

Note: braces are used on both branches of the gen check; without them,
C's dangling-else rule would bind the "else" to the inner
if (_mesa_meta_pbo_GetTexSubImage(...)) and gen4 would never reach the
blitter path.
---

diff --git a/src/mesa/drivers/dri/i965/intel_pixel_read.c b/src/mesa/drivers/dri/i965/intel_pixel_read.c
index 706322d1a15..3f5e53affc9 100644
--- a/src/mesa/drivers/dri/i965/intel_pixel_read.c
+++ b/src/mesa/drivers/dri/i965/intel_pixel_read.c
@@ -339,31 +339,38 @@ intelReadPixels(struct gl_context * ctx,
    DBG("%s\n", __FUNCTION__);
 
    if (_mesa_is_bufferobj(pack->BufferObj)) {
-      if (_mesa_meta_pbo_GetTexSubImage(ctx, 2, NULL, x, y, 0, width, height, 1,
-                                        format, type, pixels, pack)) {
-         /* _mesa_meta_pbo_GetTexSubImage() implements PBO transfers by
-          * binding the user-provided BO as a fake framebuffer and rendering
-          * to it. This breaks the invariant of the GL that nothing is able
-          * to render to a BO, causing nondeterministic corruption issues
-          * because the render cache is not coherent with a number of other
-          * caches that the BO could potentially be bound to afterwards.
-          *
-          * This could be solved in the same way that we guarantee texture
-          * coherency after a texture is attached to a framebuffer and
-          * rendered to, but that would involve checking *all* BOs bound to
-          * the pipeline for the case we need to emit a cache flush due to
-          * previous rendering to any of them -- Including vertex, index,
-          * uniform, atomic counter, shader image, transform feedback,
-          * indirect draw buffers, etc.
-          *
-          * That would increase the per-draw call overhead even though it's
-          * very unlikely that any of the BOs bound to the pipeline has been
-          * rendered to via a PBO at any point, so it seems better to just
-          * flush here unconditionally.
-          */
-         intel_batchbuffer_emit_mi_flush(brw);
-         return;
-      }
+      if (brw->gen >= 5) {
+         if (_mesa_meta_pbo_GetTexSubImage(ctx, 2, NULL, x, y, 0, width, height, 1,
+                                           format, type, pixels, pack)) {
+            /* _mesa_meta_pbo_GetTexSubImage() implements PBO transfers by
+             * binding the user-provided BO as a fake framebuffer and rendering
+             * to it. This breaks the invariant of the GL that nothing is able
+             * to render to a BO, causing nondeterministic corruption issues
+             * because the render cache is not coherent with a number of other
+             * caches that the BO could potentially be bound to afterwards.
+             *
+             * This could be solved in the same way that we guarantee texture
+             * coherency after a texture is attached to a framebuffer and
+             * rendered to, but that would involve checking *all* BOs bound to
+             * the pipeline for the case we need to emit a cache flush due to
+             * previous rendering to any of them -- Including vertex, index,
+             * uniform, atomic counter, shader image, transform feedback,
+             * indirect draw buffers, etc.
+             *
+             * That would increase the per-draw call overhead even though it's
+             * very unlikely that any of the BOs bound to the pipeline has been
+             * rendered to via a PBO at any point, so it seems better to just
+             * flush here unconditionally.
+             */
+            intel_batchbuffer_emit_mi_flush(brw);
+            return;
+         }
+      } else {
+         /* Using PBOs, so try the BLT based path. */
+         if (do_blit_readpixels(ctx, x, y, width, height, format, type, pack,
+                                pixels))
+            return;
+      }
 
       perf_debug("%s: fallback to CPU mapping in PBO case\n", __FUNCTION__);
    }