#define FILE_DEBUG_FLAG DEBUG_PIXEL
+/* For many applications, the new ability to pull the source buffers
+ * back out of the GTT and then do the packing/conversion operations
+ * in software will be as much of an improvement as trying to get the
+ * blitter and/or texture engine to do the work.
+ *
+ * This step is gated on private backbuffers.
+ *
+ * Obviously the frontbuffer can't be pulled back, so that is either
+ * an argument for blit/texture readpixels, or for blitting to a
+ * temporary and then pulling that back.
+ *
+ * When the destination is a pbo, however, it's not clear if it is
+ * ever going to be pulled to main memory (though the access param
+ * will be a good hint). So it sounds like we do want to be able to
+ * choose between blit/texture implementation on the gpu and pullback
+ * and cpu-based copying.
+ *
+ * Unless you can magically turn client memory into a PBO for the
+ * duration of this call, there will be a cpu-based copying step in
+ * any case.
+ */
+
+/* Fast path for glReadPixels into a bound pixel pack buffer (PBO): blit
+ * from the read renderbuffer straight into the destination buffer object
+ * on the GPU, avoiding a CPU map of either surface.
+ *
+ * Returns true when the readpixels has been fully handled (including the
+ * fully-clipped, nothing-to-do case); false tells the caller to fall back
+ * to the software path.
+ */
+static bool
+do_blit_readpixels(struct gl_context * ctx,
+                   GLint x, GLint y, GLsizei width, GLsizei height,
+                   GLenum format, GLenum type,
+                   const struct gl_pixelstore_attrib *pack, GLvoid * pixels)
+{
+   struct brw_context *brw = brw_context(ctx);
+   struct intel_buffer_object *dst = intel_buffer_object(pack->BufferObj);
+   GLuint dst_offset;
+   drm_intel_bo *dst_buffer;
+   GLint dst_x, dst_y;
+   GLuint dirty;
+
+   DBG("%s\n", __FUNCTION__);
+
+   assert(_mesa_is_bufferobj(pack->BufferObj));
+
+   struct gl_renderbuffer *rb = ctx->ReadBuffer->_ColorReadBuffer;
+   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
+
+   /* Currently this function only supports reading from color buffers. */
+   if (!_mesa_is_color_format(format))
+      return false;
+
+   assert(irb != NULL);
+
+   /* The blit is a raw memory copy, so the source miptree format must
+    * exactly match the requested format/type, with no pixel transfer ops
+    * in effect.
+    */
+   if (ctx->_ImageTransferState ||
+       !_mesa_format_matches_format_and_type(irb->mt->format, format, type,
+                                             false)) {
+      DBG("%s - bad format for blit\n", __FUNCTION__);
+      return false;
+   }
+
+   if (pack->SwapBytes || pack->LsbFirst) {
+      DBG("%s: bad packing params\n", __FUNCTION__);
+      return false;
+   }
+
+   int dst_stride = _mesa_image_row_stride(pack, width, format, type);
+   bool dst_flip = false;
+   /* Mesa flips the dst_stride for pack->Invert, but we want our mt to have a
+    * normal dst_stride.  The flip is handled by the blit instead.
+    */
+   struct gl_pixelstore_attrib uninverted_pack = *pack;
+   if (pack->Invert) {
+      dst_stride = -dst_stride;
+      dst_flip = true;
+      uninverted_pack.Invert = false;
+   }
+
+   /* With a PBO bound, "pixels" is a byte offset into the buffer object. */
+   dst_offset = (GLintptr)pixels;
+   dst_offset += _mesa_image_offset(2, &uninverted_pack, width, height,
+                                    format, type, 0, 0, 0);
+
+   if (!_mesa_clip_copytexsubimage(ctx,
+                                   &dst_x, &dst_y,
+                                   &x, &y,
+                                   &width, &height)) {
+      /* Fully clipped: nothing to copy, but the read is complete. */
+      return true;
+   }
+
+   /* intel_prepare_render() is only called here to resolve the read
+    * surface; reading doesn't dirty the front buffer, so preserve the
+    * flag across the call.
+    */
+   dirty = brw->front_buffer_dirty;
+   intel_prepare_render(brw);
+   brw->front_buffer_dirty = dirty;
+
+   dst_buffer = intel_bufferobj_buffer(brw, dst,
+                                       dst_offset, height * dst_stride);
+
+   /* Wrap the destination BO in a throwaway miptree so the common blit
+    * path can target it.  NOTE(review): when pack->Invert is set,
+    * dst_stride is negative here — confirm intel_bufferobj_buffer and
+    * intel_miptree_create_for_bo handle a negative stride as intended.
+    */
+   struct intel_mipmap_tree *pbo_mt =
+      intel_miptree_create_for_bo(brw,
+                                  dst_buffer,
+                                  irb->mt->format,
+                                  dst_offset,
+                                  width, height, 1,
+                                  dst_stride);
+   if (!pbo_mt)
+      return false;
+
+   if (!intel_miptree_blit(brw,
+                           irb->mt, irb->mt_level, irb->mt_layer,
+                           x, y, _mesa_is_winsys_fbo(ctx->ReadBuffer),
+                           pbo_mt, 0, 0,
+                           0, 0, dst_flip,
+                           width, height, GL_COPY)) {
+      /* Don't leak the wrapper miptree on blit failure. */
+      intel_miptree_release(&pbo_mt);
+      return false;
+   }
+
+   intel_miptree_release(&pbo_mt);
+
+   DBG("%s - DONE\n", __FUNCTION__);
+
+   return true;
+}
+
/**
* \brief A fast path for glReadPixels
*
false);
}
+/* Upload texture data directly from a bound pixel unpack buffer (PBO)
+ * with a GPU blit, avoiding a CPU map/copy of the buffer object.
+ *
+ * Returns true on success; false means the caller must fall back to the
+ * ordinary CPU upload path.
+ *
+ * XXX: Do this for TexSubImage also:
+ */
+static bool
+try_pbo_upload(struct gl_context *ctx,
+               struct gl_texture_image *image,
+               const struct gl_pixelstore_attrib *unpack,
+               GLenum format, GLenum type, const void *pixels)
+{
+   struct intel_texture_image *intelImage = intel_texture_image(image);
+   struct brw_context *brw = brw_context(ctx);
+   struct intel_buffer_object *pbo = intel_buffer_object(unpack->BufferObj);
+   GLuint src_offset;
+   drm_intel_bo *src_buffer;
+
+   /* Only applicable when the unpack source is a buffer object. */
+   if (!_mesa_is_bufferobj(unpack->BufferObj))
+      return false;
+
+   DBG("trying pbo upload\n");
+
+   /* The blit is a raw copy: no pixel transfer ops, and no skip-pixel/row
+    * unpack state (those would require offsetting into the source rows).
+    */
+   if (ctx->_ImageTransferState || unpack->SkipPixels || unpack->SkipRows) {
+      DBG("%s: image transfer\n", __FUNCTION__);
+      return false;
+   }
+
+   /* Make sure the destination miptree exists before checking its format. */
+   ctx->Driver.AllocTextureImageBuffer(ctx, image);
+
+   if (!intelImage->mt) {
+      DBG("%s: no miptree\n", __FUNCTION__);
+      return false;
+   }
+
+   /* A blit can't do format conversion, so the miptree format must match
+    * the client format/type exactly.
+    */
+   if (!_mesa_format_matches_format_and_type(intelImage->mt->format,
+                                             format, type, false)) {
+      DBG("%s: format mismatch (upload to %s with format 0x%x, type 0x%x)\n",
+          __FUNCTION__, _mesa_get_format_name(intelImage->mt->format),
+          format, type);
+      return false;
+   }
+
+   /* Single-slice blit only; array textures need one blit per layer. */
+   if (image->TexObject->Target == GL_TEXTURE_1D_ARRAY ||
+       image->TexObject->Target == GL_TEXTURE_2D_ARRAY) {
+      DBG("%s: no support for array textures\n", __FUNCTION__);
+      return false;
+   }
+
+   int src_stride =
+      _mesa_image_row_stride(unpack, image->Width, format, type);
+
+   /* note: potential 64-bit ptr to 32-bit int cast */
+   src_offset = (GLuint) (unsigned long) pixels;
+   src_buffer = intel_bufferobj_buffer(brw, pbo,
+                                       src_offset, src_stride * image->Height);
+
+   /* Wrap the source BO in a temporary miptree so the common blit path
+    * can read from it.
+    */
+   struct intel_mipmap_tree *pbo_mt =
+      intel_miptree_create_for_bo(brw,
+                                  src_buffer,
+                                  intelImage->mt->format,
+                                  src_offset,
+                                  image->Width, image->Height, 1,
+                                  src_stride);
+   if (!pbo_mt)
+      return false;
+
+   if (!intel_miptree_blit(brw,
+                           pbo_mt, 0, 0,
+                           0, 0, false,
+                           intelImage->mt, image->Level, image->Face,
+                           0, 0, false,
+                           image->Width, image->Height, GL_COPY)) {
+      DBG("%s: blit failed\n", __FUNCTION__);
+      intel_miptree_release(&pbo_mt);
+      return false;
+   }
+
+   intel_miptree_release(&pbo_mt);
+
+   DBG("%s: success\n", __FUNCTION__);
+   return true;
+}
+
static void
intelTexImage(struct gl_context * ctx,
GLuint dims,
return true;
}
+/* Fast path for glGetTexImage into a bound pixel pack buffer (PBO): blit
+ * the texture miptree straight into the destination buffer object on the
+ * GPU instead of mapping the texture and packing on the CPU.
+ *
+ * Returns true on success; false means the caller must fall back to the
+ * CPU mapping path.
+ */
+static bool
+blit_texture_to_pbo(struct gl_context *ctx,
+                    GLenum format, GLenum type,
+                    GLvoid * pixels, struct gl_texture_image *texImage)
+{
+   struct intel_texture_image *intelImage = intel_texture_image(texImage);
+   struct brw_context *brw = brw_context(ctx);
+   const struct gl_pixelstore_attrib *pack = &ctx->Pack;
+   struct intel_buffer_object *dst = intel_buffer_object(pack->BufferObj);
+   GLuint dst_offset;
+   drm_intel_bo *dst_buffer;
+   GLenum target = texImage->TexObject->Target;
+
+   /* Check if we can use GPU blit to copy from the hardware texture
+    * format to the user's format/type.
+    * Note that GL's pixel transfer ops don't apply to glGetTexImage()
+    */
+
+   if (!_mesa_format_matches_format_and_type(intelImage->mt->format, format,
+                                             type, false))
+   {
+      perf_debug("%s: unsupported format, fallback to CPU mapping for PBO\n",
+                 __FUNCTION__);
+
+      return false;
+   }
+
+   if (ctx->_ImageTransferState) {
+      perf_debug("%s: bad transfer state, fallback to CPU mapping for PBO\n",
+                 __FUNCTION__);
+      return false;
+   }
+
+   if (pack->SwapBytes || pack->LsbFirst) {
+      perf_debug("%s: unsupported pack swap params\n",
+                 __FUNCTION__);
+      return false;
+   }
+
+   /* Single-slice blit only: targets with multiple faces/layers/depths
+    * would need one blit per slice.
+    */
+   if (target == GL_TEXTURE_1D_ARRAY ||
+       target == GL_TEXTURE_2D_ARRAY ||
+       target == GL_TEXTURE_CUBE_MAP ||
+       target == GL_TEXTURE_CUBE_MAP_ARRAY ||
+       target == GL_TEXTURE_3D) {
+      perf_debug("%s: no support for multiple slices, fallback to CPU mapping "
+                 "for PBO\n", __FUNCTION__);
+      return false;
+   }
+
+   int dst_stride = _mesa_image_row_stride(pack, texImage->Width, format, type);
+   bool dst_flip = false;
+   /* Mesa flips the dst_stride for ctx->Pack.Invert, our mt must have a
+    * normal dst_stride.  The flip is handled by the blit instead.
+    */
+   struct gl_pixelstore_attrib uninverted_pack = *pack;
+   if (ctx->Pack.Invert) {
+      dst_stride = -dst_stride;
+      dst_flip = true;
+      uninverted_pack.Invert = false;
+   }
+
+   /* With a PBO bound, "pixels" is a byte offset into the buffer object. */
+   dst_offset = (GLintptr) pixels;
+   dst_offset += _mesa_image_offset(2, &uninverted_pack, texImage->Width,
+                                    texImage->Height, format, type, 0, 0, 0);
+   dst_buffer = intel_bufferobj_buffer(brw, dst, dst_offset,
+                                       texImage->Height * dst_stride);
+
+   /* Wrap the destination BO in a throwaway miptree so the common blit
+    * path can target it.  NOTE(review): when ctx->Pack.Invert is set,
+    * dst_stride is negative here — confirm the BO-size computation above
+    * and intel_miptree_create_for_bo handle that as intended.
+    */
+   struct intel_mipmap_tree *pbo_mt =
+      intel_miptree_create_for_bo(brw,
+                                  dst_buffer,
+                                  intelImage->mt->format,
+                                  dst_offset,
+                                  texImage->Width, texImage->Height, 1,
+                                  dst_stride);
+
+   if (!pbo_mt)
+      return false;
+
+   if (!intel_miptree_blit(brw,
+                           intelImage->mt, texImage->Level, texImage->Face,
+                           0, 0, false,
+                           pbo_mt, 0, 0,
+                           0, 0, dst_flip,
+                           texImage->Width, texImage->Height, GL_COPY)) {
+      /* Don't leak the wrapper miptree on blit failure. */
+      intel_miptree_release(&pbo_mt);
+      return false;
+   }
+
+   intel_miptree_release(&pbo_mt);
+
+   return true;
+}
+
static void
intel_get_tex_image(struct gl_context *ctx,
GLenum format, GLenum type, GLvoid *pixels,
#define FILE_DEBUG_FLAG DEBUG_TEXTURE
+/* Blit-based glTexSubImage upload for the case where the destination BO
+ * is busy on the GPU: store the client data into a freshly allocated
+ * linear staging miptree with the CPU, then blit the staging surface into
+ * the texture so the upload doesn't stall waiting for the BO.
+ *
+ * Returns true if the subimage upload was handled; false means the caller
+ * must use the normal texstore path.
+ */
+static bool
+intel_blit_texsubimage(struct gl_context * ctx,
+                       struct gl_texture_image *texImage,
+                       GLint xoffset, GLint yoffset,
+                       GLint width, GLint height,
+                       GLenum format, GLenum type, const void *pixels,
+                       const struct gl_pixelstore_attrib *packing)
+{
+   struct brw_context *brw = brw_context(ctx);
+   struct intel_texture_image *intelImage = intel_texture_image(texImage);
+
+   /* Try to do a blit upload of the subimage if the texture is
+    * currently busy.
+    */
+   if (!intelImage->mt)
+      return false;
+
+   /* Prior to Sandybridge, the blitter can't handle Y tiling */
+   if (brw->gen < 6 && intelImage->mt->tiling == I915_TILING_Y)
+      return false;
+
+   if (texImage->TexObject->Target != GL_TEXTURE_2D)
+      return false;
+
+   /* On gen6, it's probably not worth swapping to the blit ring to do
+    * this because of all the overhead involved.
+    */
+   if (brw->gen >= 6)
+      return false;
+
+   /* If the BO is idle, a direct CPU upload won't stall; skip the blit. */
+   if (!drm_intel_bo_busy(intelImage->mt->bo))
+      return false;
+
+   DBG("BLT subimage %s target %s level %d offset %d,%d %dx%d\n",
+       __FUNCTION__,
+       _mesa_lookup_enum_by_nr(texImage->TexObject->Target),
+       texImage->Level, xoffset, yoffset, width, height);
+
+   /* Maps the unpack PBO (if any); must be matched by
+    * _mesa_unmap_teximage_pbo() on every exit path below.
+    */
+   pixels = _mesa_validate_pbo_teximage(ctx, 2, width, height, 1,
+                                        format, type, pixels, packing,
+                                        "glTexSubImage");
+   if (!pixels)
+      return false;
+
+   struct intel_mipmap_tree *temp_mt =
+      intel_miptree_create(brw, GL_TEXTURE_2D, texImage->TexFormat,
+                           0, 0,
+                           width, height, 1,
+                           false, 0, INTEL_MIPTREE_TILING_NONE,
+                           false);
+   if (!temp_mt)
+      goto err;
+
+   GLubyte *dst = intel_miptree_map_raw(brw, temp_mt);
+   if (!dst)
+      goto err;
+
+   /* CPU-store the client data into the linear staging surface.
+    * NOTE(review): on texstore failure only a GL error is recorded and the
+    * blit still proceeds with undefined staging contents — confirm this is
+    * intentional.
+    */
+   if (!_mesa_texstore(ctx, 2, texImage->_BaseFormat,
+                       texImage->TexFormat,
+                       temp_mt->pitch,
+                       &dst,
+                       width, height, 1,
+                       format, type, pixels, packing)) {
+      _mesa_error(ctx, GL_OUT_OF_MEMORY, "intelTexSubImage");
+   }
+
+   intel_miptree_unmap_raw(brw, temp_mt);
+
+   bool ret;
+
+   /* GPU-copy the staging surface into the real texture at the requested
+    * offset.  NOTE(review): the assert below vanishes in release builds,
+    * where a failed blit just returns false after the work above.
+    */
+   ret = intel_miptree_blit(brw,
+                            temp_mt, 0, 0,
+                            0, 0, false,
+                            intelImage->mt, texImage->Level, texImage->Face,
+                            xoffset, yoffset, false,
+                            width, height, GL_COPY);
+   assert(ret);
+
+   intel_miptree_release(&temp_mt);
+   _mesa_unmap_teximage_pbo(ctx, packing);
+
+   return ret;
+
+err:
+   /* temp_mt may be NULL here (failed create); release handles that. */
+   _mesa_error(ctx, GL_OUT_OF_MEMORY, "intelTexSubImage");
+   intel_miptree_release(&temp_mt);
+   _mesa_unmap_teximage_pbo(ctx, packing);
+   return false;
+}
+
/**
* \brief A fast path for glTexImage and glTexSubImage.
*