OSDN Git Service

intel: Refactor code in intel_miptree_choose_tiling().
[android-x86/external-mesa.git] / src / mesa / drivers / dri / intel / intel_mipmap_tree.c
1 /**************************************************************************
2  * 
3  * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
4  * All Rights Reserved.
5  * 
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  * 
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  * 
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25  * 
26  **************************************************************************/
27
28 #include <GL/gl.h>
29 #include <GL/internal/dri_interface.h>
30
31 #include "intel_batchbuffer.h"
32 #include "intel_chipset.h"
33 #include "intel_context.h"
34 #include "intel_mipmap_tree.h"
35 #include "intel_regions.h"
36 #include "intel_resolve_map.h"
37 #include "intel_span.h"
38 #include "intel_tex_layout.h"
39 #include "intel_tex.h"
40 #include "intel_blit.h"
41
42 #ifndef I915
43 #include "brw_blorp.h"
44 #endif
45
46 #include "main/enums.h"
47 #include "main/formats.h"
48 #include "main/glformats.h"
49 #include "main/texcompress_etc.h"
50 #include "main/teximage.h"
51
52 #define FILE_DEBUG_FLAG DEBUG_MIPTREE
53
54 static GLenum
55 target_to_target(GLenum target)
56 {
57    switch (target) {
58    case GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB:
59    case GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB:
60    case GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB:
61    case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB:
62    case GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB:
63    case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB:
64       return GL_TEXTURE_CUBE_MAP_ARB;
65    default:
66       return target;
67    }
68 }
69
70
71 /**
72  * Determine which MSAA layout should be used by the MSAA surface being
73  * created, based on the chip generation and the surface type.
74  */
75 static enum intel_msaa_layout
76 compute_msaa_layout(struct intel_context *intel, gl_format format, GLenum target)
77 {
78    /* Prior to Gen7, all MSAA surfaces used IMS layout. */
79    if (intel->gen < 7)
80       return INTEL_MSAA_LAYOUT_IMS;
81
82    /* In Gen7, IMS layout is only used for depth and stencil buffers. */
83    switch (_mesa_get_format_base_format(format)) {
84    case GL_DEPTH_COMPONENT:
85    case GL_STENCIL_INDEX:
86    case GL_DEPTH_STENCIL:
87       return INTEL_MSAA_LAYOUT_IMS;
88    default:
89       /* From the Ivy Bridge PRM, Vol4 Part1 p77 ("MCS Enable"):
90        *
91        *   This field must be set to 0 for all SINT MSRTs when all RT channels
92        *   are not written
93        *
94        * In practice this means that we have to disable MCS for all signed
95        * integer MSAA buffers.  The alternative, to disable MCS only when one
96        * of the render target channels is disabled, is impractical because it
97        * would require converting between CMS and UMS MSAA layouts on the fly,
98        * which is expensive.
99        */
100       if (_mesa_get_format_datatype(format) == GL_INT) {
101          /* TODO: is this workaround needed for future chipsets? */
102          assert(intel->gen == 7);
103          return INTEL_MSAA_LAYOUT_UMS;
104       } else {
105          /* For now, if we're going to be texturing from this surface,
106           * force UMS, so that the shader doesn't have to do different things
107           * based on whether there's a multisample control surface needing sampled first.
108           * We can't just blindly read the MCS surface in all cases because:
109           *
110           * From the Ivy Bridge PRM, Vol4 Part1 p77 ("MCS Enable"):
111           *
112           *    If this field is disabled and the sampling engine <ld_mcs> message
113           *    is issued on this surface, the MCS surface may be accessed. Software
114           *    must ensure that the surface is defined to avoid GTT errors.
115           */
116          if (target == GL_TEXTURE_2D_MULTISAMPLE ||
117              target == GL_TEXTURE_2D_MULTISAMPLE_ARRAY) {
118             return INTEL_MSAA_LAYOUT_UMS;
119          } else {
120             return INTEL_MSAA_LAYOUT_CMS;
121          }
122       }
123    }
124 }
125
126
127 /**
128  * @param for_region Indicates that the caller is
129  *        intel_miptree_create_for_region(). If true, then do not create
130  *        \c stencil_mt.
131  */
132 struct intel_mipmap_tree *
133 intel_miptree_create_layout(struct intel_context *intel,
134                             GLenum target,
135                             gl_format format,
136                             GLuint first_level,
137                             GLuint last_level,
138                             GLuint width0,
139                             GLuint height0,
140                             GLuint depth0,
141                             bool for_region,
142                             GLuint num_samples)
143 {
144    struct intel_mipmap_tree *mt = calloc(sizeof(*mt), 1);
145    int compress_byte = 0;
146
147    DBG("%s target %s format %s level %d..%d <-- %p\n", __FUNCTION__,
148        _mesa_lookup_enum_by_nr(target),
149        _mesa_get_format_name(format),
150        first_level, last_level, mt);
151
152    if (_mesa_is_format_compressed(format))
153       compress_byte = intel_compressed_num_bytes(format);
154
155    mt->target = target_to_target(target);
156    mt->format = format;
157    mt->first_level = first_level;
158    mt->last_level = last_level;
159    mt->logical_width0 = width0;
160    mt->logical_height0 = height0;
161    mt->logical_depth0 = depth0;
162    mt->cpp = compress_byte ? compress_byte : _mesa_get_format_bytes(mt->format);
163    mt->num_samples = num_samples;
164    mt->compressed = compress_byte ? 1 : 0;
165    mt->msaa_layout = INTEL_MSAA_LAYOUT_NONE;
166    mt->refcount = 1; 
167
168    if (num_samples > 1) {
169       /* Adjust width/height/depth for MSAA */
170       mt->msaa_layout = compute_msaa_layout(intel, format, mt->target);
171       if (mt->msaa_layout == INTEL_MSAA_LAYOUT_IMS) {
172          /* In the Sandy Bridge PRM, volume 4, part 1, page 31, it says:
173           *
174           *     "Any of the other messages (sample*, LOD, load4) used with a
175           *      (4x) multisampled surface will in-effect sample a surface with
176           *      double the height and width as that indicated in the surface
177           *      state. Each pixel position on the original-sized surface is
178           *      replaced with a 2x2 of samples with the following arrangement:
179           *
180           *         sample 0 sample 2
181           *         sample 1 sample 3"
182           *
183           * Thus, when sampling from a multisampled texture, it behaves as
184           * though the layout in memory for (x,y,sample) is:
185           *
186           *      (0,0,0) (0,0,2)   (1,0,0) (1,0,2)
187           *      (0,0,1) (0,0,3)   (1,0,1) (1,0,3)
188           *
189           *      (0,1,0) (0,1,2)   (1,1,0) (1,1,2)
190           *      (0,1,1) (0,1,3)   (1,1,1) (1,1,3)
191           *
192           * However, the actual layout of multisampled data in memory is:
193           *
194           *      (0,0,0) (1,0,0)   (0,0,1) (1,0,1)
195           *      (0,1,0) (1,1,0)   (0,1,1) (1,1,1)
196           *
197           *      (0,0,2) (1,0,2)   (0,0,3) (1,0,3)
198           *      (0,1,2) (1,1,2)   (0,1,3) (1,1,3)
199           *
200           * This pattern repeats for each 2x2 pixel block.
201           *
202           * As a result, when calculating the size of our 4-sample buffer for
203           * an odd width or height, we have to align before scaling up because
204           * sample 3 is in that bottom right 2x2 block.
205           */
206          switch (num_samples) {
207          case 4:
208             width0 = ALIGN(width0, 2) * 2;
209             height0 = ALIGN(height0, 2) * 2;
210             break;
211          case 8:
212             width0 = ALIGN(width0, 2) * 4;
213             height0 = ALIGN(height0, 2) * 2;
214             break;
215          default:
216             /* num_samples should already have been quantized to 0, 1, 4, or
217              * 8.
218              */
219             assert(false);
220          }
221       } else {
222          /* Non-interleaved */
223          depth0 *= num_samples;
224       }
225    }
226
227    /* array_spacing_lod0 is only used for non-IMS MSAA surfaces.  TODO: can we
228     * use it elsewhere?
229     */
230    switch (mt->msaa_layout) {
231    case INTEL_MSAA_LAYOUT_NONE:
232    case INTEL_MSAA_LAYOUT_IMS:
233       mt->array_spacing_lod0 = false;
234       break;
235    case INTEL_MSAA_LAYOUT_UMS:
236    case INTEL_MSAA_LAYOUT_CMS:
237       mt->array_spacing_lod0 = true;
238       break;
239    }
240
241    if (target == GL_TEXTURE_CUBE_MAP) {
242       assert(depth0 == 1);
243       depth0 = 6;
244    }
245
246    mt->physical_width0 = width0;
247    mt->physical_height0 = height0;
248    mt->physical_depth0 = depth0;
249
250    if (!for_region &&
251        _mesa_get_format_base_format(format) == GL_DEPTH_STENCIL &&
252        (intel->must_use_separate_stencil ||
253         (intel->has_separate_stencil &&
254          intel->vtbl.is_hiz_depth_format(intel, format)))) {
255       mt->stencil_mt = intel_miptree_create(intel,
256                                             mt->target,
257                                             MESA_FORMAT_S8,
258                                             mt->first_level,
259                                             mt->last_level,
260                                             mt->logical_width0,
261                                             mt->logical_height0,
262                                             mt->logical_depth0,
263                                             true,
264                                             num_samples,
265                                             false /* force_y_tiling */);
266       if (!mt->stencil_mt) {
267          intel_miptree_release(&mt);
268          return NULL;
269       }
270
271       /* Fix up the Z miptree format for how we're splitting out separate
272        * stencil.  Gen7 expects there to be no stencil bits in its depth buffer.
273        */
274       if (mt->format == MESA_FORMAT_S8_Z24) {
275          mt->format = MESA_FORMAT_X8_Z24;
276       } else if (mt->format == MESA_FORMAT_Z32_FLOAT_X24S8) {
277          mt->format = MESA_FORMAT_Z32_FLOAT;
278          mt->cpp = 4;
279       } else {
280          _mesa_problem(NULL, "Unknown format %s in separate stencil mt\n",
281                        _mesa_get_format_name(mt->format));
282       }
283    }
284
285    intel_get_texture_alignment_unit(intel, mt->format,
286                                     &mt->align_w, &mt->align_h);
287
288 #ifdef I915
289    (void) intel;
290    if (intel->is_945)
291       i945_miptree_layout(mt);
292    else
293       i915_miptree_layout(mt);
294 #else
295    brw_miptree_layout(intel, mt);
296 #endif
297
298    return mt;
299 }
300
301 /**
302  * \brief Helper function for intel_miptree_create().
303  */
304 static uint32_t
305 intel_miptree_choose_tiling(struct intel_context *intel,
306                             gl_format format,
307                             uint32_t width0,
308                             uint32_t num_samples,
309                             bool force_y_tiling,
310                             struct intel_mipmap_tree *mt)
311 {
312
313    if (format == MESA_FORMAT_S8) {
314       /* The stencil buffer is W tiled. However, we request from the kernel a
315        * non-tiled buffer because the GTT is incapable of W fencing.
316        */
317       return I915_TILING_NONE;
318    }
319
320    if (force_y_tiling)
321       return I915_TILING_Y;
322
323    if (num_samples > 1) {
324       /* From p82 of the Sandy Bridge PRM, dw3[1] of SURFACE_STATE ("Tiled
325        * Surface"):
326        *
327        *   [DevSNB+]: For multi-sample render targets, this field must be
328        *   1. MSRTs can only be tiled.
329        *
330        * Our usual reason for preferring X tiling (fast blits using the
331        * blitting engine) doesn't apply to MSAA, since we'll generally be
332        * downsampling or upsampling when blitting between the MSAA buffer
333        * and another buffer, and the blitting engine doesn't support that.
334        * So use Y tiling, since it makes better use of the cache.
335        */
336       return I915_TILING_Y;
337    }
338
339    GLenum base_format = _mesa_get_format_base_format(format);
340    if (intel->gen >= 4 &&
341        (base_format == GL_DEPTH_COMPONENT ||
342         base_format == GL_DEPTH_STENCIL_EXT))
343       return I915_TILING_Y;
344
345    /* If the width is smaller than a tile, don't bother tiling. */
346    if (width0 < 64)
347       return I915_TILING_NONE;
348
349    if (ALIGN(mt->total_width * mt->cpp, 512) >= 32768) {
350       perf_debug("%dx%d miptree too large to blit, falling back to untiled",
351                  mt->total_width, mt->total_height);
352       return I915_TILING_NONE;
353    }
354
355    return intel->gen >= 6 ? I915_TILING_Y : I915_TILING_X;
356 }
357
358 struct intel_mipmap_tree *
359 intel_miptree_create(struct intel_context *intel,
360                      GLenum target,
361                      gl_format format,
362                      GLuint first_level,
363                      GLuint last_level,
364                      GLuint width0,
365                      GLuint height0,
366                      GLuint depth0,
367                      bool expect_accelerated_upload,
368                      GLuint num_samples,
369                      bool force_y_tiling)
370 {
371    struct intel_mipmap_tree *mt;
372    gl_format tex_format = format;
373    gl_format etc_format = MESA_FORMAT_NONE;
374    GLuint total_width, total_height;
375
376    switch (format) {
377    case MESA_FORMAT_ETC1_RGB8:
378       format = MESA_FORMAT_RGBX8888_REV;
379       break;
380    case MESA_FORMAT_ETC2_RGB8:
381       format = MESA_FORMAT_RGBX8888_REV;
382       break;
383    case MESA_FORMAT_ETC2_SRGB8:
384    case MESA_FORMAT_ETC2_SRGB8_ALPHA8_EAC:
385    case MESA_FORMAT_ETC2_SRGB8_PUNCHTHROUGH_ALPHA1:
386       format = MESA_FORMAT_SARGB8;
387       break;
388    case MESA_FORMAT_ETC2_RGBA8_EAC:
389    case MESA_FORMAT_ETC2_RGB8_PUNCHTHROUGH_ALPHA1:
390       format = MESA_FORMAT_RGBA8888_REV;
391       break;
392    case MESA_FORMAT_ETC2_R11_EAC:
393       format = MESA_FORMAT_R16;
394       break;
395    case MESA_FORMAT_ETC2_SIGNED_R11_EAC:
396       format = MESA_FORMAT_SIGNED_R16;
397       break;
398    case MESA_FORMAT_ETC2_RG11_EAC:
399       format = MESA_FORMAT_GR1616;
400       break;
401    case MESA_FORMAT_ETC2_SIGNED_RG11_EAC:
402       format = MESA_FORMAT_SIGNED_GR1616;
403       break;
404    default:
405       /* Non ETC1 / ETC2 format */
406       break;
407    }
408
409    etc_format = (format != tex_format) ? tex_format : MESA_FORMAT_NONE;
410
411    mt = intel_miptree_create_layout(intel, target, format,
412                                       first_level, last_level, width0,
413                                       height0, depth0,
414                                       false, num_samples);
415    /*
416     * pitch == 0 || height == 0  indicates the null texture
417     */
418    if (!mt || !mt->total_width || !mt->total_height) {
419       intel_miptree_release(&mt);
420       return NULL;
421    }
422
423    total_width = mt->total_width;
424    total_height = mt->total_height;
425
426    if (format == MESA_FORMAT_S8) {
427       /* Align to size of W tile, 64x64. */
428       total_width = ALIGN(total_width, 64);
429       total_height = ALIGN(total_height, 64);
430    }
431
432    uint32_t tiling = intel_miptree_choose_tiling(intel, format, width0,
433                                                  num_samples, force_y_tiling,
434                                                  mt);
435    mt->etc_format = etc_format;
436    mt->region = intel_region_alloc(intel->intelScreen,
437                                    tiling,
438                                    mt->cpp,
439                                    total_width,
440                                    total_height,
441                                    expect_accelerated_upload);
442    mt->offset = 0;
443
444    if (!mt->region) {
445        intel_miptree_release(&mt);
446        return NULL;
447    }
448
449    return mt;
450 }
451
452 struct intel_mipmap_tree *
453 intel_miptree_create_for_region(struct intel_context *intel,
454                                 GLenum target,
455                                 gl_format format,
456                                 struct intel_region *region)
457 {
458    struct intel_mipmap_tree *mt;
459
460    mt = intel_miptree_create_layout(intel, target, format,
461                                       0, 0,
462                                       region->width, region->height, 1,
463                                       true, 0 /* num_samples */);
464    if (!mt)
465       return mt;
466
467    intel_region_reference(&mt->region, region);
468
469    return mt;
470 }
471
472
/**
 * For a singlesample DRI2 buffer, this simply wraps the given region with a miptree.
 *
 * For a multisample DRI2 buffer, this wraps the given region with
 * a singlesample miptree, then creates a multisample miptree into which the
 * singlesample miptree is embedded as a child.
 */
struct intel_mipmap_tree*
intel_miptree_create_for_dri2_buffer(struct intel_context *intel,
                                     unsigned dri_attachment,
                                     gl_format format,
                                     uint32_t num_samples,
                                     struct intel_region *region)
{
   struct intel_mipmap_tree *singlesample_mt = NULL;
   struct intel_mipmap_tree *multisample_mt = NULL;
   GLenum base_format = _mesa_get_format_base_format(format);

   /* Only the front and back buffers, which are color buffers, are shared
    * through DRI2.
    */
   assert(dri_attachment == __DRI_BUFFER_BACK_LEFT ||
          dri_attachment == __DRI_BUFFER_FRONT_LEFT ||
          dri_attachment == __DRI_BUFFER_FAKE_FRONT_LEFT);
   assert(base_format == GL_RGB || base_format == GL_RGBA);

   /* Wrap the shared region in a single-level 2D miptree. */
   singlesample_mt = intel_miptree_create_for_region(intel, GL_TEXTURE_2D,
                                                     format, region);
   if (!singlesample_mt)
      return NULL;

   /* Non-MSAA case: the wrapper miptree is the whole result. */
   if (num_samples == 0)
      return singlesample_mt;

   multisample_mt = intel_miptree_create_for_renderbuffer(intel,
                                                          format,
                                                          region->width,
                                                          region->height,
                                                          num_samples);
   if (!multisample_mt) {
      intel_miptree_release(&singlesample_mt);
      return NULL;
   }

   /* The multisample miptree takes ownership of the singlesample wrapper
    * as its resolve target; downsampling is deferred until needed.
    */
   multisample_mt->singlesample_mt = singlesample_mt;
   multisample_mt->need_downsample = false;

   /* When front-buffer rendering, seed the MSAA buffer from the shared
    * singlesample front buffer's current contents.
    */
   if (intel->is_front_buffer_rendering &&
       (dri_attachment == __DRI_BUFFER_FRONT_LEFT ||
        dri_attachment == __DRI_BUFFER_FAKE_FRONT_LEFT)) {
      intel_miptree_upsample(intel, multisample_mt);
   }

   return multisample_mt;
}
528
529 struct intel_mipmap_tree*
530 intel_miptree_create_for_renderbuffer(struct intel_context *intel,
531                                       gl_format format,
532                                       uint32_t width,
533                                       uint32_t height,
534                                       uint32_t num_samples)
535 {
536    struct intel_mipmap_tree *mt;
537    uint32_t depth = 1;
538    bool ok;
539
540    mt = intel_miptree_create(intel, GL_TEXTURE_2D, format, 0, 0,
541                              width, height, depth, true, num_samples,
542                              false /* force_y_tiling */);
543    if (!mt)
544       goto fail;
545
546    if (intel->vtbl.is_hiz_depth_format(intel, format)) {
547       ok = intel_miptree_alloc_hiz(intel, mt, num_samples);
548       if (!ok)
549          goto fail;
550    }
551
552    if (mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) {
553       ok = intel_miptree_alloc_mcs(intel, mt, num_samples);
554       if (!ok)
555          goto fail;
556    }
557
558    return mt;
559
560 fail:
561    intel_miptree_release(&mt);
562    return NULL;
563 }
564
565 void
566 intel_miptree_reference(struct intel_mipmap_tree **dst,
567                         struct intel_mipmap_tree *src)
568 {
569    if (*dst == src)
570       return;
571
572    intel_miptree_release(dst);
573
574    if (src) {
575       src->refcount++;
576       DBG("%s %p refcount now %d\n", __FUNCTION__, src, src->refcount);
577    }
578
579    *dst = src;
580 }
581
582
583 void
584 intel_miptree_release(struct intel_mipmap_tree **mt)
585 {
586    if (!*mt)
587       return;
588
589    DBG("%s %p refcount will be %d\n", __FUNCTION__, *mt, (*mt)->refcount - 1);
590    if (--(*mt)->refcount <= 0) {
591       GLuint i;
592
593       DBG("%s deleting %p\n", __FUNCTION__, *mt);
594
595       intel_region_release(&((*mt)->region));
596       intel_miptree_release(&(*mt)->stencil_mt);
597       intel_miptree_release(&(*mt)->hiz_mt);
598       intel_miptree_release(&(*mt)->mcs_mt);
599       intel_miptree_release(&(*mt)->singlesample_mt);
600       intel_resolve_map_clear(&(*mt)->hiz_map);
601
602       for (i = 0; i < MAX_TEXTURE_LEVELS; i++) {
603          free((*mt)->level[i].slice);
604       }
605
606       free(*mt);
607    }
608    *mt = NULL;
609 }
610
611 void
612 intel_miptree_get_dimensions_for_image(struct gl_texture_image *image,
613                                        int *width, int *height, int *depth)
614 {
615    switch (image->TexObject->Target) {
616    case GL_TEXTURE_1D_ARRAY:
617       *width = image->Width;
618       *height = 1;
619       *depth = image->Height;
620       break;
621    default:
622       *width = image->Width;
623       *height = image->Height;
624       *depth = image->Depth;
625       break;
626    }
627 }
628
/**
 * Can the image be pulled into a unified mipmap tree?  This mirrors
 * the completeness test in a lot of ways.
 *
 * Not sure whether I want to pass gl_texture_image here.
 */
bool
intel_miptree_match_image(struct intel_mipmap_tree *mt,
                          struct gl_texture_image *image)
{
   struct intel_texture_image *intelImage = intel_texture_image(image);
   GLuint level = intelImage->base.Base.Level;
   int width, height, depth;

   /* glTexImage* choose the texture object based on the target passed in, and
    * objects can't change targets over their lifetimes, so this should be
    * true.
    */
   assert(target_to_target(image->TexObject->Target) == mt->target);

   /* Compute the format the image must carry to match this tree: a depth
    * tree with separate stencil presents itself to GL as the packed
    * depth/stencil format, and an ETC-backed tree presents itself as the
    * original compressed format.
    */
   gl_format mt_format = mt->format;
   if (mt->format == MESA_FORMAT_X8_Z24 && mt->stencil_mt)
      mt_format = MESA_FORMAT_S8_Z24;
   if (mt->format == MESA_FORMAT_Z32_FLOAT && mt->stencil_mt)
      mt_format = MESA_FORMAT_Z32_FLOAT_X24S8;
   if (mt->etc_format != MESA_FORMAT_NONE)
      mt_format = mt->etc_format;

   if (image->TexFormat != mt_format)
      return false;

   intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);

   /* Cube map trees always store six faces. */
   if (mt->target == GL_TEXTURE_CUBE_MAP)
      depth = 6;

   /* Test image dimensions against the base level image adjusted for
    * minification.  This will also catch images not present in the
    * tree, changed targets, etc.
    */
   if (mt->target == GL_TEXTURE_2D_MULTISAMPLE ||
         mt->target == GL_TEXTURE_2D_MULTISAMPLE_ARRAY) {
      /* nonzero level here is always bogus */
      assert(level == 0);

      if (width != mt->logical_width0 ||
            height != mt->logical_height0 ||
            depth != mt->logical_depth0) {
         return false;
      }
   }
   else {
      /* all normal textures, renderbuffers, etc */
      if (width != mt->level[level].width ||
          height != mt->level[level].height ||
          depth != mt->level[level].depth) {
         return false;
      }
   }

   /* Sample counts must agree as well. */
   if (image->NumSamples != mt->num_samples)
      return false;

   return true;
}
694
695
696 void
697 intel_miptree_set_level_info(struct intel_mipmap_tree *mt,
698                              GLuint level,
699                              GLuint x, GLuint y,
700                              GLuint w, GLuint h, GLuint d)
701 {
702    mt->level[level].width = w;
703    mt->level[level].height = h;
704    mt->level[level].depth = d;
705    mt->level[level].level_x = x;
706    mt->level[level].level_y = y;
707
708    DBG("%s level %d size: %d,%d,%d offset %d,%d\n", __FUNCTION__,
709        level, w, h, d, x, y);
710
711    assert(mt->level[level].slice == NULL);
712
713    mt->level[level].slice = calloc(d, sizeof(*mt->level[0].slice));
714    mt->level[level].slice[0].x_offset = mt->level[level].level_x;
715    mt->level[level].slice[0].y_offset = mt->level[level].level_y;
716 }
717
718
719 void
720 intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,
721                                GLuint level, GLuint img,
722                                GLuint x, GLuint y)
723 {
724    if (img == 0 && level == 0)
725       assert(x == 0 && y == 0);
726
727    assert(img < mt->level[level].depth);
728
729    mt->level[level].slice[img].x_offset = mt->level[level].level_x + x;
730    mt->level[level].slice[img].y_offset = mt->level[level].level_y + y;
731
732    DBG("%s level %d img %d pos %d,%d\n",
733        __FUNCTION__, level, img,
734        mt->level[level].slice[img].x_offset,
735        mt->level[level].slice[img].y_offset);
736 }
737
738 void
739 intel_miptree_get_image_offset(struct intel_mipmap_tree *mt,
740                                GLuint level, GLuint slice,
741                                GLuint *x, GLuint *y)
742 {
743    assert(slice < mt->level[level].depth);
744
745    *x = mt->level[level].slice[slice].x_offset;
746    *y = mt->level[level].slice[slice].y_offset;
747 }
748
749 void
750 intel_miptree_get_tile_offsets(struct intel_mipmap_tree *mt,
751                                GLuint level, GLuint slice,
752                                uint32_t *tile_x,
753                                uint32_t *tile_y)
754 {
755    struct intel_region *region = mt->region;
756    uint32_t mask_x, mask_y;
757
758    intel_region_get_tile_masks(region, &mask_x, &mask_y, false);
759
760    *tile_x = mt->level[level].slice[slice].x_offset & mask_x;
761    *tile_y = mt->level[level].slice[slice].y_offset & mask_y;
762 }
763
764 static void
765 intel_miptree_copy_slice_sw(struct intel_context *intel,
766                             struct intel_mipmap_tree *dst_mt,
767                             struct intel_mipmap_tree *src_mt,
768                             int level,
769                             int slice,
770                             int width,
771                             int height)
772 {
773    void *src, *dst;
774    int src_stride, dst_stride;
775    int cpp = dst_mt->cpp;
776
777    intel_miptree_map(intel, src_mt,
778                      level, slice,
779                      0, 0,
780                      width, height,
781                      GL_MAP_READ_BIT | BRW_MAP_DIRECT_BIT,
782                      &src, &src_stride);
783
784    intel_miptree_map(intel, dst_mt,
785                      level, slice,
786                      0, 0,
787                      width, height,
788                      GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT |
789                      BRW_MAP_DIRECT_BIT,
790                      &dst, &dst_stride);
791
792    DBG("sw blit %s mt %p %p/%d -> %s mt %p %p/%d (%dx%d)\n",
793        _mesa_get_format_name(src_mt->format),
794        src_mt, src, src_stride,
795        _mesa_get_format_name(dst_mt->format),
796        dst_mt, dst, dst_stride,
797        width, height);
798
799    int row_size = cpp * width;
800    if (src_stride == row_size &&
801        dst_stride == row_size) {
802       memcpy(dst, src, row_size * height);
803    } else {
804       for (int i = 0; i < height; i++) {
805          memcpy(dst, src, row_size);
806          dst += dst_stride;
807          src += src_stride;
808       }
809    }
810
811    intel_miptree_unmap(intel, dst_mt, level, slice);
812    intel_miptree_unmap(intel, src_mt, level, slice);
813
814    /* Don't forget to copy the stencil data over, too.  We could have skipped
815     * passing BRW_MAP_DIRECT_BIT, but that would have meant intel_miptree_map
816     * shuffling the two data sources in/out of temporary storage instead of
817     * the direct mapping we get this way.
818     */
819    if (dst_mt->stencil_mt) {
820       assert(src_mt->stencil_mt);
821       intel_miptree_copy_slice_sw(intel, dst_mt->stencil_mt, src_mt->stencil_mt,
822                                   level, slice, width, height);
823    }
824 }
825
/**
 * Copy one (level, face/depth) image from \c src_mt to \c dst_mt,
 * preferring the blitter and falling back to a mapped CPU copy.
 */
static void
intel_miptree_copy_slice(struct intel_context *intel,
                         struct intel_mipmap_tree *dst_mt,
                         struct intel_mipmap_tree *src_mt,
                         int level,
                         int face,
                         int depth)

{
   gl_format format = src_mt->format;
   uint32_t width = src_mt->level[level].width;
   uint32_t height = src_mt->level[level].height;
   int slice;

   /* Cube maps index the slice by face; other targets by depth. */
   if (face > 0)
      slice = face;
   else
      slice = depth;

   assert(depth < src_mt->level[level].depth);
   assert(src_mt->format == dst_mt->format);

   /* For compressed formats the blitter operates on blocks, not texels,
    * so convert the dimensions to block units.
    */
   if (dst_mt->compressed) {
      height = ALIGN(height, dst_mt->align_h) / dst_mt->align_h;
      width = ALIGN(width, dst_mt->align_w);
   }

   /* If it's a packed depth/stencil buffer with separate stencil, the blit
    * below won't apply since we can't do the depth's Y tiling or the
    * stencil's W tiling in the blitter.
    */
   if (src_mt->stencil_mt) {
      intel_miptree_copy_slice_sw(intel,
                                  dst_mt, src_mt,
                                  level, slice,
                                  width, height);
      return;
   }

   uint32_t dst_x, dst_y, src_x, src_y;
   intel_miptree_get_image_offset(dst_mt, level, slice, &dst_x, &dst_y);
   intel_miptree_get_image_offset(src_mt, level, slice, &src_x, &src_y);

   DBG("validate blit mt %s %p %d,%d/%d -> mt %s %p %d,%d/%d (%dx%d)\n",
       _mesa_get_format_name(src_mt->format),
       src_mt, src_x, src_y, src_mt->region->pitch,
       _mesa_get_format_name(dst_mt->format),
       dst_mt, dst_x, dst_y, dst_mt->region->pitch,
       width, height);

   /* Try the blitter first; if it can't handle the surfaces (e.g. the
    * pitch is too large), fall back to the software copy.
    */
   if (!intelEmitCopyBlit(intel,
                          dst_mt->region->cpp,
                          src_mt->region->pitch, src_mt->region->bo,
                          0, src_mt->region->tiling,
                          dst_mt->region->pitch, dst_mt->region->bo,
                          0, dst_mt->region->tiling,
                          src_x, src_y,
                          dst_x, dst_y,
                          width, height,
                          GL_COPY)) {

      perf_debug("miptree validate blit for %s failed\n",
                 _mesa_get_format_name(format));

      intel_miptree_copy_slice_sw(intel, dst_mt, src_mt, level, slice,
                                  width, height);
   }
}
894
895 /**
896  * Copies the image's current data to the given miptree, and associates that
897  * miptree with the image.
898  *
899  * If \c invalidate is true, then the actual image data does not need to be
900  * copied, but the image still needs to be associated to the new miptree (this
901  * is set to true if we're about to clear the image).
902  */
903 void
904 intel_miptree_copy_teximage(struct intel_context *intel,
905                             struct intel_texture_image *intelImage,
906                             struct intel_mipmap_tree *dst_mt,
907                             bool invalidate)
908 {
909    struct intel_mipmap_tree *src_mt = intelImage->mt;
910    struct intel_texture_object *intel_obj =
911       intel_texture_object(intelImage->base.Base.TexObject);
912    int level = intelImage->base.Base.Level;
913    int face = intelImage->base.Base.Face;
914    GLuint depth = intelImage->base.Base.Depth;
915
916    if (!invalidate) {
917       for (int slice = 0; slice < depth; slice++) {
918          intel_miptree_copy_slice(intel, dst_mt, src_mt, level, face, slice);
919       }
920    }
921
922    intel_miptree_reference(&intelImage->mt, dst_mt);
923    intel_obj->needs_validate = true;
924 }
925
926 bool
927 intel_miptree_alloc_mcs(struct intel_context *intel,
928                         struct intel_mipmap_tree *mt,
929                         GLuint num_samples)
930 {
931    assert(mt->mcs_mt == NULL);
932    assert(intel->gen >= 7); /* MCS only used on Gen7+ */
933
934    /* Choose the correct format for the MCS buffer.  All that really matters
935     * is that we allocate the right buffer size, since we'll always be
936     * accessing this miptree using MCS-specific hardware mechanisms, which
937     * infer the correct format based on num_samples.
938     */
939    gl_format format;
940    switch (num_samples) {
941    case 4:
942       /* 8 bits/pixel are required for MCS data when using 4x MSAA (2 bits for
943        * each sample).
944        */
945       format = MESA_FORMAT_R8;
946       break;
947    case 8:
948       /* 32 bits/pixel are required for MCS data when using 8x MSAA (3 bits
949        * for each sample, plus 8 padding bits).
950        */
951       format = MESA_FORMAT_R_UINT32;
952       break;
953    default:
954       assert(!"Unrecognized sample count in intel_miptree_alloc_mcs");
955       break;
956    };
957
958    /* From the Ivy Bridge PRM, Vol4 Part1 p76, "MCS Base Address":
959     *
960     *     "The MCS surface must be stored as Tile Y."
961     */
962    mt->mcs_mt = intel_miptree_create(intel,
963                                      mt->target,
964                                      format,
965                                      mt->first_level,
966                                      mt->last_level,
967                                      mt->logical_width0,
968                                      mt->logical_height0,
969                                      mt->logical_depth0,
970                                      true,
971                                      0 /* num_samples */,
972                                      true /* force_y_tiling */);
973
974    /* From the Ivy Bridge PRM, Vol 2 Part 1 p326:
975     *
976     *     When MCS buffer is enabled and bound to MSRT, it is required that it
977     *     is cleared prior to any rendering.
978     *
979     * Since we don't use the MCS buffer for any purpose other than rendering,
980     * it makes sense to just clear it immediately upon allocation.
981     *
982     * Note: the clear value for MCS buffers is all 1's, so we memset to 0xff.
983     */
984    void *data = intel_miptree_map_raw(intel, mt->mcs_mt);
985    memset(data, 0xff, mt->mcs_mt->region->bo->size);
986    intel_miptree_unmap_raw(intel, mt->mcs_mt);
987
988    return mt->mcs_mt;
989 }
990
991 /**
992  * Helper for intel_miptree_alloc_hiz() that sets
993  * \c mt->level[level].slice[layer].has_hiz. Return true if and only if
994  * \c has_hiz was set.
995  */
996 static bool
997 intel_miptree_slice_enable_hiz(struct intel_context *intel,
998                                struct intel_mipmap_tree *mt,
999                                uint32_t level,
1000                                uint32_t layer)
1001 {
1002    assert(mt->hiz_mt);
1003
1004    if (intel->is_haswell) {
1005       /* Disable HiZ for some slices to work around a hardware bug.
1006        *
1007        * Haswell hardware fails to respect
1008        * 3DSTATE_DEPTH_BUFFER.Depth_Coordinate_Offset_X/Y when during HiZ
1009        * ambiguate operations.  The failure is inconsistent and affected by
1010        * other GPU contexts. Running a heavy GPU workload in a separate
1011        * process causes the failure rate to drop to nearly 0.
1012        *
1013        * To workaround the bug, we enable HiZ only when we can guarantee that
1014        * the Depth Coordinate Offset fields will be set to 0. The function
1015        * brw_get_depthstencil_tile_masks() is used to calculate the fields,
1016        * and the function is sometimes called in such a way that the presence
1017        * of an attached stencil buffer changes the fuction's return value.
1018        *
1019        * The largest tile size considered by brw_get_depthstencil_tile_masks()
1020        * is that of the stencil buffer. Therefore, if this hiz slice's
1021        * corresponding depth slice has an offset that is aligned to the
1022        * stencil buffer tile size, 64x64 pixels, then
1023        * 3DSTATE_DEPTH_BUFFER.Depth_Coordinate_Offset_X/Y is set to 0.
1024        */
1025       uint32_t depth_x_offset = mt->level[level].slice[layer].x_offset;
1026       uint32_t depth_y_offset = mt->level[level].slice[layer].y_offset;
1027       if ((depth_x_offset & 63) || (depth_y_offset & 63)) {
1028          return false;
1029       }
1030    }
1031
1032    mt->level[level].slice[layer].has_hiz = true;
1033    return true;
1034 }
1035
1036
1037
/**
 * Allocate the HiZ (hierarchical depth) auxiliary miptree for \c mt and
 * mark every HiZ-capable slice as needing a HiZ resolve.
 *
 * Returns false if the HiZ miptree could not be allocated.
 */
bool
intel_miptree_alloc_hiz(struct intel_context *intel,
                        struct intel_mipmap_tree *mt,
                        GLuint num_samples)
{
   assert(mt->hiz_mt == NULL);
   mt->hiz_mt = intel_miptree_create(intel,
                                     mt->target,
                                     mt->format,
                                     mt->first_level,
                                     mt->last_level,
                                     mt->logical_width0,
                                     mt->logical_height0,
                                     mt->logical_depth0,
                                     true,
                                     num_samples,
                                     false /* force_y_tiling */);

   if (!mt->hiz_mt)
      return false;

   /* Mark that all slices need a HiZ resolve. */
   struct intel_resolve_map *head = &mt->hiz_map;
   for (int level = mt->first_level; level <= mt->last_level; ++level) {
      for (int layer = 0; layer < mt->level[level].depth; ++layer) {
         /* Slices where HiZ had to be disabled (Haswell workaround) get no
          * resolve-map entry.
          */
         if (!intel_miptree_slice_enable_hiz(intel, mt, level, layer))
            continue;

         /* Append a new entry to the intrusive doubly-linked resolve list.
          * NOTE(review): the malloc result is unchecked; an OOM here would
          * crash on the writes below.
          */
         head->next = malloc(sizeof(*head->next));
         head->next->prev = head;
         head->next->next = NULL;
         head = head->next;

         head->level = level;
         head->layer = layer;
         head->need = GEN6_HIZ_OP_HIZ_RESOLVE;
      }
   }

   return true;
}
1079
1080 /**
1081  * Does the miptree slice have hiz enabled?
1082  */
1083 bool
1084 intel_miptree_slice_has_hiz(struct intel_mipmap_tree *mt,
1085                             uint32_t level,
1086                             uint32_t layer)
1087 {
1088    intel_miptree_check_level_layer(mt, level, layer);
1089    return mt->level[level].slice[layer].has_hiz;
1090 }
1091
1092 void
1093 intel_miptree_slice_set_needs_hiz_resolve(struct intel_mipmap_tree *mt,
1094                                           uint32_t level,
1095                                           uint32_t layer)
1096 {
1097    if (!intel_miptree_slice_has_hiz(mt, level, layer))
1098       return;
1099
1100    intel_resolve_map_set(&mt->hiz_map,
1101                          level, layer, GEN6_HIZ_OP_HIZ_RESOLVE);
1102 }
1103
1104
1105 void
1106 intel_miptree_slice_set_needs_depth_resolve(struct intel_mipmap_tree *mt,
1107                                             uint32_t level,
1108                                             uint32_t layer)
1109 {
1110    if (!intel_miptree_slice_has_hiz(mt, level, layer))
1111       return;
1112
1113    intel_resolve_map_set(&mt->hiz_map,
1114                          level, layer, GEN6_HIZ_OP_DEPTH_RESOLVE);
1115 }
1116
1117 static bool
1118 intel_miptree_slice_resolve(struct intel_context *intel,
1119                             struct intel_mipmap_tree *mt,
1120                             uint32_t level,
1121                             uint32_t layer,
1122                             enum gen6_hiz_op need)
1123 {
1124    intel_miptree_check_level_layer(mt, level, layer);
1125
1126    struct intel_resolve_map *item =
1127          intel_resolve_map_get(&mt->hiz_map, level, layer);
1128
1129    if (!item || item->need != need)
1130       return false;
1131
1132    intel_hiz_exec(intel, mt, level, layer, need);
1133    intel_resolve_map_remove(item);
1134    return true;
1135 }
1136
1137 bool
1138 intel_miptree_slice_resolve_hiz(struct intel_context *intel,
1139                                 struct intel_mipmap_tree *mt,
1140                                 uint32_t level,
1141                                 uint32_t layer)
1142 {
1143    return intel_miptree_slice_resolve(intel, mt, level, layer,
1144                                       GEN6_HIZ_OP_HIZ_RESOLVE);
1145 }
1146
1147 bool
1148 intel_miptree_slice_resolve_depth(struct intel_context *intel,
1149                                   struct intel_mipmap_tree *mt,
1150                                   uint32_t level,
1151                                   uint32_t layer)
1152 {
1153    return intel_miptree_slice_resolve(intel, mt, level, layer,
1154                                       GEN6_HIZ_OP_DEPTH_RESOLVE);
1155 }
1156
1157 static bool
1158 intel_miptree_all_slices_resolve(struct intel_context *intel,
1159                                  struct intel_mipmap_tree *mt,
1160                                  enum gen6_hiz_op need)
1161 {
1162    bool did_resolve = false;
1163    struct intel_resolve_map *i, *next;
1164
1165    for (i = mt->hiz_map.next; i; i = next) {
1166       next = i->next;
1167       if (i->need != need)
1168          continue;
1169
1170       intel_hiz_exec(intel, mt, i->level, i->layer, need);
1171       intel_resolve_map_remove(i);
1172       did_resolve = true;
1173    }
1174
1175    return did_resolve;
1176 }
1177
1178 bool
1179 intel_miptree_all_slices_resolve_hiz(struct intel_context *intel,
1180                                      struct intel_mipmap_tree *mt)
1181 {
1182    return intel_miptree_all_slices_resolve(intel, mt,
1183                                            GEN6_HIZ_OP_HIZ_RESOLVE);
1184 }
1185
1186 bool
1187 intel_miptree_all_slices_resolve_depth(struct intel_context *intel,
1188                                        struct intel_mipmap_tree *mt)
1189 {
1190    return intel_miptree_all_slices_resolve(intel, mt,
1191                                            GEN6_HIZ_OP_DEPTH_RESOLVE);
1192 }
1193
/* Blit slice (level 0, layer 0) of \c src into \c dst via BLORP, including
 * the separate stencil miptrees when present.  Shared by the MSAA
 * downsample and upsample paths.  Compiled to a no-op on i915, which has no
 * BLORP.
 */
static void
intel_miptree_updownsample(struct intel_context *intel,
                           struct intel_mipmap_tree *src,
                           struct intel_mipmap_tree *dst,
                           unsigned width,
                           unsigned height)
{
#ifndef I915
   int src_x0 = 0;
   int src_y0 = 0;
   int dst_x0 = 0;
   int dst_y0 = 0;

   /* Flush any pending depth resolves before touching the depth data. */
   intel_miptree_slice_resolve_depth(intel, src, 0, 0);
   intel_miptree_slice_resolve_depth(intel, dst, 0, 0);

   brw_blorp_blit_miptrees(intel,
                           src, 0 /* level */, 0 /* layer */,
                           dst, 0 /* level */, 0 /* layer */,
                           src_x0, src_y0,
                           dst_x0, dst_y0,
                           width, height,
                           false, false /*mirror x, y*/);

   /* The stencil data lives in its own miptree and must be blitted too. */
   if (src->stencil_mt) {
      brw_blorp_blit_miptrees(intel,
                              src->stencil_mt, 0 /* level */, 0 /* layer */,
                              dst->stencil_mt, 0 /* level */, 0 /* layer */,
                              src_x0, src_y0,
                              dst_x0, dst_y0,
                              width, height,
                              false, false /*mirror x, y*/);
   }
#endif /* I915 */
}
1229
1230 static void
1231 assert_is_flat(struct intel_mipmap_tree *mt)
1232 {
1233    assert(mt->target == GL_TEXTURE_2D);
1234    assert(mt->first_level == 0);
1235    assert(mt->last_level == 0);
1236 }
1237
1238 /**
1239  * \brief Downsample from mt to mt->singlesample_mt.
1240  *
1241  * If the miptree needs no downsample, then skip.
1242  */
1243 void
1244 intel_miptree_downsample(struct intel_context *intel,
1245                          struct intel_mipmap_tree *mt)
1246 {
1247    /* Only flat, renderbuffer-like miptrees are supported. */
1248    assert_is_flat(mt);
1249
1250    if (!mt->need_downsample)
1251       return;
1252    intel_miptree_updownsample(intel,
1253                               mt, mt->singlesample_mt,
1254                               mt->logical_width0,
1255                               mt->logical_height0);
1256    mt->need_downsample = false;
1257
1258    /* Strictly speaking, after a downsample on a depth miptree, a hiz
1259     * resolve is needed on the singlesample miptree. However, since the
1260     * singlesample miptree is never rendered to, the hiz resolve will never
1261     * occur. Therefore we do not mark the needed hiz resolve after
1262     * downsampling.
1263     */
1264 }
1265
1266 /**
1267  * \brief Upsample from mt->singlesample_mt to mt.
1268  *
1269  * The upsample is done unconditionally.
1270  */
1271 void
1272 intel_miptree_upsample(struct intel_context *intel,
1273                        struct intel_mipmap_tree *mt)
1274 {
1275    /* Only flat, renderbuffer-like miptrees are supported. */
1276    assert_is_flat(mt);
1277    assert(!mt->need_downsample);
1278
1279    intel_miptree_updownsample(intel,
1280                               mt->singlesample_mt, mt,
1281                               mt->logical_width0,
1282                               mt->logical_height0);
1283    intel_miptree_slice_set_needs_hiz_resolve(mt, 0, 0);
1284 }
1285
1286 void *
1287 intel_miptree_map_raw(struct intel_context *intel, struct intel_mipmap_tree *mt)
1288 {
1289    drm_intel_bo *bo = mt->region->bo;
1290
1291    if (unlikely(INTEL_DEBUG & DEBUG_PERF)) {
1292       if (drm_intel_bo_busy(bo)) {
1293          perf_debug("Mapping a busy BO, causing a stall on the GPU.\n");
1294       }
1295    }
1296
1297    intel_flush(&intel->ctx);
1298
1299    if (mt->region->tiling != I915_TILING_NONE)
1300       drm_intel_gem_bo_map_gtt(bo);
1301    else
1302       drm_intel_bo_map(bo, true);
1303
1304    return bo->virtual;
1305 }
1306
1307 void
1308 intel_miptree_unmap_raw(struct intel_context *intel,
1309                         struct intel_mipmap_tree *mt)
1310 {
1311    drm_intel_bo_unmap(mt->region->bo);
1312 }
1313
1314 static void
1315 intel_miptree_map_gtt(struct intel_context *intel,
1316                       struct intel_mipmap_tree *mt,
1317                       struct intel_miptree_map *map,
1318                       unsigned int level, unsigned int slice)
1319 {
1320    unsigned int bw, bh;
1321    void *base;
1322    unsigned int image_x, image_y;
1323    int x = map->x;
1324    int y = map->y;
1325
1326    /* For compressed formats, the stride is the number of bytes per
1327     * row of blocks.  intel_miptree_get_image_offset() already does
1328     * the divide.
1329     */
1330    _mesa_get_format_block_size(mt->format, &bw, &bh);
1331    assert(y % bh == 0);
1332    y /= bh;
1333
1334    base = intel_miptree_map_raw(intel, mt) + mt->offset;
1335
1336    if (base == NULL)
1337       map->ptr = NULL;
1338    else {
1339       /* Note that in the case of cube maps, the caller must have passed the
1340        * slice number referencing the face.
1341       */
1342       intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
1343       x += image_x;
1344       y += image_y;
1345
1346       map->stride = mt->region->pitch;
1347       map->ptr = base + y * map->stride + x * mt->cpp;
1348    }
1349
1350    DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
1351        map->x, map->y, map->w, map->h,
1352        mt, _mesa_get_format_name(mt->format),
1353        x, y, map->ptr, map->stride);
1354 }
1355
/* Tear down a direct mapping created by intel_miptree_map_gtt().  The
 * level/slice arguments are unused; the whole BO is unmapped.
 */
static void
intel_miptree_unmap_gtt(struct intel_context *intel,
                        struct intel_mipmap_tree *mt,
                        struct intel_miptree_map *map,
                        unsigned int level,
                        unsigned int slice)
{
   intel_miptree_unmap_raw(intel, mt);
}
1365
/* Map (level, slice) of \c mt by blitting the region of interest into a
 * freshly allocated linear BO and CPU-mapping that temporary.  On any
 * failure, map->ptr is left NULL to signal the caller.
 */
static void
intel_miptree_map_blit(struct intel_context *intel,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level, unsigned int slice)
{
   unsigned int image_x, image_y;
   int x = map->x;
   int y = map->y;
   int ret;

   /* The blitter requires the pitch to be aligned to 4. */
   map->stride = ALIGN(map->w * mt->region->cpp, 4);

   map->bo = drm_intel_bo_alloc(intel->bufmgr, "intel_miptree_map_blit() temp",
                                map->stride * map->h, 4096);
   if (!map->bo) {
      fprintf(stderr, "Failed to allocate blit temporary\n");
      goto fail;
   }

   /* Translate the map window into coordinates within the full miptree. */
   intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
   x += image_x;
   y += image_y;

   if (!intelEmitCopyBlit(intel,
                          mt->region->cpp,
                          mt->region->pitch, mt->region->bo,
                          mt->offset, mt->region->tiling,
                          map->stride, map->bo,
                          0, I915_TILING_NONE,
                          x, y,
                          0, 0,
                          map->w, map->h,
                          GL_COPY)) {
      fprintf(stderr, "Failed to blit\n");
      goto fail;
   }

   /* The blit must complete (or at least be submitted) before the CPU can
    * see the temporary's contents.
    */
   intel_batchbuffer_flush(intel);
   ret = drm_intel_bo_map(map->bo, (map->mode & GL_MAP_WRITE_BIT) != 0);
   if (ret) {
      fprintf(stderr, "Failed to map blit temporary\n");
      goto fail;
   }

   map->ptr = map->bo->virtual;

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       x, y, map->ptr, map->stride);

   return;

fail:
   /* map->ptr == NULL tells the caller the mapping failed. */
   drm_intel_bo_unreference(map->bo);
   map->ptr = NULL;
   map->stride = 0;
}
1426
/* Finish a blit-based mapping: if the map was writable, blit the linear
 * temporary back into the miptree, then release the temporary BO.
 */
static void
intel_miptree_unmap_blit(struct intel_context *intel,
                         struct intel_mipmap_tree *mt,
                         struct intel_miptree_map *map,
                         unsigned int level,
                         unsigned int slice)
{
   struct gl_context *ctx = &intel->ctx;
   drm_intel_bo_unmap(map->bo);

   if (map->mode & GL_MAP_WRITE_BIT) {
      unsigned int image_x, image_y;
      int x = map->x;
      int y = map->y;
      /* Translate the map window back into full-miptree coordinates. */
      intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
      x += image_x;
      y += image_y;

      bool ok = intelEmitCopyBlit(intel,
                                  mt->region->cpp,
                                  map->stride, map->bo,
                                  0, I915_TILING_NONE,
                                  mt->region->pitch, mt->region->bo,
                                  mt->offset, mt->region->tiling,
                                  0, 0,
                                  x, y,
                                  map->w, map->h,
                                  GL_COPY);
      WARN_ONCE(!ok, "Failed to blit from linear temporary mapping");
   }

   drm_intel_bo_unreference(map->bo);
}
1460
/* Map an S8 stencil miptree by detiling the region of interest into a
 * malloc'ed linear temporary (the stencil data is W-tiled, which neither
 * the blitter nor a plain CPU map can address directly).  map->ptr is left
 * NULL on allocation failure.
 */
static void
intel_miptree_map_s8(struct intel_context *intel,
                     struct intel_mipmap_tree *mt,
                     struct intel_miptree_map *map,
                     unsigned int level, unsigned int slice)
{
   map->stride = map->w;
   map->buffer = map->ptr = malloc(map->stride * map->h);
   if (!map->buffer)
      return;

   /* One of either READ_BIT or WRITE_BIT or both is set.  READ_BIT implies no
    * INVALIDATE_RANGE_BIT.  WRITE_BIT needs the original values read in unless
    * invalidate is set, since we'll be writing the whole rectangle from our
    * temporary buffer back out.
    */
   if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
      uint8_t *untiled_s8_map = map->ptr;
      uint8_t *tiled_s8_map = intel_miptree_map_raw(intel, mt);
      unsigned int image_x, image_y;

      intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);

      /* Gather each pixel from its swizzled tiled location. */
      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t offset = intel_offset_S8(mt->region->pitch,
                                               x + image_x + map->x,
                                               y + image_y + map->y,
                                               intel->has_swizzling);
            untiled_s8_map[y * map->w + x] = tiled_s8_map[offset];
         }
      }

      intel_miptree_unmap_raw(intel, mt);

      DBG("%s: %d,%d %dx%d from mt %p %d,%d = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->x + image_x, map->y + image_y, map->ptr, map->stride);
   } else {
      DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->ptr, map->stride);
   }
}
1505
1506 static void
1507 intel_miptree_unmap_s8(struct intel_context *intel,
1508                        struct intel_mipmap_tree *mt,
1509                        struct intel_miptree_map *map,
1510                        unsigned int level,
1511                        unsigned int slice)
1512 {
1513    if (map->mode & GL_MAP_WRITE_BIT) {
1514       unsigned int image_x, image_y;
1515       uint8_t *untiled_s8_map = map->ptr;
1516       uint8_t *tiled_s8_map = intel_miptree_map_raw(intel, mt);
1517
1518       intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
1519
1520       for (uint32_t y = 0; y < map->h; y++) {
1521          for (uint32_t x = 0; x < map->w; x++) {
1522             ptrdiff_t offset = intel_offset_S8(mt->region->pitch,
1523                                                x + map->x,
1524                                                y + map->y,
1525                                                intel->has_swizzling);
1526             tiled_s8_map[offset] = untiled_s8_map[y * map->w + x];
1527          }
1528       }
1529
1530       intel_miptree_unmap_raw(intel, mt);
1531    }
1532
1533    free(map->buffer);
1534 }
1535
1536 static void
1537 intel_miptree_map_etc(struct intel_context *intel,
1538                       struct intel_mipmap_tree *mt,
1539                       struct intel_miptree_map *map,
1540                       unsigned int level,
1541                       unsigned int slice)
1542 {
1543    assert(mt->etc_format != MESA_FORMAT_NONE);
1544    if (mt->etc_format == MESA_FORMAT_ETC1_RGB8) {
1545       assert(mt->format == MESA_FORMAT_RGBX8888_REV);
1546    }
1547
1548    assert(map->mode & GL_MAP_WRITE_BIT);
1549    assert(map->mode & GL_MAP_INVALIDATE_RANGE_BIT);
1550
1551    map->stride = _mesa_format_row_stride(mt->etc_format, map->w);
1552    map->buffer = malloc(_mesa_format_image_size(mt->etc_format,
1553                                                 map->w, map->h, 1));
1554    map->ptr = map->buffer;
1555 }
1556
/* Write back an ETC mapping: decompress the staged ETC1/ETC2 data into the
 * destination miptree's uncompressed storage, then free the staging buffer.
 */
static void
intel_miptree_unmap_etc(struct intel_context *intel,
                        struct intel_mipmap_tree *mt,
                        struct intel_miptree_map *map,
                        unsigned int level,
                        unsigned int slice)
{
   uint32_t image_x;
   uint32_t image_y;
   intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);

   image_x += map->x;
   image_y += map->y;

   /* Destination: start of the mapped window within the real miptree. */
   uint8_t *dst = intel_miptree_map_raw(intel, mt)
                + image_y * mt->region->pitch
                + image_x * mt->region->cpp;

   if (mt->etc_format == MESA_FORMAT_ETC1_RGB8)
      _mesa_etc1_unpack_rgba8888(dst, mt->region->pitch,
                                 map->ptr, map->stride,
                                 map->w, map->h);
   else
      _mesa_unpack_etc2_format(dst, mt->region->pitch,
                               map->ptr, map->stride,
                               map->w, map->h, mt->etc_format);

   intel_miptree_unmap_raw(intel, mt);
   free(map->buffer);
}
1587
1588 /**
1589  * Mapping function for packed depth/stencil miptrees backed by real separate
1590  * miptrees for depth and stencil.
1591  *
1592  * On gen7, and to support HiZ pre-gen7, we have to have the stencil buffer
1593  * separate from the depth buffer.  Yet at the GL API level, we have to expose
1594  * packed depth/stencil textures and FBO attachments, and Mesa core expects to
1595  * be able to map that memory for texture storage and glReadPixels-type
1596  * operations.  We give Mesa core that access by mallocing a temporary and
1597  * copying the data between the actual backing store and the temporary.
1598  */
1599 static void
1600 intel_miptree_map_depthstencil(struct intel_context *intel,
1601                                struct intel_mipmap_tree *mt,
1602                                struct intel_miptree_map *map,
1603                                unsigned int level, unsigned int slice)
1604 {
1605    struct intel_mipmap_tree *z_mt = mt;
1606    struct intel_mipmap_tree *s_mt = mt->stencil_mt;
1607    bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z32_FLOAT;
1608    int packed_bpp = map_z32f_x24s8 ? 8 : 4;
1609
1610    map->stride = map->w * packed_bpp;
1611    map->buffer = map->ptr = malloc(map->stride * map->h);
1612    if (!map->buffer)
1613       return;
1614
1615    /* One of either READ_BIT or WRITE_BIT or both is set.  READ_BIT implies no
1616     * INVALIDATE_RANGE_BIT.  WRITE_BIT needs the original values read in unless
1617     * invalidate is set, since we'll be writing the whole rectangle from our
1618     * temporary buffer back out.
1619     */
1620    if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
1621       uint32_t *packed_map = map->ptr;
1622       uint8_t *s_map = intel_miptree_map_raw(intel, s_mt);
1623       uint32_t *z_map = intel_miptree_map_raw(intel, z_mt);
1624       unsigned int s_image_x, s_image_y;
1625       unsigned int z_image_x, z_image_y;
1626
1627       intel_miptree_get_image_offset(s_mt, level, slice,
1628                                      &s_image_x, &s_image_y);
1629       intel_miptree_get_image_offset(z_mt, level, slice,
1630                                      &z_image_x, &z_image_y);
1631
1632       for (uint32_t y = 0; y < map->h; y++) {
1633          for (uint32_t x = 0; x < map->w; x++) {
1634             int map_x = map->x + x, map_y = map->y + y;
1635             ptrdiff_t s_offset = intel_offset_S8(s_mt->region->pitch,
1636                                                  map_x + s_image_x,
1637                                                  map_y + s_image_y,
1638                                                  intel->has_swizzling);
1639             ptrdiff_t z_offset = ((map_y + z_image_y) *
1640                                   (z_mt->region->pitch / 4) +
1641                                   (map_x + z_image_x));
1642             uint8_t s = s_map[s_offset];
1643             uint32_t z = z_map[z_offset];
1644
1645             if (map_z32f_x24s8) {
1646                packed_map[(y * map->w + x) * 2 + 0] = z;
1647                packed_map[(y * map->w + x) * 2 + 1] = s;
1648             } else {
1649                packed_map[y * map->w + x] = (s << 24) | (z & 0x00ffffff);
1650             }
1651          }
1652       }
1653
1654       intel_miptree_unmap_raw(intel, s_mt);
1655       intel_miptree_unmap_raw(intel, z_mt);
1656
1657       DBG("%s: %d,%d %dx%d from z mt %p %d,%d, s mt %p %d,%d = %p/%d\n",
1658           __FUNCTION__,
1659           map->x, map->y, map->w, map->h,
1660           z_mt, map->x + z_image_x, map->y + z_image_y,
1661           s_mt, map->x + s_image_x, map->y + s_image_y,
1662           map->ptr, map->stride);
1663    } else {
1664       DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__,
1665           map->x, map->y, map->w, map->h,
1666           mt, map->ptr, map->stride);
1667    }
1668 }
1669
/* Write back a packed depth/stencil mapping: scatter the packed temporary
 * into the separate depth and stencil miptrees (for write maps), then free
 * the temporary.
 */
static void
intel_miptree_unmap_depthstencil(struct intel_context *intel,
                                 struct intel_mipmap_tree *mt,
                                 struct intel_miptree_map *map,
                                 unsigned int level,
                                 unsigned int slice)
{
   struct intel_mipmap_tree *z_mt = mt;
   struct intel_mipmap_tree *s_mt = mt->stencil_mt;
   bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z32_FLOAT;

   if (map->mode & GL_MAP_WRITE_BIT) {
      uint32_t *packed_map = map->ptr;
      uint8_t *s_map = intel_miptree_map_raw(intel, s_mt);
      uint32_t *z_map = intel_miptree_map_raw(intel, z_mt);
      unsigned int s_image_x, s_image_y;
      unsigned int z_image_x, z_image_y;

      intel_miptree_get_image_offset(s_mt, level, slice,
                                     &s_image_x, &s_image_y);
      intel_miptree_get_image_offset(z_mt, level, slice,
                                     &z_image_x, &z_image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            /* Same addressing as the map side: swizzled S8 offset for
             * stencil, linear 4-byte texel index for depth.
             */
            ptrdiff_t s_offset = intel_offset_S8(s_mt->region->pitch,
                                                 x + s_image_x + map->x,
                                                 y + s_image_y + map->y,
                                                 intel->has_swizzling);
            ptrdiff_t z_offset = ((y + z_image_y) *
                                  (z_mt->region->pitch / 4) +
                                  (x + z_image_x));

            if (map_z32f_x24s8) {
               z_map[z_offset] = packed_map[(y * map->w + x) * 2 + 0];
               s_map[s_offset] = packed_map[(y * map->w + x) * 2 + 1];
            } else {
               uint32_t packed = packed_map[y * map->w + x];
               s_map[s_offset] = packed >> 24;
               z_map[z_offset] = packed;
            }
         }
      }

      intel_miptree_unmap_raw(intel, s_mt);
      intel_miptree_unmap_raw(intel, z_mt);

      DBG("%s: %d,%d %dx%d from z mt %p (%s) %d,%d, s mt %p %d,%d = %p/%d\n",
          __FUNCTION__,
          map->x, map->y, map->w, map->h,
          z_mt, _mesa_get_format_name(z_mt->format),
          map->x + z_image_x, map->y + z_image_y,
          s_mt, map->x + s_image_x, map->y + s_image_y,
          map->ptr, map->stride);
   }

   free(map->buffer);
}
1728
1729 /**
1730  * Create and attach a map to the miptree at (level, slice). Return the
1731  * attached map.
1732  */
1733 static struct intel_miptree_map*
1734 intel_miptree_attach_map(struct intel_mipmap_tree *mt,
1735                          unsigned int level,
1736                          unsigned int slice,
1737                          unsigned int x,
1738                          unsigned int y,
1739                          unsigned int w,
1740                          unsigned int h,
1741                          GLbitfield mode)
1742 {
1743    struct intel_miptree_map *map = calloc(1, sizeof(*map));
1744
1745    if (!map)
1746       return NULL;
1747
1748    assert(mt->level[level].slice[slice].map == NULL);
1749    mt->level[level].slice[slice].map = map;
1750
1751    map->mode = mode;
1752    map->x = x;
1753    map->y = y;
1754    map->w = w;
1755    map->h = h;
1756
1757    return map;
1758 }
1759
1760 /**
1761  * Release the map at (level, slice).
1762  */
1763 static void
1764 intel_miptree_release_map(struct intel_mipmap_tree *mt,
1765                          unsigned int level,
1766                          unsigned int slice)
1767 {
1768    struct intel_miptree_map **map;
1769
1770    map = &mt->level[level].slice[slice].map;
1771    free(*map);
1772    *map = NULL;
1773 }
1774
/**
 * Map a region of a single-sampled miptree slice for CPU access.
 *
 * On success *out_ptr points at the first mapped texel and *out_stride
 * is the map's row stride; on failure *out_ptr is NULL and *out_stride
 * is 0.  The mapping strategy (format-specific CPU copy, blit staging
 * buffer, or direct GTT map) is chosen from the format, tiling, and
 * hardware limits below.
 */
static void
intel_miptree_map_singlesample(struct intel_context *intel,
                               struct intel_mipmap_tree *mt,
                               unsigned int level,
                               unsigned int slice,
                               unsigned int x,
                               unsigned int y,
                               unsigned int w,
                               unsigned int h,
                               GLbitfield mode,
                               void **out_ptr,
                               int *out_stride)
{
   struct intel_miptree_map *map;

   assert(mt->num_samples <= 1);

   map = intel_miptree_attach_map(mt, level, slice, x, y, w, h, mode);
   if (!map){
      *out_ptr = NULL;
      *out_stride = 0;
      return;
   }

   /* Resolve any pending depth data before the CPU reads it, and flag a
    * HiZ resolve as needed if the CPU is going to write it.
    */
   intel_miptree_slice_resolve_depth(intel, mt, level, slice);
   if (map->mode & GL_MAP_WRITE_BIT) {
      intel_miptree_slice_set_needs_hiz_resolve(mt, level, slice);
   }

   /* Formats that need CPU-side conversion take dedicated paths.  The
    * order of these checks matters and must mirror the dispatch in
    * intel_miptree_unmap_singlesample().
    */
   if (mt->format == MESA_FORMAT_S8) {
      intel_miptree_map_s8(intel, mt, map, level, slice);
   } else if (mt->etc_format != MESA_FORMAT_NONE &&
              !(mode & BRW_MAP_DIRECT_BIT)) {
      intel_miptree_map_etc(intel, mt, map, level, slice);
   } else if (mt->stencil_mt && !(mode & BRW_MAP_DIRECT_BIT)) {
      intel_miptree_map_depthstencil(intel, mt, map, level, slice);
   }
   /* According to the Ivy Bridge PRM, Vol1 Part4, section 1.2.1.2 (Graphics
    * Data Size Limitations):
    *
    *    The BLT engine is capable of transferring very large quantities of
    *    graphics data. Any graphics data read from and written to the
    *    destination is permitted to represent a number of pixels that
    *    occupies up to 65,536 scan lines and up to 32,768 bytes per scan line
    *    at the destination. The maximum number of pixels that may be
    *    represented per scan line’s worth of graphics data depends on the
    *    color depth.
    *
    * Furthermore, intelEmitCopyBlit (which is called by
    * intel_miptree_map_blit) uses a signed 16-bit integer to represent buffer
    * pitch, so it can only handle buffer pitches < 32k.
    *
    * As a result of these two limitations, we can only use
    * intel_miptree_map_blit() when the region's pitch is less than 32k.
    */
   else if (intel->has_llc &&
            !(mode & GL_MAP_WRITE_BIT) &&
            !mt->compressed &&
            mt->region->tiling == I915_TILING_X &&
            mt->region->pitch < 32768) {
      intel_miptree_map_blit(intel, mt, map, level, slice);
   } else if (mt->region->tiling != I915_TILING_NONE &&
              mt->region->bo->size >= intel->max_gtt_map_object_size) {
      /* Object too large to map through the GTT aperture: stage the
       * region through a blit instead (pitch limit per the comment above).
       */
      assert(mt->region->pitch < 32768);
      intel_miptree_map_blit(intel, mt, map, level, slice);
   } else {
      intel_miptree_map_gtt(intel, mt, map, level, slice);
   }

   *out_ptr = map->ptr;
   *out_stride = map->stride;

   /* If the chosen strategy failed, map->ptr is NULL; detach the map so
    * a later unmap of this slice is a no-op.
    */
   if (map->ptr == NULL)
      intel_miptree_release_map(mt, level, slice);
}
1850
/**
 * Tear down a map created by intel_miptree_map_singlesample(): write
 * back any staging copy and detach the map from the slice.
 */
static void
intel_miptree_unmap_singlesample(struct intel_context *intel,
                                 struct intel_mipmap_tree *mt,
                                 unsigned int level,
                                 unsigned int slice)
{
   struct intel_miptree_map *map = mt->level[level].slice[slice].map;

   assert(mt->num_samples <= 1);

   /* Unmapping a slice that was never mapped (or whose map failed and
    * was already released) is a no-op.
    */
   if (!map)
      return;

   DBG("%s: mt %p (%s) level %d slice %d\n", __FUNCTION__,
       mt, _mesa_get_format_name(mt->format), level, slice);

   /* The dispatch order must mirror the strategy selection in
    * intel_miptree_map_singlesample().  NOTE(review): map->bo appears
    * to be set only by the blit staging path -- confirm against
    * intel_miptree_map_blit().
    */
   if (mt->format == MESA_FORMAT_S8) {
      intel_miptree_unmap_s8(intel, mt, map, level, slice);
   } else if (mt->etc_format != MESA_FORMAT_NONE &&
              !(map->mode & BRW_MAP_DIRECT_BIT)) {
      intel_miptree_unmap_etc(intel, mt, map, level, slice);
   } else if (mt->stencil_mt && !(map->mode & BRW_MAP_DIRECT_BIT)) {
      intel_miptree_unmap_depthstencil(intel, mt, map, level, slice);
   } else if (map->bo) {
      intel_miptree_unmap_blit(intel, mt, map, level, slice);
   } else {
      intel_miptree_unmap_gtt(intel, mt, map, level, slice);
   }

   intel_miptree_release_map(mt, level, slice);
}
1882
1883 static void
1884 intel_miptree_map_multisample(struct intel_context *intel,
1885                               struct intel_mipmap_tree *mt,
1886                               unsigned int level,
1887                               unsigned int slice,
1888                               unsigned int x,
1889                               unsigned int y,
1890                               unsigned int w,
1891                               unsigned int h,
1892                               GLbitfield mode,
1893                               void **out_ptr,
1894                               int *out_stride)
1895 {
1896    struct intel_miptree_map *map;
1897
1898    assert(mt->num_samples > 1);
1899
1900    /* Only flat, renderbuffer-like miptrees are supported. */
1901    if (mt->target != GL_TEXTURE_2D ||
1902        mt->first_level != 0 ||
1903        mt->last_level != 0) {
1904       _mesa_problem(&intel->ctx, "attempt to map a multisample miptree for "
1905                     "which (target, first_level, last_level != "
1906                     "(GL_TEXTURE_2D, 0, 0)");
1907       goto fail;
1908    }
1909
1910    map = intel_miptree_attach_map(mt, level, slice, x, y, w, h, mode);
1911    if (!map)
1912       goto fail;
1913
1914    if (!mt->singlesample_mt) {
1915       mt->singlesample_mt =
1916          intel_miptree_create_for_renderbuffer(intel,
1917                                                mt->format,
1918                                                mt->logical_width0,
1919                                                mt->logical_height0,
1920                                                0 /*num_samples*/);
1921       if (!mt->singlesample_mt)
1922          goto fail;
1923
1924       map->singlesample_mt_is_tmp = true;
1925       mt->need_downsample = true;
1926    }
1927
1928    intel_miptree_downsample(intel, mt);
1929    intel_miptree_map_singlesample(intel, mt->singlesample_mt,
1930                                   level, slice,
1931                                   x, y, w, h,
1932                                   mode,
1933                                   out_ptr, out_stride);
1934    return;
1935
1936 fail:
1937    intel_miptree_release_map(mt, level, slice);
1938    *out_ptr = NULL;
1939    *out_stride = 0;
1940 }
1941
/**
 * Tear down a map created by intel_miptree_map_multisample().
 *
 * Unmaps the singlesample proxy miptree, upsamples its contents back
 * into the multisample miptree if the map was writable, releases the
 * proxy if it was temporary, and detaches the map.
 */
static void
intel_miptree_unmap_multisample(struct intel_context *intel,
                                struct intel_mipmap_tree *mt,
                                unsigned int level,
                                unsigned int slice)
{
   struct intel_miptree_map *map = mt->level[level].slice[slice].map;

   assert(mt->num_samples > 1);

   /* Nothing to do if no map was attached (see the fail path of the
    * map function).
    */
   if (!map)
      return;

   intel_miptree_unmap_singlesample(intel, mt->singlesample_mt, level, slice);

   /* The proxy now reflects the miptree (or is about to overwrite it
    * via the upsample below), so no downsample is pending.
    */
   mt->need_downsample = false;
   if (map->mode & GL_MAP_WRITE_BIT)
      intel_miptree_upsample(intel, mt);

   if (map->singlesample_mt_is_tmp)
      intel_miptree_release(&mt->singlesample_mt);

   intel_miptree_release_map(mt, level, slice);
}
1966
1967 void
1968 intel_miptree_map(struct intel_context *intel,
1969                   struct intel_mipmap_tree *mt,
1970                   unsigned int level,
1971                   unsigned int slice,
1972                   unsigned int x,
1973                   unsigned int y,
1974                   unsigned int w,
1975                   unsigned int h,
1976                   GLbitfield mode,
1977                   void **out_ptr,
1978                   int *out_stride)
1979 {
1980    if (mt->num_samples <= 1)
1981       intel_miptree_map_singlesample(intel, mt,
1982                                      level, slice,
1983                                      x, y, w, h,
1984                                      mode,
1985                                      out_ptr, out_stride);
1986    else
1987       intel_miptree_map_multisample(intel, mt,
1988                                     level, slice,
1989                                     x, y, w, h,
1990                                     mode,
1991                                     out_ptr, out_stride);
1992 }
1993
1994 void
1995 intel_miptree_unmap(struct intel_context *intel,
1996                     struct intel_mipmap_tree *mt,
1997                     unsigned int level,
1998                     unsigned int slice)
1999 {
2000    if (mt->num_samples <= 1)
2001       intel_miptree_unmap_singlesample(intel, mt, level, slice);
2002    else
2003       intel_miptree_unmap_multisample(intel, mt, level, slice);
2004 }