/* src/gallium/auxiliary/draw/draw_context.c */

/**************************************************************************
 *
 * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

 /*
  * Authors:
  *   Keith Whitwell <keith@tungstengraphics.com>
  */


#include "pipe/p_context.h"
#include "util/u_memory.h"
#include "util/u_math.h"
#include "util/u_cpu_detect.h"
#include "util/u_inlines.h"
#include "util/u_helpers.h"
#include "util/u_prim.h"
#include "draw_context.h"
#include "draw_vs.h"
#include "draw_gs.h"

#ifdef HAVE_LLVM
#include "gallivm/lp_bld_init.h"
#include "gallivm/lp_bld_limits.h"
#include "draw_llvm.h"

boolean
draw_get_option_use_llvm(void)
{
   static boolean first = TRUE;
   static boolean value;
   if (first) {
      first = FALSE;
      value = debug_get_bool_option("DRAW_USE_LLVM", TRUE);

#ifdef PIPE_ARCH_X86
      util_cpu_detect();
      /* require SSE2 due to LLVM PR6960. */
      if (!util_cpu_caps.has_sse2)
         value = FALSE;
#endif
   }
   return value;
}
#endif


/**
 * Create new draw module context with gallivm state for LLVM JIT.
 */
static struct draw_context *
draw_create_context(struct pipe_context *pipe, boolean try_llvm)
{
   struct draw_context *draw = CALLOC_STRUCT( draw_context );
   if (draw == NULL)
      goto err_out;

#ifdef HAVE_LLVM
   if (try_llvm && draw_get_option_use_llvm()) {
      draw->llvm = draw_llvm_create(draw);
      if (!draw->llvm)
         goto err_destroy;
   }
#endif

   draw->pipe = pipe;

   if (!draw_init(draw))
      goto err_destroy;

   return draw;

err_destroy:
   draw_destroy( draw );
err_out:
   return NULL;
}


/**
 * Create new draw module context, with LLVM JIT.
 */
struct draw_context *
draw_create(struct pipe_context *pipe)
{
   return draw_create_context(pipe, TRUE);
}


/**
 * Create a new draw context, without LLVM JIT.
 */
struct draw_context *
draw_create_no_llvm(struct pipe_context *pipe)
{
   return draw_create_context(pipe, FALSE);
}
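
/*
 * Usage sketch (hypothetical driver code, not part of this module):
 * a software driver typically creates the draw context while creating its
 * pipe_context and keeps the pointer in its own context structure:
 *
 *    struct my_context *mc = my_create_context(screen);  // hypothetical
 *    mc->draw = draw_create(&mc->pipe);
 *    if (!mc->draw) {
 *       // out of memory (or LLVM setup failed): clean up and fail
 *    }
 */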


boolean draw_init(struct draw_context *draw)
{
   /*
    * Note that several functions compute the clipmask of the predefined
    * planes with hardcoded formulas instead of using these. So modifications
    * here must be reflected there too.
    */

   ASSIGN_4V( draw->plane[0], -1,  0,  0, 1 );
   ASSIGN_4V( draw->plane[1],  1,  0,  0, 1 );
   ASSIGN_4V( draw->plane[2],  0, -1,  0, 1 );
   ASSIGN_4V( draw->plane[3],  0,  1,  0, 1 );
   ASSIGN_4V( draw->plane[4],  0,  0,  1, 1 ); /* yes these are correct */
   ASSIGN_4V( draw->plane[5],  0,  0, -1, 1 ); /* mesa's a bit wonky */
   draw->clip_xy = TRUE;
   draw->clip_z = TRUE;

   draw->pt.user.planes = (float (*) [DRAW_TOTAL_CLIP_PLANES][4]) &(draw->plane[0]);

   if (!draw_pipeline_init( draw ))
      return FALSE;

   if (!draw_pt_init( draw ))
      return FALSE;

   if (!draw_vs_init( draw ))
      return FALSE;

   if (!draw_gs_init( draw ))
      return FALSE;

   draw->quads_always_flatshade_last = !draw->pipe->screen->get_param(
      draw->pipe->screen, PIPE_CAP_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION);

   return TRUE;
}

/*
 * Called whenever we're starting to draw a new instance.
 * Some internal structures don't want to have to reset internal
 * members on each invocation (because their state might have to persist
 * between multiple primitive restart rendering calls) but might have to
 * be reset for each new instance.
 * This is particularly the case for primitive IDs in the geometry shader.
 */
void draw_new_instance(struct draw_context *draw)
{
   draw_geometry_shader_new_instance(draw->gs.geometry_shader);
}


void draw_destroy( struct draw_context *draw )
{
   struct pipe_context *pipe;
   unsigned i, j;

   if (!draw)
      return;

   pipe = draw->pipe;

   /* free any rasterizer CSOs that we may have created.
    */
   for (i = 0; i < 2; i++) {
      for (j = 0; j < 2; j++) {
         if (draw->rasterizer_no_cull[i][j]) {
            pipe->delete_rasterizer_state(pipe, draw->rasterizer_no_cull[i][j]);
         }
      }
   }

   for (i = 0; i < draw->pt.nr_vertex_buffers; i++) {
      pipe_resource_reference(&draw->pt.vertex_buffer[i].buffer, NULL);
   }

   /* Not so fast -- we're just borrowing this at the moment.
    *
   if (draw->render)
      draw->render->destroy( draw->render );
   */

   draw_pipeline_destroy( draw );
   draw_pt_destroy( draw );
   draw_vs_destroy( draw );
   draw_gs_destroy( draw );
#ifdef HAVE_LLVM
   if (draw->llvm)
      draw_llvm_destroy( draw->llvm );
#endif

   FREE( draw );
}



void draw_flush( struct draw_context *draw )
{
   draw_do_flush( draw, DRAW_FLUSH_BACKEND );
}


/**
 * Specify the Minimum Resolvable Depth factor for polygon offset.
 * This factor potentially depends on the number of Z buffer bits,
 * the rasterization algorithm and the arithmetic performed on Z
 * values between vertex shading and rasterization.  It will vary
 * from one driver to another.
 */
void draw_set_mrd(struct draw_context *draw, double mrd)
{
   draw->mrd = mrd;
}
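
/*
 * Example (hypothetical values): a driver whose rasterizer works with a
 * 16-bit fixed-point depth buffer might report one Z LSB as its MRD:
 *
 *    draw_set_mrd(draw, 1.0 / 65535.0);
 */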


static void update_clip_flags( struct draw_context *draw )
{
   draw->clip_xy = !draw->driver.bypass_clip_xy;
   draw->guard_band_xy = (!draw->driver.bypass_clip_xy &&
                          draw->driver.guard_band_xy);
   draw->clip_z = (!draw->driver.bypass_clip_z &&
                   draw->rasterizer && draw->rasterizer->depth_clip);
   draw->clip_user = draw->rasterizer &&
                     draw->rasterizer->clip_plane_enable != 0;
}

/**
 * Register new primitive rasterization/rendering state.
 * This causes the drawing pipeline to be rebuilt.
 */
void draw_set_rasterizer_state( struct draw_context *draw,
                                const struct pipe_rasterizer_state *raster,
                                void *rast_handle )
{
   if (!draw->suspend_flushing) {
      draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );

      draw->rasterizer = raster;
      draw->rast_handle = rast_handle;
      update_clip_flags(draw);
   }
}

/* With a little more work, llvmpipe will be able to turn this off and
 * do its own x/y clipping.
 *
 * Some hardware can turn off clipping altogether - in particular any
 * hardware with a TNL unit can do its own clipping, even if it is
 * relying on the draw module for some other reason.
 */
void draw_set_driver_clipping( struct draw_context *draw,
                               boolean bypass_clip_xy,
                               boolean bypass_clip_z,
                               boolean guard_band_xy)
{
   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );

   draw->driver.bypass_clip_xy = bypass_clip_xy;
   draw->driver.bypass_clip_z = bypass_clip_z;
   draw->driver.guard_band_xy = guard_band_xy;
   update_clip_flags(draw);
}


/**
 * Plug in the primitive rendering/rasterization stage (which is the last
 * stage in the drawing pipeline).
 * This is provided by the device driver.
 */
void draw_set_rasterize_stage( struct draw_context *draw,
                               struct draw_stage *stage )
{
   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );

   draw->pipeline.rasterize = stage;
}


/**
 * Set the draw module's clipping state.
 */
void draw_set_clip_state( struct draw_context *draw,
                          const struct pipe_clip_state *clip )
{
   draw_do_flush(draw, DRAW_FLUSH_PARAMETER_CHANGE);

   memcpy(&draw->plane[6], clip->ucp, sizeof(clip->ucp));
}


/**
 * Set the draw module's viewport state.
 */
void draw_set_viewport_states( struct draw_context *draw,
                               unsigned start_slot,
                               unsigned num_viewports,
                               const struct pipe_viewport_state *vps )
{
   const struct pipe_viewport_state *viewport = vps;
   draw_do_flush(draw, DRAW_FLUSH_PARAMETER_CHANGE);

   debug_assert(start_slot < PIPE_MAX_VIEWPORTS);
   debug_assert((start_slot + num_viewports) <= PIPE_MAX_VIEWPORTS);

   memcpy(draw->viewports + start_slot, vps,
          sizeof(struct pipe_viewport_state) * num_viewports);

   draw->identity_viewport = (num_viewports == 1) &&
      (viewport->scale[0] == 1.0f &&
       viewport->scale[1] == 1.0f &&
       viewport->scale[2] == 1.0f &&
       viewport->scale[3] == 1.0f &&
       viewport->translate[0] == 0.0f &&
       viewport->translate[1] == 0.0f &&
       viewport->translate[2] == 0.0f &&
       viewport->translate[3] == 0.0f);
}



void
draw_set_vertex_buffers(struct draw_context *draw,
                        unsigned start_slot, unsigned count,
                        const struct pipe_vertex_buffer *buffers)
{
   assert(start_slot + count <= PIPE_MAX_ATTRIBS);

   util_set_vertex_buffers_count(draw->pt.vertex_buffer,
                                 &draw->pt.nr_vertex_buffers,
                                 buffers, start_slot, count);
}


void
draw_set_vertex_elements(struct draw_context *draw,
                         unsigned count,
                         const struct pipe_vertex_element *elements)
{
   assert(count <= PIPE_MAX_ATTRIBS);

   /* We could improve this by only flushing the frontend and the fetch part
    * of the middle. This would avoid recalculating the emit keys. */
   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );

   memcpy(draw->pt.vertex_element, elements, count * sizeof(elements[0]));
   draw->pt.nr_vertex_elements = count;
}


/**
 * Tell drawing context where to find mapped vertex buffers.
 */
void
draw_set_mapped_vertex_buffer(struct draw_context *draw,
                              unsigned attr, const void *buffer,
                              size_t size)
{
   draw->pt.user.vbuffer[attr].map  = buffer;
   draw->pt.user.vbuffer[attr].size = size;
}


void
draw_set_mapped_constant_buffer(struct draw_context *draw,
                                unsigned shader_type,
                                unsigned slot,
                                const void *buffer,
                                unsigned size )
{
   debug_assert(shader_type == PIPE_SHADER_VERTEX ||
                shader_type == PIPE_SHADER_GEOMETRY);
   debug_assert(slot < PIPE_MAX_CONSTANT_BUFFERS);

   draw_do_flush(draw, DRAW_FLUSH_PARAMETER_CHANGE);

   switch (shader_type) {
   case PIPE_SHADER_VERTEX:
      draw->pt.user.vs_constants[slot] = buffer;
      draw->pt.user.vs_constants_size[slot] = size;
      break;
   case PIPE_SHADER_GEOMETRY:
      draw->pt.user.gs_constants[slot] = buffer;
      draw->pt.user.gs_constants_size[slot] = size;
      break;
   default:
      assert(0 && "invalid shader type in draw_set_mapped_constant_buffer");
   }
}
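
/*
 * Example (hypothetical driver code): before drawing, a driver maps the
 * bound constant buffer and passes the CPU pointer to the draw module.
 * my_map_constant_buffer() and cb_size are assumptions for illustration:
 *
 *    const void *map = my_map_constant_buffer(ctx, 0, &cb_size);
 *    draw_set_mapped_constant_buffer(draw, PIPE_SHADER_VERTEX, 0,
 *                                    map, cb_size);
 */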


/**
 * Tells the draw module to draw points with triangles if their size
 * is greater than this threshold.
 */
void
draw_wide_point_threshold(struct draw_context *draw, float threshold)
{
   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );
   draw->pipeline.wide_point_threshold = threshold;
}


/**
 * Should the draw module handle point->quad conversion for drawing sprites?
 */
void
draw_wide_point_sprites(struct draw_context *draw, boolean draw_sprite)
{
   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );
   draw->pipeline.wide_point_sprites = draw_sprite;
}


/**
 * Tells the draw module to draw lines with triangles if their width
 * is greater than this threshold.
 */
void
draw_wide_line_threshold(struct draw_context *draw, float threshold)
{
   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );
   draw->pipeline.wide_line_threshold = roundf(threshold);
}


/**
 * Tells the draw module whether or not to implement line stipple.
 */
void
draw_enable_line_stipple(struct draw_context *draw, boolean enable)
{
   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );
   draw->pipeline.line_stipple = enable;
}


/**
 * Tells the draw module whether to convert points to quads for sprite mode.
 */
void
draw_enable_point_sprites(struct draw_context *draw, boolean enable)
{
   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );
   draw->pipeline.point_sprite = enable;
}


void
draw_set_force_passthrough( struct draw_context *draw, boolean enable )
{
   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );
   draw->force_passthrough = enable;
}



/**
 * Allocate an extra vertex/geometry shader vertex attribute, if it doesn't
 * exist already.
 *
 * This is used by some of the optional draw module stages such
 * as wide_point which may need to allocate additional generic/texcoord
 * attributes.
 */
int
draw_alloc_extra_vertex_attrib(struct draw_context *draw,
                               uint semantic_name, uint semantic_index)
{
   int slot;
   uint num_outputs;
   uint n;

   slot = draw_find_shader_output(draw, semantic_name, semantic_index);
   if (slot >= 0) {
      return slot;
   }

   num_outputs = draw_current_shader_outputs(draw);
   n = draw->extra_shader_outputs.num;

   assert(n < Elements(draw->extra_shader_outputs.semantic_name));

   draw->extra_shader_outputs.semantic_name[n] = semantic_name;
   draw->extra_shader_outputs.semantic_index[n] = semantic_index;
   draw->extra_shader_outputs.slot[n] = num_outputs + n;
   draw->extra_shader_outputs.num++;

   return draw->extra_shader_outputs.slot[n];
}
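
/*
 * Example (sketch): a stage such as wide_point that needs to emit sprite
 * texcoords could reserve a generic attribute like this (semantic index 0
 * is just an illustrative choice):
 *
 *    int slot = draw_alloc_extra_vertex_attrib(draw,
 *                                              TGSI_SEMANTIC_GENERIC, 0);
 *    // 'slot' is where the stage writes the new attribute in each vertex
 */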


/**
 * Remove all extra vertex attributes that were allocated with
 * draw_alloc_extra_vertex_attrib().
 */
void
draw_remove_extra_vertex_attribs(struct draw_context *draw)
{
   draw->extra_shader_outputs.num = 0;
}


/**
 * If a geometry shader is present, return its info, else the vertex shader's
 * info.
 */
struct tgsi_shader_info *
draw_get_shader_info(const struct draw_context *draw)
{
   if (draw->gs.geometry_shader) {
      return &draw->gs.geometry_shader->info;
   } else {
      return &draw->vs.vertex_shader->info;
   }
}


/**
 * Ask the draw module for the location/slot of the given vertex attribute in
 * a post-transformed vertex.
 *
 * With this function, drivers that use the draw module should have no reason
 * to track the current vertex/geometry shader.
 *
 * Note that the draw module may sometimes generate vertices with extra
 * attributes (such as texcoords for AA lines).  The driver can call this
 * function to find those attributes.
 *
 * -1 is returned if the attribute is not found, since this is an
 * undefined situation.  Note that zero is a valid slot for any of the
 * attributes, because position is not required to be at attribute 0,
 * or even to be present at all.
 */
int
draw_find_shader_output(const struct draw_context *draw,
                        uint semantic_name, uint semantic_index)
{
   const struct tgsi_shader_info *info = draw_get_shader_info(draw);
   uint i;

   for (i = 0; i < info->num_outputs; i++) {
      if (info->output_semantic_name[i] == semantic_name &&
          info->output_semantic_index[i] == semantic_index)
         return i;
   }

   /* Search the extra vertex attributes */
   for (i = 0; i < draw->extra_shader_outputs.num; i++) {
      if (draw->extra_shader_outputs.semantic_name[i] == semantic_name &&
          draw->extra_shader_outputs.semantic_index[i] == semantic_index) {
         return draw->extra_shader_outputs.slot[i];
      }
   }

   return -1;
}
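
/*
 * Example (hypothetical driver code): a driver emitting the vertices
 * produced by the draw module can locate the extra texcoord generated for
 * AA lines (the semantic index 0 here is just an illustrative assumption):
 *
 *    int aa_slot = draw_find_shader_output(draw, TGSI_SEMANTIC_GENERIC, 0);
 *    if (aa_slot >= 0) {
 *       // include output 'aa_slot' in the driver's vertex layout
 *    }
 */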


/**
 * Return the total number of shader outputs.  This function is similar to
 * draw_current_shader_outputs() but it also counts any extra
 * vertex/geometry output attributes that may be filled in by some draw
 * stages (such as AA point, AA line).
 *
 * If a geometry shader is present, its outputs are counted;
 * otherwise the vertex shader's outputs are used.
 */
uint
draw_num_shader_outputs(const struct draw_context *draw)
{
   const struct tgsi_shader_info *info = draw_get_shader_info(draw);
   uint count;

   count = info->num_outputs;
   count += draw->extra_shader_outputs.num;

   return count;
}


/**
 * Provide TGSI sampler objects for vertex/geometry shaders that use
 * texture fetches.  This state only needs to be set once per context.
 * This might only be used by software drivers for the time being.
 */
void
draw_texture_sampler(struct draw_context *draw,
                     uint shader,
                     struct tgsi_sampler *sampler)
{
   if (shader == PIPE_SHADER_VERTEX) {
      draw->vs.tgsi.sampler = sampler;
   } else {
      debug_assert(shader == PIPE_SHADER_GEOMETRY);
      draw->gs.tgsi.sampler = sampler;
   }
}




void draw_set_render( struct draw_context *draw,
                      struct vbuf_render *render )
{
   draw->render = render;
}


/**
 * Tell the draw module where vertex indexes/elements are located, and
 * their size (in bytes).
 *
 * Note: the caller must apply the pipe_index_buffer::offset value to
 * the address.  The draw module doesn't do that.
 */
void
draw_set_indexes(struct draw_context *draw,
                 const void *elements, unsigned elem_size,
                 unsigned elem_buffer_space)
{
   assert(elem_size == 0 ||
          elem_size == 1 ||
          elem_size == 2 ||
          elem_size == 4);
   draw->pt.user.elts = elements;
   draw->pt.user.eltSizeIB = elem_size;
   if (elem_size)
      draw->pt.user.eltMax = elem_buffer_space / elem_size;
   else
      draw->pt.user.eltMax = 0;
}
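
/*
 * Example (hypothetical driver code): note that the caller, not the draw
 * module, applies the index buffer offset; 'mapped_ib' and 'ib_size' are
 * assumptions for illustration:
 *
 *    const uint8_t *elts = (const uint8_t *) mapped_ib + ib->offset;
 *    draw_set_indexes(draw, elts, ib->index_size, ib_size - ib->offset);
 */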


/* Revamp me please:
 */
void draw_do_flush( struct draw_context *draw, unsigned flags )
{
   if (!draw->suspend_flushing)
   {
      assert(!draw->flushing); /* catch inadvertent recursion */

      draw->flushing = TRUE;

      draw_pipeline_flush( draw, flags );

      draw_pt_flush( draw, flags );

      draw->flushing = FALSE;
   }
}


/**
 * Return the number of output attributes produced by the geometry
 * shader, if present.  If no geometry shader, return the number of
 * outputs from the vertex shader.
 * \sa draw_num_shader_outputs
 */
uint
draw_current_shader_outputs(const struct draw_context *draw)
{
   if (draw->gs.geometry_shader)
      return draw->gs.num_gs_outputs;
   return draw->vs.num_vs_outputs;
}


/**
 * Return the index of the shader output which will contain the
 * vertex position.
 */
uint
draw_current_shader_position_output(const struct draw_context *draw)
{
   if (draw->gs.geometry_shader)
      return draw->gs.position_output;
   return draw->vs.position_output;
}


/**
 * Return the index of the shader output which will contain the
 * viewport index.
 */
uint
draw_current_shader_viewport_index_output(const struct draw_context *draw)
{
   if (draw->gs.geometry_shader)
      return draw->gs.geometry_shader->viewport_index_output;
   return 0;
}

/**
 * Returns true if there's a geometry shader bound and the geometry
 * shader writes out a viewport index.
 */
boolean
draw_current_shader_uses_viewport_index(const struct draw_context *draw)
{
   if (draw->gs.geometry_shader)
      return draw->gs.geometry_shader->info.writes_viewport_index;
   return FALSE;
}


/**
 * Return the index of the shader output which will contain the
 * clip vertex position.
 */
uint
draw_current_shader_clipvertex_output(const struct draw_context *draw)
{
   return draw->vs.clipvertex_output;
}

uint
draw_current_shader_clipdistance_output(const struct draw_context *draw, int index)
{
   if (draw->gs.geometry_shader)
      return draw->gs.geometry_shader->clipdistance_output[index];
   return draw->vs.clipdistance_output[index];
}


uint
draw_current_shader_num_written_clipdistances(const struct draw_context *draw)
{
   if (draw->gs.geometry_shader)
      return draw->gs.geometry_shader->info.num_written_clipdistance;
   return draw->vs.vertex_shader->info.num_written_clipdistance;
}

/**
 * Return a pointer/handle for a driver/CSO rasterizer object which
 * disables culling, stippling, unfilled tris, etc.
 * This is used by some pipeline stages (such as wide_point, aa_line
 * and aa_point) which convert points/lines into triangles.  In those
 * cases we don't want to accidentally cull the triangles.
 *
 * \param scissor  should the rasterizer state enable scissoring?
 * \param flatshade  should the rasterizer state use flat shading?
 * \return  rasterizer CSO handle
 */
void *
draw_get_rasterizer_no_cull( struct draw_context *draw,
                             boolean scissor,
                             boolean flatshade )
{
   if (!draw->rasterizer_no_cull[scissor][flatshade]) {
      /* create now */
      struct pipe_context *pipe = draw->pipe;
      struct pipe_rasterizer_state rast;

      memset(&rast, 0, sizeof(rast));
      rast.scissor = scissor;
      rast.flatshade = flatshade;
      rast.front_ccw = 1;
      rast.half_pixel_center = draw->rasterizer->half_pixel_center;
      rast.bottom_edge_rule = draw->rasterizer->bottom_edge_rule;
      rast.clip_halfz = draw->rasterizer->clip_halfz;

      draw->rasterizer_no_cull[scissor][flatshade] =
         pipe->create_rasterizer_state(pipe, &rast);
   }
   return draw->rasterizer_no_cull[scissor][flatshade];
}
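
/*
 * Example (sketch): a pipeline stage that expands points or lines into
 * triangles would bind this CSO around its own rendering so the generated
 * triangles aren't culled or stippled:
 *
 *    void *rast = draw_get_rasterizer_no_cull(draw,
 *                                             rasterizer->scissor,
 *                                             rasterizer->flatshade);
 *    pipe->bind_rasterizer_state(pipe, rast);
 *    // ... emit the generated triangles, then restore the previous state
 */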

void
draw_set_mapped_so_targets(struct draw_context *draw,
                           int num_targets,
                           struct draw_so_target *targets[PIPE_MAX_SO_BUFFERS])
{
   int i;

   for (i = 0; i < num_targets; i++)
      draw->so.targets[i] = targets[i];
   for (i = num_targets; i < PIPE_MAX_SO_BUFFERS; i++)
      draw->so.targets[i] = NULL;

   draw->so.num_targets = num_targets;
}

void
draw_set_sampler_views(struct draw_context *draw,
                       unsigned shader_stage,
                       struct pipe_sampler_view **views,
                       unsigned num)
{
   unsigned i;

   debug_assert(shader_stage < PIPE_SHADER_TYPES);
   debug_assert(num <= PIPE_MAX_SHADER_SAMPLER_VIEWS);

   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );

   for (i = 0; i < num; ++i)
      draw->sampler_views[shader_stage][i] = views[i];
   for (i = num; i < PIPE_MAX_SHADER_SAMPLER_VIEWS; ++i)
      draw->sampler_views[shader_stage][i] = NULL;

   draw->num_sampler_views[shader_stage] = num;
}

void
draw_set_samplers(struct draw_context *draw,
                  unsigned shader_stage,
                  struct pipe_sampler_state **samplers,
                  unsigned num)
{
   unsigned i;

   debug_assert(shader_stage < PIPE_SHADER_TYPES);
   debug_assert(num <= PIPE_MAX_SAMPLERS);

   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );

   for (i = 0; i < num; ++i)
      draw->samplers[shader_stage][i] = samplers[i];
   for (i = num; i < PIPE_MAX_SAMPLERS; ++i)
      draw->samplers[shader_stage][i] = NULL;

   draw->num_samplers[shader_stage] = num;

#ifdef HAVE_LLVM
   if (draw->llvm)
      draw_llvm_set_sampler_state(draw, shader_stage);
#endif
}

void
draw_set_mapped_texture(struct draw_context *draw,
                        unsigned shader_stage,
                        unsigned sview_idx,
                        uint32_t width, uint32_t height, uint32_t depth,
                        uint32_t first_level, uint32_t last_level,
                        const void *base_ptr,
                        uint32_t row_stride[PIPE_MAX_TEXTURE_LEVELS],
                        uint32_t img_stride[PIPE_MAX_TEXTURE_LEVELS],
                        uint32_t mip_offsets[PIPE_MAX_TEXTURE_LEVELS])
{
#ifdef HAVE_LLVM
   if (draw->llvm)
      draw_llvm_set_mapped_texture(draw,
                                   shader_stage,
                                   sview_idx,
                                   width, height, depth, first_level,
                                   last_level, base_ptr,
                                   row_stride, img_stride, mip_offsets);
#endif
}

/**
 * XXX: The results for PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS are unreliable
 * because there are two different ways of setting textures, and drivers
 * typically only support one.
 */
int
draw_get_shader_param_no_llvm(unsigned shader, enum pipe_shader_cap param)
{
   switch(shader) {
   case PIPE_SHADER_VERTEX:
   case PIPE_SHADER_GEOMETRY:
      return tgsi_exec_get_shader_param(param);
   default:
      return 0;
   }
}

/**
 * XXX: The results for PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS are unreliable
 * because there are two different ways of setting textures, and drivers
 * typically only support one.
 */
int
draw_get_shader_param(unsigned shader, enum pipe_shader_cap param)
{
#ifdef HAVE_LLVM
   if (draw_get_option_use_llvm()) {
      switch(shader) {
      case PIPE_SHADER_VERTEX:
      case PIPE_SHADER_GEOMETRY:
         return gallivm_get_shader_param(param);
      default:
         return 0;
      }
   }
#endif

   return draw_get_shader_param_no_llvm(shader, param);
}

/**
 * Enables or disables collection of statistics.
 *
 * The draw module is capable of generating statistics for the vertex
 * processing pipeline. Collection of that data isn't free and so
 * it's disabled by default. The users of the module can enable
 * (or disable) this functionality through this function.
 * The actual data will be emitted through the VBUF interface,
 * the 'pipeline_statistics' callback to be exact.
 */
void
draw_collect_pipeline_statistics(struct draw_context *draw,
                                 boolean enable)
{
   draw->collect_statistics = enable;
}

/**
 * Computes clipper invocation statistics.
 *
 * Figures out how many primitives would have been
 * sent to the clipper given the specified
 * prim info data.
 */
void
draw_stats_clipper_primitives(struct draw_context *draw,
                              const struct draw_prim_info *prim_info)
{
   if (draw->collect_statistics) {
      unsigned start, i;
      for (start = i = 0;
           i < prim_info->primitive_count;
           start += prim_info->primitive_lengths[i], i++)
      {
         draw->statistics.c_invocations +=
            u_decomposed_prims_for_vertices(prim_info->prim,
                                            prim_info->primitive_lengths[i]);
      }
   }
}