1 /* savage_state.c -- State and drawing support for Savage
3 * Copyright 2004 Felix Kuehling
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sub license,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions of the Software.
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
20 * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
22 * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 #include "savage_drm.h"
27 #include "savage_drv.h"
29 void savage_emit_clip_rect_s3d(drm_savage_private_t *dev_priv,
30 const drm_clip_rect_t *pbox)
32 uint32_t scstart = dev_priv->state.s3d.new_scstart;
33 uint32_t scend = dev_priv->state.s3d.new_scend;
34 scstart = (scstart & ~SAVAGE_SCISSOR_MASK_S3D) |
35 ((uint32_t)pbox->x1 & 0x000007ff) |
36 (((uint32_t)pbox->y1 << 16) & 0x07ff0000);
37 scend = (scend & ~SAVAGE_SCISSOR_MASK_S3D) |
38 (((uint32_t)pbox->x2-1) & 0x000007ff) |
39 ((((uint32_t)pbox->y2-1) << 16) & 0x07ff0000);
40 if (scstart != dev_priv->state.s3d.scstart ||
41 scend != dev_priv->state.s3d.scend) {
44 DMA_WRITE(BCI_CMD_WAIT|BCI_CMD_WAIT_3D);
45 DMA_SET_REGISTERS(SAVAGE_SCSTART_S3D, 2);
48 dev_priv->state.s3d.scstart = scstart;
49 dev_priv->state.s3d.scend = scend;
50 dev_priv->waiting = 1;
55 void savage_emit_clip_rect_s4(drm_savage_private_t *dev_priv,
56 const drm_clip_rect_t *pbox)
58 uint32_t drawctrl0 = dev_priv->state.s4.new_drawctrl0;
59 uint32_t drawctrl1 = dev_priv->state.s4.new_drawctrl1;
60 drawctrl0 = (drawctrl0 & ~SAVAGE_SCISSOR_MASK_S4) |
61 ((uint32_t)pbox->x1 & 0x000007ff) |
62 (((uint32_t)pbox->y1 << 12) & 0x00fff000);
63 drawctrl1 = (drawctrl1 & ~SAVAGE_SCISSOR_MASK_S4) |
64 (((uint32_t)pbox->x2-1) & 0x000007ff) |
65 ((((uint32_t)pbox->y2-1) << 12) & 0x00fff000);
66 if (drawctrl0 != dev_priv->state.s4.drawctrl0 ||
67 drawctrl1 != dev_priv->state.s4.drawctrl1) {
70 DMA_WRITE(BCI_CMD_WAIT|BCI_CMD_WAIT_3D);
71 DMA_SET_REGISTERS(SAVAGE_DRAWCTRL0_S4, 2);
74 dev_priv->state.s4.drawctrl0 = drawctrl0;
75 dev_priv->state.s4.drawctrl1 = drawctrl1;
76 dev_priv->waiting = 1;
81 static int savage_verify_texaddr(drm_savage_private_t *dev_priv, int unit,
84 if ((addr & 6) != 2) { /* reserved bits */
85 DRM_ERROR("bad texAddr%d %08x (reserved bits)\n", unit, addr);
86 return DRM_ERR(EINVAL);
88 if (!(addr & 1)) { /* local */
90 if (addr < dev_priv->texture_offset ||
91 addr >= dev_priv->texture_offset+dev_priv->texture_size) {
93 ("bad texAddr%d %08x (local addr out of range)\n",
95 return DRM_ERR(EINVAL);
98 if (!dev_priv->agp_textures) {
99 DRM_ERROR("bad texAddr%d %08x (AGP not available)\n",
101 return DRM_ERR(EINVAL);
104 if (addr < dev_priv->agp_textures->offset ||
105 addr >= (dev_priv->agp_textures->offset +
106 dev_priv->agp_textures->size)) {
108 ("bad texAddr%d %08x (AGP addr out of range)\n",
110 return DRM_ERR(EINVAL);
/* Helper macros for savage_verify_state_*.  They expect "start",
 * "count", "regs" and "dev_priv" to be in scope at the expansion site.
 *
 * SAVE_STATE copies register "reg" from the user-supplied update into
 * the driver's shadow state, if the update range covers it.
 */
#define SAVE_STATE(reg,where)			\
	if(start <= reg && start+count > reg)	\
		dev_priv->state.where = regs[reg - start]
/* SAVE_STATE_MASK does the same but only replaces the bits selected
 * by "mask", preserving the remaining shadow-state bits.  Wrapped in
 * do { } while(0) so it expands to a single statement.
 */
#define SAVE_STATE_MASK(reg,where,mask) do {			\
	if(start <= reg && start+count > reg) {			\
		uint32_t tmp;					\
		tmp = regs[reg - start];			\
		dev_priv->state.where = (tmp & (mask)) |	\
			(dev_priv->state.where & ~(mask));	\
	}							\
} while (0)
127 static int savage_verify_state_s3d(drm_savage_private_t *dev_priv,
128 unsigned int start, unsigned int count,
129 const uint32_t *regs)
131 if (start < SAVAGE_TEXPALADDR_S3D ||
132 start+count-1 > SAVAGE_DESTTEXRWWATERMARK_S3D) {
133 DRM_ERROR("invalid register range (0x%04x-0x%04x)\n",
134 start, start+count-1);
135 return DRM_ERR(EINVAL);
138 SAVE_STATE_MASK(SAVAGE_SCSTART_S3D, s3d.new_scstart,
139 ~SAVAGE_SCISSOR_MASK_S3D);
140 SAVE_STATE_MASK(SAVAGE_SCEND_S3D, s3d.new_scend,
141 ~SAVAGE_SCISSOR_MASK_S3D);
143 /* if any texture regs were changed ... */
144 if (start <= SAVAGE_TEXCTRL_S3D &&
145 start+count > SAVAGE_TEXPALADDR_S3D) {
146 /* ... check texture state */
147 SAVE_STATE(SAVAGE_TEXCTRL_S3D, s3d.texctrl);
148 SAVE_STATE(SAVAGE_TEXADDR_S3D, s3d.texaddr);
149 if (dev_priv->state.s3d.texctrl & SAVAGE_TEXCTRL_TEXEN_MASK)
150 return savage_verify_texaddr(dev_priv, 0,
151 dev_priv->state.s3d.texaddr);
157 static int savage_verify_state_s4(drm_savage_private_t *dev_priv,
158 unsigned int start, unsigned int count,
159 const uint32_t *regs)
163 if (start < SAVAGE_DRAWLOCALCTRL_S4 ||
164 start+count-1 > SAVAGE_TEXBLENDCOLOR_S4) {
165 DRM_ERROR("invalid register range (0x%04x-0x%04x)\n",
166 start, start+count-1);
167 return DRM_ERR(EINVAL);
170 SAVE_STATE_MASK(SAVAGE_DRAWCTRL0_S4, s4.new_drawctrl0,
171 ~SAVAGE_SCISSOR_MASK_S4);
172 SAVE_STATE_MASK(SAVAGE_DRAWCTRL1_S4, s4.new_drawctrl1,
173 ~SAVAGE_SCISSOR_MASK_S4);
175 /* if any texture regs were changed ... */
176 if (start <= SAVAGE_TEXDESCR_S4 &&
177 start + count > SAVAGE_TEXPALADDR_S4) {
178 /* ... check texture state */
179 SAVE_STATE(SAVAGE_TEXDESCR_S4, s4.texdescr);
180 SAVE_STATE(SAVAGE_TEXADDR0_S4, s4.texaddr0);
181 SAVE_STATE(SAVAGE_TEXADDR1_S4, s4.texaddr1);
182 if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX0EN_MASK)
183 ret |= savage_verify_texaddr(dev_priv, 0,
184 dev_priv->state.s4.texaddr0);
185 if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX1EN_MASK)
186 ret |= savage_verify_texaddr(dev_priv, 1,
187 dev_priv->state.s4.texaddr1);
193 #undef SAVE_STATE_MASK
195 static int savage_dispatch_state(drm_savage_private_t *dev_priv,
196 const drm_savage_cmd_header_t *cmd_header,
197 const uint32_t *regs)
199 unsigned int count = cmd_header->state.count;
200 unsigned int start = cmd_header->state.start;
201 unsigned int count2 = 0;
202 unsigned int bci_size;
209 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
210 ret = savage_verify_state_s3d(dev_priv, start, count, regs);
213 /* scissor regs are emitted in savage_dispatch_draw */
214 if (start < SAVAGE_SCSTART_S3D) {
215 if (start+count > SAVAGE_SCEND_S3D+1)
216 count2 = count - (SAVAGE_SCEND_S3D+1 - start);
217 if (start+count > SAVAGE_SCSTART_S3D)
218 count = SAVAGE_SCSTART_S3D - start;
219 } else if (start <= SAVAGE_SCEND_S3D) {
220 if (start+count > SAVAGE_SCEND_S3D+1) {
221 count -= SAVAGE_SCEND_S3D+1 - start;
222 start = SAVAGE_SCEND_S3D+1;
227 ret = savage_verify_state_s4(dev_priv, start, count, regs);
230 /* scissor regs are emitted in savage_dispatch_draw */
231 if (start < SAVAGE_DRAWCTRL0_S4) {
232 if (start+count > SAVAGE_DRAWCTRL1_S4+1)
234 (SAVAGE_DRAWCTRL1_S4 + 1 - start);
235 if (start+count > SAVAGE_DRAWCTRL0_S4)
236 count = SAVAGE_DRAWCTRL0_S4 - start;
237 } else if (start <= SAVAGE_DRAWCTRL1_S4) {
238 if (start+count > SAVAGE_DRAWCTRL1_S4+1) {
239 count -= SAVAGE_DRAWCTRL1_S4+1 - start;
240 start = SAVAGE_DRAWCTRL1_S4+1;
246 bci_size = count + (count+254)/255 + count2 + (count2+254)/255;
248 if (cmd_header->state.global) {
249 BEGIN_DMA(bci_size+1);
250 DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
251 dev_priv->waiting = 1;
258 unsigned int n = count < 255 ? count : 255;
259 DMA_SET_REGISTERS(start, n);
276 static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv,
277 const drm_savage_cmd_header_t *cmd_header,
278 const drm_buf_t *dmabuf)
280 unsigned char reorder = 0;
281 unsigned int prim = cmd_header->prim.prim;
282 unsigned int skip = cmd_header->prim.skip;
283 unsigned int n = cmd_header->prim.count;
284 unsigned int start = cmd_header->prim.start;
289 DRM_ERROR("called without dma buffers!\n");
290 return DRM_ERR(EINVAL);
297 case SAVAGE_PRIM_TRILIST_201:
299 prim = SAVAGE_PRIM_TRILIST;
300 case SAVAGE_PRIM_TRILIST:
302 DRM_ERROR("wrong number of vertices %u in TRILIST\n",
304 return DRM_ERR(EINVAL);
307 case SAVAGE_PRIM_TRISTRIP:
308 case SAVAGE_PRIM_TRIFAN:
311 ("wrong number of vertices %u in TRIFAN/STRIP\n",
313 return DRM_ERR(EINVAL);
317 DRM_ERROR("invalid primitive type %u\n", prim);
318 return DRM_ERR(EINVAL);
321 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
323 DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
324 return DRM_ERR(EINVAL);
327 unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) -
328 (skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) -
329 (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1);
330 if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) {
331 DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
332 return DRM_ERR(EINVAL);
335 DRM_ERROR("TRILIST_201 used on Savage4 hardware\n");
336 return DRM_ERR(EINVAL);
340 if (start + n > dmabuf->total/32) {
341 DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
342 start, start + n - 1, dmabuf->total/32);
343 return DRM_ERR(EINVAL);
346 /* Vertex DMA doesn't work with command DMA at the same time,
347 * so we use BCI_... to submit commands here. Flush buffered
348 * faked DMA first. */
351 if (dmabuf->bus_address != dev_priv->state.common.vbaddr) {
353 BCI_SET_REGISTERS(SAVAGE_VERTBUFADDR, 1);
354 BCI_WRITE(dmabuf->bus_address | dev_priv->dma_type);
355 dev_priv->state.common.vbaddr = dmabuf->bus_address;
357 if (S3_SAVAGE3D_SERIES(dev_priv->chipset) && dev_priv->waiting) {
358 /* Workaround for what looks like a hardware bug. If a
359 * WAIT_3D_IDLE was emitted some time before the
360 * indexed drawing command then the engine will lock
361 * up. There are two known workarounds:
362 * WAIT_IDLE_EMPTY or emit at least 63 NOPs. */
364 for (i = 0; i < 63; ++i)
365 BCI_WRITE(BCI_CMD_WAIT);
366 dev_priv->waiting = 0;
371 /* Can emit up to 255 indices (85 triangles) at once. */
372 unsigned int count = n > 255 ? 255 : n;
374 /* Need to reorder indices for correct flat
375 * shading while preserving the clock sense
376 * for correct culling. Only on Savage3D. */
377 int reorder[3] = {-1, -1, -1};
378 reorder[start%3] = 2;
380 BEGIN_BCI((count+1+1)/2);
381 BCI_DRAW_INDICES_S3D(count, prim, start+2);
383 for (i = start+1; i+1 < start+count; i += 2)
384 BCI_WRITE((i + reorder[i % 3]) |
386 reorder[(i + 1) % 3]) << 16));
388 BCI_WRITE(i + reorder[i%3]);
389 } else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
390 BEGIN_BCI((count+1+1)/2);
391 BCI_DRAW_INDICES_S3D(count, prim, start);
393 for (i = start+1; i+1 < start+count; i += 2)
394 BCI_WRITE(i | ((i+1) << 16));
398 BEGIN_BCI((count+2+1)/2);
399 BCI_DRAW_INDICES_S4(count, prim, skip);
401 for (i = start; i+1 < start+count; i += 2)
402 BCI_WRITE(i | ((i+1) << 16));
410 prim |= BCI_CMD_DRAW_CONT;
416 static int savage_dispatch_vb_prim(drm_savage_private_t *dev_priv,
417 const drm_savage_cmd_header_t *cmd_header,
418 const uint32_t *vtxbuf, unsigned int vb_size,
419 unsigned int vb_stride)
421 unsigned char reorder = 0;
422 unsigned int prim = cmd_header->prim.prim;
423 unsigned int skip = cmd_header->prim.skip;
424 unsigned int n = cmd_header->prim.count;
425 unsigned int start = cmd_header->prim.start;
426 unsigned int vtx_size;
434 case SAVAGE_PRIM_TRILIST_201:
436 prim = SAVAGE_PRIM_TRILIST;
437 case SAVAGE_PRIM_TRILIST:
439 DRM_ERROR("wrong number of vertices %u in TRILIST\n",
441 return DRM_ERR(EINVAL);
444 case SAVAGE_PRIM_TRISTRIP:
445 case SAVAGE_PRIM_TRIFAN:
448 ("wrong number of vertices %u in TRIFAN/STRIP\n",
450 return DRM_ERR(EINVAL);
454 DRM_ERROR("invalid primitive type %u\n", prim);
455 return DRM_ERR(EINVAL);
458 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
459 if (skip > SAVAGE_SKIP_ALL_S3D) {
460 DRM_ERROR("invalid skip flags 0x%04x\n", skip);
461 return DRM_ERR(EINVAL);
463 vtx_size = 8; /* full vertex */
465 if (skip > SAVAGE_SKIP_ALL_S4) {
466 DRM_ERROR("invalid skip flags 0x%04x\n", skip);
467 return DRM_ERR(EINVAL);
469 vtx_size = 10; /* full vertex */
472 vtx_size -= (skip & 1) + (skip >> 1 & 1) +
473 (skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) +
474 (skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1);
476 if (vtx_size > vb_stride) {
477 DRM_ERROR("vertex size greater than vb stride (%u > %u)\n",
478 vtx_size, vb_stride);
479 return DRM_ERR(EINVAL);
482 if (start + n > vb_size / (vb_stride*4)) {
483 DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
484 start, start + n - 1, vb_size / (vb_stride*4));
485 return DRM_ERR(EINVAL);
490 /* Can emit up to 255 vertices (85 triangles) at once. */
491 unsigned int count = n > 255 ? 255 : n;
493 /* Need to reorder vertices for correct flat
494 * shading while preserving the clock sense
495 * for correct culling. Only on Savage3D. */
496 int reorder[3] = {-1, -1, -1};
497 reorder[start%3] = 2;
499 BEGIN_DMA(count*vtx_size+1);
500 DMA_DRAW_PRIMITIVE(count, prim, skip);
502 for (i = start; i < start+count; ++i) {
503 unsigned int j = i + reorder[i % 3];
504 DMA_COPY(&vtxbuf[vb_stride*j], vtx_size);
509 BEGIN_DMA(count*vtx_size+1);
510 DMA_DRAW_PRIMITIVE(count, prim, skip);
512 if (vb_stride == vtx_size) {
513 DMA_COPY(&vtxbuf[vb_stride*start],
516 for (i = start; i < start+count; ++i) {
517 DMA_COPY(&vtxbuf[vb_stride*i],
528 prim |= BCI_CMD_DRAW_CONT;
534 static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv,
535 const drm_savage_cmd_header_t *cmd_header,
537 const drm_buf_t *dmabuf)
539 unsigned char reorder = 0;
540 unsigned int prim = cmd_header->idx.prim;
541 unsigned int skip = cmd_header->idx.skip;
542 unsigned int n = cmd_header->idx.count;
547 DRM_ERROR("called without dma buffers!\n");
548 return DRM_ERR(EINVAL);
555 case SAVAGE_PRIM_TRILIST_201:
557 prim = SAVAGE_PRIM_TRILIST;
558 case SAVAGE_PRIM_TRILIST:
560 DRM_ERROR("wrong number of indices %u in TRILIST\n", n);
561 return DRM_ERR(EINVAL);
564 case SAVAGE_PRIM_TRISTRIP:
565 case SAVAGE_PRIM_TRIFAN:
568 ("wrong number of indices %u in TRIFAN/STRIP\n", n);
569 return DRM_ERR(EINVAL);
573 DRM_ERROR("invalid primitive type %u\n", prim);
574 return DRM_ERR(EINVAL);
577 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
579 DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
580 return DRM_ERR(EINVAL);
583 unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) -
584 (skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) -
585 (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1);
586 if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) {
587 DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
588 return DRM_ERR(EINVAL);
591 DRM_ERROR("TRILIST_201 used on Savage4 hardware\n");
592 return DRM_ERR(EINVAL);
596 /* Vertex DMA doesn't work with command DMA at the same time,
597 * so we use BCI_... to submit commands here. Flush buffered
598 * faked DMA first. */
601 if (dmabuf->bus_address != dev_priv->state.common.vbaddr) {
603 BCI_SET_REGISTERS(SAVAGE_VERTBUFADDR, 1);
604 BCI_WRITE(dmabuf->bus_address | dev_priv->dma_type);
605 dev_priv->state.common.vbaddr = dmabuf->bus_address;
607 if (S3_SAVAGE3D_SERIES(dev_priv->chipset) && dev_priv->waiting) {
608 /* Workaround for what looks like a hardware bug. If a
609 * WAIT_3D_IDLE was emitted some time before the
610 * indexed drawing command then the engine will lock
611 * up. There are two known workarounds:
612 * WAIT_IDLE_EMPTY or emit at least 63 NOPs. */
614 for (i = 0; i < 63; ++i)
615 BCI_WRITE(BCI_CMD_WAIT);
616 dev_priv->waiting = 0;
621 /* Can emit up to 255 indices (85 triangles) at once. */
622 unsigned int count = n > 255 ? 255 : n;
625 for (i = 0; i < count; ++i) {
626 if (idx[i] > dmabuf->total/32) {
627 DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
628 i, idx[i], dmabuf->total/32);
629 return DRM_ERR(EINVAL);
634 /* Need to reorder indices for correct flat
635 * shading while preserving the clock sense
636 * for correct culling. Only on Savage3D. */
637 int reorder[3] = {2, -1, -1};
639 BEGIN_BCI((count+1+1)/2);
640 BCI_DRAW_INDICES_S3D(count, prim, idx[2]);
642 for (i = 1; i+1 < count; i += 2)
643 BCI_WRITE(idx[i + reorder[i % 3]] |
645 reorder[(i + 1) % 3]] << 16));
647 BCI_WRITE(idx[i + reorder[i%3]]);
648 } else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
649 BEGIN_BCI((count+1+1)/2);
650 BCI_DRAW_INDICES_S3D(count, prim, idx[0]);
652 for (i = 1; i+1 < count; i += 2)
653 BCI_WRITE(idx[i] | (idx[i+1] << 16));
657 BEGIN_BCI((count+2+1)/2);
658 BCI_DRAW_INDICES_S4(count, prim, skip);
660 for (i = 0; i+1 < count; i += 2)
661 BCI_WRITE(idx[i] | (idx[i+1] << 16));
669 prim |= BCI_CMD_DRAW_CONT;
675 static int savage_dispatch_vb_idx(drm_savage_private_t *dev_priv,
676 const drm_savage_cmd_header_t *cmd_header,
678 const uint32_t *vtxbuf,
679 unsigned int vb_size, unsigned int vb_stride)
681 unsigned char reorder = 0;
682 unsigned int prim = cmd_header->idx.prim;
683 unsigned int skip = cmd_header->idx.skip;
684 unsigned int n = cmd_header->idx.count;
685 unsigned int vtx_size;
693 case SAVAGE_PRIM_TRILIST_201:
695 prim = SAVAGE_PRIM_TRILIST;
696 case SAVAGE_PRIM_TRILIST:
698 DRM_ERROR("wrong number of indices %u in TRILIST\n", n);
699 return DRM_ERR(EINVAL);
702 case SAVAGE_PRIM_TRISTRIP:
703 case SAVAGE_PRIM_TRIFAN:
706 ("wrong number of indices %u in TRIFAN/STRIP\n", n);
707 return DRM_ERR(EINVAL);
711 DRM_ERROR("invalid primitive type %u\n", prim);
712 return DRM_ERR(EINVAL);
715 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
716 if (skip > SAVAGE_SKIP_ALL_S3D) {
717 DRM_ERROR("invalid skip flags 0x%04x\n", skip);
718 return DRM_ERR(EINVAL);
720 vtx_size = 8; /* full vertex */
722 if (skip > SAVAGE_SKIP_ALL_S4) {
723 DRM_ERROR("invalid skip flags 0x%04x\n", skip);
724 return DRM_ERR(EINVAL);
726 vtx_size = 10; /* full vertex */
729 vtx_size -= (skip & 1) + (skip >> 1 & 1) +
730 (skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) +
731 (skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1);
733 if (vtx_size > vb_stride) {
734 DRM_ERROR("vertex size greater than vb stride (%u > %u)\n",
735 vtx_size, vb_stride);
736 return DRM_ERR(EINVAL);
741 /* Can emit up to 255 vertices (85 triangles) at once. */
742 unsigned int count = n > 255 ? 255 : n;
745 for (i = 0; i < count; ++i) {
746 if (idx[i] > vb_size / (vb_stride*4)) {
747 DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
748 i, idx[i], vb_size / (vb_stride*4));
749 return DRM_ERR(EINVAL);
754 /* Need to reorder vertices for correct flat
755 * shading while preserving the clock sense
756 * for correct culling. Only on Savage3D. */
757 int reorder[3] = {2, -1, -1};
759 BEGIN_DMA(count*vtx_size+1);
760 DMA_DRAW_PRIMITIVE(count, prim, skip);
762 for (i = 0; i < count; ++i) {
763 unsigned int j = idx[i + reorder[i % 3]];
764 DMA_COPY(&vtxbuf[vb_stride*j], vtx_size);
769 BEGIN_DMA(count*vtx_size+1);
770 DMA_DRAW_PRIMITIVE(count, prim, skip);
772 for (i = 0; i < count; ++i) {
773 unsigned int j = idx[i];
774 DMA_COPY(&vtxbuf[vb_stride*j], vtx_size);
783 prim |= BCI_CMD_DRAW_CONT;
789 static int savage_dispatch_clear(drm_savage_private_t *dev_priv,
790 const drm_savage_cmd_header_t *cmd_header,
791 const drm_savage_cmd_header_t *data,
793 const drm_clip_rect_t *boxes)
795 unsigned int flags = cmd_header->clear0.flags;
796 unsigned int clear_cmd;
797 unsigned int i, nbufs;
803 clear_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP |
804 BCI_CMD_SEND_COLOR | BCI_CMD_DEST_PBD_NEW;
805 BCI_CMD_SET_ROP(clear_cmd,0xCC);
807 nbufs = ((flags & SAVAGE_FRONT) ? 1 : 0) +
808 ((flags & SAVAGE_BACK) ? 1 : 0) + ((flags & SAVAGE_DEPTH) ? 1 : 0);
812 if (data->clear1.mask != 0xffffffff) {
815 DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1);
816 DMA_WRITE(data->clear1.mask);
819 for (i = 0; i < nbox; ++i) {
820 unsigned int x, y, w, h;
823 x = boxes[i].x1, y = boxes[i].y1;
824 w = boxes[i].x2 - boxes[i].x1;
825 h = boxes[i].y2 - boxes[i].y1;
827 for (buf = SAVAGE_FRONT; buf <= SAVAGE_DEPTH; buf <<= 1) {
830 DMA_WRITE(clear_cmd);
833 DMA_WRITE(dev_priv->front_offset);
834 DMA_WRITE(dev_priv->front_bd);
837 DMA_WRITE(dev_priv->back_offset);
838 DMA_WRITE(dev_priv->back_bd);
841 DMA_WRITE(dev_priv->depth_offset);
842 DMA_WRITE(dev_priv->depth_bd);
845 DMA_WRITE(data->clear1.value);
846 DMA_WRITE(BCI_X_Y(x, y));
847 DMA_WRITE(BCI_W_H(w, h));
851 if (data->clear1.mask != 0xffffffff) {
854 DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1);
855 DMA_WRITE(0xffffffff);
862 static int savage_dispatch_swap(drm_savage_private_t *dev_priv,
863 unsigned int nbox, const drm_clip_rect_t *boxes)
865 unsigned int swap_cmd;
872 swap_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP |
873 BCI_CMD_SRC_PBD_COLOR_NEW | BCI_CMD_DEST_GBD;
874 BCI_CMD_SET_ROP(swap_cmd,0xCC);
876 for (i = 0; i < nbox; ++i) {
879 DMA_WRITE(dev_priv->back_offset);
880 DMA_WRITE(dev_priv->back_bd);
881 DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1));
882 DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1));
883 DMA_WRITE(BCI_W_H(boxes[i].x2-boxes[i].x1,
884 boxes[i].y2-boxes[i].y1));
891 static int savage_dispatch_draw(drm_savage_private_t *dev_priv,
892 const drm_savage_cmd_header_t *start,
893 const drm_savage_cmd_header_t *end,
894 const drm_buf_t *dmabuf,
895 const unsigned int *vtxbuf,
896 unsigned int vb_size, unsigned int vb_stride,
898 const drm_clip_rect_t *boxes)
903 for (i = 0; i < nbox; ++i) {
904 const drm_savage_cmd_header_t *cmdbuf;
905 dev_priv->emit_clip_rect(dev_priv, &boxes[i]);
908 while (cmdbuf < end) {
909 drm_savage_cmd_header_t cmd_header;
910 cmd_header = *cmdbuf;
912 switch (cmd_header.cmd.cmd) {
913 case SAVAGE_CMD_DMA_PRIM:
914 ret = savage_dispatch_dma_prim(
915 dev_priv, &cmd_header, dmabuf);
917 case SAVAGE_CMD_VB_PRIM:
918 ret = savage_dispatch_vb_prim(
919 dev_priv, &cmd_header,
920 vtxbuf, vb_size, vb_stride);
922 case SAVAGE_CMD_DMA_IDX:
923 j = (cmd_header.idx.count + 3) / 4;
924 /* j was check in savage_bci_cmdbuf */
925 ret = savage_dispatch_dma_idx(dev_priv,
926 &cmd_header, (const uint16_t *)cmdbuf,
930 case SAVAGE_CMD_VB_IDX:
931 j = (cmd_header.idx.count + 3) / 4;
932 /* j was check in savage_bci_cmdbuf */
933 ret = savage_dispatch_vb_idx(dev_priv,
934 &cmd_header, (const uint16_t *)cmdbuf,
935 (const uint32_t *)vtxbuf, vb_size,
940 /* What's the best return code? EFAULT? */
941 DRM_ERROR("IMPLEMENTATION ERROR: "
942 "non-drawing-command %d\n",
944 return DRM_ERR(EINVAL);
955 int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
958 drm_savage_private_t *dev_priv = dev->dev_private;
959 drm_device_dma_t *dma = dev->dma;
961 drm_savage_cmdbuf_t cmdbuf;
962 drm_savage_cmd_header_t *kcmd_addr = NULL;
963 drm_savage_cmd_header_t *first_draw_cmd;
964 unsigned int *kvb_addr = NULL;
965 drm_clip_rect_t *kbox_addr = NULL;
971 LOCK_TEST_WITH_RETURN(dev, filp);
973 DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_savage_cmdbuf_t __user *)data,
976 if (dma && dma->buflist) {
977 if (cmdbuf.dma_idx > dma->buf_count) {
979 ("vertex buffer index %u out of range (0-%u)\n",
980 cmdbuf.dma_idx, dma->buf_count-1);
981 return DRM_ERR(EINVAL);
983 dmabuf = dma->buflist[cmdbuf.dma_idx];
988 /* Copy the user buffers into kernel temporary areas. This hasn't been
989 * a performance loss compared to VERIFYAREA_READ/
990 * COPY_FROM_USER_UNCHECKED when done in other drivers, and is correct
991 * for locking on FreeBSD.
994 kcmd_addr = drm_alloc(cmdbuf.size * 8, DRM_MEM_DRIVER);
995 if (kcmd_addr == NULL)
996 return DRM_ERR(ENOMEM);
998 if (DRM_COPY_FROM_USER(kcmd_addr, cmdbuf.cmd_addr,
1001 drm_free(kcmd_addr, cmdbuf.size * 8, DRM_MEM_DRIVER);
1002 return DRM_ERR(EFAULT);
1004 cmdbuf.cmd_addr = kcmd_addr;
1006 if (cmdbuf.vb_size) {
1007 kvb_addr = drm_alloc(cmdbuf.vb_size, DRM_MEM_DRIVER);
1008 if (kvb_addr == NULL) {
1009 ret = DRM_ERR(ENOMEM);
1013 if (DRM_COPY_FROM_USER(kvb_addr, cmdbuf.vb_addr,
1015 ret = DRM_ERR(EFAULT);
1018 cmdbuf.vb_addr = kvb_addr;
1021 kbox_addr = drm_alloc(cmdbuf.nbox * sizeof(drm_clip_rect_t),
1023 if (kbox_addr == NULL) {
1024 ret = DRM_ERR(ENOMEM);
1028 if (DRM_COPY_FROM_USER(kbox_addr, cmdbuf.box_addr,
1029 cmdbuf.nbox * sizeof(drm_clip_rect_t))) {
1030 ret = DRM_ERR(EFAULT);
1033 cmdbuf.box_addr = kbox_addr;
1036 /* Make sure writes to DMA buffers are finished before sending
1037 * DMA commands to the graphics hardware. */
1038 DRM_MEMORYBARRIER();
1040 /* Coming from user space. Don't know if the Xserver has
1041 * emitted wait commands. Assuming the worst. */
1042 dev_priv->waiting = 1;
1045 first_draw_cmd = NULL;
1046 while (i < cmdbuf.size) {
1047 drm_savage_cmd_header_t cmd_header;
1048 cmd_header = *(drm_savage_cmd_header_t *)cmdbuf.cmd_addr;
1052 /* Group drawing commands with same state to minimize
1053 * iterations over clip rects. */
1055 switch (cmd_header.cmd.cmd) {
1056 case SAVAGE_CMD_DMA_IDX:
1057 case SAVAGE_CMD_VB_IDX:
1058 j = (cmd_header.idx.count + 3) / 4;
1059 if (i + j > cmdbuf.size) {
1060 DRM_ERROR("indexed drawing command extends "
1061 "beyond end of command buffer\n");
1063 return DRM_ERR(EINVAL);
1066 case SAVAGE_CMD_DMA_PRIM:
1067 case SAVAGE_CMD_VB_PRIM:
1068 if (!first_draw_cmd)
1069 first_draw_cmd = cmdbuf.cmd_addr-1;
1070 cmdbuf.cmd_addr += j;
1074 if (first_draw_cmd) {
1075 ret = savage_dispatch_draw (
1076 dev_priv, first_draw_cmd,
1078 dmabuf, cmdbuf.vb_addr, cmdbuf.vb_size,
1080 cmdbuf.nbox, cmdbuf.box_addr);
1083 first_draw_cmd = NULL;
1089 switch (cmd_header.cmd.cmd) {
1090 case SAVAGE_CMD_STATE:
1091 j = (cmd_header.state.count + 1) / 2;
1092 if (i + j > cmdbuf.size) {
1093 DRM_ERROR("command SAVAGE_CMD_STATE extends "
1094 "beyond end of command buffer\n");
1096 ret = DRM_ERR(EINVAL);
1099 ret = savage_dispatch_state(dev_priv, &cmd_header,
1100 (const uint32_t *)cmdbuf.cmd_addr);
1101 cmdbuf.cmd_addr += j;
1104 case SAVAGE_CMD_CLEAR:
1105 if (i + 1 > cmdbuf.size) {
1106 DRM_ERROR("command SAVAGE_CMD_CLEAR extends "
1107 "beyond end of command buffer\n");
1109 ret = DRM_ERR(EINVAL);
1112 ret = savage_dispatch_clear(dev_priv, &cmd_header,
1114 cmdbuf.nbox, cmdbuf.box_addr);
1118 case SAVAGE_CMD_SWAP:
1119 ret = savage_dispatch_swap(dev_priv, cmdbuf.nbox,
1123 DRM_ERROR("invalid command 0x%x\n", cmd_header.cmd.cmd);
1125 ret = DRM_ERR(EINVAL);
1135 if (first_draw_cmd) {
1136 ret = savage_dispatch_draw (
1137 dev_priv, first_draw_cmd, cmdbuf.cmd_addr, dmabuf,
1138 cmdbuf.vb_addr, cmdbuf.vb_size, cmdbuf.vb_stride,
1139 cmdbuf.nbox, cmdbuf.box_addr);
1148 if (dmabuf && cmdbuf.discard) {
1149 drm_savage_buf_priv_t *buf_priv = dmabuf->dev_private;
1151 event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D);
1152 SET_AGE(&buf_priv->age, event, dev_priv->event_wrap);
1153 savage_freelist_put(dev, dmabuf);
1157 /* If we didn't need to allocate them, these'll be NULL */
1158 drm_free(kcmd_addr, cmdbuf.size * 8, DRM_MEM_DRIVER);
1159 drm_free(kvb_addr, cmdbuf.vb_size, DRM_MEM_DRIVER);
1160 drm_free(kbox_addr, cmdbuf.nbox * sizeof(drm_clip_rect_t),