
r600g: don't emit surface_sync after FLUSH_AND_INV_EVENT
[android-x86/external-mesa.git] / src/gallium/drivers/r600/r600_hw_context.c
/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Jerome Glisse
 */
#include "r600_pipe.h"
#include "r600d.h"
#include "util/u_memory.h"
#include <errno.h>
#include <unistd.h>

/* Get backends mask */
void r600_get_backend_mask(struct r600_context *ctx)
{
        struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
        struct r600_resource *buffer;
        uint32_t *results;
        unsigned num_backends = ctx->screen->info.r600_num_backends;
        unsigned i, mask = 0;
        uint64_t va;

        /* if backend_map query is supported by the kernel */
        if (ctx->screen->info.r600_backend_map_valid) {
                unsigned num_tile_pipes = ctx->screen->info.r600_num_tile_pipes;
                unsigned backend_map = ctx->screen->info.r600_backend_map;
                unsigned item_width, item_mask;

                if (ctx->chip_class >= EVERGREEN) {
                        item_width = 4;
                        item_mask = 0x7;
                } else {
                        item_width = 2;
                        item_mask = 0x3;
                }

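                /* backend_map packs one backend index per tile pipe: 2 bits per
                 * entry on R6xx/R7xx, 4 bits (3 significant) on Evergreen+.
                 * Walk the fields and set a bit for every backend we see. */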
                while(num_tile_pipes--) {
                        i = backend_map & item_mask;
                        mask |= (1<<i);
                        backend_map >>= item_width;
                }
                if (mask != 0) {
                        ctx->backend_mask = mask;
                        return;
                }
        }

        /* otherwise backup path for older kernels */

        /* create buffer for event data */
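        /* (each DB writes a 16-byte ZPASS result slot, hence max_db * 16 bytes) */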
        buffer = (struct r600_resource*)
                pipe_buffer_create(&ctx->screen->screen, PIPE_BIND_CUSTOM,
                                   PIPE_USAGE_STAGING, ctx->max_db*16);
        if (!buffer)
                goto err;
        va = r600_resource_va(&ctx->screen->screen, (void*)buffer);

        /* initialize buffer with zeroes */
        results = r600_buffer_mmap_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
        if (results) {
                memset(results, 0, ctx->max_db * 4 * 4);
                ctx->ws->buffer_unmap(buffer->cs_buf);

                /* emit EVENT_WRITE for ZPASS_DONE */
                cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
                cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1);
                cs->buf[cs->cdw++] = va;
                cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF;

                cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
                cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, &ctx->rings.gfx, buffer, RADEON_USAGE_WRITE);

                /* analyze results */
                results = r600_buffer_mmap_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ);
                if (results) {
                        for(i = 0; i < ctx->max_db; i++) {
                                /* at least the highest bit will be set if the backend is used */
                                if (results[i*4 + 1])
                                        mask |= (1<<i);
                        }
                        ctx->ws->buffer_unmap(buffer->cs_buf);
                }
        }

        pipe_resource_reference((struct pipe_resource**)&buffer, NULL);

        if (mask != 0) {
                ctx->backend_mask = mask;
                return;
        }

err:
        /* fallback to old method - set the lowest num_backends bits to 1 */
        ctx->backend_mask = (~((uint32_t)0))>>(32-num_backends);
        return;
}

void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw,
                        boolean count_draw_in)
{
        if (!ctx->ws->cs_memory_below_limit(ctx->rings.gfx.cs, ctx->vram, ctx->gtt)) {
                ctx->gtt = 0;
                ctx->vram = 0;
                ctx->rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC);
                return;
        }
        /* all will be accounted for once the relocations are emitted */
        ctx->gtt = 0;
        ctx->vram = 0;

        /* The number of dwords we already used in the CS so far. */
        num_dw += ctx->rings.gfx.cs->cdw;

        if (count_draw_in) {
                unsigned i;

                /* The number of dwords all the dirty states would take. */
                for (i = 0; i < R600_NUM_ATOMS; i++) {
                        if (ctx->atoms[i] && ctx->atoms[i]->dirty) {
                                num_dw += ctx->atoms[i]->num_dw;
                                if (ctx->screen->trace_bo) {
                                        num_dw += R600_TRACE_CS_DWORDS;
                                }
                        }
                }

                /* The upper-bound of how much space a draw command would take. */
                num_dw += R600_MAX_FLUSH_CS_DWORDS + R600_MAX_DRAW_CS_DWORDS;
                if (ctx->screen->trace_bo) {
                        num_dw += R600_TRACE_CS_DWORDS;
                }
        }

        /* Count in queries_suspend. */
        num_dw += ctx->num_cs_dw_nontimer_queries_suspend;

        /* Count in streamout_end at the end of CS. */
        if (ctx->streamout.begin_emitted) {
                num_dw += ctx->streamout.num_dw_for_end;
        }

        /* Count in render_condition(NULL) at the end of CS. */
        if (ctx->predicate_drawing) {
                num_dw += 3;
        }

        /* SX_MISC */
        if (ctx->chip_class <= R700) {
                num_dw += 3;
        }

        /* Count in framebuffer cache flushes at the end of CS. */
        num_dw += R600_MAX_FLUSH_CS_DWORDS;

        /* The fence at the end of CS. */
        num_dw += 10;
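        /* (those 10 dwords match what r600_context_emit_fence emits) */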

        /* Flush if there's not enough space. */
        if (num_dw > RADEON_MAX_CMDBUF_DWORDS) {
                ctx->rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC);
        }
}

void r600_flush_emit(struct r600_context *rctx)
{
        struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
        unsigned cp_coher_cntl = 0;
        unsigned wait_until = 0;
        unsigned emit_flush = 0;

        if (!rctx->flags) {
                return;
        }

        if (rctx->flags & R600_CONTEXT_WAIT_3D_IDLE) {
                wait_until |= S_008040_WAIT_3D_IDLE(1);
        }
        if (rctx->flags & R600_CONTEXT_WAIT_CP_DMA_IDLE) {
                wait_until |= S_008040_WAIT_CP_DMA_IDLE(1);
        }

        if (wait_until) {
                /* Use of WAIT_UNTIL is deprecated on Cayman+ */
                if (rctx->family >= CHIP_CAYMAN) {
                        /* emit a PS partial flush on Cayman/TN */
                        rctx->flags |= R600_CONTEXT_PS_PARTIAL_FLUSH;
                }
        }

        if (rctx->flags & R600_CONTEXT_PS_PARTIAL_FLUSH) {
                cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
                cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4);
        }

        if (rctx->chip_class >= R700 &&
            (rctx->flags & R600_CONTEXT_FLUSH_AND_INV_CB_META)) {
                cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
                cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0);
        }

        if (rctx->chip_class >= R700 &&
            (rctx->flags & R600_CONTEXT_FLUSH_AND_INV_DB_META)) {
                cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
                cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0);
        }

        if (rctx->flags & R600_CONTEXT_FLUSH_AND_INV) {
                cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
                cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0);
        }

        if (rctx->flags & R600_CONTEXT_INVAL_READ_CACHES) {
                cp_coher_cntl |= S_0085F0_VC_ACTION_ENA(1) |
                                S_0085F0_TC_ACTION_ENA(1) |
                                S_0085F0_FULL_CACHE_ENA(1);
                emit_flush = 1;
        }

        if (rctx->flags & R600_CONTEXT_STREAMOUT_FLUSH) {
                cp_coher_cntl |= S_0085F0_SO0_DEST_BASE_ENA(1) |
                                S_0085F0_SO1_DEST_BASE_ENA(1) |
                                S_0085F0_SO2_DEST_BASE_ENA(1) |
                                S_0085F0_SO3_DEST_BASE_ENA(1) |
                                S_0085F0_SMX_ACTION_ENA(1);
                emit_flush = 1;
        }

        if (emit_flush) {
                cs->buf[cs->cdw++] = PKT3(PKT3_SURFACE_SYNC, 3, 0);
                cs->buf[cs->cdw++] = cp_coher_cntl;   /* CP_COHER_CNTL */
                cs->buf[cs->cdw++] = 0xffffffff;      /* CP_COHER_SIZE */
                cs->buf[cs->cdw++] = 0;               /* CP_COHER_BASE */
                cs->buf[cs->cdw++] = 0x0000000A;      /* POLL_INTERVAL */
        }

        if (wait_until) {
                /* Use of WAIT_UNTIL is deprecated on Cayman+ */
                if (rctx->family < CHIP_CAYMAN) {
                        /* wait for things to settle */
                        r600_write_config_reg(cs, R_008040_WAIT_UNTIL, wait_until);
                }
        }

        /* everything is properly flushed */
        rctx->flags = 0;
}

void r600_context_flush(struct r600_context *ctx, unsigned flags)
{
        struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;

        if (cs->cdw == ctx->start_cs_cmd.num_dw)
                return;

        ctx->nontimer_queries_suspended = false;
        ctx->streamout.suspended = false;

        /* suspend queries */
        if (ctx->num_cs_dw_nontimer_queries_suspend) {
                r600_suspend_nontimer_queries(ctx);
                ctx->nontimer_queries_suspended = true;
        }

        if (ctx->streamout.begin_emitted) {
                r600_emit_streamout_end(ctx);
                ctx->streamout.suspended = true;
        }

        /* flush is needed to avoid lockups on some chips with user fences
         * this will also flush the framebuffer cache
         */
        ctx->flags |= R600_CONTEXT_FLUSH_AND_INV |
                      R600_CONTEXT_FLUSH_AND_INV_CB_META |
                      R600_CONTEXT_FLUSH_AND_INV_DB_META |
                      R600_CONTEXT_WAIT_3D_IDLE |
                      R600_CONTEXT_WAIT_CP_DMA_IDLE;

        r600_flush_emit(ctx);

        /* old kernels and userspace don't set SX_MISC, so we must reset it to 0 here */
        if (ctx->chip_class <= R700) {
                r600_write_context_reg(cs, R_028350_SX_MISC, 0);
        }

        /* force keeping the tiling flags */
        if (ctx->keep_tiling_flags) {
                flags |= RADEON_FLUSH_KEEP_TILING_FLAGS;
        }

        /* Flush the CS. */
        ctx->ws->cs_flush(ctx->rings.gfx.cs, flags, ctx->screen->cs_count++);
}

void r600_begin_new_cs(struct r600_context *ctx)
{
        unsigned shader;

        ctx->flags = 0;
        ctx->gtt = 0;
        ctx->vram = 0;

        /* Begin a new CS. */
        r600_emit_command_buffer(ctx->rings.gfx.cs, &ctx->start_cs_cmd);

        /* Re-emit states. */
        ctx->alphatest_state.atom.dirty = true;
        ctx->blend_color.atom.dirty = true;
        ctx->cb_misc_state.atom.dirty = true;
        ctx->clip_misc_state.atom.dirty = true;
        ctx->clip_state.atom.dirty = true;
        ctx->db_misc_state.atom.dirty = true;
        ctx->db_state.atom.dirty = true;
        ctx->framebuffer.atom.dirty = true;
        ctx->pixel_shader.atom.dirty = true;
        ctx->poly_offset_state.atom.dirty = true;
        ctx->vgt_state.atom.dirty = true;
        ctx->sample_mask.atom.dirty = true;
        ctx->scissor.atom.dirty = true;
        ctx->config_state.atom.dirty = true;
        ctx->stencil_ref.atom.dirty = true;
        ctx->vertex_fetch_shader.atom.dirty = true;
        ctx->vertex_shader.atom.dirty = true;
        ctx->viewport.atom.dirty = true;

        if (ctx->blend_state.cso)
                ctx->blend_state.atom.dirty = true;
        if (ctx->dsa_state.cso)
                ctx->dsa_state.atom.dirty = true;
        if (ctx->rasterizer_state.cso)
                ctx->rasterizer_state.atom.dirty = true;

        if (ctx->chip_class <= R700) {
                ctx->seamless_cube_map.atom.dirty = true;
        }

        ctx->vertex_buffer_state.dirty_mask = ctx->vertex_buffer_state.enabled_mask;
        r600_vertex_buffers_dirty(ctx);

        /* Re-emit shader resources. */
        for (shader = 0; shader < PIPE_SHADER_TYPES; shader++) {
                struct r600_constbuf_state *constbuf = &ctx->constbuf_state[shader];
                struct r600_textures_info *samplers = &ctx->samplers[shader];

                constbuf->dirty_mask = constbuf->enabled_mask;
                samplers->views.dirty_mask = samplers->views.enabled_mask;
                samplers->states.dirty_mask = samplers->states.enabled_mask;

                r600_constant_buffers_dirty(ctx, constbuf);
                r600_sampler_views_dirty(ctx, &samplers->views);
                r600_sampler_states_dirty(ctx, &samplers->states);
        }

        if (ctx->streamout.suspended) {
                ctx->streamout.append_bitmask = ctx->streamout.enabled_mask;
                r600_streamout_buffers_dirty(ctx);
        }

        /* resume queries */
        if (ctx->nontimer_queries_suspended) {
                r600_resume_nontimer_queries(ctx);
        }

        /* Re-emit the draw state. */
        ctx->last_primitive_type = -1;
        ctx->last_start_instance = -1;
}

void r600_context_emit_fence(struct r600_context *ctx, struct r600_resource *fence_bo, unsigned offset, unsigned value)
{
        struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
        uint64_t va;

        r600_need_cs_space(ctx, 10, FALSE);

        va = r600_resource_va(&ctx->screen->screen, (void*)fence_bo);
        va = va + (offset << 2);

        /* Use of WAIT_UNTIL is deprecated on Cayman+ */
        if (ctx->family >= CHIP_CAYMAN) {
                cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
                cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4);
        } else {
                r600_write_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1));
        }

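        /* EVENT_WRITE_EOP writes the 32-bit fence value to memory once the event
         * completes: (1 << 29) selects a 32-bit data write (DATA_SEL) and
         * (0 << 24) leaves the fence interrupt disabled (INT_EN). */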
        cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE_EOP, 4, 0);
        cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5);
        cs->buf[cs->cdw++] = va & 0xFFFFFFFFUL;       /* ADDRESS_LO */
        /* DATA_SEL | INT_EN | ADDRESS_HI */
        cs->buf[cs->cdw++] = (1 << 29) | (0 << 24) | ((va >> 32UL) & 0xFF);
        cs->buf[cs->cdw++] = value;                   /* DATA_LO */
        cs->buf[cs->cdw++] = 0;                       /* DATA_HI */
        cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
        cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, &ctx->rings.gfx, fence_bo, RADEON_USAGE_WRITE);
}

static void r600_flush_vgt_streamout(struct r600_context *ctx)
{
        struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;

        r600_write_config_reg(cs, R_008490_CP_STRMOUT_CNTL, 0);

        cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
        cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_SO_VGTSTREAMOUT_FLUSH) | EVENT_INDEX(0);

        cs->buf[cs->cdw++] = PKT3(PKT3_WAIT_REG_MEM, 5, 0);
        cs->buf[cs->cdw++] = WAIT_REG_MEM_EQUAL; /* wait until the register is equal to the reference value */
        cs->buf[cs->cdw++] = R_008490_CP_STRMOUT_CNTL >> 2;  /* register */
        cs->buf[cs->cdw++] = 0;
        cs->buf[cs->cdw++] = S_008490_OFFSET_UPDATE_DONE(1); /* reference value */
        cs->buf[cs->cdw++] = S_008490_OFFSET_UPDATE_DONE(1); /* mask */
        cs->buf[cs->cdw++] = 4; /* poll interval */
}

static void r600_set_streamout_enable(struct r600_context *ctx, unsigned buffer_enable_bit)
{
        struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;

        if (buffer_enable_bit) {
                r600_write_context_reg(cs, R_028AB0_VGT_STRMOUT_EN, S_028AB0_STREAMOUT(1));
                r600_write_context_reg(cs, R_028B20_VGT_STRMOUT_BUFFER_EN, buffer_enable_bit);
        } else {
                r600_write_context_reg(cs, R_028AB0_VGT_STRMOUT_EN, S_028AB0_STREAMOUT(0));
        }
}

void r600_emit_streamout_begin(struct r600_context *ctx, struct r600_atom *atom)
{
        struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
        struct r600_so_target **t = ctx->streamout.targets;
        unsigned *stride_in_dw = ctx->vs_shader->so.stride;
        unsigned i, update_flags = 0;
        uint64_t va;

        if (ctx->chip_class >= EVERGREEN) {
                evergreen_flush_vgt_streamout(ctx);
                evergreen_set_streamout_enable(ctx, ctx->streamout.enabled_mask);
        } else {
                r600_flush_vgt_streamout(ctx);
                r600_set_streamout_enable(ctx, ctx->streamout.enabled_mask);
        }

        for (i = 0; i < ctx->streamout.num_targets; i++) {
                if (t[i]) {
                        t[i]->stride_in_dw = stride_in_dw[i];
                        t[i]->so_index = i;
                        va = r600_resource_va(&ctx->screen->screen,
                                              (void*)t[i]->b.buffer);

                        update_flags |= SURFACE_BASE_UPDATE_STRMOUT(i);

                        r600_write_context_reg_seq(cs, R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16*i, 3);
                        r600_write_value(cs, (t[i]->b.buffer_offset +
                                              t[i]->b.buffer_size) >> 2); /* BUFFER_SIZE (in DW) */
                        r600_write_value(cs, stride_in_dw[i]);            /* VTX_STRIDE (in DW) */
                        r600_write_value(cs, va >> 8);                    /* BUFFER_BASE */

                        cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
                        cs->buf[cs->cdw++] =
                                r600_context_bo_reloc(ctx, &ctx->rings.gfx, r600_resource(t[i]->b.buffer),
                                                      RADEON_USAGE_WRITE);

                        /* R7xx requires this packet after updating BUFFER_BASE.
                         * Without this, R7xx locks up. */
                        if (ctx->family >= CHIP_RS780 && ctx->family <= CHIP_RV740) {
                                cs->buf[cs->cdw++] = PKT3(PKT3_STRMOUT_BASE_UPDATE, 1, 0);
                                cs->buf[cs->cdw++] = i;
                                cs->buf[cs->cdw++] = va >> 8;

                                cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
                                cs->buf[cs->cdw++] =
                                        r600_context_bo_reloc(ctx, &ctx->rings.gfx, r600_resource(t[i]->b.buffer),
                                                              RADEON_USAGE_WRITE);
                        }

                        if (ctx->streamout.append_bitmask & (1 << i)) {
                                va = r600_resource_va(&ctx->screen->screen,
                                                      (void*)t[i]->buf_filled_size) + t[i]->buf_filled_size_offset;
                                /* Append. */
                                cs->buf[cs->cdw++] = PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0);
                                cs->buf[cs->cdw++] = STRMOUT_SELECT_BUFFER(i) |
                                                     STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_MEM); /* control */
                                cs->buf[cs->cdw++] = 0; /* unused */
                                cs->buf[cs->cdw++] = 0; /* unused */
                                cs->buf[cs->cdw++] = va & 0xFFFFFFFFUL; /* src address lo */
                                cs->buf[cs->cdw++] = (va >> 32UL) & 0xFFUL; /* src address hi */

                                cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
                                cs->buf[cs->cdw++] =
                                        r600_context_bo_reloc(ctx, &ctx->rings.gfx, t[i]->buf_filled_size,
                                                              RADEON_USAGE_READ);
                        } else {
                                /* Start from the beginning. */
                                cs->buf[cs->cdw++] = PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0);
                                cs->buf[cs->cdw++] = STRMOUT_SELECT_BUFFER(i) |
                                                     STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_PACKET); /* control */
                                cs->buf[cs->cdw++] = 0; /* unused */
                                cs->buf[cs->cdw++] = 0; /* unused */
                                cs->buf[cs->cdw++] = t[i]->b.buffer_offset >> 2; /* buffer offset in DW */
                                cs->buf[cs->cdw++] = 0; /* unused */
                        }
                }
        }

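        /* Families strictly between R600 and RV770 also flag the updated streamout
         * bases with a SURFACE_BASE_UPDATE packet. */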
        if (ctx->family > CHIP_R600 && ctx->family < CHIP_RV770) {
                cs->buf[cs->cdw++] = PKT3(PKT3_SURFACE_BASE_UPDATE, 0, 0);
                cs->buf[cs->cdw++] = update_flags;
        }
        ctx->streamout.begin_emitted = true;
}

void r600_emit_streamout_end(struct r600_context *ctx)
{
        struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
        struct r600_so_target **t = ctx->streamout.targets;
        unsigned i;
        uint64_t va;

        if (ctx->chip_class >= EVERGREEN) {
                evergreen_flush_vgt_streamout(ctx);
        } else {
                r600_flush_vgt_streamout(ctx);
        }

        for (i = 0; i < ctx->streamout.num_targets; i++) {
                if (t[i]) {
                        va = r600_resource_va(&ctx->screen->screen,
                                              (void*)t[i]->buf_filled_size) + t[i]->buf_filled_size_offset;
                        cs->buf[cs->cdw++] = PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0);
                        cs->buf[cs->cdw++] = STRMOUT_SELECT_BUFFER(i) |
                                             STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_NONE) |
                                             STRMOUT_STORE_BUFFER_FILLED_SIZE; /* control */
                        cs->buf[cs->cdw++] = va & 0xFFFFFFFFUL;     /* dst address lo */
                        cs->buf[cs->cdw++] = (va >> 32UL) & 0xFFUL; /* dst address hi */
                        cs->buf[cs->cdw++] = 0; /* unused */
                        cs->buf[cs->cdw++] = 0; /* unused */

                        cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
                        cs->buf[cs->cdw++] =
                                r600_context_bo_reloc(ctx, &ctx->rings.gfx, t[i]->buf_filled_size,
                                                      RADEON_USAGE_WRITE);
                }
        }

        if (ctx->chip_class >= EVERGREEN) {
                ctx->flags |= R600_CONTEXT_STREAMOUT_FLUSH;
                evergreen_set_streamout_enable(ctx, 0);
        } else {
                if (ctx->chip_class >= R700) {
                        ctx->flags |= R600_CONTEXT_STREAMOUT_FLUSH;
                }
                r600_set_streamout_enable(ctx, 0);
        }
        ctx->flags |= R600_CONTEXT_WAIT_3D_IDLE | R600_CONTEXT_FLUSH_AND_INV;
        ctx->streamout.begin_emitted = false;
}

/* The max number of bytes to copy per packet. */
#define CP_DMA_MAX_BYTE_COUNT ((1 << 21) - 8)
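/* (BYTE_COUNT is a 21-bit field in the CP DMA packet; a little headroom is
 * kept below 1 << 21, presumably to keep the count nicely aligned.) */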

void r600_cp_dma_copy_buffer(struct r600_context *rctx,
                             struct pipe_resource *dst, uint64_t dst_offset,
                             struct pipe_resource *src, uint64_t src_offset,
                             unsigned size)
{
        struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;

        assert(size);
        assert(rctx->screen->has_cp_dma);

        dst_offset += r600_resource_va(&rctx->screen->screen, dst);
        src_offset += r600_resource_va(&rctx->screen->screen, src);

        /* We flush the caches, because we might read from or write
         * to resources which are bound right now. */
        rctx->flags |= R600_CONTEXT_INVAL_READ_CACHES |
                       R600_CONTEXT_FLUSH_AND_INV |
                       R600_CONTEXT_FLUSH_AND_INV_CB_META |
                       R600_CONTEXT_FLUSH_AND_INV_DB_META |
                       R600_CONTEXT_STREAMOUT_FLUSH |
                       R600_CONTEXT_WAIT_3D_IDLE;

        /* There are differences between R700 and EG in CP DMA,
         * but we only use the common bits here. */
        while (size) {
                unsigned sync = 0;
                unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT);
                unsigned src_reloc, dst_reloc;

                r600_need_cs_space(rctx, 10 + (rctx->flags ? R600_MAX_FLUSH_CS_DWORDS : 0), FALSE);

                /* Flush the caches for the first copy only. */
                if (rctx->flags) {
                        r600_flush_emit(rctx);
                }

                /* Do the synchronization after the last copy, so that all data is written to memory. */
                if (size == byte_count) {
                        sync = PKT3_CP_DMA_CP_SYNC;
                }

                /* This must be done after r600_need_cs_space. */
                src_reloc = r600_context_bo_reloc(rctx, &rctx->rings.gfx, (struct r600_resource*)src, RADEON_USAGE_READ);
                dst_reloc = r600_context_bo_reloc(rctx, &rctx->rings.gfx, (struct r600_resource*)dst, RADEON_USAGE_WRITE);

                r600_write_value(cs, PKT3(PKT3_CP_DMA, 4, 0));
                r600_write_value(cs, src_offset);       /* SRC_ADDR_LO [31:0] */
                r600_write_value(cs, sync | ((src_offset >> 32) & 0xff));       /* CP_SYNC [31] | SRC_ADDR_HI [7:0] */
                r600_write_value(cs, dst_offset);       /* DST_ADDR_LO [31:0] */
                r600_write_value(cs, (dst_offset >> 32) & 0xff);                /* DST_ADDR_HI [7:0] */
                r600_write_value(cs, byte_count);       /* COMMAND [29:22] | BYTE_COUNT [20:0] */

                r600_write_value(cs, PKT3(PKT3_NOP, 0, 0));
                r600_write_value(cs, src_reloc);
                r600_write_value(cs, PKT3(PKT3_NOP, 0, 0));
                r600_write_value(cs, dst_reloc);

                size -= byte_count;
                src_offset += byte_count;
                dst_offset += byte_count;
        }

        /* Invalidate the read caches. */
        rctx->flags |= R600_CONTEXT_INVAL_READ_CACHES;

        util_range_add(&r600_resource(dst)->valid_buffer_range, dst_offset,
                       dst_offset + size);
}

void r600_need_dma_space(struct r600_context *ctx, unsigned num_dw)
{
        /* The number of dwords we already used in the DMA so far. */
        num_dw += ctx->rings.dma.cs->cdw;
        /* Flush if there's not enough space. */
        if (num_dw > RADEON_MAX_CMDBUF_DWORDS) {
                ctx->rings.dma.flush(ctx, RADEON_FLUSH_ASYNC);
        }
}

void r600_dma_copy(struct r600_context *rctx,
                struct pipe_resource *dst,
                struct pipe_resource *src,
                uint64_t dst_offset,
                uint64_t src_offset,
                uint64_t size)
{
        struct radeon_winsys_cs *cs = rctx->rings.dma.cs;
        unsigned i, ncopy, csize, shift;
        struct r600_resource *rdst = (struct r600_resource*)dst;
        struct r600_resource *rsrc = (struct r600_resource*)src;

        /* make sure the DMA ring is the only one active */
        rctx->rings.gfx.flush(rctx, RADEON_FLUSH_ASYNC);

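        /* The DMA copy packet works on dwords; convert the size and split the
         * copy into chunks of at most 0xffff dwords each. */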
        size >>= 2;
        shift = 2;
        ncopy = (size / 0xffff) + !!(size % 0xffff);

        r600_need_dma_space(rctx, ncopy * 5);
        for (i = 0; i < ncopy; i++) {
                csize = size < 0xffff ? size : 0xffff;
                /* emit the reloc before writing the cs so that the cs is always in a consistent state */
                r600_context_bo_reloc(rctx, &rctx->rings.dma, rsrc, RADEON_USAGE_READ);
                r600_context_bo_reloc(rctx, &rctx->rings.dma, rdst, RADEON_USAGE_WRITE);
                cs->buf[cs->cdw++] = DMA_PACKET(DMA_PACKET_COPY, 0, 0, csize);
                cs->buf[cs->cdw++] = dst_offset & 0xfffffffc;
                cs->buf[cs->cdw++] = src_offset & 0xfffffffc;
                cs->buf[cs->cdw++] = (dst_offset >> 32UL) & 0xff;
                cs->buf[cs->cdw++] = (src_offset >> 32UL) & 0xff;
                dst_offset += csize << shift;
                src_offset += csize << shift;
                size -= csize;
        }

        util_range_add(&rdst->valid_buffer_range, dst_offset,
                       dst_offset + size);
}