/*
 * Copyright 2012 Nouveau Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christoph Bumiller
 */
#include "nvc0/nvc0_context.h"
#include "nvc0/nve4_compute.h"

#include "codegen/nv50_ir_driver.h"
static void nve4_compute_dump_launch_desc(const struct nve4_cp_launch_desc *);
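
/* One-time compute engine setup: bind the compute class that matches this
 * chipset and point it at the screen-wide TEMP (l[]), code, and TIC/TSC
 * areas. */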
int
nve4_screen_compute_setup(struct nvc0_screen *screen,
                          struct nouveau_pushbuf *push)
{
   struct nouveau_device *dev = screen->base.device;
   struct nouveau_object *chan = screen->base.channel;
   uint32_t obj_class;
   uint64_t address;
   int ret;
   int i;

   switch (dev->chipset & ~0xf) {
   case 0x100:
   case 0xf0:
      obj_class = NVF0_COMPUTE_CLASS; /* GK110 */
      break;
   case 0xe0:
      obj_class = NVE4_COMPUTE_CLASS; /* GK104 */
      break;
   case 0x110:
      obj_class = GM107_COMPUTE_CLASS;
      break;
   case 0x120:
      obj_class = GM200_COMPUTE_CLASS;
      break;
   default:
      NOUVEAU_ERR("unsupported chipset: NV%02x\n", dev->chipset);
      return -1;
   }

   ret = nouveau_object_new(chan, 0xbeef00c0, obj_class, NULL, 0,
                            &screen->compute);
   if (ret) {
      NOUVEAU_ERR("Failed to allocate compute object: %d\n", ret);
      return ret;
   }

   BEGIN_NVC0(push, SUBC_CP(NV01_SUBCHAN_OBJECT), 1);
   PUSH_DATA (push, screen->compute->oclass);

   BEGIN_NVC0(push, NVE4_CP(TEMP_ADDRESS_HIGH), 2);
   PUSH_DATAh(push, screen->tls->offset);
   PUSH_DATA (push, screen->tls->offset);
   /* No idea why there are 2. Divide size by 2 to be safe.
    * Actually this might be per-MP TEMP size and looks like I'm only using
    * 2 MPs instead of all 8.
    */
   BEGIN_NVC0(push, NVE4_CP(MP_TEMP_SIZE_HIGH(0)), 3);
   PUSH_DATAh(push, screen->tls->size / screen->mp_count);
   PUSH_DATA (push, (screen->tls->size / screen->mp_count) & ~0x7fff);
   PUSH_DATA (push, 0xff);
   BEGIN_NVC0(push, NVE4_CP(MP_TEMP_SIZE_HIGH(1)), 3);
   PUSH_DATAh(push, screen->tls->size / screen->mp_count);
   PUSH_DATA (push, (screen->tls->size / screen->mp_count) & ~0x7fff);
   PUSH_DATA (push, 0xff);

   /* Unified address space ? Who needs that ? Certainly not OpenCL.
    *
    * FATAL: Buffers with addresses inside [0x1000000, 0x3000000] will NOT be
    * accessible. We cannot prevent that at the moment, so expect failure.
    */
   BEGIN_NVC0(push, NVE4_CP(LOCAL_BASE), 1);
   PUSH_DATA (push, 0xff << 24);
   BEGIN_NVC0(push, NVE4_CP(SHARED_BASE), 1);
   PUSH_DATA (push, 0xfe << 24);

   BEGIN_NVC0(push, NVE4_CP(CODE_ADDRESS_HIGH), 2);
   PUSH_DATAh(push, screen->text->offset);
   PUSH_DATA (push, screen->text->offset);

   BEGIN_NVC0(push, SUBC_CP(0x0310), 1);
   PUSH_DATA (push, (obj_class >= NVF0_COMPUTE_CLASS) ? 0x400 : 0x300);

   /* NOTE: these do not affect the state used by the 3D object */
   BEGIN_NVC0(push, NVE4_CP(TIC_ADDRESS_HIGH), 3);
   PUSH_DATAh(push, screen->txc->offset);
   PUSH_DATA (push, screen->txc->offset);
   PUSH_DATA (push, NVC0_TIC_MAX_ENTRIES - 1);
   BEGIN_NVC0(push, NVE4_CP(TSC_ADDRESS_HIGH), 3);
   PUSH_DATAh(push, screen->txc->offset + 65536);
   PUSH_DATA (push, screen->txc->offset + 65536);
   PUSH_DATA (push, NVC0_TSC_MAX_ENTRIES - 1);
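   /* The TSC table shares the txc bo with the TIC table: NVC0_TIC_MAX_ENTRIES
    * TIC entries of 32 bytes each come first, which is where the 64 KiB
    * offset above comes from. */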

   if (obj_class >= NVF0_COMPUTE_CLASS) {
      /* The blob calls GK110_COMPUTE.FIRMWARE[0x6], along with the args (0x1)
       * passed with GK110_COMPUTE.GRAPH.SCRATCH[0x2]. This is currently
       * disabled because our firmware doesn't support these commands and the
       * GPU hangs if they are used. */
      BEGIN_NIC0(push, SUBC_CP(0x0248), 64);
      for (i = 63; i >= 0; i--)
         PUSH_DATA(push, 0x38000 | i);
      IMMED_NVC0(push, SUBC_CP(NV50_GRAPH_SERIALIZE), 0);
   }

   BEGIN_NVC0(push, NVE4_CP(TEX_CB_INDEX), 1);
   PUSH_DATA (push, 7); /* does not interfere with 3D */

   /* Disabling this UNK command avoids a read fault when using texelFetch()
    * from a compute shader for weird reasons.
   if (obj_class == NVF0_COMPUTE_CLASS)
      IMMED_NVC0(push, SUBC_CP(0x02c4), 1);
   */

   address = screen->uniform_bo->offset + NVC0_CB_AUX_INFO(5);

   /* MS sample coordinate offsets: these do not work with _ALT modes ! */
   BEGIN_NVC0(push, NVE4_CP(UPLOAD_DST_ADDRESS_HIGH), 2);
   PUSH_DATAh(push, address + NVC0_CB_AUX_MS_INFO);
   PUSH_DATA (push, address + NVC0_CB_AUX_MS_INFO);
   BEGIN_NVC0(push, NVE4_CP(UPLOAD_LINE_LENGTH_IN), 2);
   PUSH_DATA (push, 64);
   PUSH_DATA (push, 0x1);
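   /* These look like the per-sample (x, y) coordinate offsets used when
    * addressing multisampled surfaces from compute, two words per sample. */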
   BEGIN_1IC0(push, NVE4_CP(UPLOAD_EXEC), 17);
   PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x20 << 1));
   PUSH_DATA (push, 0); /* 0 */
   PUSH_DATA (push, 0);
   PUSH_DATA (push, 1); /* 1 */
   PUSH_DATA (push, 0);
   PUSH_DATA (push, 0); /* 2 */
   PUSH_DATA (push, 1);
   PUSH_DATA (push, 1); /* 3 */
   PUSH_DATA (push, 1);
   PUSH_DATA (push, 2); /* 4 */
   PUSH_DATA (push, 0);
   PUSH_DATA (push, 3); /* 5 */
   PUSH_DATA (push, 0);
   PUSH_DATA (push, 2); /* 6 */
   PUSH_DATA (push, 1);
   PUSH_DATA (push, 3); /* 7 */
   PUSH_DATA (push, 1);

#ifdef NOUVEAU_NVE4_MP_TRAP_HANDLER
   BEGIN_NVC0(push, NVE4_CP(UPLOAD_DST_ADDRESS_HIGH), 2);
   PUSH_DATAh(push, screen->parm->offset + NVE4_CP_INPUT_TRAP_INFO_PTR);
   PUSH_DATA (push, screen->parm->offset + NVE4_CP_INPUT_TRAP_INFO_PTR);
   BEGIN_NVC0(push, NVE4_CP(UPLOAD_LINE_LENGTH_IN), 2);
   PUSH_DATA (push, 28);
   PUSH_DATA (push, 0x1);
   BEGIN_1IC0(push, NVE4_CP(UPLOAD_EXEC), 8);
   PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x20 << 1));
   PUSH_DATA (push, screen->parm->offset + NVE4_CP_PARAM_TRAP_INFO);
   PUSH_DATAh(push, screen->parm->offset + NVE4_CP_PARAM_TRAP_INFO);
   PUSH_DATA (push, screen->tls->offset);
   PUSH_DATAh(push, screen->tls->offset);
   PUSH_DATA (push, screen->tls->size / 2); /* MP TEMP block size */
   PUSH_DATA (push, screen->tls->size / 2 / 64); /* warp TEMP block size */
   PUSH_DATA (push, 0); /* warp cfstack size */
#endif

   BEGIN_NVC0(push, NVE4_CP(FLUSH), 1);
   PUSH_DATA (push, NVE4_COMPUTE_FLUSH_CB);

   return 0;
}
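
/* Upload the surface (image) descriptors into the driver's auxiliary constant
 * buffer: each of the NVC0_MAX_IMAGES slots takes 16 words, and unbound slots
 * are zero-filled. */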
static void
nve4_compute_validate_surfaces(struct nvc0_context *nvc0)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   uint64_t address;
   const int s = 5;
   int i, j;

   if (!nvc0->images_dirty[s])
      return;

   address = nvc0->screen->uniform_bo->offset + NVC0_CB_AUX_INFO(s);

   BEGIN_NVC0(push, NVE4_CP(UPLOAD_DST_ADDRESS_HIGH), 2);
   PUSH_DATAh(push, address + NVC0_CB_AUX_SU_INFO(0));
   PUSH_DATA (push, address + NVC0_CB_AUX_SU_INFO(0));
   BEGIN_NVC0(push, NVE4_CP(UPLOAD_LINE_LENGTH_IN), 2);
   PUSH_DATA (push, 16 * NVC0_MAX_IMAGES * 4);
   PUSH_DATA (push, 0x1);
   BEGIN_1IC0(push, NVE4_CP(UPLOAD_EXEC), 1 + 16 * NVC0_MAX_IMAGES);
   PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x20 << 1));

   for (i = 0; i < NVC0_MAX_IMAGES; ++i) {
      struct pipe_image_view *view = &nvc0->images[s][i];

      if (view->resource) {
         struct nv04_resource *res = nv04_resource(view->resource);

         if (res->base.target == PIPE_BUFFER) {
            if (view->access & PIPE_IMAGE_ACCESS_WRITE)
               nvc0_mark_image_range_valid(view);
         }

         nve4_set_surface_info(push, view, nvc0);
         BCTX_REFN(nvc0->bufctx_cp, CP_SUF, res, RDWR);
      } else {
         for (j = 0; j < 16; j++)
            PUSH_DATA(push, 0);
      }
   }
}

/* Thankfully, textures with samplers follow the normal rules. */
static void
nve4_compute_validate_samplers(struct nvc0_context *nvc0)
{
   bool need_flush = nve4_validate_tsc(nvc0, 5);
   if (need_flush) {
      BEGIN_NVC0(nvc0->base.pushbuf, NVE4_CP(TSC_FLUSH), 1);
      PUSH_DATA (nvc0->base.pushbuf, 0);
   }
}

/* (Code duplicated at bottom for various non-convincing reasons.
 * E.g. we might want to use the COMPUTE subchannel to upload TIC/TSC
 * entries to avoid a subchannel switch.
 * Same for texture cache flushes.
 * Also, the bufctx differs, and more IFs in the 3D version look ugly.)
 */
static void nve4_compute_validate_textures(struct nvc0_context *);
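
/* The texture handles are packed TIC/TSC ids that compute shaders read from
 * the auxiliary constant buffer; only the dirty slot range [i, i + n) gets
 * re-uploaded here. */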
static void
nve4_compute_set_tex_handles(struct nvc0_context *nvc0)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nvc0_screen *screen = nvc0->screen;
   uint64_t address;
   const unsigned s = nvc0_shader_stage(PIPE_SHADER_COMPUTE);
   unsigned i, n;
   uint32_t dirty = nvc0->textures_dirty[s] | nvc0->samplers_dirty[s];

   if (!dirty)
      return;
   i = ffs(dirty) - 1;
   n = util_logbase2(dirty) + 1 - i;
   assert(n);

   address = screen->uniform_bo->offset + NVC0_CB_AUX_INFO(s);

   BEGIN_NVC0(push, NVE4_CP(UPLOAD_DST_ADDRESS_HIGH), 2);
   PUSH_DATAh(push, address + NVC0_CB_AUX_TEX_INFO(i));
   PUSH_DATA (push, address + NVC0_CB_AUX_TEX_INFO(i));
   BEGIN_NVC0(push, NVE4_CP(UPLOAD_LINE_LENGTH_IN), 2);
   PUSH_DATA (push, n * 4);
   PUSH_DATA (push, 0x1);
   BEGIN_1IC0(push, NVE4_CP(UPLOAD_EXEC), 1 + n);
   PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x20 << 1));
   PUSH_DATAp(push, &nvc0->tex_handles[s][i], n);

   BEGIN_NVC0(push, NVE4_CP(FLUSH), 1);
   PUSH_DATA (push, NVE4_COMPUTE_FLUSH_CB);

   nvc0->textures_dirty[s] = 0;
   nvc0->samplers_dirty[s] = 0;
}
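
/* cb 0 (user uniforms) is copied straight into the USR area of uniform_bo;
 * UBOs are instead described by 4-word address/size records in the aux
 * constant buffer, because only 8 hardware cb slots exist (see the launch
 * descriptor setup below). */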
static void
nve4_compute_validate_constbufs(struct nvc0_context *nvc0)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   const int s = 5;

   while (nvc0->constbuf_dirty[s]) {
      int i = ffs(nvc0->constbuf_dirty[s]) - 1;
      nvc0->constbuf_dirty[s] &= ~(1 << i);

      if (nvc0->constbuf[s][i].user) {
         struct nouveau_bo *bo = nvc0->screen->uniform_bo;
         const unsigned base = NVC0_CB_USR_INFO(s);
         const unsigned size = nvc0->constbuf[s][0].size;
         assert(i == 0); /* we really only want OpenGL uniforms here */
         assert(nvc0->constbuf[s][0].u.data);

         BEGIN_NVC0(push, NVE4_CP(UPLOAD_DST_ADDRESS_HIGH), 2);
         PUSH_DATAh(push, bo->offset + base);
         PUSH_DATA (push, bo->offset + base);
         BEGIN_NVC0(push, NVE4_CP(UPLOAD_LINE_LENGTH_IN), 2);
         PUSH_DATA (push, size);
         PUSH_DATA (push, 0x1);
         BEGIN_1IC0(push, NVE4_CP(UPLOAD_EXEC), 1 + (size / 4));
         PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x20 << 1));
         PUSH_DATAp(push, nvc0->constbuf[s][0].u.data, size / 4);
      } else {
         struct nv04_resource *res =
            nv04_resource(nvc0->constbuf[s][i].u.buf);
         if (res) {
            uint64_t address
               = nvc0->screen->uniform_bo->offset + NVC0_CB_AUX_INFO(s);

            assert(i > 0); /* we really only want uniform buffer objects */

            BEGIN_NVC0(push, NVE4_CP(UPLOAD_DST_ADDRESS_HIGH), 2);
            PUSH_DATAh(push, address + NVC0_CB_AUX_UBO_INFO(i - 1));
            PUSH_DATA (push, address + NVC0_CB_AUX_UBO_INFO(i - 1));
            BEGIN_NVC0(push, NVE4_CP(UPLOAD_LINE_LENGTH_IN), 2);
            PUSH_DATA (push, 4 * 4);
            PUSH_DATA (push, 0x1);
            BEGIN_1IC0(push, NVE4_CP(UPLOAD_EXEC), 1 + 4);
            PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x20 << 1));

            PUSH_DATA (push, res->address + nvc0->constbuf[s][i].offset);
            PUSH_DATAh(push, res->address + nvc0->constbuf[s][i].offset);
            PUSH_DATA (push, nvc0->constbuf[s][i].size);
            PUSH_DATA (push, 0);
            BCTX_REFN(nvc0->bufctx_cp, CP_CB(i), res, RD);

            res->cb_bindings[s] |= 1 << i;
         }
      }
   }

   BEGIN_NVC0(push, NVE4_CP(FLUSH), 1);
   PUSH_DATA (push, NVE4_COMPUTE_FLUSH_CB);
}
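
/* Shader buffers (SSBOs) use the same 4-word record format in the aux
 * constant buffer: address low/high, size, and a word left as zero. */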
static void
nve4_compute_validate_buffers(struct nvc0_context *nvc0)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   uint64_t address;
   const int s = 5;
   int i;

   address = nvc0->screen->uniform_bo->offset + NVC0_CB_AUX_INFO(s);

   BEGIN_NVC0(push, NVE4_CP(UPLOAD_DST_ADDRESS_HIGH), 2);
   PUSH_DATAh(push, address + NVC0_CB_AUX_BUF_INFO(0));
   PUSH_DATA (push, address + NVC0_CB_AUX_BUF_INFO(0));
   BEGIN_NVC0(push, NVE4_CP(UPLOAD_LINE_LENGTH_IN), 2);
   PUSH_DATA (push, 4 * NVC0_MAX_BUFFERS * 4);
   PUSH_DATA (push, 0x1);
   BEGIN_1IC0(push, NVE4_CP(UPLOAD_EXEC), 1 + 4 * NVC0_MAX_BUFFERS);
   PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x20 << 1));

   for (i = 0; i < NVC0_MAX_BUFFERS; i++) {
      if (nvc0->buffers[s][i].buffer) {
         struct nv04_resource *res =
            nv04_resource(nvc0->buffers[s][i].buffer);
         PUSH_DATA (push, res->address + nvc0->buffers[s][i].buffer_offset);
         PUSH_DATAh(push, res->address + nvc0->buffers[s][i].buffer_offset);
         PUSH_DATA (push, nvc0->buffers[s][i].buffer_size);
         PUSH_DATA (push, 0);
         BCTX_REFN(nvc0->bufctx_cp, CP_BUF, res, RDWR);
         util_range_add(&res->valid_buffer_range,
                        nvc0->buffers[s][i].buffer_offset,
                        nvc0->buffers[s][i].buffer_size);
      } else {
         PUSH_DATA (push, 0);
         PUSH_DATA (push, 0);
         PUSH_DATA (push, 0);
         PUSH_DATA (push, 0);
      }
   }
}

static struct nvc0_state_validate
validate_list_cp[] = {
   { nvc0_compprog_validate,          NVC0_NEW_CP_PROGRAM  },
   { nve4_compute_validate_textures,  NVC0_NEW_CP_TEXTURES },
   { nve4_compute_validate_samplers,  NVC0_NEW_CP_SAMPLERS },
   { nve4_compute_set_tex_handles,    NVC0_NEW_CP_TEXTURES |
                                      NVC0_NEW_CP_SAMPLERS },
   { nve4_compute_validate_surfaces,  NVC0_NEW_CP_SURFACES },
   { nvc0_compute_validate_globals,   NVC0_NEW_CP_GLOBALS  },
   { nve4_compute_validate_buffers,   NVC0_NEW_CP_BUFFERS  },
   { nve4_compute_validate_constbufs, NVC0_NEW_CP_CONSTBUF },
};

static bool
nve4_state_validate_cp(struct nvc0_context *nvc0, uint32_t mask)
{
   bool ret;

   ret = nvc0_state_validate(nvc0, mask, validate_list_cp,
                             ARRAY_SIZE(validate_list_cp), &nvc0->dirty_cp,
                             nvc0->bufctx_cp);

   if (unlikely(nvc0->state.flushed))
      nvc0_bufctx_fence(nvc0, nvc0->bufctx_cp, true);
   return ret;
}
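
/* Upload the kernel's input parameters into the USR area and the block/grid
 * dimensions (7 words) into the aux GRID_INFO slot; for indirect dispatches
 * the grid is copied straight out of the indirect buffer by the GPU. */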
static void
nve4_compute_upload_input(struct nvc0_context *nvc0,
                          const struct pipe_grid_info *info)
{
   struct nvc0_screen *screen = nvc0->screen;
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nvc0_program *cp = nvc0->compprog;
   uint64_t address;

   address = screen->uniform_bo->offset + NVC0_CB_AUX_INFO(5);

   if (cp->parm_size) {
      BEGIN_NVC0(push, NVE4_CP(UPLOAD_DST_ADDRESS_HIGH), 2);
      PUSH_DATAh(push, screen->uniform_bo->offset + NVC0_CB_USR_INFO(5));
      PUSH_DATA (push, screen->uniform_bo->offset + NVC0_CB_USR_INFO(5));
      BEGIN_NVC0(push, NVE4_CP(UPLOAD_LINE_LENGTH_IN), 2);
      PUSH_DATA (push, cp->parm_size);
      PUSH_DATA (push, 0x1);
      BEGIN_1IC0(push, NVE4_CP(UPLOAD_EXEC), 1 + (cp->parm_size / 4));
      PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x20 << 1));
      PUSH_DATAp(push, info->input, cp->parm_size / 4);
   }

   BEGIN_NVC0(push, NVE4_CP(UPLOAD_DST_ADDRESS_HIGH), 2);
   PUSH_DATAh(push, address + NVC0_CB_AUX_GRID_INFO);
   PUSH_DATA (push, address + NVC0_CB_AUX_GRID_INFO);
   BEGIN_NVC0(push, NVE4_CP(UPLOAD_LINE_LENGTH_IN), 2);
   PUSH_DATA (push, 7 * 4);
   PUSH_DATA (push, 0x1);

   if (unlikely(info->indirect)) {
      struct nv04_resource *res = nv04_resource(info->indirect);
      uint32_t offset = res->offset + info->indirect_offset;

      nouveau_pushbuf_space(push, 16, 0, 1);
      PUSH_REFN(push, res->bo, NOUVEAU_BO_RD | res->domain);

      BEGIN_1IC0(push, NVE4_CP(UPLOAD_EXEC), 1 + 7);
      PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x20 << 1));
      PUSH_DATAp(push, info->block, 3);
      nouveau_pushbuf_data(push, res->bo, offset,
                           NVC0_IB_ENTRY_1_NO_PREFETCH | 3 * 4);
      PUSH_DATA (push, 0);
   } else {
      BEGIN_1IC0(push, NVE4_CP(UPLOAD_EXEC), 1 + 7);
      PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x20 << 1));
      PUSH_DATAp(push, info->block, 3);
      PUSH_DATAp(push, info->grid, 3);
      PUSH_DATA (push, 0);
   }

   BEGIN_NVC0(push, NVE4_CP(FLUSH), 1);
   PUSH_DATA (push, NVE4_COMPUTE_FLUSH_CB);
}
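
/* Kepler MPs split 64 KiB of on-chip memory between shared memory (s[]) and
 * L1 cache; pick the smallest shared partition that still fits the kernel's
 * declared s[] usage. */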
static inline uint8_t
nve4_compute_derive_cache_split(struct nvc0_context *nvc0, uint32_t shared_size)
{
   if (shared_size > (32 << 10))
      return NVC0_3D_CACHE_SPLIT_48K_SHARED_16K_L1;
   if (shared_size > (16 << 10))
      return NVE4_3D_CACHE_SPLIT_32K_SHARED_32K_L1;
   return NVC1_3D_CACHE_SPLIT_16K_SHARED_48K_L1;
}
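
/* The launch descriptor is the 256-byte structure the hardware consumes on
 * LAUNCH: grid/block dimensions, code entry point, resource limits (GPRs,
 * barriers, s[]/l[]/stack sizes) and up to 8 constant buffer bindings. */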
static void
nve4_compute_setup_launch_desc(struct nvc0_context *nvc0,
                               struct nve4_cp_launch_desc *desc,
                               const struct pipe_grid_info *info)
{
   const struct nvc0_screen *screen = nvc0->screen;
   const struct nvc0_program *cp = nvc0->compprog;

   nve4_cp_launch_desc_init_default(desc);

   desc->entry = nvc0_program_symbol_offset(cp, info->pc);

   desc->griddim_x = info->grid[0];
   desc->griddim_y = info->grid[1];
   desc->griddim_z = info->grid[2];
   desc->blockdim_x = info->block[0];
   desc->blockdim_y = info->block[1];
   desc->blockdim_z = info->block[2];

   desc->shared_size = align(cp->cp.smem_size, 0x100);
   desc->local_size_p = (cp->hdr[1] & 0xfffff0) + align(cp->cp.lmem_size, 0x10);
   desc->local_size_n = 0;
   desc->cstack_size = 0x800;
   desc->cache_split = nve4_compute_derive_cache_split(nvc0, cp->cp.smem_size);

   desc->gpr_alloc = cp->num_gprs;
   desc->bar_alloc = cp->num_barriers;

   /* Only bind user uniforms and the driver constant buffer through the
    * launch descriptor, because UBOs are stuffed into the driver cb to get
    * around the limit of 8 CBs. */
   if (nvc0->constbuf[5][0].user || cp->parm_size) {
      nve4_cp_launch_desc_set_cb(desc, 0, screen->uniform_bo,
                                 NVC0_CB_USR_INFO(5), 1 << 16);
   }

   nve4_cp_launch_desc_set_cb(desc, 7, screen->uniform_bo,
                              NVC0_CB_AUX_INFO(5), 1 << 11);
}
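
/* LAUNCH_DESC_ADDRESS takes the descriptor address shifted right by 8, so the
 * descriptor must start on a 256-byte boundary; scratch memory gives no such
 * guarantee, hence 512 bytes are requested and the pointer rounded up. */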
static inline struct nve4_cp_launch_desc *
nve4_compute_alloc_launch_desc(struct nouveau_context *nv,
                               struct nouveau_bo **pbo, uint64_t *pgpuaddr)
{
   uint8_t *ptr = nouveau_scratch_get(nv, 512, pgpuaddr, pbo);
   if (!ptr)
      return NULL;
   if (*pgpuaddr & 255) {
      unsigned adj = 256 - (*pgpuaddr & 255);
      ptr += adj;
      *pgpuaddr += adj;
   }
   return (struct nve4_cp_launch_desc *)ptr;
}
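
/* Launch sequence: allocate and reference a descriptor, validate compute
 * state, fill in the descriptor and upload kernel inputs, then (for indirect
 * dispatches) patch the grid dimensions in the GPU copy before pointing
 * LAUNCH_DESC_ADDRESS at it. */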
static void
nve4_launch_grid(struct pipe_context *pipe, const struct pipe_grid_info *info)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);
   struct nvc0_screen *screen = nvc0->screen;
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nve4_cp_launch_desc *desc;
   uint64_t desc_gpuaddr;
   struct nouveau_bo *desc_bo;
   int ret;

   pipe_mutex_lock(screen->base.push_mutex);

   desc = nve4_compute_alloc_launch_desc(&nvc0->base, &desc_bo, &desc_gpuaddr);
   if (!desc) {
      ret = -1;
      goto out;
   }
   BCTX_REFN_bo(nvc0->bufctx_cp, CP_DESC, NOUVEAU_BO_GART | NOUVEAU_BO_RD,
                desc_bo);

   ret = !nve4_state_validate_cp(nvc0, ~0);
   if (ret)
      goto out;

   nve4_compute_setup_launch_desc(nvc0, desc, info);

   nve4_compute_upload_input(nvc0, info);

   if (debug_get_num_option("NV50_PROG_DEBUG", 0))
      nve4_compute_dump_launch_desc(desc);

   if (unlikely(info->indirect)) {
      struct nv04_resource *res = nv04_resource(info->indirect);
      uint32_t offset = res->offset + info->indirect_offset;

      /* upload the descriptor */
      BEGIN_NVC0(push, NVE4_CP(UPLOAD_DST_ADDRESS_HIGH), 2);
      PUSH_DATAh(push, desc_gpuaddr);
      PUSH_DATA (push, desc_gpuaddr);
      BEGIN_NVC0(push, NVE4_CP(UPLOAD_LINE_LENGTH_IN), 2);
      PUSH_DATA (push, 256);
      PUSH_DATA (push, 0x1);
      BEGIN_1IC0(push, NVE4_CP(UPLOAD_EXEC), 1 + (256 / 4));
      PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x08 << 1));
      PUSH_DATAp(push, (const uint32_t *)desc, 256 / 4);

      /* overwrite griddim_x and griddim_y as two 32-bit integers even
       * though griddim_y is only a 16-bit integer */
      BEGIN_NVC0(push, NVE4_CP(UPLOAD_DST_ADDRESS_HIGH), 2);
      PUSH_DATAh(push, desc_gpuaddr + 48);
      PUSH_DATA (push, desc_gpuaddr + 48);
      BEGIN_NVC0(push, NVE4_CP(UPLOAD_LINE_LENGTH_IN), 2);
      PUSH_DATA (push, 8);
      PUSH_DATA (push, 0x1);

      nouveau_pushbuf_space(push, 16, 0, 1);
      PUSH_REFN(push, res->bo, NOUVEAU_BO_RD | res->domain);

      BEGIN_1IC0(push, NVE4_CP(UPLOAD_EXEC), 1 + (8 / 4));
      PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x08 << 1));
      nouveau_pushbuf_data(push, res->bo, offset,
                           NVC0_IB_ENTRY_1_NO_PREFETCH | 2 * 4);

      /* overwrite the 16 high bits of griddim_y with griddim_z because
       * we need (z << 16) | y in that dword */
      BEGIN_NVC0(push, NVE4_CP(UPLOAD_DST_ADDRESS_HIGH), 2);
      PUSH_DATAh(push, desc_gpuaddr + 54);
      PUSH_DATA (push, desc_gpuaddr + 54);
      BEGIN_NVC0(push, NVE4_CP(UPLOAD_LINE_LENGTH_IN), 2);
      PUSH_DATA (push, 4);
      PUSH_DATA (push, 0x1);
      BEGIN_1IC0(push, NVE4_CP(UPLOAD_EXEC), 1 + (4 / 4));
      PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x08 << 1));
      nouveau_pushbuf_data(push, res->bo, offset + 8,
                           NVC0_IB_ENTRY_1_NO_PREFETCH | 1 * 4);
   }

   /* set the descriptor address, launch, and serialize */
   BEGIN_NVC0(push, NVE4_CP(LAUNCH_DESC_ADDRESS), 1);
   PUSH_DATA (push, desc_gpuaddr >> 8);
   BEGIN_NVC0(push, NVE4_CP(LAUNCH), 1);
   PUSH_DATA (push, 0x3);
   BEGIN_NVC0(push, SUBC_CP(NV50_GRAPH_SERIALIZE), 1);
   PUSH_DATA (push, 0);

out:
   if (ret)
      NOUVEAU_ERR("Failed to launch grid !\n");
   nouveau_scratch_done(&nvc0->base);
   nouveau_bufctx_reset(nvc0->bufctx_cp, NVC0_BIND_CP_DESC);
   pipe_mutex_unlock(screen->base.push_mutex);
}

#define NVE4_TIC_ENTRY_INVALID 0x000fffff
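
/* Compute-side counterpart of the 3D TIC validation (see the duplication note
 * above): dirty TIC entries are uploaded through the compute UPLOAD methods,
 * while commands[0]/commands[1] collect the TIC_FLUSH and TEX_CACHE_CTL words
 * to submit afterwards. */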
static void
nve4_compute_validate_textures(struct nvc0_context *nvc0)
{
   struct nouveau_bo *txc = nvc0->screen->txc;
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   const unsigned s = 5;
   unsigned i;
   uint32_t commands[2][32];
   unsigned n[2] = { 0, 0 };

   for (i = 0; i < nvc0->num_textures[s]; ++i) {
      struct nv50_tic_entry *tic = nv50_tic_entry(nvc0->textures[s][i]);
      struct nv04_resource *res;
      const bool dirty = !!(nvc0->textures_dirty[s] & (1 << i));

      if (!tic) {
         nvc0->tex_handles[s][i] |= NVE4_TIC_ENTRY_INVALID;
         continue;
      }
      res = nv04_resource(tic->pipe.texture);
      nvc0_update_tic(nvc0, tic, res);

      if (tic->id < 0) {
         tic->id = nvc0_screen_tic_alloc(nvc0->screen, tic);

         PUSH_SPACE(push, 16);
         BEGIN_NVC0(push, NVE4_CP(UPLOAD_DST_ADDRESS_HIGH), 2);
         PUSH_DATAh(push, txc->offset + (tic->id * 32));
         PUSH_DATA (push, txc->offset + (tic->id * 32));
         BEGIN_NVC0(push, NVE4_CP(UPLOAD_LINE_LENGTH_IN), 2);
         PUSH_DATA (push, 32);
         PUSH_DATA (push, 0x1);
         BEGIN_1IC0(push, NVE4_CP(UPLOAD_EXEC), 9);
         PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x20 << 1));
         PUSH_DATAp(push, &tic->tic[0], 8);

         commands[0][n[0]++] = (tic->id << 4) | 1;
      } else
      if (res->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) {
         commands[1][n[1]++] = (tic->id << 4) | 1;
      }
      nvc0->screen->tic.lock[tic->id / 32] |= 1 << (tic->id % 32);

      res->status &= ~NOUVEAU_BUFFER_STATUS_GPU_WRITING;
      res->status |= NOUVEAU_BUFFER_STATUS_GPU_READING;

      nvc0->tex_handles[s][i] &= ~NVE4_TIC_ENTRY_INVALID;
      nvc0->tex_handles[s][i] |= tic->id;
      if (dirty)
         BCTX_REFN(nvc0->bufctx_cp, CP_TEX(i), res, RD);
   }
   for (; i < nvc0->state.num_textures[s]; ++i) {
      nvc0->tex_handles[s][i] |= NVE4_TIC_ENTRY_INVALID;
      nvc0->textures_dirty[s] |= 1 << i;
   }

   if (n[0]) {
      BEGIN_NIC0(push, NVE4_CP(TIC_FLUSH), n[0]);
      PUSH_DATAp(push, commands[0], n[0]);
   }
   if (n[1]) {
      BEGIN_NIC0(push, NVE4_CP(TEX_CACHE_CTL), n[1]);
      PUSH_DATAp(push, commands[1], n[1]);
   }

   nvc0->state.num_textures[s] = nvc0->num_textures[s];
}

static const char *nve4_cache_split_name(unsigned value)
{
   switch (value) {
   case NVC1_3D_CACHE_SPLIT_16K_SHARED_48K_L1: return "16K_SHARED_48K_L1";
   case NVE4_3D_CACHE_SPLIT_32K_SHARED_32K_L1: return "32K_SHARED_32K_L1";
   case NVC0_3D_CACHE_SPLIT_48K_SHARED_16K_L1: return "48K_SHARED_16K_L1";
   default:
      return "(invalid)";
   }
}
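
/* Debug helper, reachable via NV50_PROG_DEBUG from nve4_launch_grid: prints
 * the raw descriptor words followed by a decoded summary. */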
static void
nve4_compute_dump_launch_desc(const struct nve4_cp_launch_desc *desc)
{
   const uint32_t *data = (const uint32_t *)desc;
   unsigned i;
   bool zero = false;

   debug_printf("COMPUTE LAUNCH DESCRIPTOR:\n");

   for (i = 0; i < sizeof(*desc); i += 4) {
      if (data[i / 4]) {
         debug_printf("[%x]: 0x%08x\n", i, data[i / 4]);
         zero = false;
      } else
      if (!zero) {
         debug_printf("...\n");
         zero = true;
      }
   }

   debug_printf("entry = 0x%x\n", desc->entry);
   debug_printf("grid dimensions = %ux%ux%u\n",
                desc->griddim_x, desc->griddim_y, desc->griddim_z);
   debug_printf("block dimensions = %ux%ux%u\n",
                desc->blockdim_x, desc->blockdim_y, desc->blockdim_z);
   debug_printf("s[] size: 0x%x\n", desc->shared_size);
   debug_printf("l[] size: -0x%x / +0x%x\n",
                desc->local_size_n, desc->local_size_p);
   debug_printf("stack size: 0x%x\n", desc->cstack_size);
   debug_printf("barrier count: %u\n", desc->bar_alloc);
   debug_printf("$r count: %u\n", desc->gpr_alloc);
   debug_printf("cache split: %s\n", nve4_cache_split_name(desc->cache_split));

   for (i = 0; i < 8; ++i) {
      uint64_t address;
      uint32_t size = desc->cb[i].size;
      bool valid = !!(desc->cb_mask & (1 << i));

      address = ((uint64_t)desc->cb[i].address_h << 32) | desc->cb[i].address_l;

      if (!valid && !address && !size)
         continue;
      debug_printf("CB[%u]: address = 0x%"PRIx64", size 0x%x%s\n",
                   i, address, size, valid ? "" : " (invalid)");
   }
}
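
/* With the trap handler enabled, the shader-side handler writes the faulting
 * warp's state into screen->parm at NVE4_CP_PARAM_TRAP_INFO; this helper maps
 * the bo back and dumps that state. */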
#ifdef NOUVEAU_NVE4_MP_TRAP_HANDLER
static void
nve4_compute_trap_info(struct nvc0_context *nvc0)
{
   struct nvc0_screen *screen = nvc0->screen;
   struct nouveau_bo *bo = screen->parm;
   int ret, i;
   volatile struct nve4_mp_trap_info *info;
   uint8_t *map;

   ret = nouveau_bo_map(bo, NOUVEAU_BO_RDWR, nvc0->base.client);
   if (ret)
      return;
   map = (uint8_t *)bo->map;
   info = (volatile struct nve4_mp_trap_info *)(map + NVE4_CP_PARAM_TRAP_INFO);

   debug_printf("trapstat = %08x\n", info->trapstat);
   debug_printf("warperr = %08x\n", info->warperr);
   debug_printf("PC = %x\n", info->pc);
   debug_printf("tid = %u %u %u\n",
                info->tid[0], info->tid[1], info->tid[2]);
   debug_printf("ctaid = %u %u %u\n",
                info->ctaid[0], info->ctaid[1], info->ctaid[2]);
   for (i = 0; i <= 63; ++i)
      debug_printf("$r%i = %08x\n", i, info->r[i]);
   for (i = 0; i <= 6; ++i)
      debug_printf("$p%i = %i\n", i, (info->flags >> i) & 1);
   debug_printf("$c = %x\n", info->flags >> 12);
}
#endif