/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <engine/fifo.h>

#include <core/client.h>
#include <core/engctx.h>
#include <core/enum.h>
#include <core/handle.h>
#include <subdev/bar.h>
#include <subdev/fb.h>
#include <subdev/mmu.h>
#include <subdev/timer.h>

#include <nvif/class.h>
#include <nvif/ioctl.h>
#include <nvif/unpack.h>

struct gf100_fifo {
	struct nvkm_fifo base;

	struct work_struct fault;
	u64 mask;

	struct {
		struct nvkm_gpuobj *mem[2];
		int active;
		wait_queue_head_t wait;
	} runlist;

	struct {
		struct nvkm_gpuobj *mem;
		struct nvkm_vma bar;
	} user;
	int spoon_nr;
};

struct gf100_fifo_base {
	struct nvkm_fifo_base base;
	struct nvkm_gpuobj *pgd;
	struct nvkm_vm *vm;
};

struct gf100_fifo_chan {
	struct nvkm_fifo_chan base;
	enum {
		STOPPED,
		RUNNING,
		KILLED
	} state;
};

/*******************************************************************************
 * FIFO channel objects
 ******************************************************************************/

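/* Rebuild the channel runlist and hand it to the hardware.  Two buffers
 * are alternated between so a new list can be built while the previous
 * one may still be in flight; bit 0x00100000 of 0x00227c appears to
 * indicate the update is still pending, hence the wait below.
 */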
static void
gf100_fifo_runlist_update(struct gf100_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_bar *bar = device->bar;
	struct nvkm_gpuobj *cur;
	int i, p;

	mutex_lock(&nv_subdev(fifo)->mutex);
	cur = fifo->runlist.mem[fifo->runlist.active];
	fifo->runlist.active = !fifo->runlist.active;

	nvkm_kmap(cur);
	for (i = 0, p = 0; i < 128; i++) {
		struct gf100_fifo_chan *chan = (void *)fifo->base.channel[i];
		if (chan && chan->state == RUNNING) {
			nvkm_wo32(cur, p + 0, i);
			nvkm_wo32(cur, p + 4, 0x00000004);
			p += 8;
		}
	}
	bar->flush(bar);
	nvkm_done(cur);

	nvkm_wr32(device, 0x002270, cur->addr >> 12);
	nvkm_wr32(device, 0x002274, 0x01f00000 | (p >> 3));

	if (wait_event_timeout(fifo->runlist.wait,
			       !(nvkm_rd32(device, 0x00227c) & 0x00100000),
			       msecs_to_jiffies(2000)) == 0)
		nvkm_error(subdev, "runlist update timeout\n");
	mutex_unlock(&nv_subdev(fifo)->mutex);
}

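/* Publish an engine's context address into the channel instance block.
 * Each engine has a fixed slot (0x210 for GR, 0x230 for CE0, ...); SW
 * "engines" have no hardware context, so they are skipped.
 */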
static int
gf100_fifo_context_attach(struct nvkm_object *parent,
			  struct nvkm_object *object)
{
	struct nvkm_bar *bar = nvkm_bar(parent);
	struct gf100_fifo_base *base = (void *)parent->parent;
	struct nvkm_gpuobj *engn = &base->base.gpuobj;
	struct nvkm_engctx *ectx = (void *)object;
	u32 addr;
	int ret;

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW    : return 0;
	case NVDEV_ENGINE_GR    : addr = 0x0210; break;
	case NVDEV_ENGINE_CE0   : addr = 0x0230; break;
	case NVDEV_ENGINE_CE1   : addr = 0x0240; break;
	case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
	case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
	case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
	default:
		return -EINVAL;
	}

	if (!ectx->vma.node) {
		ret = nvkm_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
					 NV_MEM_ACCESS_RW, &ectx->vma);
		if (ret)
			return ret;

		nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
	}

	nvkm_kmap(engn);
	nvkm_wo32(engn, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
	nvkm_wo32(engn, addr + 0x04, upper_32_bits(ectx->vma.offset));
	bar->flush(bar);
	nvkm_done(engn);
	return 0;
}

static int
gf100_fifo_context_detach(struct nvkm_object *parent, bool suspend,
			  struct nvkm_object *object)
{
	struct gf100_fifo *fifo = (void *)parent->engine;
	struct gf100_fifo_base *base = (void *)parent->parent;
	struct gf100_fifo_chan *chan = (void *)parent;
	struct nvkm_gpuobj *engn = &base->base.gpuobj;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_bar *bar = device->bar;
	u32 addr;

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW    : return 0;
	case NVDEV_ENGINE_GR    : addr = 0x0210; break;
	case NVDEV_ENGINE_CE0   : addr = 0x0230; break;
	case NVDEV_ENGINE_CE1   : addr = 0x0240; break;
	case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
	case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
	case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
	default:
		return -EINVAL;
	}

	nvkm_wr32(device, 0x002634, chan->base.chid);
	if (nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x002634) == chan->base.chid)
			break;
	) < 0) {
		nvkm_error(subdev, "channel %d [%s] kick timeout\n",
			   chan->base.chid, nvkm_client_name(chan));
		if (suspend)
			return -EBUSY;
	}

	nvkm_kmap(engn);
	nvkm_wo32(engn, addr + 0x00, 0x00000000);
	nvkm_wo32(engn, addr + 0x04, 0x00000000);
	bar->flush(bar);
	nvkm_done(engn);
	return 0;
}

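/* GPFIFO channel constructor: zero the channel's USERD page and fill in
 * RAMFC.  The unnamed values written below are taken as-is from the
 * hardware setup and are largely undocumented.
 */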
static int
gf100_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		     struct nvkm_oclass *oclass, void *data, u32 size,
		     struct nvkm_object **pobject)
{
	union {
		struct fermi_channel_gpfifo_v0 v0;
	} *args = data;
	struct nvkm_bar *bar = nvkm_bar(parent);
	struct gf100_fifo *fifo = (void *)engine;
	struct gf100_fifo_base *base = (void *)parent;
	struct gf100_fifo_chan *chan;
	struct nvkm_gpuobj *ramfc = &base->base.gpuobj;
	u64 usermem, ioffset, ilength;
	int ret, i;

	nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(parent, "create channel gpfifo vers %d "
				   "ioffset %016llx ilength %08x\n",
			   args->v0.version, args->v0.ioffset,
			   args->v0.ilength);
	} else
		return ret;

	ret = nvkm_fifo_channel_create(parent, engine, oclass, 1,
				       fifo->user.bar.offset, 0x1000, 0,
				       (1ULL << NVDEV_ENGINE_SW) |
				       (1ULL << NVDEV_ENGINE_GR) |
				       (1ULL << NVDEV_ENGINE_CE0) |
				       (1ULL << NVDEV_ENGINE_CE1) |
				       (1ULL << NVDEV_ENGINE_MSVLD) |
				       (1ULL << NVDEV_ENGINE_MSPDEC) |
				       (1ULL << NVDEV_ENGINE_MSPPP), &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	args->v0.chid = chan->base.chid;

	nv_parent(chan)->context_attach = gf100_fifo_context_attach;
	nv_parent(chan)->context_detach = gf100_fifo_context_detach;

	usermem = chan->base.chid * 0x1000;
	ioffset = args->v0.ioffset;
	ilength = order_base_2(args->v0.ilength / 8);

	nvkm_kmap(fifo->user.mem);
	for (i = 0; i < 0x1000; i += 4)
		nvkm_wo32(fifo->user.mem, usermem + i, 0x00000000);
	nvkm_done(fifo->user.mem);

	nvkm_kmap(ramfc);
	nvkm_wo32(ramfc, 0x08, lower_32_bits(fifo->user.mem->addr + usermem));
	nvkm_wo32(ramfc, 0x0c, upper_32_bits(fifo->user.mem->addr + usermem));
	nvkm_wo32(ramfc, 0x10, 0x0000face);
	nvkm_wo32(ramfc, 0x30, 0xfffff902);
	nvkm_wo32(ramfc, 0x48, lower_32_bits(ioffset));
	nvkm_wo32(ramfc, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
	nvkm_wo32(ramfc, 0x54, 0x00000002);
	nvkm_wo32(ramfc, 0x84, 0x20400000);
	nvkm_wo32(ramfc, 0x94, 0x30000001);
	nvkm_wo32(ramfc, 0x9c, 0x00000100);
	nvkm_wo32(ramfc, 0xa4, 0x1f1f1f1f);
	nvkm_wo32(ramfc, 0xa8, 0x1f1f1f1f);
	nvkm_wo32(ramfc, 0xac, 0x0000001f);
	nvkm_wo32(ramfc, 0xb8, 0xf8000000);
	nvkm_wo32(ramfc, 0xf8, 0x10003080); /* 0x002310 */
	nvkm_wo32(ramfc, 0xfc, 0x10000010); /* 0x002350 */
	bar->flush(bar);
	nvkm_done(ramfc);
	return 0;
}

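/* Bind the channel's instance block to its PFIFO slot and, on the first
 * transition to RUNNING, enroll the channel in the runlist.
 */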
static int
gf100_fifo_chan_init(struct nvkm_object *object)
{
	struct nvkm_gpuobj *base = nv_gpuobj(object->parent);
	struct gf100_fifo *fifo = (void *)object->engine;
	struct gf100_fifo_chan *chan = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 chid = chan->base.chid;
	int ret;

	ret = nvkm_fifo_channel_init(&chan->base);
	if (ret)
		return ret;

	nvkm_wr32(device, 0x003000 + (chid * 8), 0xc0000000 | base->addr >> 12);

	if (chan->state == STOPPED && (chan->state = RUNNING) == RUNNING) {
		nvkm_wr32(device, 0x003004 + (chid * 8), 0x001f0001);
		gf100_fifo_runlist_update(fifo);
	}

	return 0;
}

static void gf100_fifo_intr_engine(struct gf100_fifo *fifo);

static int
gf100_fifo_chan_fini(struct nvkm_object *object, bool suspend)
{
	struct gf100_fifo *fifo = (void *)object->engine;
	struct gf100_fifo_chan *chan = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 chid = chan->base.chid;

	if (chan->state == RUNNING && (chan->state = STOPPED) == STOPPED) {
		nvkm_mask(device, 0x003004 + (chid * 8), 0x00000001, 0x00000000);
		gf100_fifo_runlist_update(fifo);
	}

	gf100_fifo_intr_engine(fifo);

	nvkm_wr32(device, 0x003000 + (chid * 8), 0x00000000);
	return nvkm_fifo_channel_fini(&chan->base, suspend);
}

static struct nvkm_ofuncs
gf100_fifo_ofuncs = {
	.ctor = gf100_fifo_chan_ctor,
	.dtor = _nvkm_fifo_channel_dtor,
	.init = gf100_fifo_chan_init,
	.fini = gf100_fifo_chan_fini,
	.map  = _nvkm_fifo_channel_map,
	.rd32 = _nvkm_fifo_channel_rd32,
	.wr32 = _nvkm_fifo_channel_wr32,
	.ntfy = _nvkm_fifo_channel_ntfy
};

static struct nvkm_oclass
gf100_fifo_sclass[] = {
	{ FERMI_CHANNEL_GPFIFO, &gf100_fifo_ofuncs },
	{}
};

/*******************************************************************************
 * FIFO context - instmem heap and vm setup
 ******************************************************************************/

static int
gf100_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
			struct nvkm_oclass *oclass, void *data, u32 size,
			struct nvkm_object **pobject)
{
	struct gf100_fifo_base *base;
	int ret;

	ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
				       0x1000, NVOBJ_FLAG_ZERO_ALLOC |
				       NVOBJ_FLAG_HEAP, &base);
	*pobject = nv_object(base);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0,
			      &base->pgd);
	if (ret)
		return ret;

	nvkm_kmap(&base->base.gpuobj);
	nvkm_wo32(&base->base.gpuobj, 0x0200, lower_32_bits(base->pgd->addr));
	nvkm_wo32(&base->base.gpuobj, 0x0204, upper_32_bits(base->pgd->addr));
	nvkm_wo32(&base->base.gpuobj, 0x0208, 0xffffffff);
	nvkm_wo32(&base->base.gpuobj, 0x020c, 0x000000ff);
	nvkm_done(&base->base.gpuobj);

	ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
	if (ret)
		return ret;

	return 0;
}

static void
gf100_fifo_context_dtor(struct nvkm_object *object)
{
	struct gf100_fifo_base *base = (void *)object;
	nvkm_vm_ref(NULL, &base->vm, base->pgd);
	nvkm_gpuobj_ref(NULL, &base->pgd);
	nvkm_fifo_context_destroy(&base->base);
}

static struct nvkm_oclass
gf100_fifo_cclass = {
	.handle = NV_ENGCTX(FIFO, 0xc0),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = gf100_fifo_context_ctor,
		.dtor = gf100_fifo_context_dtor,
		.init = _nvkm_fifo_context_init,
		.fini = _nvkm_fifo_context_fini,
		.rd32 = _nvkm_fifo_context_rd32,
		.wr32 = _nvkm_fifo_context_wr32,
	},
};

/*******************************************************************************
 * PFIFO engine
 ******************************************************************************/

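/* Translate an NVDEV engine index to the scheduler's engine slot; the
 * inverse mapping is provided by gf100_fifo_engine() below.
 */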
static inline int
gf100_fifo_engidx(struct gf100_fifo *fifo, u32 engn)
{
	switch (engn) {
	case NVDEV_ENGINE_GR    : engn = 0; break;
	case NVDEV_ENGINE_MSVLD : engn = 1; break;
	case NVDEV_ENGINE_MSPPP : engn = 2; break;
	case NVDEV_ENGINE_MSPDEC: engn = 3; break;
	case NVDEV_ENGINE_CE0   : engn = 4; break;
	case NVDEV_ENGINE_CE1   : engn = 5; break;
	default:
		return -1;
	}

	return engn;
}

static inline struct nvkm_engine *
gf100_fifo_engine(struct gf100_fifo *fifo, u32 engn)
{
	switch (engn) {
	case 0: engn = NVDEV_ENGINE_GR; break;
	case 1: engn = NVDEV_ENGINE_MSVLD; break;
	case 2: engn = NVDEV_ENGINE_MSPPP; break;
	case 3: engn = NVDEV_ENGINE_MSPDEC; break;
	case 4: engn = NVDEV_ENGINE_CE0; break;
	case 5: engn = NVDEV_ENGINE_CE1; break;
	default:
		return NULL;
	}

	return nvkm_engine(fifo, engn);
}

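/* Deferred fault recovery: gf100_fifo_recover() marks the channel as
 * KILLED and records the faulting engine in fifo->mask; this worker then
 * disables and re-inits the affected engines, and rebuilds the runlist.
 */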
static void
gf100_fifo_recover_work(struct work_struct *work)
{
	struct gf100_fifo *fifo = container_of(work, typeof(*fifo), fault);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_object *engine;
	unsigned long flags;
	u32 engn, engm = 0;
	u64 mask, todo;

	spin_lock_irqsave(&fifo->base.lock, flags);
	mask = fifo->mask;
	fifo->mask = 0ULL;
	spin_unlock_irqrestore(&fifo->base.lock, flags);

	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1ULL << engn))
		engm |= 1 << gf100_fifo_engidx(fifo, engn);
	nvkm_mask(device, 0x002630, engm, engm);

	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1ULL << engn)) {
		if ((engine = (void *)nvkm_engine(fifo, engn))) {
			nv_ofuncs(engine)->fini(engine, false);
			WARN_ON(nv_ofuncs(engine)->init(engine));
		}
	}

	gf100_fifo_runlist_update(fifo);
	nvkm_wr32(device, 0x00262c, engm);
	nvkm_mask(device, 0x002630, engm, 0x00000000);
}

static void
gf100_fifo_recover(struct gf100_fifo *fifo, struct nvkm_engine *engine,
		   struct gf100_fifo_chan *chan)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 chid = chan->base.chid;
	unsigned long flags;

	nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n",
		   engine->subdev.name, chid);

	nvkm_mask(device, 0x003004 + (chid * 0x08), 0x00000001, 0x00000000);
	chan->state = KILLED;

	spin_lock_irqsave(&fifo->base.lock, flags);
	fifo->mask |= 1ULL << nv_engidx(engine);
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	schedule_work(&fifo->fault);
}

static bool
gf100_fifo_swmthd(struct gf100_fifo *fifo, u32 chid, u32 mthd, u32 data)
{
	struct gf100_fifo_chan *chan = NULL;
	struct nvkm_handle *bind;
	unsigned long flags;
	bool handled = false;

	spin_lock_irqsave(&fifo->base.lock, flags);
	if (likely(chid >= fifo->base.min && chid <= fifo->base.max))
		chan = (void *)fifo->base.channel[chid];
	if (unlikely(!chan))
		goto out;

	bind = nvkm_namedb_get_class(nv_namedb(chan), NVIF_IOCTL_NEW_V0_SW_GF100);
	if (likely(bind)) {
		if (!mthd || !nv_call(bind->object, mthd, data))
			handled = true;
		nvkm_namedb_put(bind);
	}

out:
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	return handled;
}

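/* SCHED_ERROR code 0x0a is a context-switch timeout; the per-engine
 * status bits decoded in the ctxsw handler are not documented, hence
 * the unk names.
 */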
static const struct nvkm_enum
gf100_fifo_sched_reason[] = {
	{ 0x0a, "CTXSW_TIMEOUT" },
	{}
};

static void
gf100_fifo_intr_sched_ctxsw(struct gf100_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_engine *engine;
	struct gf100_fifo_chan *chan;
	u32 engn;

	for (engn = 0; engn < 6; engn++) {
		u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04));
		u32 busy = (stat & 0x80000000);
		u32 save = (stat & 0x00100000); /* maybe? */
		u32 unk0 = (stat & 0x00040000);
		u32 unk1 = (stat & 0x00001000);
		u32 chid = (stat & 0x0000007f);
		(void)save;

		if (busy && unk0 && unk1) {
			if (!(chan = (void *)fifo->base.channel[chid]))
				continue;
			if (!(engine = gf100_fifo_engine(fifo, engn)))
				continue;
			gf100_fifo_recover(fifo, engine, chan);
		}
	}
}

static void
gf100_fifo_intr_sched(struct gf100_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x00254c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en;

	en = nvkm_enum_find(gf100_fifo_sched_reason, code);

	nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");

	switch (code) {
	case 0x0a:
		gf100_fifo_intr_sched_ctxsw(fifo);
		break;
	default:
		break;
	}
}

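/* Decode tables for the per-unit MMU fault status registers read in
 * gf100_fifo_intr_fault(); data2 selects the engine or subdev that gets
 * poked during recovery.
 */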
static const struct nvkm_enum
gf100_fifo_fault_engine[] = {
	{ 0x00, "PGRAPH", NULL, NVDEV_ENGINE_GR },
	{ 0x03, "PEEPHOLE", NULL, NVDEV_ENGINE_IFB },
	{ 0x04, "BAR1", NULL, NVDEV_SUBDEV_BAR },
	{ 0x05, "BAR3", NULL, NVDEV_SUBDEV_INSTMEM },
	{ 0x07, "PFIFO", NULL, NVDEV_ENGINE_FIFO },
	{ 0x10, "PMSVLD", NULL, NVDEV_ENGINE_MSVLD },
	{ 0x11, "PMSPPP", NULL, NVDEV_ENGINE_MSPPP },
	{ 0x13, "PCOUNTER" },
	{ 0x14, "PMSPDEC", NULL, NVDEV_ENGINE_MSPDEC },
	{ 0x15, "PCE0", NULL, NVDEV_ENGINE_CE0 },
	{ 0x16, "PCE1", NULL, NVDEV_ENGINE_CE1 },
	{}
};

static const struct nvkm_enum
gf100_fifo_fault_reason[] = {
	{ 0x00, "PT_NOT_PRESENT" },
	{ 0x01, "PT_TOO_SHORT" },
	{ 0x02, "PAGE_NOT_PRESENT" },
	{ 0x03, "VM_LIMIT_EXCEEDED" },
	{ 0x04, "NO_CHANNEL" },
	{ 0x05, "PAGE_SYSTEM_ONLY" },
	{ 0x06, "PAGE_READ_ONLY" },
	{ 0x0a, "COMPRESSED_SYSRAM" },
	{ 0x0c, "INVALID_STORAGE_TYPE" },
	{}
};

static const struct nvkm_enum
gf100_fifo_fault_hubclient[] = {
	{ 0x04, "DISPATCH" },
	{ 0x07, "BAR_READ" },
	{ 0x08, "BAR_WRITE" },
	{ 0x11, "PCOUNTER" },
	{ 0x15, "CCACHE_POST" },
	{}
};

static const struct nvkm_enum
gf100_fifo_fault_gpcclient[] = {
	{}
};

static void
gf100_fifo_intr_fault(struct gf100_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
	u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
	u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
	u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10));
	u32 gpc    = (stat & 0x1f000000) >> 24;
	u32 client = (stat & 0x00001f00) >> 8;
	u32 write  = (stat & 0x00000080);
	u32 hub    = (stat & 0x00000040);
	u32 reason = (stat & 0x0000000f);
	struct nvkm_object *engctx = NULL, *object;
	struct nvkm_engine *engine = NULL;
	const struct nvkm_enum *er, *eu, *ec;
	char gpcid[8] = "";

	er = nvkm_enum_find(gf100_fifo_fault_reason, reason);
	eu = nvkm_enum_find(gf100_fifo_fault_engine, unit);
	if (hub) {
		ec = nvkm_enum_find(gf100_fifo_fault_hubclient, client);
	} else {
		ec = nvkm_enum_find(gf100_fifo_fault_gpcclient, client);
		snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
	}

	if (eu) {
		switch (eu->data2) {
		case NVDEV_SUBDEV_BAR:
			nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
			break;
		case NVDEV_SUBDEV_INSTMEM:
			nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
			break;
		case NVDEV_ENGINE_IFB:
			nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
			break;
		default:
			engine = nvkm_engine(fifo, eu->data2);
			if (engine)
				engctx = nvkm_engctx_get(engine, inst);
			break;
		}
	}

	nvkm_error(subdev,
		   "%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
		   "reason %02x [%s] on channel %d [%010llx %s]\n",
		   write ? "write" : "read", (u64)vahi << 32 | valo,
		   unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "",
		   reason, er ? er->name : "", -1, (u64)inst << 12,
		   nvkm_client_name(engctx));

	object = engctx;
	while (object) {
		switch (nv_mclass(object)) {
		case FERMI_CHANNEL_GPFIFO:
			gf100_fifo_recover(fifo, engine, (void *)object);
			break;
		}
		object = object->parent;
	}

	nvkm_engctx_put(engctx);
}

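/* A PBDMA EMPTY_SUBC report (0x00800000) may actually be a software
 * method; give the SW class a chance to handle it before logging.
 */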
static const struct nvkm_bitfield
gf100_fifo_pbdma_intr[] = {
/*	{ 0x00008000, "" }	seen with null ib push */
	{ 0x00200000, "ILLEGAL_MTHD" },
	{ 0x00800000, "EMPTY_SUBC" },
	{}
};

static void
gf100_fifo_intr_pbdma(struct gf100_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000));
	u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
	u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0x7f;
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00003ffc);
	u32 show = stat;
	char msg[128];

	if (stat & 0x00800000) {
		if (!gf100_fifo_swmthd(fifo, chid, mthd, data))
			show &= ~0x00800000;
	}

	if (show) {
		nvkm_snprintbf(msg, sizeof(msg), gf100_fifo_pbdma_intr, show);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%s] subc %d "
				   "mthd %04x data %08x\n",
			   unit, show, msg, chid,
			   nvkm_client_name_for_fifo_chid(&fifo->base, chid),
			   subc, mthd, data);
	}

	nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
	nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
}

static void
gf100_fifo_intr_runlist(struct gf100_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x002a00);

	if (intr & 0x10000000) {
		wake_up(&fifo->runlist.wait);
		nvkm_wr32(device, 0x002a00, 0x10000000);
		intr &= ~0x10000000;
	}

	if (intr) {
		nvkm_error(subdev, "RUNLIST %08x\n", intr);
		nvkm_wr32(device, 0x002a00, intr);
	}
}

static void
gf100_fifo_intr_engine_unit(struct gf100_fifo *fifo, int engn)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x0025a8 + (engn * 0x04));
	u32 inte = nvkm_rd32(device, 0x002628);
	u32 unkn;

	nvkm_wr32(device, 0x0025a8 + (engn * 0x04), intr);

	for (unkn = 0; unkn < 8; unkn++) {
		u32 ints = (intr >> (unkn * 0x04)) & inte;
		if (ints & 0x1) {
			nvkm_fifo_uevent(&fifo->base);
			ints &= ~1;
		}
		if (ints) {
			nvkm_error(subdev, "ENGINE %d %d %01x\n",
				   engn, unkn, ints);
			nvkm_mask(device, 0x002628, ints, 0);
		}
	}
}

static void
gf100_fifo_intr_engine(struct gf100_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = nvkm_rd32(device, 0x0025a4);
	while (mask) {
		u32 unit = __ffs(mask);
		gf100_fifo_intr_engine_unit(fifo, unit);
		mask &= ~(1 << unit);
	}
}

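/* Top-level PFIFO interrupt handler: each recognized status bit in
 * 0x002100 is handled and acked individually; anything unrecognized is
 * reported and masked off to avoid an interrupt storm.
 */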
static void
gf100_fifo_intr(struct nvkm_subdev *subdev)
{
	struct gf100_fifo *fifo = (void *)subdev;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = nvkm_rd32(device, 0x002140);
	u32 stat = nvkm_rd32(device, 0x002100) & mask;

	if (stat & 0x00000001) {
		u32 intr = nvkm_rd32(device, 0x00252c);
		nvkm_warn(subdev, "INTR 00000001: %08x\n", intr);
		nvkm_wr32(device, 0x002100, 0x00000001);
		stat &= ~0x00000001;
	}

	if (stat & 0x00000100) {
		gf100_fifo_intr_sched(fifo);
		nvkm_wr32(device, 0x002100, 0x00000100);
		stat &= ~0x00000100;
	}

	if (stat & 0x00010000) {
		u32 intr = nvkm_rd32(device, 0x00256c);
		nvkm_warn(subdev, "INTR 00010000: %08x\n", intr);
		nvkm_wr32(device, 0x002100, 0x00010000);
		stat &= ~0x00010000;
	}

	if (stat & 0x01000000) {
		u32 intr = nvkm_rd32(device, 0x00258c);
		nvkm_warn(subdev, "INTR 01000000: %08x\n", intr);
		nvkm_wr32(device, 0x002100, 0x01000000);
		stat &= ~0x01000000;
	}

	if (stat & 0x10000000) {
		u32 mask = nvkm_rd32(device, 0x00259c);
		while (mask) {
			u32 unit = __ffs(mask);
			gf100_fifo_intr_fault(fifo, unit);
			nvkm_wr32(device, 0x00259c, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x10000000;
	}

	if (stat & 0x20000000) {
		u32 mask = nvkm_rd32(device, 0x0025a0);
		while (mask) {
			u32 unit = __ffs(mask);
			gf100_fifo_intr_pbdma(fifo, unit);
			nvkm_wr32(device, 0x0025a0, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x20000000;
	}

	if (stat & 0x40000000) {
		gf100_fifo_intr_runlist(fifo);
		stat &= ~0x40000000;
	}

	if (stat & 0x80000000) {
		gf100_fifo_intr_engine(fifo);
		stat &= ~0x80000000;
	}

	if (stat) {
		nvkm_error(subdev, "INTR %08x\n", stat);
		nvkm_mask(device, 0x002140, stat, 0x00000000);
		nvkm_wr32(device, 0x002100, stat);
	}
}

static void
gf100_fifo_uevent_init(struct nvkm_event *event, int type, int index)
{
	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
}

static void
gf100_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
{
	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
}

static const struct nvkm_event_func
gf100_fifo_uevent_func = {
	.ctor = nvkm_fifo_uevent_ctor,
	.init = gf100_fifo_uevent_init,
	.fini = gf100_fifo_uevent_fini,
};

static int
gf100_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		struct nvkm_oclass *oclass, void *data, u32 size,
		struct nvkm_object **pobject)
{
	struct gf100_fifo *fifo;
	int ret;

	ret = nvkm_fifo_create(parent, engine, oclass, 0, 127, &fifo);
	*pobject = nv_object(fifo);
	if (ret)
		return ret;

	INIT_WORK(&fifo->fault, gf100_fifo_recover_work);

	ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 0x1000, 0x1000, 0,
			      &fifo->runlist.mem[0]);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 0x1000, 0x1000, 0,
			      &fifo->runlist.mem[1]);
	if (ret)
		return ret;

	init_waitqueue_head(&fifo->runlist.wait);

	ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 128 * 0x1000, 0x1000, 0,
			      &fifo->user.mem);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_map(fifo->user.mem, NV_MEM_ACCESS_RW,
			      &fifo->user.bar);
	if (ret)
		return ret;

	ret = nvkm_event_init(&gf100_fifo_uevent_func, 1, 1, &fifo->base.uevent);
	if (ret)
		return ret;

	nv_subdev(fifo)->unit = 0x00000100;
	nv_subdev(fifo)->intr = gf100_fifo_intr;
	nv_engine(fifo)->cclass = &gf100_fifo_cclass;
	nv_engine(fifo)->sclass = gf100_fifo_sclass;
	return 0;
}

static void
gf100_fifo_dtor(struct nvkm_object *object)
{
	struct gf100_fifo *fifo = (void *)object;

	nvkm_gpuobj_unmap(&fifo->user.bar);
	nvkm_gpuobj_ref(NULL, &fifo->user.mem);
	nvkm_gpuobj_ref(NULL, &fifo->runlist.mem[0]);
	nvkm_gpuobj_ref(NULL, &fifo->runlist.mem[1]);

	nvkm_fifo_destroy(&fifo->base);
}

static int
gf100_fifo_init(struct nvkm_object *object)
{
	struct gf100_fifo *fifo = (void *)object;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int ret, i;

	ret = nvkm_fifo_init(&fifo->base);
	if (ret)
		return ret;

	nvkm_wr32(device, 0x000204, 0xffffffff);
	nvkm_wr32(device, 0x002204, 0xffffffff);

	fifo->spoon_nr = hweight32(nvkm_rd32(device, 0x002204));
	nvkm_debug(subdev, "%d PBDMA unit(s)\n", fifo->spoon_nr);

	/* assign engines to PBDMAs */
	if (fifo->spoon_nr >= 3) {
		nvkm_wr32(device, 0x002208, ~(1 << 0)); /* PGRAPH */
		nvkm_wr32(device, 0x00220c, ~(1 << 1)); /* PVP */
		nvkm_wr32(device, 0x002210, ~(1 << 1)); /* PMSPP */
		nvkm_wr32(device, 0x002214, ~(1 << 1)); /* PMSVLD */
		nvkm_wr32(device, 0x002218, ~(1 << 2)); /* PCE0 */
		nvkm_wr32(device, 0x00221c, ~(1 << 1)); /* PCE1 */
	}

	/* PBDMA[n] */
	for (i = 0; i < fifo->spoon_nr; i++) {
		nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
		nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
	}

	nvkm_mask(device, 0x002200, 0x00000001, 0x00000001);
	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);

	nvkm_wr32(device, 0x002100, 0xffffffff);
	nvkm_wr32(device, 0x002140, 0x7fffffff);
	nvkm_wr32(device, 0x002628, 0x00000001); /* ENGINE_INTR_EN */
	return 0;
}

struct nvkm_oclass *
gf100_fifo_oclass = &(struct nvkm_oclass) {
	.handle = NV_ENGINE(FIFO, 0xc0),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = gf100_fifo_ctor,
		.dtor = gf100_fifo_dtor,
		.init = gf100_fifo_init,
		.fini = _nvkm_fifo_fini,
	},
};