diff options
author | Ben Skeggs <bskeggs@redhat.com> | 2015-08-20 14:54:14 +1000 |
---|---|---|
committer | Ben Skeggs <bskeggs@redhat.com> | 2015-08-28 12:40:28 +1000 |
commit | 5444e770e3991ddb5a9583d622fc18bbf414b551 (patch) | |
tree | 819da72512a32e8668a95f846c94d2a417cb4c19 /drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c | |
parent | 3f532ef1e23395d7abb0eed316dd31123f87f448 (diff) |
drm/nouveau/fifo: switch to gpuobj accessor macros
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c')
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c | 67 |
1 file changed, 41 insertions, 26 deletions
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c index ce12e4e12b22..9a1ecef24813 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c @@ -106,15 +106,17 @@ gk104_fifo_runlist_update(struct gk104_fifo *fifo, u32 engine) cur = engn->runlist[engn->cur_runlist]; engn->cur_runlist = !engn->cur_runlist; + nvkm_kmap(cur); for (i = 0, p = 0; i < fifo->base.max; i++) { struct gk104_fifo_chan *chan = (void *)fifo->base.channel[i]; if (chan && chan->state == RUNNING && chan->engine == engine) { - nv_wo32(cur, p + 0, i); - nv_wo32(cur, p + 4, 0x00000000); + nvkm_wo32(cur, p + 0, i); + nvkm_wo32(cur, p + 4, 0x00000000); p += 8; } } bar->flush(bar); + nvkm_done(cur); nvkm_wr32(device, 0x002270, cur->addr >> 12); nvkm_wr32(device, 0x002274, (engine << 20) | (p >> 3)); @@ -132,6 +134,7 @@ gk104_fifo_context_attach(struct nvkm_object *parent, { struct nvkm_bar *bar = nvkm_bar(parent); struct gk104_fifo_base *base = (void *)parent->parent; + struct nvkm_gpuobj *engn = &base->base.gpuobj; struct nvkm_engctx *ectx = (void *)object; u32 addr; int ret; @@ -161,9 +164,11 @@ gk104_fifo_context_attach(struct nvkm_object *parent, nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12; } - nv_wo32(base, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4); - nv_wo32(base, addr + 0x04, upper_32_bits(ectx->vma.offset)); + nvkm_kmap(engn); + nvkm_wo32(engn, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4); + nvkm_wo32(engn, addr + 0x04, upper_32_bits(ectx->vma.offset)); bar->flush(bar); + nvkm_done(engn); return 0; } @@ -195,6 +200,7 @@ gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend, struct nvkm_bar *bar = nvkm_bar(parent); struct gk104_fifo_base *base = (void *)parent->parent; struct gk104_fifo_chan *chan = (void *)parent; + struct nvkm_gpuobj *engn = &base->base.gpuobj; u32 addr; int ret; @@ -216,9 +222,11 @@ gk104_fifo_context_detach(struct 
nvkm_object *parent, bool suspend, return ret; if (addr) { - nv_wo32(base, addr + 0x00, 0x00000000); - nv_wo32(base, addr + 0x04, 0x00000000); + nvkm_kmap(engn); + nvkm_wo32(engn, addr + 0x00, 0x00000000); + nvkm_wo32(engn, addr + 0x04, 0x00000000); bar->flush(bar); + nvkm_done(engn); } return 0; @@ -237,6 +245,7 @@ gk104_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine, struct gk104_fifo_base *base = (void *)parent; struct gk104_fifo_chan *chan; struct nvkm_subdev *subdev = &fifo->base.engine.subdev; + struct nvkm_gpuobj *ramfc = &base->base.gpuobj; u64 usermem, ioffset, ilength; int ret, i; @@ -282,24 +291,28 @@ gk104_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine, ioffset = args->v0.ioffset; ilength = order_base_2(args->v0.ilength / 8); + nvkm_kmap(fifo->user.mem); for (i = 0; i < 0x200; i += 4) - nv_wo32(fifo->user.mem, usermem + i, 0x00000000); - - nv_wo32(base, 0x08, lower_32_bits(fifo->user.mem->addr + usermem)); - nv_wo32(base, 0x0c, upper_32_bits(fifo->user.mem->addr + usermem)); - nv_wo32(base, 0x10, 0x0000face); - nv_wo32(base, 0x30, 0xfffff902); - nv_wo32(base, 0x48, lower_32_bits(ioffset)); - nv_wo32(base, 0x4c, upper_32_bits(ioffset) | (ilength << 16)); - nv_wo32(base, 0x84, 0x20400000); - nv_wo32(base, 0x94, 0x30000001); - nv_wo32(base, 0x9c, 0x00000100); - nv_wo32(base, 0xac, 0x0000001f); - nv_wo32(base, 0xe8, chan->base.chid); - nv_wo32(base, 0xb8, 0xf8000000); - nv_wo32(base, 0xf8, 0x10003080); /* 0x002310 */ - nv_wo32(base, 0xfc, 0x10000010); /* 0x002350 */ + nvkm_wo32(fifo->user.mem, usermem + i, 0x00000000); + nvkm_done(fifo->user.mem); + + nvkm_kmap(ramfc); + nvkm_wo32(ramfc, 0x08, lower_32_bits(fifo->user.mem->addr + usermem)); + nvkm_wo32(ramfc, 0x0c, upper_32_bits(fifo->user.mem->addr + usermem)); + nvkm_wo32(ramfc, 0x10, 0x0000face); + nvkm_wo32(ramfc, 0x30, 0xfffff902); + nvkm_wo32(ramfc, 0x48, lower_32_bits(ioffset)); + nvkm_wo32(ramfc, 0x4c, upper_32_bits(ioffset) | (ilength << 16)); + 
nvkm_wo32(ramfc, 0x84, 0x20400000); + nvkm_wo32(ramfc, 0x94, 0x30000001); + nvkm_wo32(ramfc, 0x9c, 0x00000100); + nvkm_wo32(ramfc, 0xac, 0x0000001f); + nvkm_wo32(ramfc, 0xe8, chan->base.chid); + nvkm_wo32(ramfc, 0xb8, 0xf8000000); + nvkm_wo32(ramfc, 0xf8, 0x10003080); /* 0x002310 */ + nvkm_wo32(ramfc, 0xfc, 0x10000010); /* 0x002350 */ bar->flush(bar); + nvkm_done(ramfc); return 0; } @@ -387,10 +400,12 @@ gk104_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine, if (ret) return ret; - nv_wo32(base, 0x0200, lower_32_bits(base->pgd->addr)); - nv_wo32(base, 0x0204, upper_32_bits(base->pgd->addr)); - nv_wo32(base, 0x0208, 0xffffffff); - nv_wo32(base, 0x020c, 0x000000ff); + nvkm_kmap(&base->base.gpuobj); + nvkm_wo32(&base->base.gpuobj, 0x0200, lower_32_bits(base->pgd->addr)); + nvkm_wo32(&base->base.gpuobj, 0x0204, upper_32_bits(base->pgd->addr)); + nvkm_wo32(&base->base.gpuobj, 0x0208, 0xffffffff); + nvkm_wo32(&base->base.gpuobj, 0x020c, 0x000000ff); + nvkm_done(&base->base.gpuobj); ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd); if (ret) |