From c4584adc37720b65ae44a84c660d47b3ebcf7dfb Mon Sep 17 00:00:00 2001
From: Ben Skeggs
Date: Thu, 20 Aug 2015 14:54:11 +1000
Subject: drm/nouveau/gr: switch to new-style timer macros

Signed-off-by: Ben Skeggs
---
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c | 23 ++++++---
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c    | 61 ++++++++++++++---------
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c    | 12 ++++-
 drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c     |  5 +-
 drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c     | 15 ++++--
 drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c     |  5 +-
 drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c     |  6 ++-
 7 files changed, 87 insertions(+), 40 deletions(-)

(limited to 'drivers/gpu/drm/nouveau/nvkm/engine/gr')

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index 43b393f4cd4d..ccce293191a4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -1312,7 +1312,10 @@ gf100_grctx_generate(struct gf100_gr *gr)
 
 	nvkm_wr32(device, 0x100cb8, (chan->addr + 0x1000) >> 8);
 	nvkm_wr32(device, 0x100cbc, 0x80000001);
-	nv_wait(gr, 0x100c80, 0x00008000, 0x00008000);
+	nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x100c80) & 0x00008000)
+			break;
+	);
 
 	/* setup default state for mmio list construction */
 	info.gr = gr;
@@ -1326,8 +1329,10 @@ gf100_grctx_generate(struct gf100_gr *gr)
 		nvkm_wr32(device, 0x409840, 0x00000030);
 		nvkm_wr32(device, 0x409500, 0x80000000 | chan->addr >> 12);
 		nvkm_wr32(device, 0x409504, 0x00000003);
-		if (!nv_wait(gr, 0x409800, 0x00000010, 0x00000010))
-			nv_error(gr, "load_ctx timeout\n");
+		nvkm_msec(device, 2000,
+			if (nvkm_rd32(device, 0x409800) & 0x00000010)
+				break;
+		);
 
 		nv_wo32(chan, 0x8001c, 1);
 		nv_wo32(chan, 0x80020, 0);
@@ -1338,8 +1343,10 @@ gf100_grctx_generate(struct gf100_gr *gr)
 		nvkm_wr32(device, 0x409840, 0x80000000);
 		nvkm_wr32(device, 0x409500, 0x80000000 | chan->addr >> 12);
 		nvkm_wr32(device, 0x409504, 0x00000001);
-		if (!nv_wait(gr, 0x409800, 0x80000000, 0x80000000))
-			nv_error(gr, "HUB_SET_CHAN timeout\n");
+		nvkm_msec(device, 2000,
+			if (nvkm_rd32(device, 0x409800) & 0x80000000)
+				break;
+		);
 	}
 
 	oclass->main(gr, &info);
@@ -1349,8 +1356,10 @@ gf100_grctx_generate(struct gf100_gr *gr)
 	 */
 	nvkm_mask(device, 0x409b04, 0x80000000, 0x00000000);
 	nvkm_wr32(device, 0x409000, 0x00000100);
-	if (!nv_wait(gr, 0x409b00, 0x80000000, 0x00000000)) {
-		nv_error(gr, "grctx template channel unload timeout\n");
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x409b00) & 0x80000000))
+			break;
+	) < 0) {
 		ret = -EBUSY;
 		goto done;
 	}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index b692e8e2b982..4bfec3aa0dee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -741,7 +741,10 @@ gf100_gr_icmd(struct gf100_gr *gr, const struct gf100_gr_pack *p)
 			 */
 			if ((addr & 0xffff) == 0xe100)
 				gf100_gr_wait_idle(gr);
-			nv_wait(gr, 0x400700, 0x00000004, 0x00000000);
+			nvkm_msec(device, 2000,
+				if (!(nvkm_rd32(device, 0x400700) & 0x00000004))
+					break;
+			);
 			addr += init->pitch;
 		}
 	}
@@ -1312,8 +1315,11 @@ gf100_gr_init_ctxctl(struct gf100_gr *gr)
 		nvkm_wr32(device, 0x40910c, 0x00000000);
 		nvkm_wr32(device, 0x41a100, 0x00000002);
 		nvkm_wr32(device, 0x409100, 0x00000002);
-		if (!nv_wait(gr, 0x409800, 0x00000001, 0x00000001))
-			nv_warn(gr, "0x409800 wait failed\n");
+		if (nvkm_msec(device, 2000,
+			if (nvkm_rd32(device, 0x409800) & 0x00000001)
+				break;
+		) < 0)
+			return -EBUSY;
 
 		nvkm_wr32(device, 0x409840, 0xffffffff);
 		nvkm_wr32(device, 0x409500, 0x7fffffff);
@@ -1322,54 +1328,59 @@ gf100_gr_init_ctxctl(struct gf100_gr *gr)
 		nvkm_wr32(device, 0x409840, 0xffffffff);
 		nvkm_wr32(device, 0x409500, 0x00000000);
 		nvkm_wr32(device, 0x409504, 0x00000010);
-		if (!nv_wait_ne(gr, 0x409800, 0xffffffff, 0x00000000)) {
-			nv_error(gr, "fuc09 req 0x10 timeout\n");
+		if (nvkm_msec(device, 2000,
+			if ((gr->size = nvkm_rd32(device, 0x409800)))
+				break;
+		) < 0)
 			return -EBUSY;
-		}
-		gr->size = nvkm_rd32(device, 0x409800);
 
 		nvkm_wr32(device, 0x409840, 0xffffffff);
 		nvkm_wr32(device, 0x409500, 0x00000000);
 		nvkm_wr32(device, 0x409504, 0x00000016);
-		if (!nv_wait_ne(gr, 0x409800, 0xffffffff, 0x00000000)) {
-			nv_error(gr, "fuc09 req 0x16 timeout\n");
+		if (nvkm_msec(device, 2000,
+			if (nvkm_rd32(device, 0x409800))
+				break;
+		) < 0)
 			return -EBUSY;
-		}
 
 		nvkm_wr32(device, 0x409840, 0xffffffff);
 		nvkm_wr32(device, 0x409500, 0x00000000);
 		nvkm_wr32(device, 0x409504, 0x00000025);
-		if (!nv_wait_ne(gr, 0x409800, 0xffffffff, 0x00000000)) {
-			nv_error(gr, "fuc09 req 0x25 timeout\n");
+		if (nvkm_msec(device, 2000,
+			if (nvkm_rd32(device, 0x409800))
+				break;
+		) < 0)
 			return -EBUSY;
-		}
 
 		if (nv_device(gr)->chipset >= 0xe0) {
 			nvkm_wr32(device, 0x409800, 0x00000000);
 			nvkm_wr32(device, 0x409500, 0x00000001);
 			nvkm_wr32(device, 0x409504, 0x00000030);
-			if (!nv_wait_ne(gr, 0x409800, 0xffffffff, 0x00000000)) {
-				nv_error(gr, "fuc09 req 0x30 timeout\n");
+			if (nvkm_msec(device, 2000,
+				if (nvkm_rd32(device, 0x409800))
+					break;
+			) < 0)
 				return -EBUSY;
-			}
 
 			nvkm_wr32(device, 0x409810, 0xb00095c8);
 			nvkm_wr32(device, 0x409800, 0x00000000);
 			nvkm_wr32(device, 0x409500, 0x00000001);
 			nvkm_wr32(device, 0x409504, 0x00000031);
-			if (!nv_wait_ne(gr, 0x409800, 0xffffffff, 0x00000000)) {
-				nv_error(gr, "fuc09 req 0x31 timeout\n");
+			if (nvkm_msec(device, 2000,
+				if (nvkm_rd32(device, 0x409800))
+					break;
+			) < 0)
 				return -EBUSY;
-			}
 
 			nvkm_wr32(device, 0x409810, 0x00080420);
 			nvkm_wr32(device, 0x409800, 0x00000000);
 			nvkm_wr32(device, 0x409500, 0x00000001);
 			nvkm_wr32(device, 0x409504, 0x00000032);
-			if (!nv_wait_ne(gr, 0x409800, 0xffffffff, 0x00000000)) {
-				nv_error(gr, "fuc09 req 0x32 timeout\n");
+			if (nvkm_msec(device, 2000,
+				if (nvkm_rd32(device, 0x409800))
+					break;
+			) < 0)
 				return -EBUSY;
-			}
 
 			nvkm_wr32(device, 0x409614, 0x00000070);
 			nvkm_wr32(device, 0x409614, 0x00000770);
@@ -1425,8 +1436,10 @@ gf100_gr_init_ctxctl(struct gf100_gr *gr)
 	/* start HUB ucode running, it'll init the GPCs */
 	nvkm_wr32(device, 0x40910c, 0x00000000);
 	nvkm_wr32(device, 0x409100, 0x00000002);
-	if (!nv_wait(gr, 0x409800, 0x80000000, 0x80000000)) {
-		nv_error(gr, "HUB_INIT timed out\n");
+	if (nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x409800) & 0x80000000)
+			break;
+	) < 0) {
 		gf100_gr_ctxctl_debug(gr);
 		return -EBUSY;
 	}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
index 12b34c7a1477..ed944aa6b81b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
@@ -220,12 +220,20 @@ gk20a_gr_dtor(struct nvkm_object *object)
 static int
 gk20a_gr_wait_mem_scrubbing(struct gf100_gr *gr)
 {
-	if (!nv_wait(gr, 0x40910c, 0x6, 0x0)) {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x40910c) & 0x00000006))
+			break;
+	) < 0) {
 		nv_error(gr, "FECS mem scrubbing timeout\n");
 		return -ETIMEDOUT;
 	}
 
-	if (!nv_wait(gr, 0x41a10c, 0x6, 0x0)) {
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x41a10c) & 0x00000006))
+			break;
+	) < 0) {
 		nv_error(gr, "GPCCS mem scrubbing timeout\n");
 		return -ETIMEDOUT;
 	}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c
index d1792ef62712..33f3b8219213 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c
@@ -1206,7 +1206,10 @@ nv04_gr_idle(void *obj)
 	if (nv_device(obj)->card_type == NV_40)
 		mask &= ~NV40_PGRAPH_STATUS_SYNC_STALL;
 
-	if (!nv_wait(gr, NV04_PGRAPH_STATUS, mask, 0)) {
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, NV04_PGRAPH_STATUS) & mask))
+			break;
+	) < 0) {
 		nv_error(gr, "idle timed out with status 0x%08x\n",
 			 nvkm_rd32(device, NV04_PGRAPH_STATUS));
 		return false;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
index 8e264f79c0df..21aff9b5ed40 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
@@ -127,7 +127,10 @@ nv20_gr_context_fini(struct nvkm_object *object, bool suspend)
 	if (chan->chid == chid) {
 		nvkm_wr32(device, 0x400784, nv_gpuobj(chan)->addr >> 4);
 		nvkm_wr32(device, 0x400788, 0x00000002);
-		nv_wait(gr, 0x400700, 0xffffffff, 0x00000000);
+		nvkm_msec(device, 2000,
+			if (!nvkm_rd32(device, 0x400700))
+				break;
+		);
 		nvkm_wr32(device, 0x400144, 0x10000000);
 		nvkm_mask(device, 0x400148, 0xff000000, 0x1f000000);
 	}
@@ -289,12 +292,18 @@ nv20_gr_init(struct nvkm_object *object)
 		nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x003d0000);
 		for (i = 0; i < 15; i++)
 			nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, 0x00000000);
-		nv_wait(gr, 0x400700, 0xffffffff, 0x00000000);
+		nvkm_msec(device, 2000,
+			if (!nvkm_rd32(device, 0x400700))
+				break;
+		);
 	} else {
 		nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x02c80000);
 		for (i = 0; i < 32; i++)
 			nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, 0x00000000);
-		nv_wait(gr, 0x400700, 0xffffffff, 0x00000000);
+		nvkm_msec(device, 2000,
+			if (!nvkm_rd32(device, 0x400700))
+				break;
+		);
 	}
 
 	nvkm_wr32(device, NV03_PGRAPH_INTR , 0xFFFFFFFF);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
index edcaa65b1e09..7455049ff178 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
@@ -167,7 +167,10 @@ nv40_gr_context_fini(struct nvkm_object *object, bool suspend)
 			nvkm_wr32(device, 0x400784, inst);
 			nvkm_mask(device, 0x400310, 0x00000020, 0x00000020);
 			nvkm_mask(device, 0x400304, 0x00000001, 0x00000001);
-			if (!nv_wait(gr, 0x400300, 0x00000001, 0x00000000)) {
+			if (nvkm_msec(device, 2000,
+				if (!(nvkm_rd32(device, 0x400300) & 0x00000001))
+					break;
+			) < 0) {
 				u32 insn = nvkm_rd32(device, 0x400308);
 				nv_warn(gr, "ctxprog timeout 0x%08x\n", insn);
 				ret = -EBUSY;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c
index ade34d8a4ea0..3b482bcc22d0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c
@@ -285,8 +285,10 @@ g84_gr_tlb_flush(struct nvkm_engine *engine)
 
 	nvkm_wr32(device, 0x100c80, 0x00000001);
-	if (!nv_wait(gr, 0x100c80, 0x00000001, 0x00000000))
-		nv_error(gr, "vm flush timeout\n");
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x100c80) & 0x00000001))
+			break;
+	);
 	nvkm_mask(device, 0x400500, 0x00000001, 0x00000001);
 	spin_unlock_irqrestore(&gr->lock, flags);
 	return timeout ? -EBUSY : 0;
-- 
cgit v1.2.3
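
For reference, every hunk above follows the same conversion: the old nv_wait()/nv_wait_ne() poll is replaced by the new-style nvkm_msec() macro, which keeps re-evaluating its body until the break fires or the millisecond budget runs out, and evaluates to a negative value on timeout. Below is a minimal sketch of the new idiom; it is not part of the patch, the helper name is hypothetical, the include paths assume nouveau's internal nvkm header layout, and the register offset, bit mask and 2ms budget are simply borrowed from the gf100 hunks.

/* Illustrative sketch only -- mirrors the conversion pattern used in the hunks above. */
#include <core/device.h>	/* nvkm_device, nvkm_rd32 (assumed nvkm include layout) */
#include <subdev/timer.h>	/* nvkm_msec */

static int
example_wait_ctxctl_ack(struct nvkm_device *device)
{
	/* Old style: if (!nv_wait(gr, 0x409800, 0x00000001, 0x00000001)) ... */

	/* New style: poll until bit 0 of 0x409800 is set, give up after 2ms. */
	if (nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x409800) & 0x00000001)
			break;
	) < 0)
		return -EBUSY;

	return 0;
}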