diff options
author | Vince Hsu <vince.h@nvidia.com> | 2016-06-28 15:04:51 +0800 |
---|---|---|
committer | Alexander Potapenko <glider@google.com> | 2016-09-22 10:54:26 +0200 |
commit | 7d33cd733aa9c0b079eedfc5edf0f7698fa05201 (patch) | |
tree | d983c94c3520640ed5cdf6b726d2d5e0e5c99ac2 | |
parent | 34097ba7521458d52599cefc79648aa12a520009 (diff) | |
download | tegra-7d33cd733aa9c0b079eedfc5edf0f7698fa05201.tar.gz |
CHROMIUM: drm/nouveau/bo: fix warning while enabling lock debugging
We hit the warning below when lock debugging is enabled: nouveau_fence_sync()
holds the RCU read lock and then calls into mutex_lock(), which may sleep —
sleeping is not allowed inside an RCU read-side critical section. To get rid
of this warning, we avoid taking the vma_list lock when it is not necessary.
BUG: sleeping function called from invalid context at kernel/locking/mutex.c:97
in_atomic(): 0, irqs_disabled(): 0, pid: 7064, name: nouveau_pushbuf
Preemption disabled at:[<ffffffc000a962a8>] printk+0x6c/0x78
CPU: 3 PID: 7064 Comm: nouveau_pushbuf Tainted: G U 3.18.0-00019-gf7ed90de5956 #421
Hardware name: Google Tegra210 Smaug Rev 1,3+ (DT)
Call trace:
[<ffffffc0002073f8>] dump_backtrace+0x0/0x10c
[<ffffffc000207514>] show_stack+0x10/0x1c
[<ffffffc000a96f9c>] dump_stack+0x74/0xb8
[<ffffffc0002449ac>] __might_sleep+0x158/0x168
[<ffffffc000a9cc18>] mutex_lock+0x20/0x48
[<ffffffc0005f1f1c>] nouveau_bo_vma_find+0x24/0x80
[<ffffffc000603c6c>] nv50_dma_push_bo+0x3c/0xa0
[<ffffffc000606afc>] nvc0_fence_sync32+0xec/0x158
[<ffffffc000605f88>] nv84_fence_sync+0x40/0x4c
[<ffffffc000604f64>] nouveau_fence_sync+0xc8/0xfc
[<ffffffc0005f4644>] nouveau_gem_pushbuf_queue_kthread_fn+0x2b0/0x554
[<ffffffc000237790>] kthread+0xdc/0xe8
BUG=chrome-os-partner:54432,chromium:635609
TEST=Boot to UI
Change-Id: I65b693d0f115808486d744aa06bf7df1358cf737
Signed-off-by: Vince Hsu <vince.h@nvidia.com>
Reviewed-on: https://chromium-review.googlesource.com/356610
Commit-Ready: Alexander Potapenko <glider@chromium.org>
Reviewed-by: Tomasz Figa <tfiga@chromium.org>
Reviewed-by: Alexander Potapenko <glider@chromium.org>
-rw-r--r-- | drivers/gpu/drm/nouveau/dispnv04/crtc.c | 2 | ||||
-rw-r--r-- | drivers/gpu/drm/nouveau/nouveau_bo.c | 57 | ||||
-rw-r--r-- | drivers/gpu/drm/nouveau/nouveau_bo.h | 5 | ||||
-rw-r--r-- | drivers/gpu/drm/nouveau/nouveau_chan.c | 2 | ||||
-rw-r--r-- | drivers/gpu/drm/nouveau/nouveau_gem.c | 23 | ||||
-rw-r--r-- | drivers/gpu/drm/nouveau/nouveau_prime.c | 2 | ||||
-rw-r--r-- | drivers/gpu/drm/nouveau/nv17_fence.c | 2 | ||||
-rw-r--r-- | drivers/gpu/drm/nouveau/nv50_display.c | 4 | ||||
-rw-r--r-- | drivers/gpu/drm/nouveau/nv50_fence.c | 2 | ||||
-rw-r--r-- | drivers/gpu/drm/nouveau/nv84_fence.c | 4 |
10 files changed, 66 insertions, 37 deletions
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c index 15b123e9aaa4..14df584c8350 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c +++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c @@ -1128,7 +1128,7 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num) drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256); ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM, - 0, 0x0000, NULL, NULL, &nv_crtc->cursor.nvbo); + 0, 0x0000, NULL, NULL, &nv_crtc->cursor.nvbo, true); if (!ret) { ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM, false); if (!ret) { diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 0585a86e484c..3758635204fd 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -154,7 +154,7 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo) * are still in the list at this point are really leaked and can be * deleted safely. */ - mutex_lock(&nvbo->vma_list_lock); + nouveau_bo_vma_list_lock(nvbo); list_for_each_entry(vma, &nvbo->vma_list, head) { DRM_INFO("Cleaning up leaked mapping offset 0x%llx\n", vma->offset); @@ -163,7 +163,7 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo) nvkm_vm_put(vma); kfree(vma); } - mutex_unlock(&nvbo->vma_list_lock); + nouveau_bo_vma_list_unlock(nvbo); nv10_bo_put_tile_region(dev, nvbo->tile, NULL); kfree(nvbo); @@ -207,7 +207,7 @@ int nouveau_bo_new(struct drm_device *dev, int size, int align, uint32_t flags, uint32_t tile_mode, uint32_t tile_flags, struct sg_table *sg, struct reservation_object *robj, - struct nouveau_bo **pnvbo) + struct nouveau_bo **pnvbo, bool unchanged_vma_list) { struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_bo *nvbo; @@ -236,6 +236,7 @@ nouveau_bo_new(struct drm_device *dev, int size, int align, INIT_LIST_HEAD(&nvbo->entry); INIT_LIST_HEAD(&nvbo->vma_list); mutex_init(&nvbo->vma_list_lock); + nvbo->vma_immutable = unchanged_vma_list; nvbo->tile_mode = 
tile_mode; nvbo->tile_flags = tile_flags; nvbo->gpu_cacheable = !(flags & TTM_PL_FLAG_UNCACHED); @@ -601,6 +602,20 @@ nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible, return 0; } +void +nouveau_bo_vma_list_lock(struct nouveau_bo *nvbo) +{ + if (!nvbo->vma_immutable) + mutex_lock(&nvbo->vma_list_lock); +} + +void +nouveau_bo_vma_list_unlock(struct nouveau_bo *nvbo) +{ + if (!nvbo->vma_immutable) + mutex_unlock(&nvbo->vma_list_lock); +} + static inline void * _nouveau_bo_mem_index(struct nouveau_bo *nvbo, unsigned index, void *mem, u8 sz) { @@ -1338,7 +1353,7 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem) if (bo->destroy != nouveau_bo_del_ttm) return; - mutex_lock(&nvbo->vma_list_lock); + nouveau_bo_vma_list_lock(nvbo); list_for_each_entry(vma, &nvbo->vma_list, head) { if (new_mem && new_mem->mem_type != TTM_PL_SYSTEM && @@ -1350,7 +1365,7 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem) } } - mutex_unlock(&nvbo->vma_list_lock); + nouveau_bo_vma_list_unlock(nvbo); } static int @@ -1723,17 +1738,17 @@ nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nvkm_vm *vm) { struct nvkm_vma *vma; - mutex_lock(&nvbo->vma_list_lock); + nouveau_bo_vma_list_lock(nvbo); list_for_each_entry(vma, &nvbo->vma_list, head) { if (vma->implicit && (vma->vm == vm)) { - mutex_unlock(&nvbo->vma_list_lock); + nouveau_bo_vma_list_unlock(nvbo); return vma; } } - mutex_unlock(&nvbo->vma_list_lock); + nouveau_bo_vma_list_unlock(nvbo); return NULL; } @@ -1744,7 +1759,7 @@ nouveau_bo_subvma_find(struct nouveau_bo *nvbo, struct nvkm_vm *vm, u64 offset, { struct nvkm_vma *vma; - mutex_lock(&nvbo->vma_list_lock); + nouveau_bo_vma_list_lock(nvbo); /* * Look for an existing subvma that we can reuse. 
The delta and length @@ -1760,11 +1775,11 @@ nouveau_bo_subvma_find(struct nouveau_bo *nvbo, struct nvkm_vm *vm, u64 offset, (vma->length == length) && (!offset || (vma->offset == offset)) && !vma->unmap_pending) { - mutex_unlock(&nvbo->vma_list_lock); + nouveau_bo_vma_list_unlock(nvbo); return vma; } - mutex_unlock(&nvbo->vma_list_lock); + nouveau_bo_vma_list_unlock(nvbo); return NULL; } @@ -1775,18 +1790,18 @@ nouveau_bo_subvma_find_offset(struct nouveau_bo *nvbo, struct nvkm_vm *vm, { struct nvkm_vma *vma; - mutex_lock(&nvbo->vma_list_lock); + nouveau_bo_vma_list_lock(nvbo); list_for_each_entry(vma, &nvbo->vma_list, head) if (!vma->implicit && (vma->vm == vm) && (vma->offset == offset) && !vma->unmap_pending) { - mutex_unlock(&nvbo->vma_list_lock); + nouveau_bo_vma_list_unlock(nvbo); return vma; } - mutex_unlock(&nvbo->vma_list_lock); + nouveau_bo_vma_list_unlock(nvbo); return NULL; } @@ -1876,9 +1891,9 @@ nouveau_bo_vma_add_offset(struct nouveau_bo *nvbo, struct nvkm_vm *vm, nvkm_vm_map(vma, nvbo->bo.mem.mm_node); } - mutex_lock(&nvbo->vma_list_lock); + nouveau_bo_vma_list_lock(nvbo); list_add_tail(&vma->head, &nvbo->vma_list); - mutex_unlock(&nvbo->vma_list_lock); + nouveau_bo_vma_list_unlock(nvbo); vma->refcount = 1; vma->implicit = true; return 0; @@ -1898,9 +1913,9 @@ nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nvkm_vma *vma) if (vma->mapped && nvbo->bo.mem.mem_type != TTM_PL_SYSTEM) nvkm_vm_unmap(vma); nvkm_vm_put(vma); - mutex_lock(&nvbo->vma_list_lock); + nouveau_bo_vma_list_lock(nvbo); list_del(&vma->head); - mutex_unlock(&nvbo->vma_list_lock); + nouveau_bo_vma_list_unlock(nvbo); } } @@ -1927,9 +1942,10 @@ nouveau_bo_subvma_add(struct nouveau_bo *nvbo, struct nvkm_vm *vm, nvkm_vm_map(vma, nvbo->bo.mem.mm_node); } - mutex_lock(&nvbo->vma_list_lock); + nouveau_bo_vma_list_lock(nvbo); + WARN_ON(nvbo->vma_immutable); list_add_tail(&vma->head, &nvbo->vma_list); - mutex_unlock(&nvbo->vma_list_lock); + nouveau_bo_vma_list_unlock(nvbo); vma->refcount = 
1; return 0; } @@ -1937,5 +1953,6 @@ nouveau_bo_subvma_add(struct nouveau_bo *nvbo, struct nvkm_vm *vm, void nouveau_bo_subvma_del(struct nouveau_bo *nvbo, struct nvkm_vma *vma) { + WARN_ON(nvbo->vma_immutable); nouveau_bo_vma_del(nvbo, vma); } diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h index 8dc5096bc9b7..307c5a848e89 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.h +++ b/drivers/gpu/drm/nouveau/nouveau_bo.h @@ -26,6 +26,7 @@ struct nouveau_bo { struct list_head vma_list; struct mutex vma_list_lock; + bool vma_immutable; unsigned page_shift; u32 tile_mode; @@ -74,7 +75,7 @@ void nouveau_bo_move_init(struct nouveau_drm *); int nouveau_bo_new(struct drm_device *, int size, int align, u32 flags, u32 tile_mode, u32 tile_flags, struct sg_table *sg, struct reservation_object *robj, - struct nouveau_bo **); + struct nouveau_bo **, bool unchanged_vma_list); int nouveau_bo_pin(struct nouveau_bo *, u32 flags, bool contig); int nouveau_bo_unpin(struct nouveau_bo *); int nouveau_bo_map(struct nouveau_bo *); @@ -114,6 +115,8 @@ int nouveau_bo_subvma_add(struct nouveau_bo *, struct nvkm_vm *, void nouveau_bo_subvma_del(struct nouveau_bo *, struct nvkm_vma *); void nouveau_defer_vm_map(struct nvkm_vma *vma, struct nouveau_bo *nvbo); void nouveau_cancel_defer_vm_map(struct nvkm_vma *vma, struct nouveau_bo *nvbo); +void nouveau_bo_vma_list_lock(struct nouveau_bo *); +void nouveau_bo_vma_list_unlock(struct nouveau_bo *); /* TODO: submit equivalent to TTM generic API upstream? 
*/ static inline void __iomem * diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c index d93438f69000..bdd49985a563 100644 --- a/drivers/gpu/drm/nouveau/nouveau_chan.c +++ b/drivers/gpu/drm/nouveau/nouveau_chan.c @@ -201,7 +201,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device, target = TTM_PL_FLAG_VRAM; ret = nouveau_bo_new(drm->dev, size, 0, target, 0, 0, NULL, NULL, - &chan->push.buffer); + &chan->push.buffer, true); if (ret == 0) { ret = nouveau_bo_pin(chan->push.buffer, target, false); if (ret == 0) diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 519b901b7d86..c3912823e960 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -264,9 +264,10 @@ static void gem_unmap_work(struct work_struct *__work) fobj = reservation_object_get_list(resv); - mutex_lock(&nvbo->vma_list_lock); + nouveau_bo_vma_list_lock(nvbo); + WARN_ON(nvbo->vma_immutable); list_del(&vma->head); - mutex_unlock(&nvbo->vma_list_lock); + nouveau_bo_vma_list_unlock(nvbo); if (fobj && fobj->shared_count > 1) ttm_bo_wait(&nvbo->bo, true, false, false); @@ -363,7 +364,7 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv) } } - mutex_lock(&nvbo->vma_list_lock); + nouveau_bo_vma_list_lock(nvbo); list_for_each_entry(vma, &nvbo->vma_list, head) if (!vma->implicit && (vma->vm == cli->vm) && @@ -371,7 +372,7 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv) nouveau_gem_object_unmap(nvbo, vma); drm_gem_object_unreference(&nvbo->gem); } - mutex_unlock(&nvbo->vma_list_lock); + nouveau_bo_vma_list_unlock(nvbo); ttm_bo_unreserve(&nvbo->bo); } @@ -458,7 +459,7 @@ nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain, flags |= TTM_PL_FLAG_UNCACHED; ret = nouveau_bo_new(dev, size, align, flags, tile_mode, - tile_flags, NULL, NULL, pnvbo); + tile_flags, NULL, NULL, 
pnvbo, true); if (ret) return ret; nvbo = *pnvbo; @@ -526,7 +527,11 @@ static int nouveau_gem_remap(struct nouveau_drm *drm, struct nvkm_vm *vm, /* Unmap the old vma. */ nouveau_cancel_defer_vm_map(vma, nvbo); - nouveau_bo_vma_del(nvbo, vma); + /* + * Calling nouveau_bo_subvma_del instead of nouveau_bo_vma_del to + * reflect the fact that we're changing the bo's vma_list. + */ + nouveau_bo_subvma_del(nvbo, vma); /* * If this offset falls within an address space allocation, then honor @@ -1920,7 +1925,11 @@ success: vma = nouveau_bo_vma_find(nvbo, cli->vm); if (vma) { nouveau_cancel_defer_vm_map(vma, nvbo); - nouveau_bo_vma_del(nvbo, vma); + /* + * Calling nouveau_bo_subvma_del instead of nouveau_bo_vma_del to + * reflect the fact that we're changing the bo's vma_list. + */ + nouveau_bo_subvma_del(nvbo, vma); kfree(vma); } diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c index dd32ad6db53d..44d679a7f99e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_prime.c +++ b/drivers/gpu/drm/nouveau/nouveau_prime.c @@ -69,7 +69,7 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev, ww_mutex_lock(&robj->lock, NULL); ret = nouveau_bo_new(dev, attach->dmabuf->size, 0, flags, 0, 0, - sg, robj, &nvbo); + sg, robj, &nvbo, false); ww_mutex_unlock(&robj->lock); if (ret) return ERR_PTR(ret); diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c index 57860cfa1de5..d6074c321c2e 100644 --- a/drivers/gpu/drm/nouveau/nv17_fence.c +++ b/drivers/gpu/drm/nouveau/nv17_fence.c @@ -129,7 +129,7 @@ nv17_fence_create(struct nouveau_drm *drm) spin_lock_init(&priv->lock); ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM, - 0, 0x0000, NULL, NULL, &priv->bo); + 0, 0x0000, NULL, NULL, &priv->bo, true); if (!ret) { ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM, false); if (!ret) { diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 
bff804189d27..ac42c1c3c668 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -1444,7 +1444,7 @@ nv50_crtc_create(struct drm_device *dev, int index) drm_mode_crtc_set_gamma_size(crtc, 256); ret = nouveau_bo_new(dev, 8192, 0x100, TTM_PL_FLAG_VRAM, - 0, 0x0000, NULL, NULL, &head->base.lut.nvbo); + 0, 0x0000, NULL, NULL, &head->base.lut.nvbo, true); if (!ret) { ret = nouveau_bo_pin(head->base.lut.nvbo, TTM_PL_FLAG_VRAM, true); if (!ret) { @@ -2494,7 +2494,7 @@ nv50_display_create(struct drm_device *dev) /* small shared memory area we use for notifiers and semaphores */ ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM, - 0, 0x0000, NULL, NULL, &disp->sync); + 0, 0x0000, NULL, NULL, &disp->sync, true); if (!ret) { ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM, true); if (!ret) { diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c index a82d9ea7c6fd..dc34b176f377 100644 --- a/drivers/gpu/drm/nouveau/nv50_fence.c +++ b/drivers/gpu/drm/nouveau/nv50_fence.c @@ -100,7 +100,7 @@ nv50_fence_create(struct nouveau_drm *drm) spin_lock_init(&priv->lock); ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM, - 0, 0x0000, NULL, NULL, &priv->bo); + 0, 0x0000, NULL, NULL, &priv->bo, true); if (!ret) { ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM, false); if (!ret) { diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c index 3e5c131a14de..b15f3e6a7b2b 100644 --- a/drivers/gpu/drm/nouveau/nv84_fence.c +++ b/drivers/gpu/drm/nouveau/nv84_fence.c @@ -241,7 +241,7 @@ nv84_fence_create(struct nouveau_drm *drm) */ TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED; ret = nouveau_bo_new(drm->dev, 16 * priv->base.contexts, 0, domain, 0, - 0, NULL, NULL, &priv->bo); + 0, NULL, NULL, &priv->bo, true); if (ret == 0) { ret = nouveau_bo_pin(priv->bo, domain, false); if (ret == 0) { @@ -256,7 +256,7 @@ nv84_fence_create(struct nouveau_drm *drm) if (ret == 0) ret = 
nouveau_bo_new(drm->dev, 16 * priv->base.contexts, 0, TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED, 0, - 0, NULL, NULL, &priv->bo_gart); + 0, NULL, NULL, &priv->bo_gart, true); if (ret == 0) { ret = nouveau_bo_pin(priv->bo_gart, TTM_PL_FLAG_TT, false); if (ret == 0) { |