From c4dfe1eb4b34c3aa4eb69b1cc6950eeca0e26a51 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 10 Jun 2011 21:04:13 +0100 Subject: [PATCH] drm/i915: Disable page-faults around the fast pwrite/pread paths These paths hold onto the struct mutex whilst accessing pages. In order to prevent a recursive dead-lock should we fault-in a GTT mapped page, we need to return -EFAULT and fall back to the slow path. Lockdep has complained before about the potential dead-lock, but rvis is the first application found to sufficiently abuse the API to trigger it. Cursory performance regression testing on a 1GiB PineView system using x11perf, cairo-perf-trace, glxgears and a few game benchmarks suggested no large regressions with just a 2% slowdown for firefox. The caveat is that this was an otherwise idle system. Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=38115 Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 18 ++++++++++++++++++ 1 files changed, 18 insertions(+), 0 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index a49db69..e68e030 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -433,9 +433,15 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, return PTR_ERR(page); vaddr = kmap_atomic(page); + /* We have to disable faulting here in case the user address + * is really a GTT mapping and so we can not enter + * i915_gem_fault() whilst already holding struct_mutex. + */ + pagefault_disable(); ret = __copy_to_user_inatomic(user_data, vaddr + page_offset, page_length); + pagefault_enable(); kunmap_atomic(vaddr); mark_page_accessed(page); @@ -657,8 +663,14 @@ fast_user_write(struct io_mapping *mapping, unsigned long unwritten; vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base); + /* We have to disable faulting here in case the user address + * is really a GTT mapping and so we can not enter + * i915_gem_fault() whilst already holding struct_mutex. 
+ */ + pagefault_disable(); unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset, user_data, length); + pagefault_enable(); io_mapping_unmap_atomic(vaddr_atomic); return unwritten; } @@ -867,9 +879,15 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, return PTR_ERR(page); vaddr = kmap_atomic(page); + /* We have to disable faulting here in case the user address + * is really a GTT mapping and so we can not enter + * i915_gem_fault() whilst already holding struct_mutex. + */ + pagefault_disable(); ret = __copy_from_user_inatomic(vaddr + page_offset, user_data, page_length); + pagefault_enable(); kunmap_atomic(vaddr); set_page_dirty(page); -- 1.7.5.4