From e75582c40a71a90a2e8ac84178a5c574568120ac Mon Sep 17 00:00:00 2001
From: Chris Wilson
Date: Fri, 10 Jun 2011 21:04:13 +0100
Subject: [PATCH] drm/i915: Disable page-faults around the fast pwrite/pread paths

These paths hold onto the struct mutex whilst accessing pages. In order
to prevent a recursive deadlock should we fault in a GTT-mapped page, we
need to return -EFAULT and fall back to the slow path.

Lockdep has complained before about the potential deadlock, but rvis is
the first application found to abuse the API sufficiently to trigger it.

Performance regression testing on a 1GiB PineView system using x11perf,
cairo-perf-trace, glxgears and a few game benchmarks suggested no large
regressions, with just a 2% slowdown for firefox. The caveat is that
this was an otherwise idle system.

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=38115
Signed-off-by: Chris Wilson
---
 drivers/gpu/drm/i915/i915_gem.c | 32 ++++++++++++++++++++++++--------
 1 files changed, 24 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 890af70..896b26e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -362,9 +362,13 @@ i915_gem_shmem_pread_fast(struct drm_device *dev,
 		page_length = PAGE_SIZE - page_offset;
 
 		page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
-					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
-		if (IS_ERR(page))
-			return PTR_ERR(page);
+					   GFP_ATOMIC | GFP_HIGHUSER | __GFP_RECLAIMABLE);
+		if (IS_ERR(page)) {
+			/* Discard the original failure code so that we try
+			 * again in the non-atomic slow path.
+			 */
+			return -EFAULT;
+		}
 
 		vaddr = kmap_atomic(page);
 		ret = __copy_to_user_inatomic(user_data,
@@ -556,8 +560,11 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 		goto out;
 
 	ret = -EFAULT;
-	if (!i915_gem_object_needs_bit17_swizzle(obj))
+	if (!i915_gem_object_needs_bit17_swizzle(obj)) {
+		pagefault_disable();
 		ret = i915_gem_shmem_pread_fast(dev, obj, args, file);
+		pagefault_enable();
+	}
 	if (ret == -EFAULT)
 		ret = i915_gem_shmem_pread_slow(dev, obj, args, file);
 
@@ -798,9 +805,13 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev,
 		page_length = PAGE_SIZE - page_offset;
 
 		page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
-					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
-		if (IS_ERR(page))
-			return PTR_ERR(page);
+					   GFP_ATOMIC | GFP_HIGHUSER | __GFP_RECLAIMABLE);
+		if (IS_ERR(page)) {
+			/* Discard the original failure code so that we try
+			 * again in the non-atomic slow path.
+			 */
+			return -EFAULT;
+		}
 
 		vaddr = kmap_atomic(page, KM_USER0);
 		ret = __copy_from_user_inatomic(vaddr + page_offset,
@@ -1013,7 +1024,9 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 		if (ret)
 			goto out_unpin;
 
+		pagefault_disable();
 		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
+		pagefault_enable();
 		if (ret == -EFAULT)
 			ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file);
 
@@ -1025,8 +1038,11 @@ out_unpin:
 		goto out;
 
 	ret = -EFAULT;
-	if (!i915_gem_object_needs_bit17_swizzle(obj))
+	if (!i915_gem_object_needs_bit17_swizzle(obj)) {
+		pagefault_disable();
 		ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
+		pagefault_enable();
+	}
 	if (ret == -EFAULT)
 		ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
 }
-- 
1.7.5.4
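
A note on the pattern, separate from the patch itself: pagefault_disable()
makes the fast paths safe because __copy_to_user_inatomic() and
__copy_from_user_inatomic() then fail on a non-resident user page instead
of faulting, so a user buffer that is itself a GTT mmap can never re-enter
the i915 fault handler (which would try to re-take struct_mutex). Below is
a minimal sketch of that lock-then-fallback shape; the example_copy_in name
is hypothetical, and the slow path is simplified (the real driver pins the
user pages and retries under the mutex rather than copying unlocked):

#include <linux/uaccess.h>
#include <linux/mutex.h>

static int example_copy_in(struct mutex *lock, void *dst,
			   const void __user *src, size_t len)
{
	unsigned long unwritten;

	mutex_lock(lock);

	/* Fast path: with page-faults disabled, a non-resident user
	 * page makes the atomic copy fail rather than recurse into a
	 * fault handler that needs 'lock'.
	 */
	pagefault_disable();
	unwritten = __copy_from_user_inatomic(dst, src, len);
	pagefault_enable();

	mutex_unlock(lock);

	if (likely(!unwritten))
		return 0;

	/* Slow path: fault the page in outside the lock, where a
	 * recursive fault is harmless. A plain faultable copy is
	 * enough to illustrate the fallback.
	 */
	return copy_from_user(dst, src, len) ? -EFAULT : 0;
}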