From 02699d753b9da9cf2290e5664513d276c2f4946b Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 14 May 2014 19:35:17 +0100 Subject: [PATCH] drm/i915: Prevent negative relocation deltas from wrapping This is pure evil. Userspace, I'm looking at you SNA, repacks batch buffers on the fly after generation as they are being passed to the kernel for execution. These batches also contain self-referenced relocations as a single buffer encompasses the state commands, kernels, vertices and sampler. During generation the buffers are placed at known offsets within the full batch, and then the relocation deltas (as passed to the kernel) are tweaked as the batch is repacked into a smaller buffer. This means that userspace is passing negative relocation deltas, which subsequently wrap to large values if the batch is at a low address. The GPU hangs when it then tries to use the large value as a base for its address offsets, rather than wrapping back to the real value (as one would hope). As the GPU uses positive offsets from the base, we can treat the relocation address as the minimum address read by the GPU. For the upper bound, we trust that userspace will not read beyond the end of the buffer. This fixes a GPU hang when it tries to use an address (relocation + offset) greater than the GTT size. The issue would occur quite easily with full-ppgtt as each fd gets its own VM space, so low offsets would often be handed out. However, with the rearrangement of the low GTT due to capturing the BIOS framebuffer, it is already affecting kernels 3.14 onwards. I think only IVB+ is susceptible to this bug, but the workaround should only kick in rarely, so it seems sensible to always apply it. 
Testcase: igt/gem_bad_reloc Signed-off-by: Chris Wilson Cc: stable@vger.kernel.org --- drivers/gpu/drm/i915/i915_gem.c | 13 ++++- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 76 ++++++++++++++++++++++++++++ 2 files changed, 87 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 65de866..cbe4a23 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3621,8 +3621,9 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; u32 size, fence_size, fence_alignment, unfenced_alignment; - size_t gtt_max = + unsigned long gtt_max = flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total; + unsigned long start = alignment; struct i915_vma *vma; int ret; @@ -3644,6 +3645,13 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, DRM_DEBUG("Invalid object alignment requested %u\n", alignment); return ERR_PTR(-EINVAL); } + if (alignment >= gtt_max) { + DRM_DEBUG("Attempting to alignment larger than the aperture: alignment=%d >= %s aperture=%zu\n", + alignment, + flags & PIN_MAPPABLE ? "mappable" : "total", + gtt_max); + return ERR_PTR(-EINVAL); + } size = flags & PIN_MAPPABLE ? 
fence_size : obj->base.size; @@ -3695,7 +3703,8 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, search_free: ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node, size, alignment, - obj->cache_level, 0, gtt_max, + obj->cache_level, + start, gtt_max, search, create); if (ret) { ret = i915_gem_evict_something(dev, vm, size, alignment, diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index e94aa36..26c9d66 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -261,6 +261,14 @@ static inline int use_cpu_reloc(struct drm_i915_gem_object *obj) obj->cache_level != I915_CACHE_NONE); } +static bool invalid_offset(struct drm_device *dev, uint64_t offset) +{ + offset &= ~4095; + if (INTEL_INFO(dev)->gen < 8) + offset = lower_32_bits(offset); + return offset >= to_i915(dev)->gtt.base.total; +} + static int relocate_entry_cpu(struct drm_i915_gem_object *obj, struct drm_i915_gem_relocation_entry *reloc, @@ -272,6 +280,9 @@ relocate_entry_cpu(struct drm_i915_gem_object *obj, char *vaddr; int ret; + if (invalid_offset(dev, delta)) + return -EFAULT; + ret = i915_gem_object_set_to_cpu_domain(obj, true); if (ret) return ret; @@ -309,6 +320,9 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj, void __iomem *reloc_page; int ret; + if (invalid_offset(dev, delta)) + return -EFAULT; + ret = i915_gem_object_set_to_gtt_domain(obj, true); if (ret) return ret; @@ -702,6 +716,62 @@ err: } static int +i915_gem_execbuffer_relocate_check_slow(struct i915_vma *vma, + struct drm_i915_gem_relocation_entry *relocs, + int total) +{ + struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; + struct drm_i915_gem_object *obj = vma->obj; + int32_t min = 0; + int i, ret; + + /* This is pure evil. Userspace, I'm looking at you SNA, repacks + * batch buffers on the fly after generation and before + * being passed to the kernel for execution. 
These batches also + * contain self-referenced relocations as a single buffer encompasses + * the state commands, kernels, vertices and sampler. During + * generation the buffers are placed at known offsets within the full + * batch, and then the relocation deltas (as passed to the kernel) + * are tweaked as the batch is repacked into a smaller buffer. + * This means that userspace is passing negative relocations deltas, + * which subsequently wrap to large values. The GPU hangs when it + * then tries to use the large value as a base for the address offset, + * rather than wrapping back to the real value (as one would hope). + */ + for (i = 0; i < total; i++) { + struct drm_i915_gem_relocation_entry *reloc = &relocs[i]; + int32_t sdelta; + + if (reloc->target_handle != entry->handle) + continue; + + sdelta = reloc->delta; + if (sdelta < min) + min = sdelta; + } + if (min < 0) { + if (drm_mm_node_allocated(&vma->node) && + invalid_offset(obj->base.dev, vma->node.start + (uint32_t)min)) { + ret = i915_vma_unbind(vma); + if (ret) + return ret; + } + if (!drm_mm_node_allocated(&vma->node)) { + if (entry->alignment == 0) + entry->alignment = + i915_gem_get_gtt_alignment(obj->base.dev, + obj->base.size, + obj->tiling_mode, + entry->flags & __EXEC_OBJECT_NEEDS_MAP); + while (entry->alignment < -min) + entry->alignment <<= 1; + } + } + + return 0; +} + +static int i915_gem_execbuffer_relocate_slow(struct drm_device *dev, struct drm_i915_gem_execbuffer2 *args, struct drm_file *file, @@ -792,6 +862,12 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, if (ret) goto err; + list_for_each_entry(vma, &eb->vmas, exec_list) { + ret = i915_gem_execbuffer_relocate_check_slow(vma, reloc, total); + if (ret) + goto err; + } + need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs); if (ret) -- 1.7.9.5