From 6870b8a0bdb2f7dc2c2687d8cda92612997b1b11 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 26 May 2014 07:06:18 +0100 Subject: [PATCH 2/2] sna: Implicit release of upload buffers considered bad MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently upload buffers are automatically decoupled when the buffer is retired. As retiring can happen during command setup after we have selected which bo to render with, this can free the bo we plan to use. Which is bad. Instead of making the release of upload buffers automatic, we manually check whether the buffer is idle before use as a source to consider scrapping it and replacing it with a real GPU bo. This is likely to keep upload buffers alive for longer (limiting reuse between Pixmaps but making reuse of the buffer within a Pixmap more likely) which is both good and bad. (Good - may improve the content cache, bad - may increase the amount of memory used by upload buffers for arbitrary long periods.) 
Reported-by: Matti Hämäläinen Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=79238 Signed-off-by: Chris Wilson --- src/sna/kgem.c | 31 ------------------------------- src/sna/kgem.h | 1 - src/sna/sna_accel.c | 14 +++++++++----- src/sna/sna_render.c | 4 ++-- 4 files changed, 11 insertions(+), 39 deletions(-) diff --git a/src/sna/kgem.c b/src/sna/kgem.c index 98aa48c..c3177f0 100644 --- a/src/sna/kgem.c +++ b/src/sna/kgem.c @@ -2228,25 +2228,6 @@ static void kgem_bo_unref(struct kgem *kgem, struct kgem_bo *bo) __kgem_bo_destroy(kgem, bo); } -static void kgem_buffer_release(struct kgem *kgem, struct kgem_buffer *bo) -{ - assert(bo->base.io); - while (!list_is_empty(&bo->base.vma)) { - struct kgem_bo *cached; - - cached = list_first_entry(&bo->base.vma, struct kgem_bo, vma); - assert(cached->proxy == &bo->base); - assert(cached != &bo->base); - list_del(&cached->vma); - - assert(*(struct kgem_bo **)cached->map__gtt == cached); - *(struct kgem_bo **)cached->map__gtt = NULL; - cached->map__gtt = NULL; - - kgem_bo_destroy(kgem, cached); - } -} - static bool kgem_retire__buffers(struct kgem *kgem) { bool retired = false; @@ -2267,7 +2248,6 @@ static bool kgem_retire__buffers(struct kgem *kgem) DBG(("%s: releasing upload cache for handle=%d? 
%d\n", __FUNCTION__, bo->base.handle, !list_is_empty(&bo->base.vma))); list_del(&bo->base.list); - kgem_buffer_release(kgem, bo); kgem_bo_unref(kgem, &bo->base); retired = true; } @@ -6728,17 +6708,6 @@ struct kgem_bo *kgem_upload_source_image(struct kgem *kgem, return bo; } -void kgem_proxy_bo_attach(struct kgem_bo *bo, - struct kgem_bo **ptr) -{ - DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle)); - assert(bo->map__gtt == NULL); - assert(bo->proxy); - list_add(&bo->vma, &bo->proxy->vma); - bo->map__gtt = ptr; - *ptr = kgem_bo_reference(bo); -} - void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *_bo) { struct kgem_buffer *bo; diff --git a/src/sna/kgem.h b/src/sna/kgem.h index e39ce7e..d94640d 100644 --- a/src/sna/kgem.h +++ b/src/sna/kgem.h @@ -258,7 +258,6 @@ struct kgem_bo *kgem_upload_source_image(struct kgem *kgem, const void *data, const BoxRec *box, int stride, int bpp); -void kgem_proxy_bo_attach(struct kgem_bo *bo, struct kgem_bo **ptr); int kgem_choose_tiling(struct kgem *kgem, int tiling, int width, int height, int bpp); diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c index 1651ff5..9109741 100644 --- a/src/sna/sna_accel.c +++ b/src/sna/sna_accel.c @@ -1990,7 +1990,8 @@ _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned int flags) sna_pixmap_discard_shadow_damage(priv, NULL); } - if (flags & MOVE_WRITE && priv->gpu_bo && priv->gpu_bo->proxy) { + if ((priv->gpu_bo && priv->gpu_bo->proxy) && + (flags & MOVE_WRITE || priv->gpu_bo->proxy->rq == NULL)) { DBG(("%s: discarding cached upload buffer\n", __FUNCTION__)); assert(DAMAGE_IS_ALL(priv->cpu_damage)); assert(!priv->pinned); @@ -2424,7 +2425,8 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable, assert(priv->gpu_damage == NULL || priv->gpu_bo); - if (flags & MOVE_WRITE && priv->gpu_bo && priv->gpu_bo->proxy) { + if ((priv->gpu_bo && priv->gpu_bo->proxy) && + (flags & MOVE_WRITE || priv->gpu_bo->proxy->rq == NULL)) { DBG(("%s: discarding cached upload buffer\n", __FUNCTION__)); 
assert(DAMAGE_IS_ALL(priv->cpu_damage)); assert(priv->gpu_damage == NULL); @@ -3179,7 +3181,8 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int fl goto done; } - if (flags & MOVE_WRITE && priv->gpu_bo && priv->gpu_bo->proxy) { + if ((priv->gpu_bo && priv->gpu_bo->proxy) && + (flags & MOVE_WRITE || priv->gpu_bo->proxy->rq == NULL)) { DBG(("%s: discarding cached upload buffer\n", __FUNCTION__)); assert(priv->gpu_damage == NULL); assert(!priv->pinned); @@ -3916,7 +3919,8 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags) goto active; } - if (flags & MOVE_WRITE && priv->gpu_bo && priv->gpu_bo->proxy) { + if ((priv->gpu_bo && priv->gpu_bo->proxy) && + (flags & MOVE_WRITE || priv->gpu_bo->proxy->rq == NULL)) { DBG(("%s: discarding cached upload buffer\n", __FUNCTION__)); assert(priv->gpu_damage == NULL); assert(!priv->pinned); @@ -6250,7 +6254,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc, __FUNCTION__)); assert(src_priv->gpu_damage == NULL); assert(src_priv->gpu_bo == NULL); - kgem_proxy_bo_attach(src_bo, &src_priv->gpu_bo); + src_priv->gpu_bo = kgem_bo_reference(src_bo); } if (!sna->render.copy_boxes(sna, alu, diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c index b67bd26..d3cb6a0 100644 --- a/src/sna/sna_render.c +++ b/src/sna/sna_render.c @@ -565,7 +565,7 @@ static struct kgem_bo *upload(struct sna *sna, assert(priv->gpu_damage == NULL); assert(priv->gpu_bo == NULL); assert(bo->proxy != NULL); - kgem_proxy_bo_attach(bo, &priv->gpu_bo); + priv->gpu_bo = kgem_bo_reference(bo); } } @@ -1266,7 +1266,7 @@ sna_render_picture_extract(struct sna *sna, assert(priv->gpu_damage == NULL); assert(priv->gpu_bo == NULL); assert(bo->proxy != NULL); - kgem_proxy_bo_attach(bo, &priv->gpu_bo); + priv->gpu_bo = kgem_bo_reference(bo); } } -- 1.9.1