From a5ffdfbd1d92ad905f38d03fe45324a9a36c42b8 Mon Sep 17 00:00:00 2001
From: Andrey Grodzovsky
Date: Thu, 20 Sep 2018 15:12:23 -0400
Subject: drm/amdgpu: Allocate UVD FW BO backup RAM space on init.

Avoid big RAM allocations during S3 since kswapd and the OOM killer
are disabled during that time.

Signed-off-by: Andrey Grodzovsky
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 22 ++++++++++++++--------
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h |  1 +
 2 files changed, 15 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index e5a6db6..c1bead7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -253,6 +253,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
 
 	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+		unsigned size;
 		if (adev->uvd.harvest_config & (1 << j))
 			continue;
 		r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
@@ -262,6 +263,13 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 			dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
 			return r;
 		}
+
+		size = amdgpu_bo_size(adev->uvd.inst[j].vcpu_bo);
+		adev->uvd.inst[j].saved_bo = kvmalloc(size, GFP_KERNEL);
+		if (!adev->uvd.inst[j].saved_bo) {
+			dev_err(adev->dev, "(%d) failed to allocate RAM for BO backup\n", r);
+			return -ENOMEM;
+		}
 	}
 
 	for (i = 0; i < adev->uvd.max_handles; ++i) {
@@ -308,6 +316,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 				      &adev->uvd.inst[j].gpu_addr,
 				      (void **)&adev->uvd.inst[j].cpu_addr);
 
+		kvfree(adev->uvd.inst[j].saved_bo);
 		amdgpu_ring_fini(&adev->uvd.inst[j].ring);
 
 		for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i)
@@ -368,11 +377,8 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 		size = amdgpu_bo_size(adev->uvd.inst[j].vcpu_bo);
 		ptr = adev->uvd.inst[j].cpu_addr;
 
-		adev->uvd.inst[j].saved_bo = kvmalloc(size, GFP_KERNEL);
-		if (!adev->uvd.inst[j].saved_bo)
-			return -ENOMEM;
-
 		memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size);
+		adev->uvd.inst[j].suspended = true;
 	}
 	return 0;
 }
@@ -392,11 +398,9 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
 		size = amdgpu_bo_size(adev->uvd.inst[i].vcpu_bo);
 		ptr = adev->uvd.inst[i].cpu_addr;
 
-		if (adev->uvd.inst[i].saved_bo != NULL) {
+		if (adev->uvd.inst[i].suspended)
 			memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size);
-			kvfree(adev->uvd.inst[i].saved_bo);
-			adev->uvd.inst[i].saved_bo = NULL;
-		} else {
+		else {
 			const struct common_firmware_header *hdr;
 			unsigned offset;
 
@@ -411,6 +415,8 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
 			memset_io(ptr, 0, size);
 			/* to restore uvd fence seq */
 			amdgpu_fence_driver_force_completion(&adev->uvd.inst[i].ring);
+
+			adev->uvd.inst[i].suspended = false;
 		}
 	}
 	return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index a3ab1a4..7fe5ce8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -46,6 +46,7 @@ struct amdgpu_uvd_inst {
 	struct amdgpu_ring	ring_enc[AMDGPU_MAX_UVD_ENC_RINGS];
 	struct amdgpu_irq_src	irq;
 	uint32_t		srbm_soft_reset;
+	bool			suspended;
 };
 
 #define AMDGPU_UVD_HARVEST_UVD0 (1 << 0)
-- 
2.7.4