diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index e143004e66d5..5a736d81a5d1 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1193,6 +1193,7 @@ struct i915_psr { struct intel_dp *enabled; bool active; struct delayed_work work; + unsigned long earliest_enable; unsigned busy_frontbuffer_bits; bool psr2_support; bool aux_frame_sync; diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index 55ea5eb3b7df..a8c0738fc7b9 100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c @@ -670,12 +670,17 @@ static void intel_psr_work(struct work_struct *work) struct intel_dp *intel_dp = dev_priv->psr.enabled; struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc; enum pipe pipe = to_intel_crtc(crtc)->pipe; + u64 start, end; /* We have to make sure PSR is ready for re-enable * otherwise it keeps disabled until next full enable/disable cycle. * PSR might take some time to get fully disabled * and be ready for re-enable. */ + + DRM_DEBUG_KMS("intel_psr_work: waiting for ready\n"); + start = ktime_get_ns(); + if (HAS_DDI(dev_priv)) { if (dev_priv->psr.psr2_support) { if (intel_wait_for_register(dev_priv, @@ -706,6 +711,7 @@ static void intel_psr_work(struct work_struct *work) return; } } + end = ktime_get_ns(); mutex_lock(&dev_priv->psr.lock); intel_dp = dev_priv->psr.enabled; @@ -717,9 +723,15 @@ static void intel_psr_work(struct work_struct *work) * recheck. Since psr_flush first clears this and then reschedules we * won't ever miss a flush when bailing out here. 
*/ - if (dev_priv->psr.busy_frontbuffer_bits) + if (dev_priv->psr.busy_frontbuffer_bits || + !time_after_eq(jiffies, dev_priv->psr.earliest_enable)) { + DRM_DEBUG_KMS("deferring PSR enable\n"); + schedule_delayed_work(&dev_priv->psr.work, + msecs_to_jiffies(100)); goto unlock; + } + DRM_DEBUG_KMS("intel_psr_work: activating PSR after %lluus wait for ready\n", (end - start)/1000); intel_psr_activate(intel_dp); unlock: mutex_unlock(&dev_priv->psr.lock); @@ -860,8 +872,11 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv, frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits; - if (frontbuffer_bits) + DRM_DEBUG_KMS("intel_psr_invalidate: set frontbuffer bits %x\n", frontbuffer_bits); + if (frontbuffer_bits) { intel_psr_exit(dev_priv); + dev_priv->psr.earliest_enable = jiffies + HZ/20; + } mutex_unlock(&dev_priv->psr.lock); } @@ -900,14 +915,18 @@ void intel_psr_flush(struct drm_i915_private *dev_priv, frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits; + DRM_DEBUG_KMS("intel_psr_flush: clear frontbuffer bits %x\n", frontbuffer_bits); + /* By definition flush = invalidate + flush */ - if (frontbuffer_bits) + if (frontbuffer_bits) { intel_psr_exit(dev_priv); + dev_priv->psr.earliest_enable = jiffies + HZ/20; + } if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits) - if (!work_busy(&dev_priv->psr.work.work)) - schedule_delayed_work(&dev_priv->psr.work, - msecs_to_jiffies(100)); + schedule_delayed_work(&dev_priv->psr.work, + msecs_to_jiffies(100)); + mutex_unlock(&dev_priv->psr.lock); } @@ -967,4 +986,6 @@ void intel_psr_init(struct drm_i915_private *dev_priv) dev_priv->psr.activate = hsw_psr_activate; dev_priv->psr.setup_vsc = hsw_psr_setup_vsc; } + + dev_priv->psr.earliest_enable = jiffies; }