path: root/common/recipes-kernel/linux/linux-yocto-4.14.71/0250-drm-amd-display-Refactor-atomic-commit-implementatio.patch
Diffstat (limited to 'common/recipes-kernel/linux/linux-yocto-4.14.71/0250-drm-amd-display-Refactor-atomic-commit-implementatio.patch')
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.14.71/0250-drm-amd-display-Refactor-atomic-commit-implementatio.patch  678
1 file changed, 678 insertions(+), 0 deletions(-)
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/0250-drm-amd-display-Refactor-atomic-commit-implementatio.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/0250-drm-amd-display-Refactor-atomic-commit-implementatio.patch
new file mode 100644
index 00000000..33b4d9c9
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/0250-drm-amd-display-Refactor-atomic-commit-implementatio.patch
@@ -0,0 +1,678 @@
+From d6fd92f1c7faeac7154d97db5b9901ab22be755b Mon Sep 17 00:00:00 2001
+From: Andrey Grodzovsky <Andrey.Grodzovsky@amd.com>
+Date: Thu, 20 Apr 2017 15:57:05 -0400
+Subject: [PATCH 0250/4131] drm/amd/display: Refactor atomic commit
+ implementation. (v2)
+
+Modify amdgpu_dm_atomic_commit to implement the
+atomic_commit_tail hook.
+Unify buffer object allocation and deallocation
+for surface updates and page flips.
+Simplify the wait-for-fences and target-vblank logic
+for the non-blocking commit.
+Remove the hacky update-surface-to-page-flip synchronization
+we had and rely on the atomic framework's synchronization logic.
+
+v2:
+Add state->allow_modeset as an indicator of a page flip call.
+
+Signed-off-by: Andrey Grodzovsky <Andrey.Grodzovsky@amd.com>
+Acked-by: Harry Wentland <Harry.Wentland@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 58 +++-
+ .../drm/amd/display/amdgpu_dm/amdgpu_dm_types.c | 343 ++++++++++-----------
+ .../drm/amd/display/amdgpu_dm/amdgpu_dm_types.h | 7 +-
+ 3 files changed, 203 insertions(+), 205 deletions(-)
+
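At a glance, the refactor replaces the driver's own .atomic_commit with the generic DRM helper and moves the driver-side work into an atomic_commit_tail hook. A minimal sketch of that wiring, using only names that appear in the hunks below:

    /* Sketch: commit-tail wiring after this patch. The helper now owns the
     * top-level commit flow (state swap, fence waits) and calls back into
     * the driver tail for the hardware programming. */
    static struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
    	.fb_create = amdgpu_user_framebuffer_create,
    	.output_poll_changed = amdgpu_output_poll_changed,
    	.atomic_check = amdgpu_dm_atomic_check,
    	.atomic_commit = drm_atomic_helper_commit,
    };

    static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
    	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail,
    };

    /* Registered at mode-config init: */
    adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
    adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
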
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index ab9642e..4cae515 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -149,7 +149,6 @@ static struct amdgpu_crtc *get_crtc_by_otg_inst(
+
+ static void dm_pflip_high_irq(void *interrupt_params)
+ {
+- struct amdgpu_flip_work *works;
+ struct amdgpu_crtc *amdgpu_crtc;
+ struct common_irq_params *irq_params = interrupt_params;
+ struct amdgpu_device *adev = irq_params->adev;
+@@ -165,7 +164,6 @@ static void dm_pflip_high_irq(void *interrupt_params)
+ }
+
+ spin_lock_irqsave(&adev->ddev->event_lock, flags);
+- works = amdgpu_crtc->pflip_works;
+
+ if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
+ DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
+@@ -177,22 +175,24 @@ static void dm_pflip_high_irq(void *interrupt_params)
+ return;
+ }
+
+- /* page flip completed. clean up */
+- amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
+- amdgpu_crtc->pflip_works = NULL;
+
+ /* wake up userspace */
+- if (works->event)
+- drm_crtc_send_vblank_event(&amdgpu_crtc->base,
+- works->event);
++ if (amdgpu_crtc->event
++ && amdgpu_crtc->event->event.base.type
++ == DRM_EVENT_FLIP_COMPLETE) {
++ drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event);
++ /* page flip completed. clean up */
++ amdgpu_crtc->event = NULL;
++ } else
++ WARN_ON(1);
+
++ amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
+ spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+
+- DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE, work: %p,\n",
+- __func__, amdgpu_crtc->crtc_id, amdgpu_crtc, works);
++ DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n",
++ __func__, amdgpu_crtc->crtc_id, amdgpu_crtc);
+
+ drm_crtc_vblank_put(&amdgpu_crtc->base);
+- schedule_work(&works->unpin_work);
+ }
+
+ static void dm_crtc_high_irq(void *interrupt_params)
+@@ -719,7 +719,11 @@ static struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
+ .fb_create = amdgpu_user_framebuffer_create,
+ .output_poll_changed = amdgpu_output_poll_changed,
+ .atomic_check = amdgpu_dm_atomic_check,
+- .atomic_commit = amdgpu_dm_atomic_commit
++ .atomic_commit = drm_atomic_helper_commit
++};
++
++static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
++ .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
+ };
+
+ void amdgpu_dm_update_connector_after_detect(
+@@ -1092,6 +1096,7 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
+ adev->mode_info.mode_config_initialized = true;
+
+ adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
++ adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
+
+ adev->ddev->mode_config.max_width = 16384;
+ adev->ddev->mode_config.max_height = 16384;
+@@ -1345,6 +1350,14 @@ static void dm_page_flip(struct amdgpu_device *adev,
+ acrtc = adev->mode_info.crtcs[crtc_id];
+ stream = acrtc->stream;
+
++
++ if (acrtc->pflip_status != AMDGPU_FLIP_NONE) {
++ DRM_ERROR("flip queue: acrtc %d, already busy\n", acrtc->crtc_id);
++ /* In the commit-tail framework this cannot happen */
++ BUG_ON(0);
++ }
++
++
+ /*
+ * Received a page flip call after the display has been reset.
+ * Just return in this case. Everything should be clean-up on reset.
+@@ -1359,15 +1372,28 @@ static void dm_page_flip(struct amdgpu_device *adev,
+ addr.address.grph.addr.high_part = upper_32_bits(crtc_base);
+ addr.flip_immediate = async;
+
++
++ if (acrtc->base.state->event &&
++ acrtc->base.state->event->event.base.type ==
++ DRM_EVENT_FLIP_COMPLETE) {
++ acrtc->event = acrtc->base.state->event;
++
++ /* Set the flip status */
++ acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
++
++ /* Mark this event as consumed */
++ acrtc->base.state->event = NULL;
++ }
++
++ dc_flip_surface_addrs(adev->dm.dc,
++ dc_stream_get_status(stream)->surfaces,
++ &addr, 1);
++
+ DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x \n",
+ __func__,
+ addr.address.grph.addr.high_part,
+ addr.address.grph.addr.low_part);
+
+- dc_flip_surface_addrs(
+- adev->dm.dc,
+- dc_stream_get_status(stream)->surfaces,
+- &addr, 1);
+ }
+
+ static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
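The flip-completion path above now hands the DRM event from dm_page_flip to dm_pflip_high_irq through the CRTC itself, instead of through an amdgpu_flip_work. Condensed from the hunks (the submission side in dm_page_flip takes no explicit lock in this patch; the IRQ side holds the event lock as shown):

    /* Submission (dm_page_flip): claim the pending pageflip event, if any. */
    if (acrtc->base.state->event &&
        acrtc->base.state->event->event.base.type == DRM_EVENT_FLIP_COMPLETE) {
    	acrtc->event = acrtc->base.state->event;	/* stash for the IRQ */
    	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
    	acrtc->base.state->event = NULL;		/* mark consumed */
    }

    /* Completion (dm_pflip_high_irq): deliver it under the event lock. */
    spin_lock_irqsave(&adev->ddev->event_lock, flags);
    if (acrtc->event) {
    	drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
    	acrtc->event = NULL;
    }
    acrtc->pflip_status = AMDGPU_FLIP_NONE;
    spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
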
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_types.c
+index 81887bb..b6dc0ca 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_types.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_types.c
+@@ -687,8 +687,18 @@ static void dm_dc_surface_commit(
+ {
+ struct dc_surface *dc_surface;
+ const struct dc_surface *dc_surfaces[1];
+- const struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
++ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+ const struct dc_stream *dc_stream = acrtc->stream;
++ unsigned long flags;
++
++ spin_lock_irqsave(&crtc->dev->event_lock, flags);
++ if (acrtc->pflip_status != AMDGPU_FLIP_NONE) {
++ DRM_ERROR("dm_dc_surface_commit: acrtc %d, already busy\n", acrtc->crtc_id);
++ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
++ /* In the commit-tail framework this cannot happen */
++ BUG_ON(0);
++ }
++ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
+ if (!dc_stream) {
+ dm_error(
+@@ -1590,6 +1600,7 @@ static void clear_unrelated_fields(struct drm_plane_state *state)
+ state->fence = NULL;
+ }
+
++/* TODO: update this, because the event is always present now */
+ static bool page_flip_needed(
+ const struct drm_plane_state *new_state,
+ const struct drm_plane_state *old_state,
+@@ -1645,11 +1656,13 @@ static bool page_flip_needed(
+ page_flip_required = memcmp(&old_state_tmp,
+ &new_state_tmp,
+ sizeof(old_state_tmp)) == 0 ? true:false;
++
+ if (new_state->crtc && page_flip_required == false) {
+ acrtc_new = to_amdgpu_crtc(new_state->crtc);
+ if (acrtc_new->flip_flags & DRM_MODE_PAGE_FLIP_ASYNC)
+ page_flip_required = true;
+ }
++
+ return page_flip_required;
+ }
+
+@@ -1675,7 +1688,7 @@ static int dm_plane_helper_prepare_fb(
+ if (unlikely(r != 0))
+ return r;
+
+- r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, NULL);
++ r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &afb->address);
+
+ amdgpu_bo_unreserve(rbo);
+
+@@ -1684,6 +1697,7 @@ static int dm_plane_helper_prepare_fb(
+ return r;
+ }
+
++ amdgpu_bo_ref(rbo);
+ return 0;
+ }
+
+@@ -1707,7 +1721,10 @@ static void dm_plane_helper_cleanup_fb(
+ } else {
+ amdgpu_bo_unpin(rbo);
+ amdgpu_bo_unreserve(rbo);
++ amdgpu_bo_unref(&rbo);
+ }
++
++ afb->address = 0;
+ }
+
+ int dm_create_validation_set_for_connector(struct drm_connector *connector,
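Taken together, the two hunks above make prepare_fb/cleanup_fb a symmetric pin/unpin pair: prepare_fb now records the pinned GPU address in the amdgpu_framebuffer and holds an extra BO reference until cleanup_fb releases it. Condensed from the hunks:

    /* prepare_fb: pin into VRAM, record the address, keep a reference. */
    r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &afb->address);
    amdgpu_bo_unreserve(rbo);
    if (unlikely(r != 0))
    	return r;
    amdgpu_bo_ref(rbo);

    /* cleanup_fb: undo all three. */
    amdgpu_bo_unpin(rbo);
    amdgpu_bo_unreserve(rbo);
    amdgpu_bo_unref(&rbo);
    afb->address = 0;
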
+@@ -2305,51 +2322,6 @@ static enum dm_commit_action get_dm_commit_action(struct drm_crtc_state *state)
+ }
+ }
+
+-
+-typedef bool (*predicate)(struct amdgpu_crtc *acrtc);
+-
+-static void wait_while_pflip_status(struct amdgpu_device *adev,
+- struct amdgpu_crtc *acrtc, predicate f) {
+- int count = 0;
+- while (f(acrtc)) {
+- /* Spin Wait*/
+- msleep(1);
+- count++;
+- if (count == 1000) {
+- DRM_ERROR("%s - crtc:%d[%p], pflip_stat:%d, probable hang!\n",
+- __func__, acrtc->crtc_id,
+- acrtc,
+- acrtc->pflip_status);
+-
+- /* we do not expect to hit this case except on Polaris with PHY PLL
+- * 1. DP to HDMI passive dongle connected
+- * 2. unplug (headless)
+- * 3. plug in DP
+- * 3a. on plug in, DP will try verify link by training, and training
+- * would disable PHY PLL which HDMI rely on to drive TG
+- * 3b. this will cause flip interrupt cannot be generated, and we
+- * exit when timeout expired. however we do not have code to clean
+- * up flip, flip clean up will happen when the address is written
+- * with the restore mode change
+- */
+- WARN_ON(1);
+- break;
+- }
+- }
+-
+- DRM_DEBUG_DRIVER("%s - Finished waiting for:%d msec, crtc:%d[%p], pflip_stat:%d \n",
+- __func__,
+- count,
+- acrtc->crtc_id,
+- acrtc,
+- acrtc->pflip_status);
+-}
+-
+-static bool pflip_in_progress_predicate(struct amdgpu_crtc *acrtc)
+-{
+- return acrtc->pflip_status != AMDGPU_FLIP_NONE;
+-}
+-
+ static void manage_dm_interrupts(
+ struct amdgpu_device *adev,
+ struct amdgpu_crtc *acrtc,
+@@ -2371,8 +2343,6 @@ static void manage_dm_interrupts(
+ &adev->pageflip_irq,
+ irq_type);
+ } else {
+- wait_while_pflip_status(adev, acrtc,
+- pflip_in_progress_predicate);
+
+ amdgpu_irq_put(
+ adev,
+@@ -2382,12 +2352,6 @@ static void manage_dm_interrupts(
+ }
+ }
+
+-
+-static bool pflip_pending_predicate(struct amdgpu_crtc *acrtc)
+-{
+- return acrtc->pflip_status == AMDGPU_FLIP_PENDING;
+-}
+-
+ static bool is_scaling_state_different(
+ const struct dm_connector_state *dm_state,
+ const struct dm_connector_state *old_dm_state)
+@@ -2425,116 +2389,94 @@ static void remove_stream(struct amdgpu_device *adev, struct amdgpu_crtc *acrtc)
+ acrtc->enabled = false;
+ }
+
+-int amdgpu_dm_atomic_commit(
+- struct drm_device *dev,
+- struct drm_atomic_state *state,
+- bool nonblock)
++
++/*
++ * Executes the flip.
++ *
++ * Waits on all of the BO's fences and for the proper vblank count.
++ */
++static void amdgpu_dm_do_flip(
++ struct drm_crtc *crtc,
++ struct drm_framebuffer *fb,
++ uint32_t target)
+ {
++ unsigned long flags;
++ uint32_t target_vblank;
++ int r, vpos, hpos;
++ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
++ struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
++ struct amdgpu_bo *abo = gem_to_amdgpu_bo(afb->obj);
++ struct amdgpu_device *adev = crtc->dev->dev_private;
++ bool async_flip = (acrtc->flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
++
++
++ /* TODO: this reserve might fail, and hence is better not used; wait
++ * explicitly on the fences instead. In general this should be called
++ * for the blocking commit too, as per the framework helpers.
++ */
++ r = amdgpu_bo_reserve(abo, true);
++ if (unlikely(r != 0)) {
++ DRM_ERROR("failed to reserve buffer before flip\n");
++ BUG_ON(0);
++ }
++
++ /* Wait for all fences on this FB */
++ WARN_ON(reservation_object_wait_timeout_rcu(abo->tbo.resv, true, false,
++ MAX_SCHEDULE_TIMEOUT) < 0);
++
++ amdgpu_bo_unreserve(abo);
++
++ /* Wait for target vblank */
++ /* Wait until we're out of the vertical blank period before the one
++ * targeted by the flip
++ */
++ target_vblank = target - drm_crtc_vblank_count(crtc) +
++ amdgpu_get_vblank_counter_kms(crtc->dev, acrtc->crtc_id);
++
++ while ((acrtc->enabled &&
++ (amdgpu_get_crtc_scanoutpos(adev->ddev, acrtc->crtc_id, 0,
++ &vpos, &hpos, NULL, NULL,
++ &crtc->hwmode)
++ & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
++ (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
++ (int)(target_vblank -
++ amdgpu_get_vblank_counter_kms(adev->ddev, acrtc->crtc_id)) > 0)) {
++ usleep_range(1000, 1100);
++ }
++
++ /* Flip */
++ spin_lock_irqsave(&crtc->dev->event_lock, flags);
++ /* update crtc fb */
++ crtc->primary->fb = fb;
++
++ /* Do the flip (mmio) */
++ adev->mode_info.funcs->page_flip(adev, acrtc->crtc_id, afb->address, async_flip);
++
++ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
++ DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
++ acrtc->crtc_id);
++}
++
++void amdgpu_dm_atomic_commit_tail(
++ struct drm_atomic_state *state)
++{
++ struct drm_device *dev = state->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct drm_plane *plane;
+- struct drm_plane_state *new_plane_state;
+ struct drm_plane_state *old_plane_state;
+ uint32_t i;
+- int32_t ret = 0;
+ uint32_t commit_streams_count = 0;
+ uint32_t new_crtcs_count = 0;
+- uint32_t flip_crtcs_count = 0;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
+ const struct dc_stream *commit_streams[MAX_STREAMS];
+ struct amdgpu_crtc *new_crtcs[MAX_STREAMS];
+ const struct dc_stream *new_stream;
+- struct drm_crtc *flip_crtcs[MAX_STREAMS];
+- struct amdgpu_flip_work *work[MAX_STREAMS] = {0};
+- struct amdgpu_bo *new_abo[MAX_STREAMS] = {0};
+-
+- /* In this step all new fb would be pinned */
+-
+- /*
+- * TODO: Revisit when we support true asynchronous commit.
+- * Right now we receive async commit only from pageflip, in which case
+- * we should not pin/unpin the fb here, it should be done in
+- * amdgpu_crtc_flip and from the vblank irq handler.
+- */
+- if (!nonblock) {
+- ret = drm_atomic_helper_prepare_planes(dev, state);
+- if (ret)
+- return ret;
+- }
+-
+- /* Page flip if needed */
+- for_each_plane_in_state(state, plane, new_plane_state, i) {
+- struct drm_plane_state *old_plane_state = plane->state;
+- struct drm_crtc *crtc = new_plane_state->crtc;
+- struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+- struct drm_framebuffer *fb = new_plane_state->fb;
+- struct drm_crtc_state *crtc_state;
+-
+- if (!fb || !crtc)
+- continue;
+-
+- crtc_state = drm_atomic_get_crtc_state(state, crtc);
++ unsigned long flags;
++ bool wait_for_vblank = true;
+
+- if (!crtc_state->planes_changed || !crtc_state->active)
+- continue;
+-
+- if (page_flip_needed(
+- new_plane_state,
+- old_plane_state,
+- crtc_state->event,
+- false)) {
+- ret = amdgpu_crtc_prepare_flip(crtc,
+- fb,
+- crtc_state->event,
+- acrtc->flip_flags,
+- drm_crtc_vblank_count(crtc),
+- &work[flip_crtcs_count],
+- &new_abo[flip_crtcs_count]);
+-
+- if (ret) {
+- /* According to atomic_commit hook API, EINVAL is not allowed */
+- if (unlikely(ret == -EINVAL))
+- ret = -ENOMEM;
+-
+- DRM_ERROR("Atomic commit: Flip for crtc id %d: [%p], "
+- "failed, errno = %d\n",
+- acrtc->crtc_id,
+- acrtc,
+- ret);
+- /* cleanup all flip configurations which
+- * succeeded in this commit
+- */
+- for (i = 0; i < flip_crtcs_count; i++)
+- amdgpu_crtc_cleanup_flip_ctx(
+- work[i],
+- new_abo[i]);
+-
+- return ret;
+- }
+-
+- flip_crtcs[flip_crtcs_count] = crtc;
+- flip_crtcs_count++;
+- }
+- }
+-
+- /*
+- * This is the point of no return - everything below never fails except
+- * when the hw goes bonghits. Which means we can commit the new state on
+- * the software side now.
+- */
+-
+- drm_atomic_helper_swap_state(state, true);
+-
+- /*
+- * From this point state become old state really. New state is
+- * initialized to appropriate objects and could be accessed from there
+- */
+-
+- /*
+- * there is no fences usage yet in state. We can skip the following line
+- * wait_for_fences(dev, state);
+- */
+
+ drm_atomic_helper_update_legacy_modeset_state(dev, state);
+
+@@ -2690,11 +2632,11 @@ int amdgpu_dm_atomic_commit(
+ for_each_plane_in_state(state, plane, old_plane_state, i) {
+ struct drm_plane_state *plane_state = plane->state;
+ struct drm_crtc *crtc = plane_state->crtc;
+- struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+ struct drm_framebuffer *fb = plane_state->fb;
+ struct drm_connector *connector;
+ struct dm_connector_state *dm_state = NULL;
+ enum dm_commit_action action;
++ bool pflip_needed;
+
+ if (!fb || !crtc || !crtc->state->active)
+ continue;
+@@ -2705,12 +2647,14 @@ int amdgpu_dm_atomic_commit(
+ * 1. This commit is not a page flip.
+ * 2. This commit is a page flip, and streams are created.
+ */
+- if (!page_flip_needed(
++ pflip_needed = !state->allow_modeset &&
++ page_flip_needed(
+ plane_state,
+ old_plane_state,
+- crtc->state->event, true) ||
+- action == DM_COMMIT_ACTION_DPMS_ON ||
+- action == DM_COMMIT_ACTION_SET) {
++ crtc->state->event, true);
++ if (!pflip_needed ||
++ action == DM_COMMIT_ACTION_DPMS_ON ||
++ action == DM_COMMIT_ACTION_SET) {
+ list_for_each_entry(connector,
+ &dev->mode_config.connector_list, head) {
+ if (connector->state->crtc == crtc) {
+@@ -2737,14 +2681,6 @@ int amdgpu_dm_atomic_commit(
+ if (!dm_state)
+ continue;
+
+- /*
+- * if flip is pending (ie, still waiting for fence to return
+- * before address is submitted) here, we cannot commit_surface
+- * as commit_surface will pre-maturely write out the future
+- * address. wait until flip is submitted before proceeding.
+- */
+- wait_while_pflip_status(adev, acrtc, pflip_pending_predicate);
+-
+ dm_dc_surface_commit(dm->dc, crtc);
+ }
+ }
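This is where the v2 allow_modeset indicator comes in: legacy page-flip ioctls produce commits with state->allow_modeset == false, so the flip-only path is gated on it. The same test recurs in the flip loop and in amdgpu_dm_atomic_check below:

    /* A commit may take the page-flip fast path only when no modeset is
     * allowed (i.e. it originated as a page-flip call) and the plane
     * state delta itself qualifies as a flip. */
    pflip_needed = !state->allow_modeset &&
    	       page_flip_needed(plane_state, old_plane_state,
    			        crtc->state->event, true);
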
+@@ -2764,43 +2700,77 @@ int amdgpu_dm_atomic_commit(
+
+ }
+
+- /* Do actual flip */
+- flip_crtcs_count = 0;
+ for_each_plane_in_state(state, plane, old_plane_state, i) {
+ struct drm_plane_state *plane_state = plane->state;
+ struct drm_crtc *crtc = plane_state->crtc;
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+ struct drm_framebuffer *fb = plane_state->fb;
++ bool pflip_needed;
+
+ if (!fb || !crtc || !crtc->state->planes_changed ||
+ !crtc->state->active)
+ continue;
+-
+- if (page_flip_needed(
++ pflip_needed = !state->allow_modeset &&
++ page_flip_needed(
+ plane_state,
+ old_plane_state,
+ crtc->state->event,
+- false)) {
+- amdgpu_crtc_submit_flip(
+- crtc,
+- fb,
+- work[flip_crtcs_count],
+- new_abo[i]);
+- flip_crtcs_count++;
++ false);
++
++ if (pflip_needed) {
++ amdgpu_dm_do_flip(
++ crtc,
++ fb,
++ drm_crtc_vblank_count(crtc));
++
++ wait_for_vblank =
++ acrtc->flip_flags & DRM_MODE_PAGE_FLIP_ASYNC ?
++ false : true;
+ /*clean up the flags for next usage*/
+ acrtc->flip_flags = 0;
+ }
+ }
+
+- /* In this state all old framebuffers would be unpinned */
+
+- /* TODO: Revisit when we support true asynchronous commit.*/
+- if (!nonblock)
+- drm_atomic_helper_cleanup_planes(dev, state);
++ /* TODO: mark the event assigned to each CRTC as consumed in
++ * drm_atomic_helper_setup_commit, just to signal completion
++ */
++ spin_lock_irqsave(&adev->ddev->event_lock, flags);
++ for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
++ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+- drm_atomic_state_put(state);
++ if (acrtc->base.state->event &&
++ acrtc->base.state->event->event.base.type != DRM_EVENT_FLIP_COMPLETE) {
++ acrtc->event = acrtc->base.state->event;
++ acrtc->base.state->event = NULL;
++ }
++ }
++ spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+
+- return ret;
++ /* Signal HW programming completion */
++ drm_atomic_helper_commit_hw_done(state);
++
++ if (wait_for_vblank)
++ drm_atomic_helper_wait_for_vblanks(dev, state);
++
++ /* TODO: send a vblank event for the event assigned to each CRTC in
++ * drm_atomic_helper_setup_commit, just to signal completion
++ */
++ spin_lock_irqsave(&adev->ddev->event_lock, flags);
++ for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
++ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
++
++ if (acrtc->event &&
++ acrtc->event->event.base.type != DRM_EVENT_FLIP_COMPLETE) {
++ drm_send_event_locked(dev, &acrtc->event->base);
++ acrtc->event = NULL;
++ }
++ }
++ spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
++
++ /* TODO: is it too early if the actual flip hasn't happened yet? */
++ /* Release old FB */
++ drm_atomic_helper_cleanup_planes(dev, state);
+ }
+ /*
+ * This functions handle all cases when set mode does not come upon hotplug.
+@@ -3152,6 +3122,7 @@ int amdgpu_dm_atomic_check(struct drm_device *dev,
+ struct dm_connector_state *dm_state = NULL;
+ enum dm_commit_action action;
+ struct drm_crtc_state *crtc_state;
++ bool pflip_needed;
+
+
+ if (!fb || !crtc || crtc_set[i] != crtc ||
+@@ -3165,10 +3136,12 @@ int amdgpu_dm_atomic_check(struct drm_device *dev,
+ * 2. This commit is a page flip, and streams are created.
+ */
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+- if (!page_flip_needed(plane_state, old_plane_state,
+- crtc_state->event, true) ||
+- action == DM_COMMIT_ACTION_DPMS_ON ||
+- action == DM_COMMIT_ACTION_SET) {
++ pflip_needed = !state->allow_modeset &&
++ page_flip_needed(plane_state, old_plane_state,
++ crtc_state->event, true);
++ if (!pflip_needed ||
++ action == DM_COMMIT_ACTION_DPMS_ON ||
++ action == DM_COMMIT_ACTION_SET) {
+ struct dc_surface *surface;
+
+ list_for_each_entry(connector,
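Stripped of the driver specifics, the new commit tail follows the standard helper ordering. A skeleton of the control flow implemented above, using only calls that appear in the hunks:

    void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
    {
    	struct drm_device *dev = state->dev;
    	bool wait_for_vblank = true;	/* cleared for DRM_MODE_PAGE_FLIP_ASYNC */

    	/* The helper has already swapped state before calling this hook. */
    	drm_atomic_helper_update_legacy_modeset_state(dev, state);
    	/* ... program streams and surfaces, run amdgpu_dm_do_flip() ... */
    	drm_atomic_helper_commit_hw_done(state);	/* unblock the next commit */
    	if (wait_for_vblank)
    		drm_atomic_helper_wait_for_vblanks(dev, state);
    	drm_atomic_helper_cleanup_planes(dev, state);	/* unpin old FBs */
    }
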
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_types.h
+index 4faa165..1bbeb87 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_types.h
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_types.h
+@@ -52,10 +52,9 @@ void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder);
+
+ int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
+
+-int amdgpu_dm_atomic_commit(
+- struct drm_device *dev,
+- struct drm_atomic_state *state,
+- bool async);
++void amdgpu_dm_atomic_commit_tail(
++ struct drm_atomic_state *state);
++
+ int amdgpu_dm_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state);
+
+--
+2.7.4
+