author    | Sean Paul <seanpaul@chromium.org> | 2020-02-13 16:15:20 -0500
committer | Sean Paul <seanpaul@chromium.org> | 2020-03-27 13:36:01 -0400
commit    | 6bb0942e8f46863a745489cce27efe5be2a3885e
tree      | bb009f223a4491b5bb81efada39d05992a0f3cce
parent    | fbc821c4a506a960e85f3e97e32cfab63d43f7d0
drm/dp_mst: Remove single tx msg restriction.
Now that we can support multiple simultaneous replies, remove the
restrictions placed on sending new tx msgs.
This patch essentially just reverts commit
5a64967a2f3b ("drm/dp_mst: Have DP_Tx send one msg at a time")
now that the problem is solved in a different way.
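For reference, a minimal before/after sketch of the queue gating this
patch changes, distilled from the drm_dp_queue_down_tx() hunk below
(illustrative only, not part of the patch):

	/* Before: hold back new msgs while any down reply is pending. */
	if (list_is_singular(&mgr->tx_msg_downq) &&
	    !mgr->is_waiting_for_dwn_reply)
		process_single_down_tx_qlock(mgr);

	/* After: replies are matched via mstb->tx_slots[seqno], so a new
	 * msg may go out while earlier ones still await their replies. */
	if (list_is_singular(&mgr->tx_msg_downq))
		process_single_down_tx_qlock(mgr);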
Cc: Wayne Lin <Wayne.Lin@amd.com>
Reviewed-by: Lyude Paul <lyude@redhat.com>
Reviewed-by: Wayne Lin <waynelin@amd.com>
Signed-off-by: Sean Paul <seanpaul@chromium.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20200213211523.156998-4-sean@poorly.run
-rw-r--r-- | drivers/gpu/drm/drm_dp_mst_topology.c | 14
-rw-r--r-- | include/drm/drm_dp_mst_helper.h       |  5
2 files changed, 2 insertions(+), 17 deletions(-)
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 236a4beb7bd6..840c6370fb39 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1205,8 +1205,6 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
 		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
 			mstb->tx_slots[txmsg->seqno] = NULL;
 		}
-		mgr->is_waiting_for_dwn_reply = false;
-
 	}
 out:
 	if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
@@ -1216,7 +1214,6 @@ out:
 	}
 	mutex_unlock(&mgr->qlock);
 
-	drm_dp_mst_kick_tx(mgr);
 	return ret;
 }
 
@@ -2796,11 +2793,9 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
 	ret = process_single_tx_qlock(mgr, txmsg, false);
 	if (ret == 1) {
 		/* txmsg is sent it should be in the slots now */
-		mgr->is_waiting_for_dwn_reply = true;
 		list_del(&txmsg->next);
 	} else if (ret) {
 		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
-		mgr->is_waiting_for_dwn_reply = false;
 		list_del(&txmsg->next);
 		if (txmsg->seqno != -1)
 			txmsg->dst->tx_slots[txmsg->seqno] = NULL;
@@ -2840,8 +2835,7 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
 		drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
 	}
 
-	if (list_is_singular(&mgr->tx_msg_downq) &&
-	    !mgr->is_waiting_for_dwn_reply)
+	if (list_is_singular(&mgr->tx_msg_downq))
 		process_single_down_tx_qlock(mgr);
 	mutex_unlock(&mgr->qlock);
 }
@@ -3828,7 +3822,6 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
 	mutex_lock(&mgr->qlock);
 	txmsg->state = DRM_DP_SIDEBAND_TX_RX;
 	mstb->tx_slots[seqno] = NULL;
-	mgr->is_waiting_for_dwn_reply = false;
 	mutex_unlock(&mgr->qlock);
 
 	wake_up_all(&mgr->tx_waitq);
@@ -3836,9 +3829,6 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
 	return 0;
 
 out_clear_reply:
-	mutex_lock(&mgr->qlock);
-	mgr->is_waiting_for_dwn_reply = false;
-	mutex_unlock(&mgr->qlock);
 	if (msg)
 		memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
 out:
@@ -4696,7 +4686,7 @@ static void drm_dp_tx_work(struct work_struct *work)
 	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
 
 	mutex_lock(&mgr->qlock);
-	if (!list_empty(&mgr->tx_msg_downq) && !mgr->is_waiting_for_dwn_reply)
+	if (!list_empty(&mgr->tx_msg_downq))
 		process_single_down_tx_qlock(mgr);
 	mutex_unlock(&mgr->qlock);
 }
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index cff135070535..bf5e65d2303e 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -591,11 +591,6 @@ struct drm_dp_mst_topology_mgr {
 	bool payload_id_table_cleared : 1;
 
 	/**
-	 * @is_waiting_for_dwn_reply: whether we're waiting for a down reply.
-	 */
-	bool is_waiting_for_dwn_reply : 1;
-
-	/**
 	 * @mst_primary: Pointer to the primary/first branch device.
 	 */
 	struct drm_dp_mst_branch *mst_primary;