Diffstat (limited to 'drivers/net/wireless/ath/ath10k/htt_tx.c')
-rw-r--r--	drivers/net/wireless/ath/ath10k/htt_tx.c	| 287
1 file changed, 152 insertions(+), 135 deletions(-)
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index 656c2546b294..d9335e9d0d04 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -96,7 +96,7 @@ int ath10k_htt_tx_attach(struct ath10k_htt *htt)
 	htt->max_num_pending_tx = ath10k_hif_get_free_queue_number(htt->ar,
 								   pipe);
 
-	ath10k_dbg(ATH10K_DBG_HTT, "htt tx max num pending tx %d\n",
+	ath10k_dbg(ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
 		   htt->max_num_pending_tx);
 
 	htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) *
@@ -117,7 +117,7 @@ int ath10k_htt_tx_attach(struct ath10k_htt *htt)
 
 static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
 {
-	struct sk_buff *txdesc;
+	struct htt_tx_done tx_done = {0};
 	int msdu_id;
 
 	/* No locks needed. Called after communication with the device has
@@ -127,18 +127,13 @@ static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
 		if (!test_bit(msdu_id, htt->used_msdu_ids))
 			continue;
 
-		txdesc = htt->pending_tx[msdu_id];
-		if (!txdesc)
-			continue;
-
 		ath10k_dbg(ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n",
 			   msdu_id);
 
-		if (ATH10K_SKB_CB(txdesc)->htt.refcount > 0)
-			ATH10K_SKB_CB(txdesc)->htt.refcount = 1;
+		tx_done.discard = 1;
+		tx_done.msdu_id = msdu_id;
 
-		ATH10K_SKB_CB(txdesc)->htt.discard = true;
-		ath10k_txrx_tx_unref(htt, txdesc);
+		ath10k_txrx_tx_unref(htt, &tx_done);
 	}
 }
 
@@ -152,26 +147,7 @@ void ath10k_htt_tx_detach(struct ath10k_htt *htt)
 
 void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
 {
-	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
-	struct ath10k_htt *htt = &ar->htt;
-
-	if (skb_cb->htt.is_conf) {
-		dev_kfree_skb_any(skb);
-		return;
-	}
-
-	if (skb_cb->is_aborted) {
-		skb_cb->htt.discard = true;
-
-		/* if the skbuff is aborted we need to make sure we'll free up
-		 * the tx resources, we can't simply run tx_unref() 2 times
-		 * because if htt tx completion came in earlier we'd access
-		 * unallocated memory */
-		if (skb_cb->htt.refcount > 1)
-			skb_cb->htt.refcount = 1;
-	}
-
-	ath10k_txrx_tx_unref(htt, skb);
+	dev_kfree_skb_any(skb);
 }
 
 int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
@@ -192,10 +168,48 @@ int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
 	cmd = (struct htt_cmd *)skb->data;
 	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;
 
-	ATH10K_SKB_CB(skb)->htt.is_conf = true;
+	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+	if (ret) {
+		dev_kfree_skb_any(skb);
+		return ret;
+	}
+
+	return 0;
+}
+
+int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
+{
+	struct htt_stats_req *req;
+	struct sk_buff *skb;
+	struct htt_cmd *cmd;
+	int len = 0, ret;
+
+	len += sizeof(cmd->hdr);
+	len += sizeof(cmd->stats_req);
+
+	skb = ath10k_htc_alloc_skb(len);
+	if (!skb)
+		return -ENOMEM;
+
+	skb_put(skb, len);
+	cmd = (struct htt_cmd *)skb->data;
+	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;
+
+	req = &cmd->stats_req;
+
+	memset(req, 0, sizeof(*req));
+
+	/* currently we support only max 8 bit masks so no need to worry
+	 * about endian support */
+	req->upload_types[0] = mask;
+	req->reset_types[0] = mask;
+	req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
+	req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
+	req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);
 
 	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
 	if (ret) {
+		ath10k_warn("failed to send htt type stats request: %d", ret);
 		dev_kfree_skb_any(skb);
 		return ret;
 	}
@@ -279,8 +293,6 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
 
 #undef desc_offset
 
-	ATH10K_SKB_CB(skb)->htt.is_conf = true;
-
 	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
 	if (ret) {
 		dev_kfree_skb_any(skb);
@@ -293,10 +305,10 @@
 int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
 {
 	struct device *dev = htt->ar->dev;
-	struct ath10k_skb_cb *skb_cb;
 	struct sk_buff *txdesc = NULL;
 	struct htt_cmd *cmd;
-	u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id;
+	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
+	u8 vdev_id = skb_cb->vdev_id;
 	int len = 0;
 	int msdu_id = -1;
 	int res;
@@ -304,30 +316,30 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
 
 	res = ath10k_htt_tx_inc_pending(htt);
 	if (res)
-		return res;
+		goto err;
 
 	len += sizeof(cmd->hdr);
 	len += sizeof(cmd->mgmt_tx);
 
-	txdesc = ath10k_htc_alloc_skb(len);
-	if (!txdesc) {
-		res = -ENOMEM;
-		goto err;
-	}
-
 	spin_lock_bh(&htt->tx_lock);
-	msdu_id = ath10k_htt_tx_alloc_msdu_id(htt);
-	if (msdu_id < 0) {
+	res = ath10k_htt_tx_alloc_msdu_id(htt);
+	if (res < 0) {
 		spin_unlock_bh(&htt->tx_lock);
-		res = msdu_id;
-		goto err;
+		goto err_tx_dec;
 	}
-	htt->pending_tx[msdu_id] = txdesc;
+	msdu_id = res;
+	htt->pending_tx[msdu_id] = msdu;
 	spin_unlock_bh(&htt->tx_lock);
 
+	txdesc = ath10k_htc_alloc_skb(len);
+	if (!txdesc) {
+		res = -ENOMEM;
+		goto err_free_msdu_id;
+	}
+
 	res = ath10k_skb_map(dev, msdu);
 	if (res)
-		goto err;
+		goto err_free_txdesc;
 
 	skb_put(txdesc, len);
 	cmd = (struct htt_cmd *)txdesc->data;
@@ -339,31 +351,27 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
 	memcpy(cmd->mgmt_tx.hdr, msdu->data,
 	       min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));
 
-	/* refcount is decremented by HTC and HTT completions until it reaches
-	 * zero and is freed */
-	skb_cb = ATH10K_SKB_CB(txdesc);
-	skb_cb->htt.msdu_id = msdu_id;
-	skb_cb->htt.refcount = 2;
-	skb_cb->htt.msdu = msdu;
+	skb_cb->htt.frag_len = 0;
+	skb_cb->htt.pad_len = 0;
 
 	res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
 	if (res)
-		goto err;
+		goto err_unmap_msdu;
 
 	return 0;
 
-err:
+err_unmap_msdu:
 	ath10k_skb_unmap(dev, msdu);
-
-	if (txdesc)
-		dev_kfree_skb_any(txdesc);
-	if (msdu_id >= 0) {
-		spin_lock_bh(&htt->tx_lock);
-		htt->pending_tx[msdu_id] = NULL;
-		ath10k_htt_tx_free_msdu_id(htt, msdu_id);
-		spin_unlock_bh(&htt->tx_lock);
-	}
+err_free_txdesc:
+	dev_kfree_skb_any(txdesc);
+err_free_msdu_id:
+	spin_lock_bh(&htt->tx_lock);
+	htt->pending_tx[msdu_id] = NULL;
+	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
+	spin_unlock_bh(&htt->tx_lock);
+err_tx_dec:
 	ath10k_htt_tx_dec_pending(htt);
+err:
 	return res;
 }
 
@@ -373,13 +381,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
 	struct htt_cmd *cmd;
 	struct htt_data_tx_desc_frag *tx_frags;
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
-	struct ath10k_skb_cb *skb_cb;
+	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
 	struct sk_buff *txdesc = NULL;
-	struct sk_buff *txfrag = NULL;
-	u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id;
+	bool use_frags;
+	u8 vdev_id = ATH10K_SKB_CB(msdu)->vdev_id;
 	u8 tid;
-	int prefetch_len, desc_len, frag_len;
-	dma_addr_t frags_paddr;
+	int prefetch_len, desc_len;
 	int msdu_id = -1;
 	int res;
 	u8 flags0;
@@ -387,69 +394,82 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
 	res = ath10k_htt_tx_inc_pending(htt);
 	if (res)
-		return res;
+		goto err;
+
+	spin_lock_bh(&htt->tx_lock);
+	res = ath10k_htt_tx_alloc_msdu_id(htt);
+	if (res < 0) {
+		spin_unlock_bh(&htt->tx_lock);
+		goto err_tx_dec;
+	}
+	msdu_id = res;
+	htt->pending_tx[msdu_id] = msdu;
+	spin_unlock_bh(&htt->tx_lock);
 
 	prefetch_len = min(htt->prefetch_len, msdu->len);
 	prefetch_len = roundup(prefetch_len, 4);
 
 	desc_len = sizeof(cmd->hdr) + sizeof(cmd->data_tx) + prefetch_len;
-	frag_len = sizeof(*tx_frags) * 2;
 
 	txdesc = ath10k_htc_alloc_skb(desc_len);
 	if (!txdesc) {
 		res = -ENOMEM;
-		goto err;
+		goto err_free_msdu_id;
 	}
 
-	txfrag = dev_alloc_skb(frag_len);
-	if (!txfrag) {
-		res = -ENOMEM;
-		goto err;
-	}
+	/* Since HTT 3.0 there is no separate mgmt tx command. However in case
+	 * of mgmt tx using TX_FRM there is not tx fragment list. Instead of tx
+	 * fragment list host driver specifies directly frame pointer. */
+	use_frags = htt->target_version_major < 3 ||
+		    !ieee80211_is_mgmt(hdr->frame_control);
 
 	if (!IS_ALIGNED((unsigned long)txdesc->data, 4)) {
 		ath10k_warn("htt alignment check failed. dropping packet.\n");
 		res = -EIO;
-		goto err;
+		goto err_free_txdesc;
 	}
 
-	spin_lock_bh(&htt->tx_lock);
-	msdu_id = ath10k_htt_tx_alloc_msdu_id(htt);
-	if (msdu_id < 0) {
-		spin_unlock_bh(&htt->tx_lock);
-		res = msdu_id;
-		goto err;
+	if (use_frags) {
+		skb_cb->htt.frag_len = sizeof(*tx_frags) * 2;
+		skb_cb->htt.pad_len = (unsigned long)msdu->data -
+				      round_down((unsigned long)msdu->data, 4);
+
+		skb_push(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
+	} else {
+		skb_cb->htt.frag_len = 0;
+		skb_cb->htt.pad_len = 0;
 	}
-	htt->pending_tx[msdu_id] = txdesc;
-	spin_unlock_bh(&htt->tx_lock);
 
 	res = ath10k_skb_map(dev, msdu);
 	if (res)
-		goto err;
-
-	/* tx fragment list must be terminated with zero-entry */
-	skb_put(txfrag, frag_len);
-	tx_frags = (struct htt_data_tx_desc_frag *)txfrag->data;
-	tx_frags[0].paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
-	tx_frags[0].len = __cpu_to_le32(msdu->len);
-	tx_frags[1].paddr = __cpu_to_le32(0);
-	tx_frags[1].len = __cpu_to_le32(0);
-
-	res = ath10k_skb_map(dev, txfrag);
-	if (res)
-		goto err;
+		goto err_pull_txfrag;
+
+	if (use_frags) {
+		dma_sync_single_for_cpu(dev, skb_cb->paddr, msdu->len,
+					DMA_TO_DEVICE);
+
+		/* tx fragment list must be terminated with zero-entry */
+		tx_frags = (struct htt_data_tx_desc_frag *)msdu->data;
+		tx_frags[0].paddr = __cpu_to_le32(skb_cb->paddr +
+						  skb_cb->htt.frag_len +
+						  skb_cb->htt.pad_len);
+		tx_frags[0].len = __cpu_to_le32(msdu->len -
+						skb_cb->htt.frag_len -
+						skb_cb->htt.pad_len);
+		tx_frags[1].paddr = __cpu_to_le32(0);
+		tx_frags[1].len = __cpu_to_le32(0);
+
+		dma_sync_single_for_device(dev, skb_cb->paddr, msdu->len,
+					   DMA_TO_DEVICE);
+	}
 
-	ath10k_dbg(ATH10K_DBG_HTT, "txfrag 0x%llx msdu 0x%llx\n",
-		   (unsigned long long) ATH10K_SKB_CB(txfrag)->paddr,
+	ath10k_dbg(ATH10K_DBG_HTT, "msdu 0x%llx\n",
 		   (unsigned long long) ATH10K_SKB_CB(msdu)->paddr);
-	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "txfrag: ",
-			txfrag->data, frag_len);
 	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "msdu: ",
 			msdu->data, msdu->len);
 
 	skb_put(txdesc, desc_len);
 	cmd = (struct htt_cmd *)txdesc->data;
-	memset(cmd, 0, desc_len);
 
 	tid = ATH10K_SKB_CB(msdu)->htt.tid;
@@ -459,8 +479,13 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
 	if (!ieee80211_has_protected(hdr->frame_control))
 		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
 	flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
-	flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
-		     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+
+	if (use_frags)
+		flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
+			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+	else
+		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
+			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
 
 	flags1 = 0;
 	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
@@ -468,45 +493,37 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
 	flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
 	flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
 
-	frags_paddr = ATH10K_SKB_CB(txfrag)->paddr;
-
 	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
 	cmd->data_tx.flags0 = flags0;
 	cmd->data_tx.flags1 = __cpu_to_le16(flags1);
-	cmd->data_tx.len = __cpu_to_le16(msdu->len);
+	cmd->data_tx.len = __cpu_to_le16(msdu->len -
+					 skb_cb->htt.frag_len -
+					 skb_cb->htt.pad_len);
 	cmd->data_tx.id = __cpu_to_le16(msdu_id);
-	cmd->data_tx.frags_paddr = __cpu_to_le32(frags_paddr);
+	cmd->data_tx.frags_paddr = __cpu_to_le32(skb_cb->paddr);
 	cmd->data_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID);
 
-	memcpy(cmd->data_tx.prefetch, msdu->data, prefetch_len);
-
-	/* refcount is decremented by HTC and HTT completions until it reaches
-	 * zero and is freed */
-	skb_cb = ATH10K_SKB_CB(txdesc);
-	skb_cb->htt.msdu_id = msdu_id;
-	skb_cb->htt.refcount = 2;
-	skb_cb->htt.txfrag = txfrag;
-	skb_cb->htt.msdu = msdu;
+	memcpy(cmd->data_tx.prefetch, hdr, prefetch_len);
 
 	res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
 	if (res)
-		goto err;
+		goto err_unmap_msdu;
 
 	return 0;
 
-err:
-	if (txfrag)
-		ath10k_skb_unmap(dev, txfrag);
-	if (txdesc)
-		dev_kfree_skb_any(txdesc);
-	if (txfrag)
-		dev_kfree_skb_any(txfrag);
-	if (msdu_id >= 0) {
-		spin_lock_bh(&htt->tx_lock);
-		htt->pending_tx[msdu_id] = NULL;
-		ath10k_htt_tx_free_msdu_id(htt, msdu_id);
-		spin_unlock_bh(&htt->tx_lock);
-	}
-	ath10k_htt_tx_dec_pending(htt);
+
+err_unmap_msdu:
 	ath10k_skb_unmap(dev, msdu);
+err_pull_txfrag:
+	skb_pull(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
+err_free_txdesc:
+	dev_kfree_skb_any(txdesc);
+err_free_msdu_id:
+	spin_lock_bh(&htt->tx_lock);
+	htt->pending_tx[msdu_id] = NULL;
+	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
+	spin_unlock_bh(&htt->tx_lock);
+err_tx_dec:
+	ath10k_htt_tx_dec_pending(htt);
+err:
	return res;
 }
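A note on the new ath10k_htt_h2t_stats_req(): the 64-bit cookie travels in the HTT stats request as two little-endian 32-bit words (cookie_lsb/cookie_msb), so the completion path can match a firmware stats event back to the requester. A minimal sketch of the split and the reverse recombination (the field names come from the patch; everything else here is illustrative):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t cookie = 0x1122334455667788ULL;

            /* split exactly as the patch does, before any byte-swapping */
            uint32_t lsb = (uint32_t)(cookie & 0xffffffff);
            uint32_t msb = (uint32_t)((cookie & 0xffffffff00000000ULL) >> 32);

            /* the event handler can rebuild the original cookie */
            uint64_t rebuilt = ((uint64_t)msb << 32) | lsb;

            assert(rebuilt == cookie);
            return 0;
    }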
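The rewritten error handling in ath10k_htt_mgmt_tx() and ath10k_htt_tx() drops the old "if (txdesc) ... if (msdu_id >= 0) ..." guards in favor of the usual kernel goto ladder: each label undoes exactly one setup step, a failure jumps to the label matching how far setup got, and teardown therefore runs in reverse order of acquisition. A stripped-down sketch of the shape (the acquire/release helpers are invented stand-ins, not driver functions):

    /* hypothetical helpers standing in for the real setup steps */
    static int acquire_a(void) { return 0; }  /* cf. ath10k_htt_tx_inc_pending() */
    static int acquire_b(void) { return 0; }  /* cf. msdu id allocation */
    static int acquire_c(void) { return 0; }  /* cf. ath10k_skb_map() */
    static void release_b(void) { }
    static void release_a(void) { }

    static int tx_example(void)
    {
            int res;

            res = acquire_a();
            if (res)
                    goto err;

            res = acquire_b();
            if (res)
                    goto err_release_a;

            res = acquire_c();
            if (res)
                    goto err_release_b;

            return 0;

    err_release_b:        /* unwind in reverse order of acquisition */
            release_b();
    err_release_a:
            release_a();
    err:
            return res;
    }

    int main(void) { return tx_example(); }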
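ath10k_htt_tx() also stops allocating and DMA-mapping a separate txfrag skb. skb_push() instead reserves room for the two-entry fragment list, plus up to 3 bytes of padding so the list starts 4-byte aligned, in the msdu's own headroom; the dma_sync_single_for_cpu()/dma_sync_single_for_device() pair then brackets the CPU writes because the whole buffer is handed to the device as a single mapping. A small sketch of the length arithmetic, assuming the patch's 8-byte {paddr, len} fragment entries:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* pretend msdu->data sits at a misaligned address */
            uintptr_t data = 0x1000f;

            size_t frag_len = 2 * 8;                        /* two {paddr, len} entries */
            size_t pad_len = data - (data & ~(uintptr_t)3); /* cf. round_down(data, 4) */

            /* skb_push(msdu, frag_len + pad_len) moves the head to
             * data - frag_len - pad_len, which is 4-byte aligned, so
             * frags_paddr can simply point at the mapped skb start */
            printf("push %zu bytes (frag %zu + pad %zu)\n",
                   frag_len + pad_len, frag_len, pad_len);
            return 0;
    }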
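Finally, the SM() calls used when building flags0/flags1 are ath10k's shift-and-mask helper for packing a value into a named descriptor field; the macro pastes _LSB and _MASK onto the field name. A self-contained illustration with an invented field (the real HTT constants live in the driver headers, and this is only the general shape of the macro):

    #include <stdio.h>

    /* invented example field occupying bits 5..6 of flags0 */
    #define EX_PKT_TYPE_LSB  5
    #define EX_PKT_TYPE_MASK 0x60

    /* shift the value into place, then clamp it to the field's mask */
    #define SM(v, f) (((v) << f##_LSB) & f##_MASK)

    int main(void)
    {
            unsigned int flags0 = 0;

            flags0 |= SM(2, EX_PKT_TYPE);      /* pack pkt-type value 2 */
            printf("flags0 = 0x%x\n", flags0); /* prints 0x40 */
            return 0;
    }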