author    Linus Torvalds <torvalds@linux-foundation.org>    2017-05-02 16:40:27 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2017-05-02 16:40:27 -0700
commit    8d65b08debc7e62b2c6032d7fe7389d895b92cbc (patch)
tree      0c3141b60c3a03cc32742b5750c5e763b9dae489 /drivers/net/wireless/intel/iwlwifi
parent    5a0387a8a8efb90ae7fea1e2e5c62de3efa74691 (diff)
parent    5d15af6778b8e4ed1fd41b040283af278e7a9a72 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
 "Here are some highlights from the 2065 networking commits that
  happened this development cycle:

  1) XDP support for IXGBE (John Fastabend) and thunderx (Sunil Kowuri)

  2) Add a generic XDP driver, so that anyone can test XDP even if they
     lack a networking device whose driver has explicit XDP support (me).

  3) Sparc64 now has an eBPF JIT too (me)

  4) Add a BPF program testing framework via BPF_PROG_TEST_RUN (Alexei
     Starovoitov)

  5) Make netfilter network namespace teardown less expensive (Florian
     Westphal)

  6) Add symmetric hashing support to nft_hash (Laura Garcia Liebana)

  7) Implement NAPI and GRO in netvsc driver (Stephen Hemminger)

  8) Support TC flower offload statistics in mlxsw (Arkadi Sharshevsky)

  9) Multiqueue support in stmmac driver (Joao Pinto)

 10) Remove TCP timewait recycling, it never really could possibly work
     well in the real world and timestamp randomization really zaps any
     hint of usability this feature had (Soheil Hassas Yeganeh)

 11) Support level3 vs level4 ECMP route hashing in ipv4 (Nikolay
     Aleksandrov)

 12) Add socket busy poll support to epoll (Sridhar Samudrala)

 13) Netlink extended ACK support (Johannes Berg, Pablo Neira Ayuso, and
     several others)

 14) IPSEC hw offload infrastructure (Steffen Klassert)"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (2065 commits)
  tipc: refactor function tipc_sk_recv_stream()
  tipc: refactor function tipc_sk_recvmsg()
  net: thunderx: Optimize page recycling for XDP
  net: thunderx: Support for XDP header adjustment
  net: thunderx: Add support for XDP_TX
  net: thunderx: Add support for XDP_DROP
  net: thunderx: Add basic XDP support
  net: thunderx: Cleanup receive buffer allocation
  net: thunderx: Optimize CQE_TX handling
  net: thunderx: Optimize RBDR descriptor handling
  net: thunderx: Support for page recycling
  ipx: call ipxitf_put() in ioctl error path
  net: sched: add helpers to handle extended actions
  qed*: Fix issues in the ptp filter config implementation.
  qede: Fix concurrency issue in PTP Tx path processing.
  stmmac: Add support for SIMATIC IOT2000 platform
  net: hns: fix ethtool_get_strings overflow in hns driver
  tcp: fix wraparound issue in tcp_lp
  bpf, arm64: fix jit branch offset related to ldimm64
  bpf, arm64: implement jiting of BPF_XADD
  ...
Diffstat (limited to 'drivers/net/wireless/intel/iwlwifi')
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/Makefile | 1
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/dvm/lib.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c | 4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/dvm/rs.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/dvm/rx.c | 10
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-7000.c | 4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-8000.c | 4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-9000.c | 48
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-a000.c | 33
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-config.h | 27
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-context-info.h | 203
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-csr.h | 1
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 49
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-fh.h | 6
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h | 7
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-io.c | 7
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.c | 10
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.h | 25
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c | 32
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h | 16
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-prph.h | 10
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-trans.c | 7
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-trans.h | 115
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/binding.c | 17
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/coex.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/d3.c | 26
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h | 9
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h | 43
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h | 28
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h | 14
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h | 86
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h | 91
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h | 107
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c | 287
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 661
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c | 20
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 72
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 93
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/nvm.c | 9
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 35
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c | 21
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rs.c | 11
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rx.c | 141
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 117
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/scan.c | 173
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/sf.c | 6
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 671
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/sta.h | 10
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/tdls.c | 22
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/tof.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/tt.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 145
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/utils.c | 152
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c | 281
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 24
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/internal.h | 98
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/rx.c | 59
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c | 374
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 287
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c | 1018
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/tx.c | 237
63 files changed, 4658 insertions, 1420 deletions
diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile
index 92e611841200..411cb91c102f 100644
--- a/drivers/net/wireless/intel/iwlwifi/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/Makefile
@@ -7,6 +7,7 @@ iwlwifi-objs += iwl-notif-wait.o
iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o
iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o
iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
+iwlwifi-objs += pcie/ctxt-info.o pcie/trans-gen2.o pcie/tx-gen2.o
iwlwifi-$(CONFIG_IWLDVM) += iwl-1000.o iwl-2000.o iwl-5000.o iwl-6000.o
iwlwifi-$(CONFIG_IWLMVM) += iwl-7000.o iwl-8000.o iwl-9000.o iwl-a000.o
iwlwifi-objs += iwl-trans.o
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
index 6c2d6da7eec6..74e52f7c5aa1 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
@@ -180,7 +180,7 @@ void iwlagn_dev_txfifo_flush(struct iwl_priv *priv)
goto done;
}
IWL_DEBUG_INFO(priv, "wait transmit/flush all frames\n");
- iwl_trans_wait_tx_queue_empty(priv->trans, 0xffffffff);
+ iwl_trans_wait_tx_queues_empty(priv->trans, 0xffffffff);
done:
ieee80211_wake_queues(priv->hw);
mutex_unlock(&priv->mutex);
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
index 2a04d0cd71ae..444c74371929 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
@@ -213,6 +213,8 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
iwl_leds_init(priv);
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
ret = ieee80211_register_hw(priv->hw);
if (ret) {
IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
@@ -1143,7 +1145,7 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
IWL_DEBUG_TX_QUEUES(priv, "wait transmit/flush all frames\n");
- iwl_trans_wait_tx_queue_empty(priv->trans, scd_queues);
+ iwl_trans_wait_tx_queues_empty(priv->trans, scd_queues);
done:
mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
index ff44ebc5829d..ddcd8c2d66cd 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
@@ -2720,7 +2720,7 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
/* Get max rate if user set max rate */
if (lq_sta) {
- lq_sta->max_rate_idx = txrc->max_rate_idx;
+ lq_sta->max_rate_idx = fls(txrc->rate_idx_mask) - 1;
if ((sband->band == NL80211_BAND_5GHZ) &&
(lq_sta->max_rate_idx != -1))
lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
index dfa2041cfdac..1ee1ba9931a7 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
@@ -873,7 +873,7 @@ static void iwlagn_rx_reply_rx(struct iwl_priv *priv,
/* set the preamble flag if appropriate */
if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
- rx_status.flag |= RX_FLAG_SHORTPRE;
+ rx_status.enc_flags |= RX_ENC_FLAG_SHORTPRE;
if (phy_res->phy_flags & RX_RES_PHY_FLAGS_AGG_MSK) {
/*
@@ -887,13 +887,13 @@ static void iwlagn_rx_reply_rx(struct iwl_priv *priv,
/* Set up the HT phy flags */
if (rate_n_flags & RATE_MCS_HT_MSK)
- rx_status.flag |= RX_FLAG_HT;
+ rx_status.encoding = RX_ENC_HT;
if (rate_n_flags & RATE_MCS_HT40_MSK)
- rx_status.flag |= RX_FLAG_40MHZ;
+ rx_status.enc_flags |= RX_ENC_FLAG_40MHZ;
if (rate_n_flags & RATE_MCS_SGI_MSK)
- rx_status.flag |= RX_FLAG_SHORT_GI;
+ rx_status.enc_flags |= RX_ENC_FLAG_SHORT_GI;
if (rate_n_flags & RATE_MCS_GF_MSK)
- rx_status.flag |= RX_FLAG_HT_GF;
+ rx_status.enc_flags |= RX_ENC_FLAG_HT_GF;
iwlagn_pass_packet_to_mac80211(priv, header, len, ampdu_status,
rxb, &rx_status);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
index a72e58623d3a..3b3e076571d6 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
@@ -73,8 +73,8 @@
/* Highest firmware API version supported */
#define IWL7260_UCODE_API_MAX 17
#define IWL7265_UCODE_API_MAX 17
-#define IWL7265D_UCODE_API_MAX 28
-#define IWL3168_UCODE_API_MAX 28
+#define IWL7265D_UCODE_API_MAX 29
+#define IWL3168_UCODE_API_MAX 29
/* Lowest firmware API version supported */
#define IWL7260_UCODE_API_MIN 17
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
index b7953bf55f6f..b9718c0cf174 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
@@ -70,8 +70,8 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX 28
-#define IWL8265_UCODE_API_MAX 28
+#define IWL8000_UCODE_API_MAX 30
+#define IWL8265_UCODE_API_MAX 30
/* Lowest firmware API version supported */
#define IWL8000_UCODE_API_MIN 17
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c
index a5f0c0bf85ec..110ceefccc15 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015-2016 Intel Deutschland GmbH
+ * Copyright(c) 2015-2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -18,7 +18,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015-2016 Intel Deutschland GmbH
+ * Copyright(c) 2015-2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -55,10 +55,10 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL9000_UCODE_API_MAX 28
+#define IWL9000_UCODE_API_MAX 30
/* Lowest firmware API version supported */
-#define IWL9000_UCODE_API_MIN 17
+#define IWL9000_UCODE_API_MIN 30
/* NVM versions */
#define IWL9000_NVM_VERSION 0x0a1d
@@ -73,14 +73,14 @@
#define IWL9000_SMEM_LEN 0x68000
#define IWL9000_FW_PRE "iwlwifi-9000-pu-a0-jf-a0-"
-#define IWL9260_FW_PRE "iwlwifi-9260-th-a0-jf-a0-"
-#define IWL9000LC_FW_PRE "iwlwifi-9000-pu-a0-lc-a0-"
+#define IWL9260A_FW_PRE "iwlwifi-9260-th-a0-jf-a0-"
+#define IWL9260B_FW_PRE "iwlwifi-9260-th-b0-jf-b0-"
#define IWL9000_MODULE_FIRMWARE(api) \
IWL9000_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL9260_MODULE_FIRMWARE(api) \
- IWL9260_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL9000LC_MODULE_FIRMWARE(api) \
- IWL9000LC_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL9260A_MODULE_FIRMWARE(api) \
+ IWL9260A_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL9260B_MODULE_FIRMWARE(api) \
+ IWL9260B_FW_PRE "-" __stringify(api) ".ucode"
#define NVM_HW_SECTION_NUM_FAMILY_9000 10
@@ -148,7 +148,8 @@ static const struct iwl_tt_params iwl9000_tt_params = {
const struct iwl_cfg iwl9160_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9160",
- .fw_name_pre = IWL9260_FW_PRE,
+ .fw_name_pre = IWL9260A_FW_PRE,
+ .fw_name_pre_next_step = IWL9260B_FW_PRE,
IWL_DEVICE_9000,
.ht_params = &iwl9000_ht_params,
.nvm_ver = IWL9000_NVM_VERSION,
@@ -158,7 +159,8 @@ const struct iwl_cfg iwl9160_2ac_cfg = {
const struct iwl_cfg iwl9260_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9260",
- .fw_name_pre = IWL9260_FW_PRE,
+ .fw_name_pre = IWL9260A_FW_PRE,
+ .fw_name_pre_next_step = IWL9260B_FW_PRE,
IWL_DEVICE_9000,
.ht_params = &iwl9000_ht_params,
.nvm_ver = IWL9000_NVM_VERSION,
@@ -168,7 +170,8 @@ const struct iwl_cfg iwl9260_2ac_cfg = {
const struct iwl_cfg iwl9270_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9270",
- .fw_name_pre = IWL9260_FW_PRE,
+ .fw_name_pre = IWL9260A_FW_PRE,
+ .fw_name_pre_next_step = IWL9260B_FW_PRE,
IWL_DEVICE_9000,
.ht_params = &iwl9000_ht_params,
.nvm_ver = IWL9000_NVM_VERSION,
@@ -198,21 +201,6 @@ const struct iwl_cfg iwl9560_2ac_cfg = {
.integrated = true,
};
-/*
- * TODO the struct below is for internal testing only this should be
- * removed by EO 2016~
- */
-const struct iwl_cfg iwl9000lc_2ac_cfg = {
- .name = "Intel(R) Dual Band Wireless AC 9000",
- .fw_name_pre = IWL9000LC_FW_PRE,
- IWL_DEVICE_9000,
- .ht_params = &iwl9000_ht_params,
- .nvm_ver = IWL9000_NVM_VERSION,
- .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
- .integrated = true,
-};
-
MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL9260_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL9000LC_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL9260A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL9260B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-a000.c b/drivers/net/wireless/intel/iwlwifi/iwl-a000.c
index 15dd7f6137c8..c648cfb981a3 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-a000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-a000.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015-2016 Intel Deutschland GmbH
+ * Copyright(c) 2015-2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -18,7 +18,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015-2016 Intel Deutschland GmbH
+ * Copyright(c) 2015-2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -55,7 +55,7 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL_A000_UCODE_API_MAX 28
+#define IWL_A000_UCODE_API_MAX 30
/* Lowest firmware API version supported */
#define IWL_A000_UCODE_API_MIN 24
@@ -65,15 +65,16 @@
#define IWL_A000_TX_POWER_VERSION 0xffff /* meaningless */
/* Memory offsets and lengths */
-#define IWL_A000_DCCM_OFFSET 0x800000
-#define IWL_A000_DCCM_LEN 0x18000
+#define IWL_A000_DCCM_OFFSET 0x800000 /* LMAC1 */
+#define IWL_A000_DCCM_LEN 0x10000 /* LMAC1 */
#define IWL_A000_DCCM2_OFFSET 0x880000
#define IWL_A000_DCCM2_LEN 0x8000
#define IWL_A000_SMEM_OFFSET 0x400000
-#define IWL_A000_SMEM_LEN 0x68000
+#define IWL_A000_SMEM_LEN 0xD0000
-#define IWL_A000_JF_FW_PRE "iwlwifi-Qu-a0-jf-b0-"
-#define IWL_A000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-"
+#define IWL_A000_JF_FW_PRE "iwlwifi-Qu-a0-jf-b0-"
+#define IWL_A000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-"
+#define IWL_A000_HR_CDB_FW_PRE "iwlwifi-QuIcp-a0-hrcdb-a0-"
#define IWL_A000_HR_MODULE_FIRMWARE(api) \
IWL_A000_HR_FW_PRE "-" __stringify(api) ".ucode"
@@ -84,7 +85,7 @@
static const struct iwl_base_params iwl_a000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_A000,
- .num_of_queues = 31,
+ .num_of_queues = 512,
.shadow_ram_support = true,
.led_compensation = 57,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
@@ -121,7 +122,8 @@ static const struct iwl_ht_params iwl_a000_ht_params = {
.vht_mu_mimo_supported = true, \
.mac_addr_from_csr = true, \
.use_tfh = true, \
- .rf_id = true
+ .rf_id = true, \
+ .gen2 = true
const struct iwl_cfg iwla000_2ac_cfg_hr = {
.name = "Intel(R) Dual Band Wireless AC a000",
@@ -133,6 +135,17 @@ const struct iwl_cfg iwla000_2ac_cfg_hr = {
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
};
+const struct iwl_cfg iwla000_2ac_cfg_hr_cdb = {
+ .name = "Intel(R) Dual Band Wireless AC a000",
+ .fw_name_pre = IWL_A000_HR_CDB_FW_PRE,
+ IWL_DEVICE_A000,
+ .ht_params = &iwl_a000_ht_params,
+ .nvm_ver = IWL_A000_NVM_VERSION,
+ .nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .cdb = true,
+};
+
const struct iwl_cfg iwla000_2ac_cfg_jf = {
.name = "Intel(R) Dual Band Wireless AC a000",
.fw_name_pre = IWL_A000_JF_FW_PRE,
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 94f8a51b633e..a12197e3ce78 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
- * Copyright (C) 2016 Intel Deutschland GmbH
+ * Copyright (C) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
- * Copyright (C) 2016 Intel Deutschland GmbH
+ * Copyright (C) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -90,16 +90,6 @@ enum iwl_device_family {
IWL_DEVICE_FAMILY_8000,
};
-static inline bool iwl_has_secure_boot(u32 hw_rev,
- enum iwl_device_family family)
-{
- /* return 1 only for family 8000 B0 */
- if ((family == IWL_DEVICE_FAMILY_8000) && (hw_rev & 0xC))
- return true;
-
- return false;
-}
-
/*
* LED mode
* IWL_LED_DEFAULT: use device default
@@ -180,7 +170,7 @@ struct iwl_base_params {
apmg_wake_up_wa:1,
scd_chain_ext_wa:1;
- u8 num_of_queues; /* def: HW dependent */
+ u16 num_of_queues; /* def: HW dependent */
u8 max_ll_items;
u8 led_compensation;
@@ -283,6 +273,8 @@ struct iwl_pwr_tx_backoff {
* @fw_name_pre: Firmware filename prefix. The api version and extension
* (.ucode) will be added to filename before loading from disk. The
* filename is constructed as fw_name_pre<api>.ucode.
+ * @fw_name_pre_next_step: same as @fw_name_pre, only for next step
+ * (if supported)
* @ucode_api_max: Highest version of uCode API supported by driver.
* @ucode_api_min: Lowest version of uCode API supported by driver.
* @max_inst_size: The maximal length of the fw inst section
@@ -321,6 +313,8 @@ struct iwl_pwr_tx_backoff {
* @vht_mu_mimo_supported: VHT MU-MIMO support
* @rf_id: need to read rf_id to determine the firmware image
* @integrated: discrete or integrated
+ * @gen2: a000 and on transport operation
+ * @cdb: CDB support
*
* We enable the driver to be backward compatible wrt. hardware features.
* API differences in uCode shouldn't be handled here but through TLVs
@@ -330,6 +324,7 @@ struct iwl_cfg {
/* params specific to an individual device within a device family */
const char *name;
const char *fw_name_pre;
+ const char *fw_name_pre_next_step;
/* params not likely to change within a device family */
const struct iwl_base_params *base_params;
/* params likely to change within a device family */
@@ -365,7 +360,9 @@ struct iwl_cfg {
vht_mu_mimo_supported:1,
rf_id:1,
integrated:1,
- use_tfh:1;
+ use_tfh:1,
+ gen2:1,
+ cdb:1;
u8 valid_tx_ant;
u8 valid_rx_ant;
u8 non_shared_ant;
@@ -449,13 +446,13 @@ extern const struct iwl_cfg iwl4165_2ac_cfg;
extern const struct iwl_cfg iwl8260_2ac_sdio_cfg;
extern const struct iwl_cfg iwl8265_2ac_sdio_cfg;
extern const struct iwl_cfg iwl4165_2ac_sdio_cfg;
-extern const struct iwl_cfg iwl9000lc_2ac_cfg;
extern const struct iwl_cfg iwl9160_2ac_cfg;
extern const struct iwl_cfg iwl9260_2ac_cfg;
extern const struct iwl_cfg iwl9270_2ac_cfg;
extern const struct iwl_cfg iwl9460_2ac_cfg;
extern const struct iwl_cfg iwl9560_2ac_cfg;
extern const struct iwl_cfg iwla000_2ac_cfg_hr;
+extern const struct iwl_cfg iwla000_2ac_cfg_hr_cdb;
extern const struct iwl_cfg iwla000_2ac_cfg_jf;
#endif /* CONFIG_IWLMVM */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
new file mode 100644
index 000000000000..b870c0986744
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
@@ -0,0 +1,203 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_context_info_file_h__
+#define __iwl_context_info_file_h__
+
+/* maximum number of DRAM map entries supported by FW */
+#define IWL_MAX_DRAM_ENTRY 64
+#define CSR_CTXT_INFO_BA 0x40
+
+/**
+ * enum iwl_context_info_flags - Context information control flags
+ * @IWL_CTXT_INFO_AUTO_FUNC_INIT: If set, FW will not wait before interrupting
+ * the init done for driver command that configures several system modes
+ * @IWL_CTXT_INFO_EARLY_DEBUG: enable early debug
+ * @IWL_CTXT_INFO_ENABLE_CDMP: enable core dump
+ * @IWL_CTXT_INFO_RB_SIZE_4K: Use 4K RB size (the default is 2K)
+ * @IWL_CTXT_INFO_RB_CB_SIZE_POS: position of the RBD Cyclic Buffer Size
+ * exponent, the actual size is 2**value, valid sizes are 8-2048.
+ * The value is four bits long. Maximum valid exponent is 12
+ * @IWL_CTXT_INFO_TFD_FORMAT_LONG: use long TFD Format (the
+ * default is short format - not supported by the driver)
+ */
+enum iwl_context_info_flags {
+ IWL_CTXT_INFO_AUTO_FUNC_INIT = BIT(0),
+ IWL_CTXT_INFO_EARLY_DEBUG = BIT(1),
+ IWL_CTXT_INFO_ENABLE_CDMP = BIT(2),
+ IWL_CTXT_INFO_RB_SIZE_4K = BIT(3),
+ IWL_CTXT_INFO_RB_CB_SIZE_POS = 4,
+ IWL_CTXT_INFO_TFD_FORMAT_LONG = BIT(8),
+};
+
+/*
+ * struct iwl_context_info_version - version structure
+ * @mac_id: SKU and revision id
+ * @version: context information version id
+ * @size: the size of the context information in DWs
+ */
+struct iwl_context_info_version {
+ __le16 mac_id;
+ __le16 version;
+ __le16 size;
+ __le16 reserved;
+} __packed;
+
+/*
+ * struct iwl_context_info_control - control structure
+ * @control_flags: context information flags see &enum iwl_context_info_flags
+ */
+struct iwl_context_info_control {
+ __le32 control_flags;
+ __le32 reserved;
+} __packed;
+
+/*
+ * struct iwl_context_info_dram - images DRAM map
+ * each entry in the map represents a DRAM chunk of up to 32 KB
+ * @umac_img: UMAC image DRAM map
+ * @lmac_img: LMAC image DRAM map
+ * @virtual_img: paged image DRAM map
+ */
+struct iwl_context_info_dram {
+ __le64 umac_img[IWL_MAX_DRAM_ENTRY];
+ __le64 lmac_img[IWL_MAX_DRAM_ENTRY];
+ __le64 virtual_img[IWL_MAX_DRAM_ENTRY];
+} __packed;
+
+/*
+ * struct iwl_context_info_rbd_cfg - RBDs configuration
+ * @free_rbd_addr: default queue free RB CB base address
+ * @used_rbd_addr: default queue used RB CB base address
+ * @status_wr_ptr: default queue used RB status write pointer
+ */
+struct iwl_context_info_rbd_cfg {
+ __le64 free_rbd_addr;
+ __le64 used_rbd_addr;
+ __le64 status_wr_ptr;
+} __packed;
+
+/*
+ * struct iwl_context_info_hcmd_cfg - command queue configuration
+ * @cmd_queue_addr: address of command queue
+ * @cmd_queue_size: number of entries
+ */
+struct iwl_context_info_hcmd_cfg {
+ __le64 cmd_queue_addr;
+ u8 cmd_queue_size;
+ u8 reserved[7];
+} __packed;
+
+/*
+ * struct iwl_context_info_dump_cfg - Core Dump configuration
+ * @core_dump_addr: core dump (debug DRAM address) start address
+ * @core_dump_size: size, in DWs
+ */
+struct iwl_context_info_dump_cfg {
+ __le64 core_dump_addr;
+ __le32 core_dump_size;
+ __le32 reserved;
+} __packed;
+
+/*
+ * struct iwl_context_info_pnvm_cfg - platform NVM data configuration
+ * @platform_nvm_addr: Platform NVM data start address
+ * @platform_nvm_size: size in DWs
+ */
+struct iwl_context_info_pnvm_cfg {
+ __le64 platform_nvm_addr;
+ __le32 platform_nvm_size;
+ __le32 reserved;
+} __packed;
+
+/*
+ * struct iwl_context_info_early_dbg_cfg - early debug configuration for
+ * dumping DRAM addresses
+ * @early_debug_addr: early debug start address
+ * @early_debug_size: size in DWs
+ */
+struct iwl_context_info_early_dbg_cfg {
+ __le64 early_debug_addr;
+ __le32 early_debug_size;
+ __le32 reserved;
+} __packed;
+
+/*
+ * struct iwl_context_info - device INIT configuration
+ * @version: version information of context info and HW
+ * @control: control flags of FH configurations
+ * @rbd_cfg: default RX queue configuration
+ * @hcmd_cfg: command queue configuration
+ * @dump_cfg: core dump data
+ * @edbg_cfg: early debug configuration
+ * @pnvm_cfg: platform nvm configuration
+ * @dram: firmware image addresses in DRAM
+ */
+struct iwl_context_info {
+ struct iwl_context_info_version version;
+ struct iwl_context_info_control control;
+ __le64 reserved0;
+ struct iwl_context_info_rbd_cfg rbd_cfg;
+ struct iwl_context_info_hcmd_cfg hcmd_cfg;
+ __le32 reserved1[4];
+ struct iwl_context_info_dump_cfg dump_cfg;
+ struct iwl_context_info_early_dbg_cfg edbg_cfg;
+ struct iwl_context_info_pnvm_cfg pnvm_cfg;
+ __le32 reserved2[16];
+ struct iwl_context_info_dram dram;
+ __le32 reserved3[16];
+} __packed;
+
+int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, const struct fw_img *fw);
+void iwl_pcie_ctxt_info_free(struct iwl_trans *trans);
+void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans);
+
+#endif /* __iwl_context_info_file_h__ */
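
For orientation, here is a minimal sketch, not part of this commit, of how the
control flags above compose; the helper name is made up and the power-of-two
precondition on the RBD count is an assumption based on the kdoc:

    #include <linux/log2.h>

    /* sketch only: build control_flags for struct iwl_context_info_control */
    static u32 example_ctxt_info_flags(unsigned int num_rbds, bool rb_4k)
    {
    	u32 flags = IWL_CTXT_INFO_TFD_FORMAT_LONG;

    	if (rb_4k)
    		flags |= IWL_CTXT_INFO_RB_SIZE_4K;

    	/* per the kdoc above: actual size is 2**value, valid sizes 8-2048 */
    	flags |= ilog2(num_rbds) << IWL_CTXT_INFO_RB_CB_SIZE_POS;

    	return flags;	/* stored as cpu_to_le32(flags) in control_flags */
    }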
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index 4ee3b621ec27..fa120fb55373 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -348,7 +348,6 @@ enum {
/* RF_ID value */
#define CSR_HW_RF_ID_TYPE_JF (0x00105000)
-#define CSR_HW_RF_ID_TYPE_LC (0x00101000)
#define CSR_HW_RF_ID_TYPE_HR (0x00109000)
/* EEPROM REG */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index be466a074c1d..5cfacb0bca84 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -7,7 +7,7 @@
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -211,24 +211,46 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw,
static int iwl_request_firmware(struct iwl_drv *drv, bool first)
{
- const char *name_pre = drv->trans->cfg->fw_name_pre;
+ const struct iwl_cfg *cfg = drv->trans->cfg;
char tag[8];
+ const char *fw_pre_name;
+
+ if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
+ CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_B_STEP)
+ fw_pre_name = cfg->fw_name_pre_next_step;
+ else
+ fw_pre_name = cfg->fw_name_pre;
if (first) {
- drv->fw_index = drv->trans->cfg->ucode_api_max;
+ drv->fw_index = cfg->ucode_api_max;
sprintf(tag, "%d", drv->fw_index);
} else {
drv->fw_index--;
sprintf(tag, "%d", drv->fw_index);
}
- if (drv->fw_index < drv->trans->cfg->ucode_api_min) {
+ if (drv->fw_index < cfg->ucode_api_min) {
IWL_ERR(drv, "no suitable firmware found!\n");
+
+ if (cfg->ucode_api_min == cfg->ucode_api_max) {
+ IWL_ERR(drv, "%s%d is required\n", fw_pre_name,
+ cfg->ucode_api_max);
+ } else {
+ IWL_ERR(drv, "minimum version required: %s%d\n",
+ fw_pre_name,
+ cfg->ucode_api_min);
+ IWL_ERR(drv, "maximum version supported: %s%d\n",
+ fw_pre_name,
+ cfg->ucode_api_max);
+ }
+
+ IWL_ERR(drv,
+ "check git://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git\n");
return -ENOENT;
}
snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode",
- name_pre, tag);
+ fw_pre_name, tag);
IWL_DEBUG_INFO(drv, "attempting to load firmware '%s'\n",
drv->firmware_name);
@@ -1260,7 +1282,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
pieces = kzalloc(sizeof(*pieces), GFP_KERNEL);
if (!pieces)
- return;
+ goto out_free_fw;
if (!ucode_raw)
goto try_again;
@@ -1472,7 +1494,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
* or hangs loading.
*/
if (load_module) {
- err = request_module("%s", op->name);
+ request_module("%s", op->name);
#ifdef CONFIG_IWLWIFI_OPMODE_MODULAR
if (err)
IWL_ERR(drv,
@@ -1490,17 +1512,18 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
goto free;
out_free_fw:
- IWL_ERR(drv, "failed to allocate pci memory\n");
iwl_dealloc_ucode(drv);
release_firmware(ucode_raw);
out_unbind:
complete(&drv->request_firmware_complete);
device_release_driver(drv->trans->dev);
free:
- for (i = 0; i < ARRAY_SIZE(pieces->img); i++)
- kfree(pieces->img[i].sec);
- kfree(pieces->dbg_mem_tlv);
- kfree(pieces);
+ if (pieces) {
+ for (i = 0; i < ARRAY_SIZE(pieces->img); i++)
+ kfree(pieces->img[i].sec);
+ kfree(pieces->dbg_mem_tlv);
+ kfree(pieces);
+ }
}
struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
index 33ef5372d195..62f9fe926d78 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
@@ -614,6 +614,8 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
#define RX_POOL_SIZE (MQ_RX_NUM_RBDS + \
IWL_MAX_RX_HW_QUEUES * \
(RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC))
+/* cb size is the exponent */
+#define RX_QUEUE_CB_SIZE(x) ilog2(x)
#define RX_QUEUE_SIZE 256
#define RX_QUEUE_MASK 255
@@ -639,6 +641,8 @@ struct iwl_rb_status {
#define TFD_QUEUE_SIZE_MAX (256)
+/* cb size is the exponent - 3 */
+#define TFD_QUEUE_CB_SIZE(x) (ilog2(x) - 3)
#define TFD_QUEUE_SIZE_BC_DUP (64)
#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
#define IWL_TX_DMA_MASK DMA_BIT_MASK(36)
@@ -647,7 +651,7 @@ struct iwl_rb_status {
static inline u8 iwl_get_dma_hi_addr(dma_addr_t addr)
{
- return (sizeof(addr) > sizeof(u32) ? (addr >> 16) >> 16 : 0) & 0xF;
+ return (sizeof(addr) > sizeof(u32) ? upper_32_bits(addr) : 0) & 0xF;
}
/**
* struct iwl_tfd_tb transmit buffer descriptor within transmit frame descriptor
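
As a worked example, illustrative and not taken from the patch: the new macros
encode circular-buffer sizes as the exponents firmware expects, so for the
default 256-entry queues RX_QUEUE_CB_SIZE(256) == ilog2(256) == 8 and
TFD_QUEUE_CB_SIZE(256) == ilog2(256) - 3 == 5:

    /* sketch only: exponents for the default 256-entry queues */
    static void example_encode_queue_sizes(void)
    {
    	u32 rx_cb_size = RX_QUEUE_CB_SIZE(RX_QUEUE_SIZE);	/* == 8 */
    	u32 tx_cb_size = TFD_QUEUE_CB_SIZE(TFD_QUEUE_SIZE_MAX);	/* == 5 */

    	pr_debug("rx cb exponent %u, tx cb exponent %u\n",
    		 rx_cb_size, tx_cb_size);
    }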
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
index d01701ee4777..44419e82da1b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
@@ -241,6 +241,9 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
* iteration complete notification, and the timestamp reported for RX
* received during scan, are reported in TSF of the mac specified in the
* scan request.
+ * @IWL_UCODE_TLV_API_TKIP_MIC_KEYS: This ucode supports version 2 of
+ * ADD_MODIFY_STA_KEY_API_S_VER_2.
+ * @IWL_UCODE_TLV_API_STA_TYPE: This ucode supports station type assignment.
*
* @NUM_IWL_UCODE_TLV_API: number of bits used
*/
@@ -250,6 +253,8 @@ enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_LQ_SS_PARAMS = (__force iwl_ucode_tlv_api_t)18,
IWL_UCODE_TLV_API_NEW_VERSION = (__force iwl_ucode_tlv_api_t)20,
IWL_UCODE_TLV_API_SCAN_TSF_REPORT = (__force iwl_ucode_tlv_api_t)28,
+ IWL_UCODE_TLV_API_TKIP_MIC_KEYS = (__force iwl_ucode_tlv_api_t)29,
+ IWL_UCODE_TLV_API_STA_TYPE = (__force iwl_ucode_tlv_api_t)30,
NUM_IWL_UCODE_TLV_API
#ifdef __CHECKER__
@@ -344,6 +349,8 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_BT_COEX_RRC = (__force iwl_ucode_tlv_capa_t)30,
IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT = (__force iwl_ucode_tlv_capa_t)31,
IWL_UCODE_TLV_CAPA_STA_PM_NOTIF = (__force iwl_ucode_tlv_capa_t)38,
+ IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT = (__force iwl_ucode_tlv_capa_t)39,
+ IWL_UCODE_TLV_CAPA_CDB_SUPPORT = (__force iwl_ucode_tlv_capa_t)40,
IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64,
IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS = (__force iwl_ucode_tlv_capa_t)65,
IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT = (__force iwl_ucode_tlv_capa_t)67,
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
index a9f69fdd170b..9c8b09cf1f7b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
@@ -54,8 +54,8 @@ IWL_EXPORT_SYMBOL(iwl_write32);
void iwl_write64(struct iwl_trans *trans, u64 ofs, u64 val)
{
trace_iwlwifi_dev_iowrite64(trans->dev, ofs, val);
- iwl_trans_write32(trans, ofs, val & 0xffffffff);
- iwl_trans_write32(trans, ofs + 4, val >> 32);
+ iwl_trans_write32(trans, ofs, lower_32_bits(val));
+ iwl_trans_write32(trans, ofs + 4, upper_32_bits(val));
}
IWL_EXPORT_SYMBOL(iwl_write64);
@@ -246,6 +246,9 @@ void iwl_force_nmi(struct iwl_trans *trans)
DEVICE_SET_NMI_VAL_DRV);
iwl_write_prph(trans, DEVICE_SET_NMI_REG,
DEVICE_SET_NMI_VAL_HW);
+ } else if (trans->cfg->gen2) {
+ iwl_write_prph(trans, UREG_NIC_SET_NMI_DRIVER,
+ DEVICE_SET_NMI_8000_VAL);
} else {
iwl_write_prph(trans, DEVICE_SET_NMI_8000_REG,
DEVICE_SET_NMI_8000_VAL);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.c b/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.c
index 88f260db3744..68412ff2112e 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.c
@@ -76,8 +76,8 @@ void iwl_notification_wait_init(struct iwl_notif_wait_data *notif_wait)
}
IWL_EXPORT_SYMBOL(iwl_notification_wait_init);
-void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait,
- struct iwl_rx_packet *pkt)
+bool iwl_notification_wait(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt)
{
bool triggered = false;
@@ -118,13 +118,11 @@ void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait,
}
}
spin_unlock(&notif_wait->notif_wait_lock);
-
}
- if (triggered)
- wake_up_all(&notif_wait->notif_waitq);
+ return triggered;
}
-IWL_EXPORT_SYMBOL(iwl_notification_wait_notify);
+IWL_EXPORT_SYMBOL(iwl_notification_wait);
void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_wait)
{
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.h b/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.h
index 0f9995ed71cd..368884be4e7c 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.h
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,6 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -89,10 +90,10 @@ struct iwl_notif_wait_data {
*
* This structure is not used directly, to wait for a
* notification declare it on the stack, and call
- * iwlagn_init_notification_wait() with appropriate
+ * iwl_init_notification_wait() with appropriate
* parameters. Then do whatever will cause the ucode
* to notify the driver, and to wait for that then
- * call iwlagn_wait_notification().
+ * call iwl_wait_notification().
*
* Each notification is one-shot. If at some point we
* need to support multi-shot notifications (which
@@ -114,10 +115,24 @@ struct iwl_notification_wait {
/* caller functions */
void iwl_notification_wait_init(struct iwl_notif_wait_data *notif_data);
-void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_data,
- struct iwl_rx_packet *pkt);
+bool iwl_notification_wait(struct iwl_notif_wait_data *notif_data,
+ struct iwl_rx_packet *pkt);
void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_data);
+static inline void
+iwl_notification_notify(struct iwl_notif_wait_data *notif_data)
+{
+ wake_up_all(&notif_data->notif_waitq);
+}
+
+static inline void
+iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_data,
+ struct iwl_rx_packet *pkt)
+{
+ if (iwl_notification_wait(notif_data, pkt))
+ iwl_notification_notify(notif_data);
+}
+
/* user functions */
void __acquires(wait_entry)
iwl_init_notification_wait(struct iwl_notif_wait_data *notif_data,
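
The refactoring above splits the old iwl_notification_wait_notify() into a
check step and a wake step. A minimal sketch, with a hypothetical caller that
is not part of this commit, of why that helps when an op-mode processes a
batch of packets and wants a single wake_up_all():

    static void example_notif_batch(struct iwl_notif_wait_data *notif_wait,
    				    struct iwl_rx_packet *pkts[], int n)
    {
    	bool triggered = false;
    	int i;

    	/* match waiters for every packet, but defer the wake-up */
    	for (i = 0; i < n; i++)
    		triggered |= iwl_notification_wait(notif_wait, pkts[i]);

    	if (triggered)
    		iwl_notification_notify(notif_wait);	/* one wake_up_all() */
    }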
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index 3bd6fc1b76d4..721ae6bef5da 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -7,7 +7,7 @@
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,6 +34,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -438,25 +439,16 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
vht_cap->vht_mcs.tx_mcs_map = vht_cap->vht_mcs.rx_mcs_map;
}
-static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
- struct iwl_nvm_data *data,
- const __le16 *ch_section,
- u8 tx_chains, u8 rx_chains, bool lar_supported)
+void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
+ struct iwl_nvm_data *data, const __le16 *nvm_ch_flags,
+ u8 tx_chains, u8 rx_chains, bool lar_supported)
{
int n_channels;
int n_used = 0;
struct ieee80211_supported_band *sband;
- if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
- n_channels = iwl_init_channel_map(
- dev, cfg, data,
- &ch_section[NVM_CHANNELS], lar_supported);
- else
- n_channels = iwl_init_channel_map(
- dev, cfg, data,
- &ch_section[NVM_CHANNELS_FAMILY_8000],
- lar_supported);
-
+ n_channels = iwl_init_channel_map(dev, cfg, data, nvm_ch_flags,
+ lar_supported);
sband = &data->bands[NL80211_BAND_2GHZ];
sband->band = NL80211_BAND_2GHZ;
sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS];
@@ -482,6 +474,7 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n",
n_used, n_channels);
}
+IWL_EXPORT_SYMBOL(iwl_init_sbands);
static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
const __le16 *phy_sku)
@@ -559,8 +552,8 @@ static void iwl_flip_hw_address(__le32 mac_addr0, __le32 mac_addr1, u8 *dest)
dest[5] = hw_addr[0];
}
-static void iwl_set_hw_address_from_csr(struct iwl_trans *trans,
- struct iwl_nvm_data *data)
+void iwl_set_hw_address_from_csr(struct iwl_trans *trans,
+ struct iwl_nvm_data *data)
{
__le32 mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_STRAP));
__le32 mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_STRAP));
@@ -578,6 +571,7 @@ static void iwl_set_hw_address_from_csr(struct iwl_trans *trans,
iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
}
+IWL_EXPORT_SYMBOL(iwl_set_hw_address_from_csr);
static void iwl_set_hw_address_family_8000(struct iwl_trans *trans,
const struct iwl_cfg *cfg,
@@ -718,7 +712,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
data->xtal_calib[0] = *(nvm_calib + XTAL_CALIB);
data->xtal_calib[1] = *(nvm_calib + XTAL_CALIB + 1);
lar_enabled = true;
- ch_section = nvm_sw;
+ ch_section = &nvm_sw[NVM_CHANNELS];
} else {
u16 lar_offset = data->nvm_version < 0xE39 ?
NVM_LAR_OFFSET_FAMILY_8000_OLD :
@@ -728,7 +722,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
data->lar_enabled = !!(lar_config &
NVM_LAR_ENABLED_FAMILY_8000);
lar_enabled = data->lar_enabled;
- ch_section = regulatory;
+ ch_section = &regulatory[NVM_CHANNELS_FAMILY_8000];
}
/* If no valid mac address was found - bail out */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
index 7249e5b403f4..3fd6506a02ab 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2015 Intel Corporation. All rights reserved.
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,6 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -82,6 +83,19 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
u8 tx_chains, u8 rx_chains, bool lar_fw_supported);
/**
+ * iwl_set_hw_address_from_csr - sets HW address for 9000 devices and on
+ */
+void iwl_set_hw_address_from_csr(struct iwl_trans *trans,
+ struct iwl_nvm_data *data);
+
+/**
+ * iwl_init_sbands - parse and set all channel profiles
+ */
+void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
+ struct iwl_nvm_data *data, const __le16 *nvm_ch_flags,
+ u8 tx_chains, u8 rx_chains, bool lar_supported);
+
+/**
* iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW
*
* This function parses the regulatory channel data received as a
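
With iwl_set_hw_address_from_csr() and iwl_init_sbands() now exported, code
outside iwl-nvm-parse.c can build band data from a channel-flags array. A
hedged sketch of such a caller; the antenna fields read from iwl_nvm_data are
an assumption, not shown in this diff:

    static void example_init_bands(struct iwl_trans *trans,
    			       struct iwl_nvm_data *data,
    			       const __le16 *nvm_ch_flags, bool lar_supported)
    {
    	/* sketch only: MAC address from CSR, then channel profiles */
    	iwl_set_hw_address_from_csr(trans, data);
    	iwl_init_sbands(trans->dev, trans->cfg, data, nvm_ch_flags,
    			data->valid_tx_ant, data->valid_rx_ant,
    			lar_supported);
    }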
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index 406ef301b8ab..306bc967742e 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -114,6 +114,7 @@
#define DEVICE_SET_NMI_VAL_DRV BIT(7)
#define DEVICE_SET_NMI_8000_REG 0x00a01c24
#define DEVICE_SET_NMI_8000_VAL 0x1000000
+#define UREG_NIC_SET_NMI_DRIVER 0x00a05c10
/* Shared registers (0x0..0x3ff, via target indirect or periphery */
#define SHR_BASE 0x00a10000
@@ -294,9 +295,6 @@
/*********************** END TX SCHEDULER *************************************/
-/* tcp checksum offload */
-#define RX_EN_CSUM (0x00a00d88)
-
/* Oscillator clock */
#define OSC_CLK (0xa04068)
#define OSC_CLK_FORCE_CONTROL (0x8)
@@ -309,6 +307,7 @@
* Note this address is cleared after MAC reset.
*/
#define UREG_UCODE_LOAD_STATUS (0xa05c40)
+#define UREG_CPU_INIT_RUN (0xa05c44)
#define LMPM_SECURE_UCODE_LOAD_CPU1_HDR_ADDR (0x1E78)
#define LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR (0x1E7C)
@@ -316,6 +315,8 @@
#define LMPM_SECURE_CPU1_HDR_MEM_SPACE (0x420000)
#define LMPM_SECURE_CPU2_HDR_MEM_SPACE (0x420400)
+#define LMAC2_PRPH_OFFSET (0x100000)
+
/* Rx FIFO */
#define RXF_SIZE_ADDR (0xa00c88)
#define RXF_RD_D_SPACE (0xa00c40)
@@ -378,6 +379,7 @@
#define RADIO_REG_SYS_MANUAL_DFT_0 0xAD4078
#define RFIC_REG_RD 0xAD0470
#define WFPM_CTRL_REG 0xA03030
+#define WFPM_GP2 0xA030B4
enum {
ENABLE_WFPM = BIT(31),
WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK = 0x80000000,
@@ -398,6 +400,8 @@ enum aux_misc_master1_en {
#define PREG_AUX_BUS_WPROT_0 0xA04CC0
#define SB_CPU_1_STATUS 0xA01E30
#define SB_CPU_2_STATUS 0xA01E34
+#define UMAG_SB_CPU_1_STATUS 0xA038C0
+#define UMAG_SB_CPU_2_STATUS 0xA038C4
/* FW chicken bits */
#define LMPM_CHICK 0xA01FF8
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
index d42cab291025..0bde26bab15d 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
@@ -70,8 +70,7 @@
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
struct device *dev,
const struct iwl_cfg *cfg,
- const struct iwl_trans_ops *ops,
- size_t dev_cmd_headroom)
+ const struct iwl_trans_ops *ops)
{
struct iwl_trans *trans;
#ifdef CONFIG_LOCKDEP
@@ -90,15 +89,13 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
trans->dev = dev;
trans->cfg = cfg;
trans->ops = ops;
- trans->dev_cmd_headroom = dev_cmd_headroom;
trans->num_rx_queues = 1;
snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
"iwl_cmd_pool:%s", dev_name(trans->dev));
trans->dev_cmd_pool =
kmem_cache_create(trans->dev_cmd_pool_name,
- sizeof(struct iwl_device_cmd)
- + trans->dev_cmd_headroom,
+ sizeof(struct iwl_device_cmd),
sizeof(void *),
SLAB_HWCACHE_ALIGN,
NULL);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 0296124a7f9c..0ebfdbb22992 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -7,7 +7,7 @@
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -396,7 +396,10 @@ static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
* currently supports
*/
#define IWL_MAX_HW_QUEUES 32
+#define IWL_MAX_TVQM_QUEUES 512
+
#define IWL_MAX_TID_COUNT 8
+#define IWL_MGMT_TID 15
#define IWL_FRAME_LIMIT 64
#define IWL_MAX_RX_HW_QUEUES 16
@@ -530,6 +533,44 @@ struct iwl_trans_txq_scd_cfg {
int frame_limit;
};
+/* Available options for &struct iwl_tx_queue_cfg_cmd */
+enum iwl_tx_queue_cfg_actions {
+ TX_QUEUE_CFG_ENABLE_QUEUE = BIT(0),
+ TX_QUEUE_CFG_TFD_SHORT_FORMAT = BIT(1),
+};
+
+/**
+ * struct iwl_tx_queue_cfg_cmd - txq hw scheduler config command
+ * @sta_id: station id
+ * @tid: tid of the queue
+ * @flags: Bit 0 - on = enable, off = disable, Bit 1 - short TFD format
+ * @cb_size: size of TFD cyclic buffer. Value is exponent - 3.
+ * Minimum value 0 (8 TFDs), maximum value 5 (256 TFDs)
+ * @byte_cnt_addr: address of byte count table
+ * @tfdq_addr: address of TFD circular buffer
+ */
+struct iwl_tx_queue_cfg_cmd {
+ u8 sta_id;
+ u8 tid;
+ __le16 flags;
+ __le32 cb_size;
+ __le64 byte_cnt_addr;
+ __le64 tfdq_addr;
+} __packed; /* TX_QUEUE_CFG_CMD_API_S_VER_2 */
+
+/**
+ * struct iwl_tx_queue_cfg_rsp - response to txq hw scheduler config
+ * @queue_number: queue number assigned to this RA/TID
+ * @flags: set on failure
+ * @write_pointer: initial value for write pointer
+ */
+struct iwl_tx_queue_cfg_rsp {
+ __le16 queue_number;
+ __le16 flags;
+ __le16 write_pointer;
+ __le16 reserved;
+} __packed; /* TX_QUEUE_CFG_RSP_API_S_VER_2 */
+
/**
* struct iwl_trans_ops - transport specific operations
*
@@ -640,13 +681,17 @@ struct iwl_trans_ops {
unsigned int queue_wdg_timeout);
void (*txq_disable)(struct iwl_trans *trans, int queue,
bool configure_scd);
+ /* a000 functions */
+ int (*txq_alloc)(struct iwl_trans *trans,
+ struct iwl_tx_queue_cfg_cmd *cmd,
+ int cmd_id,
+ unsigned int queue_wdg_timeout);
+ void (*txq_free)(struct iwl_trans *trans, int queue);
void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id,
bool shared);
- dma_addr_t (*get_txq_byte_table)(struct iwl_trans *trans, int txq_id);
-
- int (*wait_tx_queue_empty)(struct iwl_trans *trans, u32 txq_bm);
+ int (*wait_tx_queues_empty)(struct iwl_trans *trans, u32 txq_bm);
void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
bool freeze);
void (*block_txq_ptrs)(struct iwl_trans *trans, bool block);
@@ -774,9 +819,6 @@ enum iwl_plat_pm_mode {
* the transport must set this before calling iwl_drv_start()
* @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
* The user should use iwl_trans_{alloc,free}_tx_cmd.
- * @dev_cmd_headroom: room needed for the transport's private use before the
- * device_cmd for Tx - for internal use only
- * The user should use iwl_trans_{alloc,free}_tx_cmd.
* @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before
* starting the firmware, used for tracing
* @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
@@ -827,7 +869,6 @@ struct iwl_trans {
/* The following fields are internal only */
struct kmem_cache *dev_cmd_pool;
- size_t dev_cmd_headroom;
char dev_cmd_pool_name[50];
struct dentry *dbgfs_dir;
@@ -1000,13 +1041,13 @@ iwl_trans_dump_data(struct iwl_trans *trans,
static inline struct iwl_device_cmd *
iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
{
- u8 *dev_cmd_ptr = kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC);
+ struct iwl_device_cmd *dev_cmd_ptr =
+ kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC);
if (unlikely(dev_cmd_ptr == NULL))
return NULL;
- return (struct iwl_device_cmd *)
- (dev_cmd_ptr + trans->dev_cmd_headroom);
+ return dev_cmd_ptr;
}
int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
@@ -1014,9 +1055,7 @@ int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
struct iwl_device_cmd *dev_cmd)
{
- u8 *dev_cmd_ptr = (u8 *)dev_cmd - trans->dev_cmd_headroom;
-
- kmem_cache_free(trans->dev_cmd_pool, dev_cmd_ptr);
+ kmem_cache_free(trans->dev_cmd_pool, dev_cmd);
}
static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
@@ -1065,20 +1104,39 @@ iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
trans->ops->txq_enable(trans, queue, ssn, cfg, queue_wdg_timeout);
}
-static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
- int queue, bool shared_mode)
+static inline void
+iwl_trans_txq_free(struct iwl_trans *trans, int queue)
{
- if (trans->ops->txq_set_shared_mode)
- trans->ops->txq_set_shared_mode(trans, queue, shared_mode);
+ if (WARN_ON_ONCE(!trans->ops->txq_free))
+ return;
+
+ trans->ops->txq_free(trans, queue);
}
-static inline dma_addr_t iwl_trans_get_txq_byte_table(struct iwl_trans *trans,
- int queue)
+static inline int
+iwl_trans_txq_alloc(struct iwl_trans *trans,
+ struct iwl_tx_queue_cfg_cmd *cmd,
+ int cmd_id,
+ unsigned int queue_wdg_timeout)
{
- /* we should never be called if the trans doesn't support it */
- BUG_ON(!trans->ops->get_txq_byte_table);
+ might_sleep();
+
+ if (WARN_ON_ONCE(!trans->ops->txq_alloc))
+ return -ENOTSUPP;
+
+ if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
+ IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
+ return -EIO;
+ }
- return trans->ops->get_txq_byte_table(trans, queue);
+ return trans->ops->txq_alloc(trans, cmd, cmd_id, queue_wdg_timeout);
+}
+
+static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
+ int queue, bool shared_mode)
+{
+ if (trans->ops->txq_set_shared_mode)
+ trans->ops->txq_set_shared_mode(trans, queue, shared_mode);
}
static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
@@ -1137,15 +1195,15 @@ static inline void iwl_trans_block_txq_ptrs(struct iwl_trans *trans,
trans->ops->block_txq_ptrs(trans, block);
}
-static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans,
- u32 txqs)
+static inline int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans,
+ u32 txqs)
{
if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
return -EIO;
}
- return trans->ops->wait_tx_queue_empty(trans, txqs);
+ return trans->ops->wait_tx_queues_empty(trans, txqs);
}
static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
@@ -1248,8 +1306,7 @@ static inline void iwl_trans_fw_error(struct iwl_trans *trans)
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
struct device *dev,
const struct iwl_cfg *cfg,
- const struct iwl_trans_ops *ops,
- size_t dev_cmd_headroom);
+ const struct iwl_trans_ops *ops);
void iwl_trans_free(struct iwl_trans *trans);
/*****************************************************
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/binding.c b/drivers/net/wireless/intel/iwlwifi/mvm/binding.c
index 7cb68f6ed1b0..75d35f6b041e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/binding.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/binding.c
@@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,6 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -82,9 +84,22 @@ static int iwl_mvm_binding_cmd(struct iwl_mvm *mvm, u32 action,
struct iwl_mvm_phy_ctxt *phyctxt = data->phyctxt;
int i, ret;
u32 status;
+ int size;
memset(&cmd, 0, sizeof(cmd));
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT)) {
+ size = sizeof(cmd);
+ if (phyctxt->channel->band == NL80211_BAND_2GHZ ||
+ !iwl_mvm_is_cdb_supported(mvm))
+ cmd.lmac_id = cpu_to_le32(IWL_LMAC_24G_INDEX);
+ else
+ cmd.lmac_id = cpu_to_le32(IWL_LMAC_5G_INDEX);
+ } else {
+ size = IWL_BINDING_CMD_SIZE_V1;
+ }
+
cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(phyctxt->id,
phyctxt->color));
cmd.action = cpu_to_le32(action);
@@ -99,7 +114,7 @@ static int iwl_mvm_binding_cmd(struct iwl_mvm *mvm, u32 action,
status = 0;
ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD,
- sizeof(cmd), &cmd, &status);
+ size, &cmd, &status);
if (ret) {
IWL_ERR(mvm, "Failed to send binding (action:%d): %d\n",
action, ret);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
index 5bdb6c2c8390..49b4418e6c35 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
@@ -756,7 +756,7 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
* Rssi update while not associated - can happen since the statistics
* are handled asynchronously
*/
- if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
+ if (mvmvif->ap_sta_id == IWL_MVM_INVALID_STA)
return;
/* No BT - reports should be disabled */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index c7eb1983c4f9..119a3bd92c50 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -665,6 +665,19 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_binding_cmd binding_cmd = {};
struct iwl_time_quota_cmd quota_cmd = {};
u32 status;
+ int size;
+
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT)) {
+ size = sizeof(binding_cmd);
+ if (mvmvif->phy_ctxt->channel->band == NL80211_BAND_2GHZ ||
+ !iwl_mvm_is_cdb_supported(mvm))
+ binding_cmd.lmac_id = cpu_to_le32(IWL_LMAC_24G_INDEX);
+ else
+ binding_cmd.lmac_id = cpu_to_le32(IWL_LMAC_5G_INDEX);
+ } else {
+ size = IWL_BINDING_CMD_SIZE_V1;
+ }
/* add back the PHY */
if (WARN_ON(!mvmvif->phy_ctxt))
@@ -711,8 +724,7 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
status = 0;
ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD,
- sizeof(binding_cmd), &binding_cmd,
- &status);
+ size, &binding_cmd, &status);
if (ret) {
IWL_ERR(mvm, "Failed to add binding: %d\n", ret);
return ret;
@@ -986,7 +998,9 @@ int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
goto out;
}
- if (key_data.use_tkip) {
+ if (key_data.use_tkip &&
+ !fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_TKIP_MIC_KEYS)) {
ret = iwl_mvm_send_cmd_pdu(mvm,
WOWLAN_TKIP_PARAM,
cmd_flags, sizeof(tkip_cmd),
@@ -1194,7 +1208,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
mvmvif = iwl_mvm_vif_from_mac80211(vif);
- if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT) {
+ if (mvmvif->ap_sta_id == IWL_MVM_INVALID_STA) {
/* if we're not associated, this must be netdetect */
if (!wowlan->nd_config) {
ret = 1;
@@ -2102,6 +2116,10 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
*/
iwl_mvm_update_changed_regdom(mvm);
+ if (!unified_image)
+ /* Re-configure default SAR profile */
+ iwl_mvm_sar_select_profile(mvm, 1, 1);
+
if (mvm->net_detect) {
/* If this is a non-unified image, we restart the FW,
* so no need to stop the netdetect scan. If that
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
index f4d75ffe3d8a..5d475b4850ae 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
@@ -280,7 +280,7 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
mvmvif->queue_params[i].uapsd);
if (vif->type == NL80211_IFTYPE_STATION &&
- ap_sta_id != IWL_MVM_STATION_COUNT) {
+ ap_sta_id != IWL_MVM_INVALID_STA) {
struct iwl_mvm_sta *mvm_sta;
mvm_sta = iwl_mvm_sta_from_staid_protected(mvm, ap_sta_id);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 077bfd8f4c0c..402846650cbe 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -330,7 +330,7 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
mutex_lock(&mvm->mutex);
- for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
+ for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
pos += scnprintf(buf + pos, bufsz - pos, "%.2d: ", i);
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
lockdep_is_held(&mvm->mutex));
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h
index 480a54af4534..970b030ed28d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h
@@ -73,7 +73,9 @@
#define NUM_MAC_INDEX (NUM_MAC_INDEX_DRIVER + 1)
#define NUM_MAC_INDEX_CDB (NUM_MAC_INDEX_DRIVER + 2)
-#define IWL_MVM_STATION_COUNT 16
+#define IWL_MVM_STATION_COUNT 16
+#define IWL_MVM_INVALID_STA 0xFF
+
#define IWL_MVM_TDLS_STA_COUNT 4
enum iwl_ac {
@@ -155,7 +157,8 @@ enum iwl_tsf_id {
* @bi_reciprocal: 2^32 / bi
* @dtim_interval: dtim transmit time in TU
* @dtim_reciprocal: 2^32 / dtim_interval
- * @mcast_qid: queue ID for multicast traffic
+ * @mcast_qid: queue ID for multicast traffic.
+ *	NOTE: obsolete from VER2 onward
* @beacon_template: beacon template ID
*/
struct iwl_mac_data_ap {
@@ -167,7 +170,7 @@ struct iwl_mac_data_ap {
__le32 dtim_reciprocal;
__le32 mcast_qid;
__le32 beacon_template;
-} __packed; /* AP_MAC_DATA_API_S_VER_1 */
+} __packed; /* AP_MAC_DATA_API_S_VER_2 */
/**
* struct iwl_mac_data_ibss - configuration data for IBSS MAC context
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h
index 3fa43d1348a2..750510aff70b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h
@@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -351,6 +351,45 @@ struct iwl_dev_tx_power_cmd {
u8 reserved[3];
} __packed; /* TX_REDUCED_POWER_API_S_VER_4 */
+#define IWL_NUM_GEO_PROFILES 3
+
+/**
+ * enum iwl_geo_per_chain_offset_operation - type of operation
+ * @IWL_PER_CHAIN_OFFSET_SET_TABLES: send the tables from the host to the FW.
+ * @IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE: retrieve the last configured table.
+ */
+enum iwl_geo_per_chain_offset_operation {
+ IWL_PER_CHAIN_OFFSET_SET_TABLES,
+ IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE,
+}; /* GEO_TX_POWER_LIMIT FLAGS TYPE */
+
+/**
+ * struct iwl_per_chain_offset - embedded struct for GEO_TX_POWER_LIMIT.
+ * @max_tx_power: maximum allowed tx power.
+ * @chain_a: tx power offset for chain a.
+ * @chain_b: tx power offset for chain b.
+ */
+struct iwl_per_chain_offset {
+ __le16 max_tx_power;
+ u8 chain_a;
+ u8 chain_b;
+} __packed; /* PER_CHAIN_LIMIT_OFFSET_PER_CHAIN_S_VER_1 */
+
+struct iwl_per_chain_offset_group {
+ struct iwl_per_chain_offset lb;
+ struct iwl_per_chain_offset hb;
+} __packed; /* PER_CHAIN_LIMIT_OFFSET_GROUP_S_VER_1 */
+
+/**
+ * struct iwl_geo_tx_power_profiles_cmd - struct for GEO_TX_POWER_LIMIT cmd.
+ * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
+ * @table: offset profile per band.
+ */
+struct iwl_geo_tx_power_profiles_cmd {
+ __le32 ops;
+ struct iwl_per_chain_offset_group table[IWL_NUM_GEO_PROFILES];
+} __packed; /* GEO_TX_POWER_LIMIT */
+
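
A minimal fill sketch for the new command (the offset values below are invented for illustration only):

	struct iwl_geo_tx_power_profiles_cmd geo_cmd = {
		.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES),
	};

	/* profile 0, low band: invented example offsets */
	geo_cmd.table[0].lb.max_tx_power = cpu_to_le16(128);
	geo_cmd.table[0].lb.chain_a = 6;
	geo_cmd.table[0].lb.chain_b = 6;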
/**
* struct iwl_beacon_filter_cmd
* REPLY_BEACON_FILTERING_CMD = 0xd2 (command)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h
index ad9cc03e16c4..1b7d265ffb0a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h
@@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,6 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -227,6 +229,9 @@ enum {
*/
#define RATE_LEGACY_RATE_MSK 0xff
+/* Bit 10 - OFDM HE */
+#define RATE_MCS_OFDM_HE_POS 10
+#define RATE_MCS_OFDM_HE_MSK BIT(RATE_MCS_OFDM_HE_POS)
/*
* Bit 11-12: (0) 20MHz, (1) 40MHz, (2) 80MHz, (3) 160MHz
@@ -255,18 +260,29 @@ enum {
#define RATE_MCS_ANT_MSK RATE_MCS_ANT_ABC_MSK
#define RATE_MCS_ANT_NUM 3
-/* Bit 17-18: (0) SS, (1) SS*2 */
+/* Bit 17: (0) SS, (1) SS*2 */
#define RATE_MCS_STBC_POS 17
-#define RATE_MCS_HT_STBC_MSK (3 << RATE_MCS_STBC_POS)
-#define RATE_MCS_VHT_STBC_MSK (1 << RATE_MCS_STBC_POS)
+#define RATE_MCS_STBC_MSK BIT(RATE_MCS_STBC_POS)
+
+/* Bit 18: OFDM-HE dual carrier mode */
+#define RATE_HE_DUAL_CARRIER_MODE 18
+#define RATE_HE_DUAL_CARRIER_MODE_MSK BIT(RATE_HE_DUAL_CARRIER_MODE)
/* Bit 19: (0) Beamforming is off, (1) Beamforming is on */
#define RATE_MCS_BF_POS 19
#define RATE_MCS_BF_MSK (1 << RATE_MCS_BF_POS)
-/* Bit 20: (0) ZLF is off, (1) ZLF is on */
-#define RATE_MCS_ZLF_POS 20
-#define RATE_MCS_ZLF_MSK (1 << RATE_MCS_ZLF_POS)
+/*
+ * Bit 20-21: HE guard interval and LTF type.
+ * (0) 1xLTF+1.6us, (1) 2xLTF+0.8us,
+ * (2) 2xLTF+1.6us, (3) 4xLTF+3.2us
+ */
+#define RATE_MCS_HE_GI_LTF_POS 20
+#define RATE_MCS_HE_GI_LTF_MSK (3 << RATE_MCS_HE_GI_LTF_POS)
+
+/* Bit 22-23: HE type. (0) SU, (1) SU_EXT, (2) MU, (3) trigger based */
+#define RATE_MCS_HE_TYPE_POS 22
+#define RATE_MCS_HE_TYPE_MSK (3 << RATE_MCS_HE_TYPE_POS)
/* Bit 24-25: (0) 20MHz (no dup), (1) 2x20MHz, (2) 4x20MHz, (3) 8x20MHz */
#define RATE_MCS_DUP_POS 24
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h
index c78a0c499459..3178eb96e395 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h
@@ -516,7 +516,7 @@ struct iwl_scan_dwell {
* scan_config_channel_flag
* @channel_array: default supported channels
*/
-struct iwl_scan_config {
+struct iwl_scan_config_v1 {
__le32 flags;
__le32 tx_chains;
__le32 rx_chains;
@@ -532,7 +532,7 @@ struct iwl_scan_config {
#define SCAN_TWO_LMACS 2
-struct iwl_scan_config_cdb {
+struct iwl_scan_config {
__le32 flags;
__le32 tx_chains;
__le32 rx_chains;
@@ -669,7 +669,7 @@ struct iwl_scan_req_umac {
u8 n_channels;
__le16 reserved;
u8 data[];
- } no_cdb; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */
+ } v1; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */
struct {
__le32 max_out_time[SCAN_TWO_LMACS];
__le32 suspend_time[SCAN_TWO_LMACS];
@@ -679,13 +679,13 @@ struct iwl_scan_req_umac {
u8 n_channels;
__le16 reserved;
u8 data[];
- } cdb; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_5 */
+ } v6; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_6 */
};
} __packed;
-#define IWL_SCAN_REQ_UMAC_SIZE_CDB sizeof(struct iwl_scan_req_umac)
-#define IWL_SCAN_REQ_UMAC_SIZE (sizeof(struct iwl_scan_req_umac) - \
- 2 * sizeof(__le32))
+#define IWL_SCAN_REQ_UMAC_SIZE sizeof(struct iwl_scan_req_umac)
+#define IWL_SCAN_REQ_UMAC_SIZE_V1 (sizeof(struct iwl_scan_req_umac) - \
+ 2 * sizeof(__le32))
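
With the v1/v6 renaming, callers size the request by FW API version rather than by CDB naming; a sketch (the capability flag is an assumption):

	/* fw_has_ver6_scan is a hypothetical capability check */
	size_t scan_size = fw_has_ver6_scan ? IWL_SCAN_REQ_UMAC_SIZE
					    : IWL_SCAN_REQ_UMAC_SIZE_V1;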
/**
* struct iwl_umac_scan_abort
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h
index 3b5150e9975d..421b9dd1fb66 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h
@@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -179,7 +179,7 @@ enum iwl_sta_key_flag {
* enum iwl_sta_modify_flag - indicate to the fw what flag are being changed
* @STA_MODIFY_QUEUE_REMOVAL: this command removes a queue
* @STA_MODIFY_TID_DISABLE_TX: this command modifies %tid_disable_tx
- * @STA_MODIFY_UAPSD_ACS: this command modifies %uapsd_trigger_acs
+ * @STA_MODIFY_UAPSD_ACS: this command modifies %uapsd_acs
* @STA_MODIFY_ADD_BA_TID: this command modifies %add_immediate_ba_tid
* @STA_MODIFY_REMOVE_BA_TID: this command modifies %remove_immediate_ba_tid
* @STA_MODIFY_SLEEPING_STA_TX_COUNT: this command modifies %sleep_tx_count
@@ -214,20 +214,6 @@ enum iwl_sta_sleep_flag {
STA_SLEEP_STATE_MOREDATA = BIT(2),
};
-/* STA ID and color bits definitions */
-#define STA_ID_SEED (0x0f)
-#define STA_ID_POS (0)
-#define STA_ID_MSK (STA_ID_SEED << STA_ID_POS)
-
-#define STA_COLOR_SEED (0x7)
-#define STA_COLOR_POS (4)
-#define STA_COLOR_MSK (STA_COLOR_SEED << STA_COLOR_POS)
-
-#define STA_ID_N_COLOR_GET_COLOR(id_n_color) \
- (((id_n_color) & STA_COLOR_MSK) >> STA_COLOR_POS)
-#define STA_ID_N_COLOR_GET_ID(id_n_color) \
- (((id_n_color) & STA_ID_MSK) >> STA_ID_POS)
-
#define STA_KEY_MAX_NUM (16)
#define STA_KEY_IDX_INVALID (0xff)
#define STA_KEY_MAX_DATA_KEY_NUM (4)
@@ -324,6 +310,24 @@ struct iwl_mvm_add_sta_cmd_v7 {
} __packed; /* ADD_STA_CMD_API_S_VER_7 */
/**
+ * enum iwl_sta_type - FW station types
+ * ( REPLY_ADD_STA = 0x18 )
+ * @IWL_STA_LINK: Link station - normal RX and TX traffic.
+ * @IWL_STA_GENERAL_PURPOSE: General purpose. In AP mode used for beacons
+ * and probe responses.
+ * @IWL_STA_MULTICAST: multicast traffic.
+ * @IWL_STA_TDLS_LINK: TDLS link station
+ * @IWL_STA_AUX_ACTIVITY: auxiliary station (scan, ROC and so on).
+ */
+enum iwl_sta_type {
+ IWL_STA_LINK,
+ IWL_STA_GENERAL_PURPOSE,
+ IWL_STA_MULTICAST,
+ IWL_STA_TDLS_LINK,
+ IWL_STA_AUX_ACTIVITY,
+};
+
+/**
* struct iwl_mvm_add_sta_cmd - Add/modify a station in the fw's sta table.
* ( REPLY_ADD_STA = 0x18 )
* @add_modify: 1: modify existing, 0: add new station
@@ -347,14 +351,17 @@ struct iwl_mvm_add_sta_cmd_v7 {
* @sleep_tx_count: number of packets to transmit to station even though it is
* asleep. Used to synchronise PS-poll and u-APSD responses while ucode
* keeps track of STA sleep state.
+ * @station_type: type of this station. See &enum iwl_sta_type.
* @sleep_state_flags: Look at %iwl_sta_sleep_flag.
* @assoc_id: assoc_id to be sent in VHT PLCP (9-bit), for grp use 0, for AP
* mac-addr.
* @beamform_flags: beam forming controls
- * @tfd_queue_msk: tfd queues used by this station
+ * @tfd_queue_msk: tfd queues used by this station.
+ *	Obsolete for the new TX API (9 and above).
* @rx_ba_window: aggregation window size
- * @scd_queue_bank: queue bank in used. Each bank contains 32 queues. 0 means
- * that the queues used by this station are in the first 32.
+ * @sp_length: the size of the SP as it appears in the WME IE
+ * @uapsd_acs: 4 LS bits are trigger enabled ACs, 4 MS bits are the deliver
+ * enabled ACs.
*
* The device contains an internal table of per-station information, with info
* on security keys, aggregation parameters, and Tx rates for initial Tx
@@ -379,38 +386,61 @@ struct iwl_mvm_add_sta_cmd {
u8 remove_immediate_ba_tid;
__le16 add_immediate_ba_ssn;
__le16 sleep_tx_count;
- __le16 sleep_state_flags;
+ u8 sleep_state_flags;
+ u8 station_type;
__le16 assoc_id;
__le16 beamform_flags;
__le32 tfd_queue_msk;
__le16 rx_ba_window;
- u8 scd_queue_bank;
- u8 uapsd_trigger_acs;
-} __packed; /* ADD_STA_CMD_API_S_VER_8 */
+ u8 sp_length;
+ u8 uapsd_acs;
+} __packed; /* ADD_STA_CMD_API_S_VER_10 */
/**
- * struct iwl_mvm_add_sta_key_cmd - add/modify sta key
+ * struct iwl_mvm_add_sta_key_common - add/modify sta key common part
* ( REPLY_ADD_STA_KEY = 0x17 )
* @sta_id: index of station in uCode's station table
* @key_offset: key offset in key storage
* @key_flags: type %iwl_sta_key_flag
* @key: key material data
* @rx_secur_seq_cnt: RX security sequence counter for the key
- * @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection
- * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx
*/
-struct iwl_mvm_add_sta_key_cmd {
+struct iwl_mvm_add_sta_key_common {
u8 sta_id;
u8 key_offset;
__le16 key_flags;
u8 key[32];
u8 rx_secur_seq_cnt[16];
+} __packed;
+
+/**
+ * struct iwl_mvm_add_sta_key_cmd_v1 - add/modify sta key
+ * @common: see &struct iwl_mvm_add_sta_key_common
+ * @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection
+ * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx
+ */
+struct iwl_mvm_add_sta_key_cmd_v1 {
+ struct iwl_mvm_add_sta_key_common common;
u8 tkip_rx_tsc_byte2;
u8 reserved;
__le16 tkip_rx_ttak[5];
} __packed; /* ADD_MODIFY_STA_KEY_API_S_VER_1 */
/**
+ * struct iwl_mvm_add_sta_key_cmd - add/modify sta key
+ * @common: see &struct iwl_mvm_add_sta_key_common
+ * @rx_mic_key: TKIP RX unicast or multicast key
+ * @tx_mic_key: TKIP TX key
+ * @transmit_seq_cnt: TSC, transmit packet number
+ */
+struct iwl_mvm_add_sta_key_cmd {
+ struct iwl_mvm_add_sta_key_common common;
+ __le64 rx_mic_key;
+ __le64 tx_mic_key;
+ __le64 transmit_seq_cnt;
+} __packed; /* ADD_MODIFY_STA_KEY_API_S_VER_2 */
+
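Since both layouts begin with the shared common part, callers only need to pick the trailing size; a sketch (the new_api flag is an assumption):

	/* new_api is a hypothetical API-version check */
	size_t cmd_size = new_api ? sizeof(struct iwl_mvm_add_sta_key_cmd)
				  : sizeof(struct iwl_mvm_add_sta_key_cmd_v1);
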
+/**
* enum iwl_mvm_add_sta_rsp_status - status in the response to ADD_STA command
* @ADD_STA_SUCCESS: operation was executed successfully
* @ADD_STA_STATIONS_OVERLOAD: no room left in the fw's station table
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
index b38cc073adcc..81b98915b1a4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,6 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -124,6 +125,20 @@ enum iwl_tx_flags {
}; /* TX_FLAGS_BITS_API_S_VER_1 */
/**
+ * enum iwl_tx_cmd_flags - bitmasks for tx_flags in TX command for a000
+ * @IWL_TX_FLAGS_CMD_RATE: use rate from the TX command
+ * @IWL_TX_FLAGS_ENCRYPT_DIS: frame should not be encrypted, even if it belongs
+ * to a secured STA
+ * @IWL_TX_FLAGS_HIGH_PRI: high priority frame (like EAPOL) - can affect rate
+ * selection, retry limits and BT kill
+ */
+enum iwl_tx_cmd_flags {
+ IWL_TX_FLAGS_CMD_RATE = BIT(0),
+ IWL_TX_FLAGS_ENCRYPT_DIS = BIT(1),
+ IWL_TX_FLAGS_HIGH_PRI = BIT(2),
+}; /* TX_FLAGS_BITS_API_S_VER_3 */
+
+/**
* enum iwl_tx_pm_timeouts - pm timeout values in TX command
* @PM_FRAME_NONE: no need to suspend sleep mode
* @PM_FRAME_MGMT: fw suspend sleep mode for 100TU
@@ -159,7 +174,7 @@ enum iwl_tx_cmd_sec_ctrl {
TX_CMD_SEC_EXT = 0x04,
TX_CMD_SEC_GCMP = 0x05,
TX_CMD_SEC_KEY128 = 0x08,
- TX_CMD_SEC_KEY_FROM_TABLE = 0x08,
+ TX_CMD_SEC_KEY_FROM_TABLE = 0x10,
};
/* TODO: how are these values OK with only a 16 bit variable??? */
@@ -301,6 +316,31 @@ struct iwl_tx_cmd {
struct ieee80211_hdr hdr[0];
} __packed; /* TX_CMD_API_S_VER_6 */
+struct iwl_dram_sec_info {
+ __le32 pn_low;
+ __le16 pn_high;
+ __le16 aux_info;
+} __packed; /* DRAM_SEC_INFO_API_S_VER_1 */
+
+/**
+ * struct iwl_tx_cmd_gen2 - TX command struct to FW for a000 devices
+ * ( TX_CMD = 0x1c )
+ * @len: in bytes of the payload, see below for details
+ * @offload_assist: TX offload configuration
+ * @flags: combination of &enum iwl_tx_cmd_flags
+ * @dram_info: FW internal DRAM storage
+ * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
+ * cleared. Combination of RATE_MCS_*
+ */
+struct iwl_tx_cmd_gen2 {
+ __le16 len;
+ __le16 offload_assist;
+ __le32 flags;
+ struct iwl_dram_sec_info dram_info;
+ __le32 rate_n_flags;
+ struct ieee80211_hdr hdr[0];
+} __packed; /* TX_CMD_API_S_VER_7 */
+
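
An illustrative fill of the gen2 command (payload_len and rate are invented placeholders):

	struct iwl_tx_cmd_gen2 tx_cmd = {
		.len = cpu_to_le16(payload_len),
		.flags = cpu_to_le32(IWL_TX_FLAGS_CMD_RATE),
		.rate_n_flags = cpu_to_le32(rate),
	};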
/*
* TX response related data
*/
@@ -508,9 +548,11 @@ struct agg_tx_status {
* @tlc_info: TLC rate info
* @ra_tid: bits [3:0] = ra, bits [7:4] = tid
* @frame_ctrl: frame control
+ * @tx_queue: TX queue for this response
* @status: for non-agg: frame status TX_STATUS_*
* for agg: status of 1st frame, AGG_TX_STATE_*; other frame status fields
* follow this one, up to frame_count.
+ *	For version 6, the TX response isn't received for aggregation at all.
*
* After the array of statuses comes the SSN of the SCD. Look at
* %iwl_mvm_get_scd_ssn for more details.
@@ -537,9 +579,17 @@ struct iwl_mvm_tx_resp {
u8 tlc_info;
u8 ra_tid;
__le16 frame_ctrl;
-
- struct agg_tx_status status;
-} __packed; /* TX_RSP_API_S_VER_3 */
+ union {
+ struct {
+ struct agg_tx_status status;
+ } v3;/* TX_RSP_API_S_VER_3 */
+ struct {
+ __le16 tx_queue;
+ __le16 reserved2;
+ struct agg_tx_status status;
+ } v6;
+ };
+} __packed; /* TX_RSP_API_S_VER_6 */
/**
* struct iwl_mvm_ba_notif - notifies about reception of BA
@@ -579,11 +629,14 @@ struct iwl_mvm_ba_notif {
* struct iwl_mvm_compressed_ba_tfd - progress of a TFD queue
* @q_num: TFD queue number
* @tfd_index: Index of first un-acked frame in the TFD queue
+ * @scd_queue: For debug only - the physical queue the TFD queue is bound to
*/
struct iwl_mvm_compressed_ba_tfd {
- u8 q_num;
- u8 reserved;
+ __le16 q_num;
__le16 tfd_index;
+ u8 scd_queue;
+ u8 reserved;
+ __le16 reserved2;
} __packed; /* COMPRESSED_BA_TFD_API_S_VER_1 */
/**
@@ -635,6 +688,10 @@ enum iwl_mvm_ba_resp_flags {
* @tx_rate: the rate the aggregation was sent at
* @tfd_cnt: number of TFD-Q elements
* @ra_tid_cnt: number of RATID-Q elements
+ * @ba_tfd: array of TFD queue status updates. See &iwl_mvm_compressed_ba_tfd
+ * for details.
+ * @ra_tid: array of RA-TID queue status updates. For debug purposes only. See
+ * &iwl_mvm_compressed_ba_ratid for more details.
*/
struct iwl_mvm_compressed_ba_notif {
__le32 flags;
@@ -646,6 +703,7 @@ struct iwl_mvm_compressed_ba_notif {
__le16 query_frame_cnt;
__le16 txed;
__le16 done;
+ __le16 reserved;
__le32 wireless_time;
__le32 tx_rate;
__le16 tfd_cnt;
@@ -754,25 +812,6 @@ struct iwl_tx_path_flush_cmd {
__le16 reserved;
} __packed; /* TX_PATH_FLUSH_CMD_API_S_VER_1 */
-/**
- * iwl_mvm_get_scd_ssn - returns the SSN of the SCD
- * @tx_resp: the Tx response from the fw (agg or non-agg)
- *
- * When the fw sends an AMPDU, it fetches the MPDUs one after the other. Since
- * it can't know that everything will go well until the end of the AMPDU, it
- * can't know in advance the number of MPDUs that will be sent in the current
- * batch. This is why it writes the agg Tx response while it fetches the MPDUs.
- * Hence, it can't know in advance what the SSN of the SCD will be at the end
- * of the batch. This is why the SSN of the SCD is written at the end of the
- * whole struct at a variable offset. This function knows how to cope with the
- * variable offset and returns the SSN of the SCD.
- */
-static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm_tx_resp *tx_resp)
-{
- return le32_to_cpup((__le32 *)&tx_resp->status +
- tx_resp->frame_count) & 0xfff;
-}
-
/* Available options for the SCD_QUEUE_CFG HCMD */
enum iwl_scd_cfg_actions {
SCD_CFG_DISABLE_QUEUE = 0x0,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
index cf2b836f3888..f545c5f9e4e3 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
@@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -320,12 +320,14 @@ enum iwl_phy_ops_subcmd_ids {
CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0,
CTDP_CONFIG_CMD = 0x03,
TEMP_REPORTING_THRESHOLDS_CMD = 0x04,
+ GEO_TX_POWER_LIMIT = 0x05,
CT_KILL_NOTIFICATION = 0xFE,
DTS_MEASUREMENT_NOTIF_WIDE = 0xFF,
};
enum iwl_system_subcmd_ids {
SHARED_MEM_CFG_CMD = 0x0,
+ INIT_EXTENDED_CFG_CMD = 0x03,
};
enum iwl_data_path_subcmd_ids {
@@ -345,9 +347,10 @@ enum iwl_regulatory_and_nvm_subcmd_ids {
NVM_ACCESS_COMPLETE = 0x0,
};
-enum iwl_fmac_debug_cmds {
+enum iwl_debug_cmds {
LMAC_RD_WR = 0x0,
UMAC_RD_WR = 0x1,
+ MFU_ASSERT_DUMP_NTF = 0xFE,
};
/* command groups */
@@ -673,10 +676,8 @@ struct iwl_error_resp {
/* Common PHY, MAC and Bindings definitions */
-
#define MAX_MACS_IN_BINDING (3)
#define MAX_BINDINGS (4)
-#define AUX_BINDING_INDEX (3)
/* Used to extract ID and color from the context dword */
#define FW_CTXT_ID_POS (0)
@@ -689,7 +690,7 @@ struct iwl_error_resp {
(_color << FW_CTXT_COLOR_POS))
/* Possible actions on PHYs, MACs and Bindings */
-enum {
+enum iwl_phy_ctxt_action {
FW_CTXT_ACTION_STUB = 0,
FW_CTXT_ACTION_ADD,
FW_CTXT_ACTION_MODIFY,
@@ -960,6 +961,7 @@ struct iwl_time_event_notif {
* @action: action to perform, one of FW_CTXT_ACTION_*
* @macs: array of MAC id and colors which belong to the binding
* @phy: PHY id and color which belongs to the binding
+ * @lmac_id: the lmac id the binding belongs to
*/
struct iwl_binding_cmd {
/* COMMON_INDEX_HDR_API_S_VER_1 */
@@ -968,7 +970,13 @@ struct iwl_binding_cmd {
/* BINDING_DATA_API_S_VER_1 */
__le32 macs[MAX_MACS_IN_BINDING];
__le32 phy;
-} __packed; /* BINDING_CMD_API_S_VER_1 */
+ /* BINDING_CMD_API_S_VER_1 */
+ __le32 lmac_id;
+} __packed; /* BINDING_CMD_API_S_VER_2 */
+
+#define IWL_BINDING_CMD_SIZE_V1 offsetof(struct iwl_binding_cmd, lmac_id)
+#define IWL_LMAC_24G_INDEX 0
+#define IWL_LMAC_5G_INDEX 1
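
Because the new member is appended last, the V1 size falls out of offsetof(); a compile-time sanity sketch (not in the patch):

	BUILD_BUG_ON(IWL_BINDING_CMD_SIZE_V1 !=
		     sizeof(struct iwl_binding_cmd) - sizeof(__le32));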
/* The maximal number of fragments in the FW's schedule session */
#define IWL_MVM_MAX_QUOTA 128
@@ -990,6 +998,9 @@ struct iwl_time_quota_data {
* struct iwl_time_quota_cmd - configuration of time quota between bindings
* ( TIME_QUOTA_CMD = 0x2c )
* @quotas: allocations per binding
+ *	Note: on non-CDB the fourth one is the auxiliary mac and is
+ * essentially zero.
+ * On CDB the fourth one is a regular binding.
*/
struct iwl_time_quota_cmd {
struct iwl_time_quota_data quotas[MAX_BINDINGS];
@@ -1231,6 +1242,25 @@ struct iwl_mfuart_load_notif {
} __packed; /*MFU_LOADER_NTFY_API_S_VER_2*/
/**
+ * struct iwl_mfu_assert_dump_notif - mfuart dump logs
+ * ( MFU_ASSERT_DUMP_NTF = 0xfe )
+ * @assert_id: mfuart assert id that cause the notif
+ * @curr_reset_num: number of asserts since uptime
+ * @index_num: current chunk id
+ * @parts_num: total number of chunks
+ * @data_size: number of data bytes sent
+ * @data: data buffer
+ */
+struct iwl_mfu_assert_dump_notif {
+ __le32 assert_id;
+ __le32 curr_reset_num;
+ __le16 index_num;
+ __le16 parts_num;
+ __le32 data_size;
+ __le32 data[0];
+} __packed; /*MFU_DUMP_ASSERT_API_S_VER_1*/
+
+/**
* struct iwl_set_calib_default_cmd - set default value for calibration.
* ( SET_CALIB_DEFAULT_CMD = 0x8e )
* @calib_index: the calibration to set value for
@@ -1998,19 +2028,48 @@ struct iwl_shared_mem_cfg_v1 {
__le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
} __packed; /* SHARED_MEM_ALLOC_API_S_VER_2 */
+/**
+ * struct iwl_shared_mem_lmac_cfg - LMAC shared memory configuration
+ *
+ * @txfifo_addr: start addr of TXF0 (excluding the context table 0.5KB)
+ * @txfifo_size: size of TX FIFOs
+ * @rxfifo1_addr: RXF1 addr
+ * @rxfifo1_size: RXF1 size
+ */
+struct iwl_shared_mem_lmac_cfg {
+ __le32 txfifo_addr;
+ __le32 txfifo_size[TX_FIFO_MAX_NUM];
+ __le32 rxfifo1_addr;
+ __le32 rxfifo1_size;
+
+} __packed; /* SHARED_MEM_ALLOC_LMAC_API_S_VER_1 */
+
+/**
+ * struct iwl_shared_mem_cfg - shared memory configuration information from the FW
+ *
+ * @shared_mem_addr: shared memory address
+ * @shared_mem_size: shared memory size
+ * @sample_buff_addr: internal sample (mon/adc) buff addr
+ * @sample_buff_size: internal sample buff size
+ * @rxfifo2_addr: start addr of RXF2
+ * @rxfifo2_size: size of RXF2
+ * @page_buff_addr: used by UMAC and performance debug (page miss analysis),
+ * when paging is not supported this should be 0
+ * @page_buff_size: size of %page_buff_addr
+ * @lmac_num: number of LMACs (1 or 2)
+ * @lmac_smem: per-LMAC smem data
+ */
struct iwl_shared_mem_cfg {
__le32 shared_mem_addr;
__le32 shared_mem_size;
__le32 sample_buff_addr;
__le32 sample_buff_size;
- __le32 txfifo_addr;
- __le32 txfifo_size[TX_FIFO_MAX_NUM];
- __le32 rxfifo_size[RX_FIFO_MAX_NUM];
+ __le32 rxfifo2_addr;
+ __le32 rxfifo2_size;
__le32 page_buff_addr;
__le32 page_buff_size;
- __le32 rxfifo_addr;
- __le32 internal_txfifo_addr;
- __le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
+ __le32 lmac_num;
+ struct iwl_shared_mem_lmac_cfg lmac_smem[2];
} __packed; /* SHARED_MEM_ALLOC_API_S_VER_3 */
/**
@@ -2178,4 +2237,26 @@ struct iwl_nvm_access_complete_cmd {
__le32 reserved;
} __packed; /* NVM_ACCESS_COMPLETE_CMD_API_S_VER_1 */
+/**
+ * enum iwl_extended_cfg_flags - commands the driver may send before
+ *	finishing the init flow
+ * @IWL_INIT_DEBUG_CFG: driver is going to send debug config command
+ * @IWL_INIT_NVM: driver is going to send NVM_ACCESS commands
+ * @IWL_INIT_PHY: driver is going to send PHY_DB commands
+ */
+enum iwl_extended_cfg_flags {
+ IWL_INIT_DEBUG_CFG,
+ IWL_INIT_NVM,
+ IWL_INIT_PHY,
+};
+
+/**
+ * struct iwl_init_extended_cfg_cmd - mark what commands ucode should wait
+ *	for before finishing init flows
+ * @init_flags: values from &enum iwl_extended_cfg_flags
+ */
+struct iwl_init_extended_cfg_cmd {
+ __le32 init_flags;
+} __packed; /* INIT_EXTENDED_CFG_CMD_API_S_VER_1 */
+
#endif /* __fw_api_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
index a027b11bbdb3..7b86a4f1b574 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
@@ -7,7 +7,7 @@
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -99,10 +99,120 @@ static void iwl_mvm_read_radio_reg(struct iwl_mvm *mvm,
iwl_trans_release_nic_access(mvm->trans, &flags);
}
+static void iwl_mvm_dump_rxf(struct iwl_mvm *mvm,
+ struct iwl_fw_error_dump_data **dump_data,
+ int size, u32 offset, int fifo_num)
+{
+ struct iwl_fw_error_dump_fifo *fifo_hdr;
+ u32 *fifo_data;
+ u32 fifo_len;
+ int i;
+
+ fifo_hdr = (void *)(*dump_data)->data;
+ fifo_data = (void *)fifo_hdr->data;
+ fifo_len = size;
+
+ /* No need to try to read the data if the length is 0 */
+ if (fifo_len == 0)
+ return;
+
+ /* Add a TLV for the RXF */
+ (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF);
+ (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
+
+ fifo_hdr->fifo_num = cpu_to_le32(fifo_num);
+ fifo_hdr->available_bytes =
+ cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+ RXF_RD_D_SPACE + offset));
+ fifo_hdr->wr_ptr =
+ cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+ RXF_RD_WR_PTR + offset));
+ fifo_hdr->rd_ptr =
+ cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+ RXF_RD_RD_PTR + offset));
+ fifo_hdr->fence_ptr =
+ cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+ RXF_RD_FENCE_PTR + offset));
+ fifo_hdr->fence_mode =
+ cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+ RXF_SET_FENCE_MODE + offset));
+
+ /* Lock fence */
+ iwl_trans_write_prph(mvm->trans, RXF_SET_FENCE_MODE + offset, 0x1);
+	/* Set fence pointer to the same place as the WR pointer */
+ iwl_trans_write_prph(mvm->trans, RXF_LD_WR2FENCE + offset, 0x1);
+ /* Set fence offset */
+ iwl_trans_write_prph(mvm->trans,
+ RXF_LD_FENCE_OFFSET_ADDR + offset, 0x0);
+
+ /* Read FIFO */
+ fifo_len /= sizeof(u32); /* Size in DWORDS */
+ for (i = 0; i < fifo_len; i++)
+ fifo_data[i] = iwl_trans_read_prph(mvm->trans,
+ RXF_FIFO_RD_FENCE_INC +
+ offset);
+ *dump_data = iwl_fw_error_next_data(*dump_data);
+}
+
+static void iwl_mvm_dump_txf(struct iwl_mvm *mvm,
+ struct iwl_fw_error_dump_data **dump_data,
+ int size, u32 offset, int fifo_num)
+{
+ struct iwl_fw_error_dump_fifo *fifo_hdr;
+ u32 *fifo_data;
+ u32 fifo_len;
+ int i;
+
+ fifo_hdr = (void *)(*dump_data)->data;
+ fifo_data = (void *)fifo_hdr->data;
+ fifo_len = size;
+
+ /* No need to try to read the data if the length is 0 */
+ if (fifo_len == 0)
+ return;
+
+ /* Add a TLV for the FIFO */
+ (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXF);
+ (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
+
+ fifo_hdr->fifo_num = cpu_to_le32(fifo_num);
+ fifo_hdr->available_bytes =
+ cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+ TXF_FIFO_ITEM_CNT + offset));
+ fifo_hdr->wr_ptr =
+ cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+ TXF_WR_PTR + offset));
+ fifo_hdr->rd_ptr =
+ cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+ TXF_RD_PTR + offset));
+ fifo_hdr->fence_ptr =
+ cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+ TXF_FENCE_PTR + offset));
+ fifo_hdr->fence_mode =
+ cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+ TXF_LOCK_FENCE + offset));
+
+ /* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */
+ iwl_trans_write_prph(mvm->trans, TXF_READ_MODIFY_ADDR + offset,
+ TXF_WR_PTR + offset);
+
+ /* Dummy-read to advance the read pointer to the head */
+ iwl_trans_read_prph(mvm->trans, TXF_READ_MODIFY_DATA + offset);
+
+ /* Read FIFO */
+ fifo_len /= sizeof(u32); /* Size in DWORDS */
+ for (i = 0; i < fifo_len; i++)
+ fifo_data[i] = iwl_trans_read_prph(mvm->trans,
+ TXF_READ_MODIFY_DATA +
+ offset);
+ *dump_data = iwl_fw_error_next_data(*dump_data);
+}
+
static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
struct iwl_fw_error_dump_data **dump_data)
{
struct iwl_fw_error_dump_fifo *fifo_hdr;
+ struct iwl_mvm_shared_mem_cfg *cfg = &mvm->smem_cfg;
u32 *fifo_data;
u32 fifo_len;
unsigned long flags;
@@ -111,126 +221,47 @@ static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
if (!iwl_trans_grab_nic_access(mvm->trans, &flags))
return;
- /* Pull RXF data from all RXFs */
- for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++) {
- /*
- * Keep aside the additional offset that might be needed for
- * next RXF
- */
- u32 offset_diff = RXF_DIFF_FROM_PREV * i;
-
- fifo_hdr = (void *)(*dump_data)->data;
- fifo_data = (void *)fifo_hdr->data;
- fifo_len = mvm->shared_mem_cfg.rxfifo_size[i];
-
- /* No need to try to read the data if the length is 0 */
- if (fifo_len == 0)
- continue;
-
- /* Add a TLV for the RXF */
- (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF);
- (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
-
- fifo_hdr->fifo_num = cpu_to_le32(i);
- fifo_hdr->available_bytes =
- cpu_to_le32(iwl_trans_read_prph(mvm->trans,
- RXF_RD_D_SPACE +
- offset_diff));
- fifo_hdr->wr_ptr =
- cpu_to_le32(iwl_trans_read_prph(mvm->trans,
- RXF_RD_WR_PTR +
- offset_diff));
- fifo_hdr->rd_ptr =
- cpu_to_le32(iwl_trans_read_prph(mvm->trans,
- RXF_RD_RD_PTR +
- offset_diff));
- fifo_hdr->fence_ptr =
- cpu_to_le32(iwl_trans_read_prph(mvm->trans,
- RXF_RD_FENCE_PTR +
- offset_diff));
- fifo_hdr->fence_mode =
- cpu_to_le32(iwl_trans_read_prph(mvm->trans,
- RXF_SET_FENCE_MODE +
- offset_diff));
-
- /* Lock fence */
- iwl_trans_write_prph(mvm->trans,
- RXF_SET_FENCE_MODE + offset_diff, 0x1);
- /* Set fence pointer to the same place like WR pointer */
- iwl_trans_write_prph(mvm->trans,
- RXF_LD_WR2FENCE + offset_diff, 0x1);
- /* Set fence offset */
- iwl_trans_write_prph(mvm->trans,
- RXF_LD_FENCE_OFFSET_ADDR + offset_diff,
- 0x0);
-
- /* Read FIFO */
- fifo_len /= sizeof(u32); /* Size in DWORDS */
- for (j = 0; j < fifo_len; j++)
- fifo_data[j] = iwl_trans_read_prph(mvm->trans,
- RXF_FIFO_RD_FENCE_INC +
- offset_diff);
- *dump_data = iwl_fw_error_next_data(*dump_data);
- }
-
- /* Pull TXF data from all TXFs */
- for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); i++) {
+ /* Pull RXF1 */
+ iwl_mvm_dump_rxf(mvm, dump_data, cfg->lmac[0].rxfifo1_size, 0, 0);
+ /* Pull RXF2 */
+ iwl_mvm_dump_rxf(mvm, dump_data, cfg->rxfifo2_size,
+ RXF_DIFF_FROM_PREV, 1);
+ /* Pull LMAC2 RXF1 */
+ if (mvm->smem_cfg.num_lmacs > 1)
+ iwl_mvm_dump_rxf(mvm, dump_data, cfg->lmac[1].rxfifo1_size,
+ LMAC2_PRPH_OFFSET, 2);
+
+ /* Pull TXF data from LMAC1 */
+ for (i = 0; i < mvm->smem_cfg.num_txfifo_entries; i++) {
/* Mark the number of TXF we're pulling now */
iwl_trans_write_prph(mvm->trans, TXF_LARC_NUM, i);
+ iwl_mvm_dump_txf(mvm, dump_data, cfg->lmac[0].txfifo_size[i],
+ 0, i);
+ }
- fifo_hdr = (void *)(*dump_data)->data;
- fifo_data = (void *)fifo_hdr->data;
- fifo_len = mvm->shared_mem_cfg.txfifo_size[i];
-
- /* No need to try to read the data if the length is 0 */
- if (fifo_len == 0)
- continue;
-
- /* Add a TLV for the FIFO */
- (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXF);
- (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
-
- fifo_hdr->fifo_num = cpu_to_le32(i);
- fifo_hdr->available_bytes =
- cpu_to_le32(iwl_trans_read_prph(mvm->trans,
- TXF_FIFO_ITEM_CNT));
- fifo_hdr->wr_ptr =
- cpu_to_le32(iwl_trans_read_prph(mvm->trans,
- TXF_WR_PTR));
- fifo_hdr->rd_ptr =
- cpu_to_le32(iwl_trans_read_prph(mvm->trans,
- TXF_RD_PTR));
- fifo_hdr->fence_ptr =
- cpu_to_le32(iwl_trans_read_prph(mvm->trans,
- TXF_FENCE_PTR));
- fifo_hdr->fence_mode =
- cpu_to_le32(iwl_trans_read_prph(mvm->trans,
- TXF_LOCK_FENCE));
-
- /* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */
- iwl_trans_write_prph(mvm->trans, TXF_READ_MODIFY_ADDR,
- TXF_WR_PTR);
-
- /* Dummy-read to advance the read pointer to the head */
- iwl_trans_read_prph(mvm->trans, TXF_READ_MODIFY_DATA);
-
- /* Read FIFO */
- fifo_len /= sizeof(u32); /* Size in DWORDS */
- for (j = 0; j < fifo_len; j++)
- fifo_data[j] = iwl_trans_read_prph(mvm->trans,
- TXF_READ_MODIFY_DATA);
- *dump_data = iwl_fw_error_next_data(*dump_data);
+ /* Pull TXF data from LMAC2 */
+ if (mvm->smem_cfg.num_lmacs > 1) {
+ for (i = 0; i < mvm->smem_cfg.num_txfifo_entries; i++) {
+ /* Mark the number of TXF we're pulling now */
+ iwl_trans_write_prph(mvm->trans,
+ TXF_LARC_NUM + LMAC2_PRPH_OFFSET,
+ i);
+ iwl_mvm_dump_txf(mvm, dump_data,
+ cfg->lmac[1].txfifo_size[i],
+ LMAC2_PRPH_OFFSET,
+ i + cfg->num_txfifo_entries);
+ }
}
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
/* Pull UMAC internal TXF data from all TXFs */
for (i = 0;
- i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
+ i < ARRAY_SIZE(mvm->smem_cfg.internal_txfifo_size);
i++) {
fifo_hdr = (void *)(*dump_data)->data;
fifo_data = (void *)fifo_hdr->data;
- fifo_len = mvm->shared_mem_cfg.internal_txfifo_size[i];
+ fifo_len = mvm->smem_cfg.internal_txfifo_size[i];
/* No need to try to read the data if the length is 0 */
if (fifo_len == 0)
@@ -246,7 +277,7 @@ static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
/* Mark the number of TXF we're pulling now */
iwl_trans_write_prph(mvm->trans, TXF_CPU2_NUM, i +
- ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size));
+ mvm->smem_cfg.num_txfifo_entries);
fifo_hdr->available_bytes =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
@@ -553,31 +584,45 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
/* reading RXF/TXF sizes */
if (test_bit(STATUS_FW_ERROR, &mvm->trans->status)) {
- struct iwl_mvm_shared_mem_cfg *mem_cfg = &mvm->shared_mem_cfg;
+ struct iwl_mvm_shared_mem_cfg *mem_cfg = &mvm->smem_cfg;
fifo_data_len = 0;
- /* Count RXF size */
- for (i = 0; i < ARRAY_SIZE(mem_cfg->rxfifo_size); i++) {
- if (!mem_cfg->rxfifo_size[i])
- continue;
-
+ /* Count RXF2 size */
+ if (mem_cfg->rxfifo2_size) {
/* Add header info */
- fifo_data_len += mem_cfg->rxfifo_size[i] +
+ fifo_data_len += mem_cfg->rxfifo2_size +
sizeof(*dump_data) +
sizeof(struct iwl_fw_error_dump_fifo);
}
- for (i = 0; i < mem_cfg->num_txfifo_entries; i++) {
- if (!mem_cfg->txfifo_size[i])
+ /* Count RXF1 sizes */
+ for (i = 0; i < mem_cfg->num_lmacs; i++) {
+ if (!mem_cfg->lmac[i].rxfifo1_size)
continue;
/* Add header info */
- fifo_data_len += mem_cfg->txfifo_size[i] +
+ fifo_data_len += mem_cfg->lmac[i].rxfifo1_size +
sizeof(*dump_data) +
sizeof(struct iwl_fw_error_dump_fifo);
}
+ /* Count TXF sizes */
+ for (i = 0; i < mem_cfg->num_lmacs; i++) {
+ int j;
+
+ for (j = 0; j < mem_cfg->num_txfifo_entries; j++) {
+ if (!mem_cfg->lmac[i].txfifo_size[j])
+ continue;
+
+ /* Add header info */
+ fifo_data_len +=
+ mem_cfg->lmac[i].txfifo_size[j] +
+ sizeof(*dump_data) +
+ sizeof(struct iwl_fw_error_dump_fifo);
+ }
+ }
+
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
for (i = 0;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 45cb4f476e76..e6c9528eeeda 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,6 +34,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -271,6 +272,27 @@ static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
return 0;
}
+void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_mfu_assert_dump_notif *mfu_dump_notif = (void *)pkt->data;
+ __le32 *dump_data = mfu_dump_notif->data;
+ int n_words = le32_to_cpu(mfu_dump_notif->data_size) / sizeof(__le32);
+ int i;
+
+ if (mfu_dump_notif->index_num == 0)
+ IWL_INFO(mvm, "MFUART assert id 0x%x occurred\n",
+ le32_to_cpu(mfu_dump_notif->assert_id));
+
+ for (i = 0; i < n_words; i++)
+ IWL_DEBUG_INFO(mvm,
+ "MFUART assert dump, dword %u: 0x%08x\n",
+ le16_to_cpu(mfu_dump_notif->index_num) *
+ n_words + i,
+ le32_to_cpu(dump_data[i]));
+}
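
A worked example of the dword indexing above (numbers invented): with data_size = 16 bytes per chunk, n_words is 4, so the chunk carrying index_num = 2 logs dwords 8 through 11 of the overall dump.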
+
static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
const struct fw_img *image)
{
@@ -617,11 +639,18 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
MVM_UCODE_ALIVE_TIMEOUT);
if (ret) {
- if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ struct iwl_trans *trans = mvm->trans;
+
+ if (trans->cfg->gen2)
+ IWL_ERR(mvm,
+ "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
+ iwl_read_prph(trans, UMAG_SB_CPU_1_STATUS),
+ iwl_read_prph(trans, UMAG_SB_CPU_2_STATUS));
+ else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
IWL_ERR(mvm,
"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
- iwl_read_prph(mvm->trans, SB_CPU_1_STATUS),
- iwl_read_prph(mvm->trans, SB_CPU_2_STATUS));
+ iwl_read_prph(trans, SB_CPU_1_STATUS),
+ iwl_read_prph(trans, SB_CPU_2_STATUS));
mvm->cur_ucode = old_type;
return ret;
}
@@ -669,6 +698,82 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
return 0;
}
+static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
+{
+ struct iwl_notification_wait init_wait;
+ struct iwl_nvm_access_complete_cmd nvm_complete = {};
+ struct iwl_init_extended_cfg_cmd init_cfg = {
+ .init_flags = cpu_to_le32(BIT(IWL_INIT_NVM)),
+ };
+ static const u16 init_complete[] = {
+ INIT_COMPLETE_NOTIF,
+ };
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ iwl_init_notification_wait(&mvm->notif_wait,
+ &init_wait,
+ init_complete,
+ ARRAY_SIZE(init_complete),
+ iwl_wait_init_complete,
+ NULL);
+
+ /* Will also start the device */
+ ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
+ goto error;
+ }
+
+ /* Send init config command to mark that we are sending NVM access
+ * commands
+ */
+ ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(SYSTEM_GROUP,
+ INIT_EXTENDED_CFG_CMD), 0,
+ sizeof(init_cfg), &init_cfg);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to run init config command: %d\n",
+ ret);
+ goto error;
+ }
+
+ /* Read the NVM only at driver load time, no need to do this twice */
+ if (read_nvm) {
+ /* Read nvm */
+ ret = iwl_nvm_init(mvm, true);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
+ goto error;
+ }
+ }
+
+ /* In case we read the NVM from external file, load it to the NIC */
+ if (mvm->nvm_file_name)
+ iwl_mvm_load_nvm_to_nic(mvm);
+
+ ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
+ if (WARN_ON(ret))
+ goto error;
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
+ NVM_ACCESS_COMPLETE), 0,
+ sizeof(nvm_complete), &nvm_complete);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to run complete NVM access: %d\n",
+ ret);
+ goto error;
+ }
+
+ /* We wait for the INIT complete notification */
+ return iwl_wait_notification(&mvm->notif_wait, &init_wait,
+ MVM_UCODE_ALIVE_TIMEOUT);
+
+error:
+ iwl_remove_notification(&mvm->notif_wait, &init_wait);
+ return ret;
+}
+
static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
{
struct iwl_phy_cfg_cmd phy_cfg_cmd;
@@ -697,6 +802,9 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
};
int ret;
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return iwl_run_unified_mvm_ucode(mvm, true);
+
lockdep_assert_held(&mvm->mutex);
if (WARN_ON_ONCE(mvm->calibrating))
@@ -803,97 +911,31 @@ out:
return ret;
}
-int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
-{
- struct iwl_notification_wait init_wait;
- struct iwl_nvm_access_complete_cmd nvm_complete = {};
- static const u16 init_complete[] = {
- INIT_COMPLETE_NOTIF,
- };
- int ret;
-
- lockdep_assert_held(&mvm->mutex);
-
- iwl_init_notification_wait(&mvm->notif_wait,
- &init_wait,
- init_complete,
- ARRAY_SIZE(init_complete),
- iwl_wait_init_complete,
- NULL);
-
- /* Will also start the device */
- ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
- if (ret) {
- IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
- goto error;
- }
-
- /* TODO: remove when integrating context info */
- ret = iwl_mvm_init_paging(mvm);
- if (ret) {
- IWL_ERR(mvm, "Failed to init paging: %d\n",
- ret);
- goto error;
- }
-
- /* Read the NVM only at driver load time, no need to do this twice */
- if (read_nvm) {
- /* Read nvm */
- ret = iwl_nvm_init(mvm, true);
- if (ret) {
- IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
- goto error;
- }
- }
-
- /* In case we read the NVM from external file, load it to the NIC */
- if (mvm->nvm_file_name)
- iwl_mvm_load_nvm_to_nic(mvm);
-
- ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
- if (WARN_ON(ret))
- goto error;
-
- ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
- NVM_ACCESS_COMPLETE), 0,
- sizeof(nvm_complete), &nvm_complete);
- if (ret) {
- IWL_ERR(mvm, "Failed to run complete NVM access: %d\n",
- ret);
- goto error;
- }
-
- /* We wait for the INIT complete notification */
- return iwl_wait_notification(&mvm->notif_wait, &init_wait,
- MVM_UCODE_ALIVE_TIMEOUT);
-
-error:
- iwl_remove_notification(&mvm->notif_wait, &init_wait);
- return ret;
-}
-
static void iwl_mvm_parse_shared_mem_a000(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
{
struct iwl_shared_mem_cfg *mem_cfg = (void *)pkt->data;
- int i;
+ int i, lmac;
+ int lmac_num = le32_to_cpu(mem_cfg->lmac_num);
- mvm->shared_mem_cfg.num_txfifo_entries =
- ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size);
- for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++)
- mvm->shared_mem_cfg.txfifo_size[i] =
- le32_to_cpu(mem_cfg->txfifo_size[i]);
- for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++)
- mvm->shared_mem_cfg.rxfifo_size[i] =
- le32_to_cpu(mem_cfg->rxfifo_size[i]);
+ if (WARN_ON(lmac_num > ARRAY_SIZE(mem_cfg->lmac_smem)))
+ return;
+
+ mvm->smem_cfg.num_lmacs = lmac_num;
+ mvm->smem_cfg.num_txfifo_entries =
+ ARRAY_SIZE(mem_cfg->lmac_smem[0].txfifo_size);
+ mvm->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo2_size);
- BUILD_BUG_ON(sizeof(mvm->shared_mem_cfg.internal_txfifo_size) !=
- sizeof(mem_cfg->internal_txfifo_size));
+ for (lmac = 0; lmac < lmac_num; lmac++) {
+ struct iwl_shared_mem_lmac_cfg *lmac_cfg =
+ &mem_cfg->lmac_smem[lmac];
- for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
- i++)
- mvm->shared_mem_cfg.internal_txfifo_size[i] =
- le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
+ for (i = 0; i < ARRAY_SIZE(lmac_cfg->txfifo_size); i++)
+ mvm->smem_cfg.lmac[lmac].txfifo_size[i] =
+ le32_to_cpu(lmac_cfg->txfifo_size[i]);
+ mvm->smem_cfg.lmac[lmac].rxfifo1_size =
+ le32_to_cpu(lmac_cfg->rxfifo1_size);
+ }
}
static void iwl_mvm_parse_shared_mem(struct iwl_mvm *mvm,
@@ -902,25 +944,27 @@ static void iwl_mvm_parse_shared_mem(struct iwl_mvm *mvm,
struct iwl_shared_mem_cfg_v1 *mem_cfg = (void *)pkt->data;
int i;
- mvm->shared_mem_cfg.num_txfifo_entries =
- ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size);
+ mvm->smem_cfg.num_lmacs = 1;
+
+ mvm->smem_cfg.num_txfifo_entries = ARRAY_SIZE(mem_cfg->txfifo_size);
for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++)
- mvm->shared_mem_cfg.txfifo_size[i] =
+ mvm->smem_cfg.lmac[0].txfifo_size[i] =
le32_to_cpu(mem_cfg->txfifo_size[i]);
- for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++)
- mvm->shared_mem_cfg.rxfifo_size[i] =
- le32_to_cpu(mem_cfg->rxfifo_size[i]);
+
+ mvm->smem_cfg.lmac[0].rxfifo1_size =
+ le32_to_cpu(mem_cfg->rxfifo_size[0]);
+ mvm->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo_size[1]);
/* new API has more data, from rxfifo_addr field and on */
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
- BUILD_BUG_ON(sizeof(mvm->shared_mem_cfg.internal_txfifo_size) !=
+ BUILD_BUG_ON(sizeof(mvm->smem_cfg.internal_txfifo_size) !=
sizeof(mem_cfg->internal_txfifo_size));
for (i = 0;
- i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
+ i < ARRAY_SIZE(mvm->smem_cfg.internal_txfifo_size);
i++)
- mvm->shared_mem_cfg.internal_txfifo_size[i] =
+ mvm->smem_cfg.internal_txfifo_size[i] =
le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
}
}
@@ -969,85 +1013,94 @@ static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
sizeof(cmd), &cmd);
}
-#define ACPI_WRDS_METHOD "WRDS"
-#define ACPI_WRDS_WIFI (0x07)
-#define ACPI_WRDS_TABLE_SIZE 10
+#ifdef CONFIG_ACPI
+#define ACPI_WRDS_METHOD "WRDS"
+#define ACPI_EWRD_METHOD "EWRD"
+#define ACPI_WGDS_METHOD "WGDS"
+#define ACPI_WIFI_DOMAIN (0x07)
+#define ACPI_WRDS_WIFI_DATA_SIZE (IWL_MVM_SAR_TABLE_SIZE + 2)
+#define ACPI_EWRD_WIFI_DATA_SIZE ((IWL_MVM_SAR_PROFILE_NUM - 1) * \
+ IWL_MVM_SAR_TABLE_SIZE + 3)
+#define ACPI_WGDS_WIFI_DATA_SIZE 18
+#define ACPI_WGDS_NUM_BANDS 2
+#define ACPI_WGDS_TABLE_SIZE 3
+
+static int iwl_mvm_sar_set_profile(struct iwl_mvm *mvm,
+ union acpi_object *table,
+ struct iwl_mvm_sar_profile *profile,
+ bool enabled)
+{
+ int i;
-struct iwl_mvm_sar_table {
- bool enabled;
- u8 values[ACPI_WRDS_TABLE_SIZE];
-};
+ profile->enabled = enabled;
-#ifdef CONFIG_ACPI
-static int iwl_mvm_sar_get_wrds(struct iwl_mvm *mvm, union acpi_object *wrds,
- struct iwl_mvm_sar_table *sar_table)
+ for (i = 0; i < IWL_MVM_SAR_TABLE_SIZE; i++) {
+ if ((table[i].type != ACPI_TYPE_INTEGER) ||
+ (table[i].integer.value > U8_MAX))
+ return -EINVAL;
+
+ profile->table[i] = table[i].integer.value;
+ }
+
+ return 0;
+}
+
+static union acpi_object *iwl_mvm_sar_find_wifi_pkg(struct iwl_mvm *mvm,
+ union acpi_object *data,
+ int data_size)
{
- union acpi_object *data_pkg;
- u32 i;
+ int i;
+ union acpi_object *wifi_pkg;
- /* We need at least two packages, one for the revision and one
+ /*
+ * We need at least two packages, one for the revision and one
* for the data itself. Also check that the revision is valid
* (i.e. it is an integer set to 0).
- */
- if (wrds->type != ACPI_TYPE_PACKAGE ||
- wrds->package.count < 2 ||
- wrds->package.elements[0].type != ACPI_TYPE_INTEGER ||
- wrds->package.elements[0].integer.value != 0) {
- IWL_DEBUG_RADIO(mvm, "Unsupported wrds structure\n");
- return -EINVAL;
+ */
+ if (data->type != ACPI_TYPE_PACKAGE ||
+ data->package.count < 2 ||
+ data->package.elements[0].type != ACPI_TYPE_INTEGER ||
+ data->package.elements[0].integer.value != 0) {
+ IWL_DEBUG_RADIO(mvm, "Unsupported packages structure\n");
+ return ERR_PTR(-EINVAL);
}
/* loop through all the packages to find the one for WiFi */
- for (i = 1; i < wrds->package.count; i++) {
+ for (i = 1; i < data->package.count; i++) {
union acpi_object *domain;
- data_pkg = &wrds->package.elements[i];
+ wifi_pkg = &data->package.elements[i];
/* Skip anything that is not a package with the right
* amount of elements (i.e. domain_type,
- * enabled/disabled plus the sar table size.
+ * enabled/disabled plus the actual data size.
*/
- if (data_pkg->type != ACPI_TYPE_PACKAGE ||
- data_pkg->package.count != ACPI_WRDS_TABLE_SIZE + 2)
+ if (wifi_pkg->type != ACPI_TYPE_PACKAGE ||
+ wifi_pkg->package.count != data_size)
continue;
- domain = &data_pkg->package.elements[0];
+ domain = &wifi_pkg->package.elements[0];
if (domain->type == ACPI_TYPE_INTEGER &&
- domain->integer.value == ACPI_WRDS_WIFI)
+ domain->integer.value == ACPI_WIFI_DOMAIN)
break;
- data_pkg = NULL;
+ wifi_pkg = NULL;
}
- if (!data_pkg)
- return -ENOENT;
-
- if (data_pkg->package.elements[1].type != ACPI_TYPE_INTEGER)
- return -EINVAL;
-
- sar_table->enabled = !!(data_pkg->package.elements[1].integer.value);
-
- for (i = 0; i < ACPI_WRDS_TABLE_SIZE; i++) {
- union acpi_object *entry;
-
- entry = &data_pkg->package.elements[i + 2];
- if ((entry->type != ACPI_TYPE_INTEGER) ||
- (entry->integer.value > U8_MAX))
- return -EINVAL;
-
- sar_table->values[i] = entry->integer.value;
- }
+ if (!wifi_pkg)
+ return ERR_PTR(-ENOENT);
- return 0;
+ return wifi_pkg;
}
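+
+/*
+ * Illustrative WRDS return value (ASL-like sketch, not taken from any
+ * real BIOS):
+ *   Package() {
+ *     0,                                    // revision
+ *     Package() { 0x07, 1, v1, ..., v10 }, // domain, enabled, table
+ *   }
+ */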
-static int iwl_mvm_sar_get_table(struct iwl_mvm *mvm,
- struct iwl_mvm_sar_table *sar_table)
+static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm)
{
+ union acpi_object *wifi_pkg, *table;
acpi_handle root_handle;
acpi_handle handle;
struct acpi_buffer wrds = {ACPI_ALLOCATE_BUFFER, NULL};
acpi_status status;
+ bool enabled;
int ret;
root_handle = ACPI_HANDLE(mvm->dev);
@@ -1072,62 +1125,307 @@ static int iwl_mvm_sar_get_table(struct iwl_mvm *mvm,
return -ENOENT;
}
- ret = iwl_mvm_sar_get_wrds(mvm, wrds.pointer, sar_table);
+ wifi_pkg = iwl_mvm_sar_find_wifi_pkg(mvm, wrds.pointer,
+ ACPI_WRDS_WIFI_DATA_SIZE);
+ if (IS_ERR(wifi_pkg)) {
+ ret = PTR_ERR(wifi_pkg);
+ goto out_free;
+ }
+
+ if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ enabled = !!(wifi_pkg->package.elements[1].integer.value);
+
+ /* position of the actual table */
+ table = &wifi_pkg->package.elements[2];
+
+ /* The profile from WRDS is officially profile 1, but goes
+ * into sar_profiles[0] (because we don't have a profile 0).
+ */
+ ret = iwl_mvm_sar_set_profile(mvm, table, &mvm->sar_profiles[0],
+ enabled);
+
+out_free:
kfree(wrds.pointer);
+ return ret;
+}
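+
+/*
+ * EWRD layout sketch: the WiFi package holds { domain, enabled,
+ * n_profiles, <table 2>, <table 3>, <table 4> }, i.e. up to
+ * (IWL_MVM_SAR_PROFILE_NUM - 1) additional tables of
+ * IWL_MVM_SAR_TABLE_SIZE entries each.
+ */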
+static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
+{
+ union acpi_object *wifi_pkg;
+ acpi_handle root_handle;
+ acpi_handle handle;
+ struct acpi_buffer ewrd = {ACPI_ALLOCATE_BUFFER, NULL};
+ acpi_status status;
+ bool enabled;
+	int i, n_profiles, pos, ret;
+
+ root_handle = ACPI_HANDLE(mvm->dev);
+ if (!root_handle) {
+ IWL_DEBUG_RADIO(mvm,
+ "Could not retrieve root port ACPI handle\n");
+ return -ENOENT;
+ }
+
+ /* Get the method's handle */
+ status = acpi_get_handle(root_handle, (acpi_string)ACPI_EWRD_METHOD,
+ &handle);
+ if (ACPI_FAILURE(status)) {
+ IWL_DEBUG_RADIO(mvm, "EWRD method not found\n");
+ return -ENOENT;
+ }
+
+ /* Call EWRD with no arguments */
+ status = acpi_evaluate_object(handle, NULL, NULL, &ewrd);
+ if (ACPI_FAILURE(status)) {
+ IWL_DEBUG_RADIO(mvm, "EWRD invocation failed (0x%x)\n", status);
+ return -ENOENT;
+ }
+
+ wifi_pkg = iwl_mvm_sar_find_wifi_pkg(mvm, ewrd.pointer,
+ ACPI_EWRD_WIFI_DATA_SIZE);
+ if (IS_ERR(wifi_pkg)) {
+ ret = PTR_ERR(wifi_pkg);
+ goto out_free;
+ }
+
+ if ((wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) ||
+ (wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER)) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ enabled = !!(wifi_pkg->package.elements[1].integer.value);
+ n_profiles = wifi_pkg->package.elements[2].integer.value;
+
+	/* in case of a buggy BIOS, reject bogus profile counts (the array
+	 * only has room for IWL_MVM_SAR_PROFILE_NUM - 1 extra profiles)
+	 */
+	if (n_profiles <= 0 || n_profiles >= IWL_MVM_SAR_PROFILE_NUM) {
+		ret = -EINVAL;
+		goto out_free;
+	}
+
+	/* the tables start at element 3; a static position would leak
+	 * state across calls, so track it in a local instead
+	 */
+	pos = 3;
+
+	for (i = 0; i < n_profiles; i++) {
+
+ /* The EWRD profiles officially go from 2 to 4, but we
+ * save them in sar_profiles[1-3] (because we don't
+ * have profile 0). So in the array we start from 1.
+ */
+ ret = iwl_mvm_sar_set_profile(mvm,
+ &wifi_pkg->package.elements[pos],
+ &mvm->sar_profiles[i + 1],
+ enabled);
+ if (ret < 0)
+ break;
+
+ /* go to the next table */
+ pos += IWL_MVM_SAR_TABLE_SIZE;
+ }
+
+out_free:
+ kfree(ewrd.pointer);
return ret;
}
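+
+/*
+ * WGDS layout sketch: after the domain ID, IWL_NUM_GEO_PROFILES (3,
+ * per the BUILD_BUG_ON in iwl_mvm_sar_geo_init()) geo profiles follow,
+ * each with ACPI_WGDS_NUM_BANDS (2) bands of ACPI_WGDS_TABLE_SIZE (3)
+ * values: max_tx_power, chain_a, chain_b.
+ */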
-#else /* CONFIG_ACPI */
-static int iwl_mvm_sar_get_table(struct iwl_mvm *mvm,
- struct iwl_mvm_sar_table *sar_table)
+
+static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm,
+ struct iwl_mvm_geo_table *geo_table)
{
- return -ENOENT;
+ union acpi_object *wifi_pkg;
+ acpi_handle root_handle;
+ acpi_handle handle;
+ struct acpi_buffer wgds = {ACPI_ALLOCATE_BUFFER, NULL};
+ acpi_status status;
+ int i, ret;
+
+ root_handle = ACPI_HANDLE(mvm->dev);
+ if (!root_handle) {
+ IWL_DEBUG_RADIO(mvm,
+ "Could not retrieve root port ACPI handle\n");
+ return -ENOENT;
+ }
+
+ /* Get the method's handle */
+ status = acpi_get_handle(root_handle, (acpi_string)ACPI_WGDS_METHOD,
+ &handle);
+ if (ACPI_FAILURE(status)) {
+ IWL_DEBUG_RADIO(mvm, "WGDS method not found\n");
+ return -ENOENT;
+ }
+
+ /* Call WGDS with no arguments */
+ status = acpi_evaluate_object(handle, NULL, NULL, &wgds);
+ if (ACPI_FAILURE(status)) {
+ IWL_DEBUG_RADIO(mvm, "WGDS invocation failed (0x%x)\n", status);
+ return -ENOENT;
+ }
+
+	/* the package carries the domain ID ahead of the 18 data values */
+	wifi_pkg = iwl_mvm_sar_find_wifi_pkg(mvm, wgds.pointer,
+					     ACPI_WGDS_WIFI_DATA_SIZE + 1);
+ if (IS_ERR(wifi_pkg)) {
+ ret = PTR_ERR(wifi_pkg);
+ goto out_free;
+ }
+
+ for (i = 0; i < ACPI_WGDS_WIFI_DATA_SIZE; i++) {
+ union acpi_object *entry;
+
+ entry = &wifi_pkg->package.elements[i + 1];
+		if ((entry->type != ACPI_TYPE_INTEGER) ||
+		    (entry->integer.value > U8_MAX)) {
+			ret = -EINVAL;
+			goto out_free;
+		}
+
+ geo_table->values[i] = entry->integer.value;
+ }
+ ret = 0;
+out_free:
+ kfree(wgds.pointer);
+ return ret;
}
-#endif /* CONFIG_ACPI */
-static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
+int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
{
- struct iwl_mvm_sar_table sar_table;
struct iwl_dev_tx_power_cmd cmd = {
.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
};
- int ret, i, j, idx;
+ int i, j, idx;
+ int profs[IWL_NUM_CHAIN_LIMITS] = { prof_a, prof_b };
int len = sizeof(cmd);
+ BUILD_BUG_ON(IWL_NUM_CHAIN_LIMITS < 2);
+ BUILD_BUG_ON(IWL_NUM_CHAIN_LIMITS * IWL_NUM_SUB_BANDS !=
+ IWL_MVM_SAR_TABLE_SIZE);
+
if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
len = sizeof(cmd.v3);
- ret = iwl_mvm_sar_get_table(mvm, &sar_table);
+ for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
+ struct iwl_mvm_sar_profile *prof;
+
+ /* don't allow SAR to be disabled (profile 0 means disable) */
+ if (profs[i] == 0)
+ return -EPERM;
+
+ /* we are off by one, so allow up to IWL_MVM_SAR_PROFILE_NUM */
+ if (profs[i] > IWL_MVM_SAR_PROFILE_NUM)
+ return -EINVAL;
+
+ /* profiles go from 1 to 4, so decrement to access the array */
+ prof = &mvm->sar_profiles[profs[i] - 1];
+
+ /* if the profile is disabled, do nothing */
+ if (!prof->enabled) {
+ IWL_DEBUG_RADIO(mvm, "SAR profile %d is disabled.\n",
+ profs[i]);
+ /* if one of the profiles is disabled, we fail all */
+ return -ENOENT;
+ }
+
+ IWL_DEBUG_RADIO(mvm, " Chain[%d]:\n", i);
+ for (j = 0; j < IWL_NUM_SUB_BANDS; j++) {
+ idx = (i * IWL_NUM_SUB_BANDS) + j;
+ cmd.v3.per_chain_restriction[i][j] =
+ cpu_to_le16(prof->table[idx]);
+ IWL_DEBUG_RADIO(mvm, " Band[%d] = %d * .125dBm\n",
+ j, prof->table[idx]);
+ }
+ }
+
+ IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");
+
+ return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
+}
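+
+/*
+ * Usage sketch: iwl_mvm_sar_init() applies the WRDS profile to both
+ * chains via iwl_mvm_sar_select_profile(mvm, 1, 1); other callers may
+ * pick any other combination of the 1-based profile numbers.
+ */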
+
+static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
+{
+ struct iwl_mvm_geo_table geo_table;
+ struct iwl_geo_tx_power_profiles_cmd cmd = {
+ .ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES),
+ };
+ int ret, i, j, idx;
+ u16 cmd_wide_id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT);
+
+ ret = iwl_mvm_sar_get_wgds_table(mvm, &geo_table);
if (ret < 0) {
IWL_DEBUG_RADIO(mvm,
- "SAR BIOS table invalid or unavailable. (%d)\n",
+ "Geo SAR BIOS table invalid or unavailable. (%d)\n",
ret);
/* we don't fail if the table is not available */
return 0;
}
- if (!sar_table.enabled)
- return 0;
+ IWL_DEBUG_RADIO(mvm, "Sending GEO_TX_POWER_LIMIT\n");
- IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");
+ BUILD_BUG_ON(IWL_NUM_GEO_PROFILES * ACPI_WGDS_NUM_BANDS *
+ ACPI_WGDS_TABLE_SIZE != ACPI_WGDS_WIFI_DATA_SIZE);
- BUILD_BUG_ON(IWL_NUM_CHAIN_LIMITS * IWL_NUM_SUB_BANDS !=
- ACPI_WRDS_TABLE_SIZE);
+ for (i = 0; i < IWL_NUM_GEO_PROFILES; i++) {
+ struct iwl_per_chain_offset *chain =
+ (struct iwl_per_chain_offset *)&cmd.table[i];
- for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
- IWL_DEBUG_RADIO(mvm, " Chain[%d]:\n", i);
- for (j = 0; j < IWL_NUM_SUB_BANDS; j++) {
- idx = (i * IWL_NUM_SUB_BANDS) + j;
- cmd.v3.per_chain_restriction[i][j] =
- cpu_to_le16(sar_table.values[idx]);
- IWL_DEBUG_RADIO(mvm, " Band[%d] = %d * .125dBm\n",
- j, sar_table.values[idx]);
+ for (j = 0; j < ACPI_WGDS_NUM_BANDS; j++) {
+ u8 *value;
+
+ idx = i * ACPI_WGDS_NUM_BANDS * ACPI_WGDS_TABLE_SIZE +
+ j * ACPI_WGDS_TABLE_SIZE;
+ value = &geo_table.values[idx];
+ chain[j].max_tx_power = cpu_to_le16(value[0]);
+ chain[j].chain_a = value[1];
+ chain[j].chain_b = value[2];
+ IWL_DEBUG_RADIO(mvm,
+ "SAR geographic profile[%d] Band[%d]: chain A = %d chain B = %d max_tx_power = %d\n",
+ i, j, value[1], value[2], value[0]);
}
}
+ return iwl_mvm_send_cmd_pdu(mvm, cmd_wide_id, 0, sizeof(cmd), &cmd);
+}
- ret = iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
- if (ret)
- IWL_ERR(mvm, "failed to set per-chain TX power: %d\n", ret);
+#else /* CONFIG_ACPI */
+static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm)
+{
+ return -ENOENT;
+}
+
+static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
+{
+ return -ENOENT;
+}
+
+static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
+{
+ return 0;
+}
+#endif /* CONFIG_ACPI */
+
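+/*
+ * SAR init flow: WRDS fills profile 1, EWRD optionally fills profiles
+ * 2-4, and profile 1 is then applied to both TX chains by default; the
+ * WGDS geo offsets are sent separately from iwl_mvm_sar_geo_init()
+ * during iwl_mvm_up().
+ */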
+static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
+{
+ int ret;
+
+ ret = iwl_mvm_sar_get_wrds_table(mvm);
+ if (ret < 0) {
+ IWL_DEBUG_RADIO(mvm,
+ "WRDS SAR BIOS table invalid or unavailable. (%d)\n",
+ ret);
+ /* if not available, don't fail and don't bother with EWRD */
+ return 0;
+ }
+
+ ret = iwl_mvm_sar_get_ewrd_table(mvm);
+ /* if EWRD is not available, we can still use WRDS, so don't fail */
+ if (ret < 0)
+ IWL_DEBUG_RADIO(mvm,
+ "EWRD SAR BIOS table invalid or unavailable. (%d)\n",
+ ret);
+
+ /* choose profile 1 (WRDS) as default for both chains */
+ ret = iwl_mvm_sar_select_profile(mvm, 1, 1);
+
+	/* if the default profile (WRDS) is disabled, just skip SAR */
+ if (ret == -ENOENT)
+ return 0;
return ret;
}
@@ -1219,7 +1517,8 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
}
/* Init RSS configuration */
- if (iwl_mvm_has_new_rx_api(mvm)) {
+ /* TODO - remove a000 disablement when we have RXQ config API */
+ if (iwl_mvm_has_new_rx_api(mvm) && !iwl_mvm_has_new_tx_api(mvm)) {
ret = iwl_send_rss_cfg_cmd(mvm);
if (ret) {
IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
@@ -1229,10 +1528,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
}
/* init the fw <-> mac80211 STA mapping */
- for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
+ for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++)
RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
- mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
+ mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
/* reset quota debouncing buffer - 0xff will yield invalid data */
memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd));
@@ -1313,10 +1612,6 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
goto error;
}
- if (iwl_mvm_is_csum_supported(mvm) &&
- mvm->cfg->features & NETIF_F_RXCSUM)
- iwl_trans_write_prph(mvm->trans, RX_EN_CSUM, 0x3);
-
/* allow FW/transport low power modes if not during restart */
if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
@@ -1325,6 +1620,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
if (ret)
goto error;
+ ret = iwl_mvm_sar_geo_init(mvm);
+ if (ret)
+ goto error;
+
IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
return 0;
error:
@@ -1362,7 +1661,7 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
goto error;
/* init the fw <-> mac80211 STA mapping */
- for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
+ for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++)
RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
/* Add auxiliary station for scanning */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index c5734e1a02d2..0f1831b41915 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -467,13 +467,19 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
queue = IWL_MVM_DQA_GCAST_QUEUE;
}
+ /*
+	 * For TVQM this will be overwritten later with the FW-assigned
+ * queue value (when queue is enabled).
+ */
+ mvmvif->cab_queue = queue;
vif->cab_queue = queue;
} else {
vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
}
- mvmvif->bcast_sta.sta_id = IWL_MVM_STATION_COUNT;
- mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
+ mvmvif->bcast_sta.sta_id = IWL_MVM_INVALID_STA;
+ mvmvif->mcast_sta.sta_id = IWL_MVM_INVALID_STA;
+ mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++)
mvmvif->smps_requests[i] = IEEE80211_SMPS_AUTOMATIC;
@@ -901,7 +907,7 @@ static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm,
/* Allocate sniffer station */
ret = iwl_mvm_allocate_int_sta(mvm, &mvm->snif_sta, tfd_queue_msk,
- vif->type);
+ vif->type, IWL_STA_GENERAL_PURPOSE);
if (ret)
return ret;
@@ -1222,7 +1228,9 @@ static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int *
vif->bss_conf.dtim_period));
- ctxt_ap->mcast_qid = cpu_to_le32(vif->cab_queue);
+ if (!fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_STA_TYPE))
+ ctxt_ap->mcast_qid = cpu_to_le32(vif->cab_queue);
/*
* Only set the beacon time when the MAC is being added, when we
@@ -1442,6 +1450,7 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
struct iwl_mvm_tx_resp *beacon_notify_hdr;
struct ieee80211_vif *csa_vif;
struct ieee80211_vif *tx_blocked_vif;
+ struct agg_tx_status *agg_status;
u16 status;
lockdep_assert_held(&mvm->mutex);
@@ -1449,7 +1458,8 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
beacon_notify_hdr = &beacon->beacon_notify_hdr;
mvm->ap_last_beacon_gp2 = le32_to_cpu(beacon->gp2);
- status = le16_to_cpu(beacon_notify_hdr->status.status) & TX_STATUS_MSK;
+ agg_status = iwl_mvm_get_agg_status(mvm, beacon_notify_hdr);
+ status = le16_to_cpu(agg_status->status) & TX_STATUS_MSK;
IWL_DEBUG_RX(mvm,
"beacon status %#x retries:%d tsf:0x%16llX gp2:0x%X rate:%d\n",
status, beacon_notify_hdr->failure_frame,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 486dcceed17a..a67aa1f5a51c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -6,8 +6,8 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -33,7 +33,8 @@
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -619,7 +620,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
else
hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
- hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
+ hw->wiphy->max_sched_scan_reqs = 1;
hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
/* we create the 802.11 header and zero length SSID IE. */
@@ -766,7 +767,7 @@ static bool iwl_mvm_defer_tx(struct iwl_mvm *mvm,
goto out;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
- if (mvmsta->sta_id == IWL_MVM_STATION_COUNT ||
+ if (mvmsta->sta_id == IWL_MVM_INVALID_STA ||
mvmsta->sta_id != mvm->d0i3_ap_sta_id)
goto out;
@@ -1010,7 +1011,7 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
mvmvif->uploaded = false;
- mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
+ mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
spin_lock_bh(&mvm->time_event_lock);
iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data);
@@ -1053,7 +1054,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm);
mvm->p2p_device_vif = NULL;
- mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
+ mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
iwl_mvm_reset_phy_ctxts(mvm);
memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
@@ -1351,6 +1352,18 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
goto out_release;
}
+ if (iwl_mvm_is_dqa_supported(mvm)) {
+ /*
+		 * The only queue for this station is the mcast queue,
+		 * which shouldn't be in the TFD mask anyway
+ */
+ ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->mcast_sta,
+ 0, vif->type,
+ IWL_STA_MULTICAST);
+ if (ret)
+ goto out_release;
+ }
+
iwl_mvm_vif_dbgfs_register(mvm, vif);
goto out_unlock;
}
@@ -1465,7 +1478,7 @@ static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
* already marked as draining, so to complete the draining, we
* just need to wait until the transport is empty.
*/
- iwl_trans_wait_tx_queue_empty(mvm->trans, tfd_msk);
+ iwl_trans_wait_tx_queues_empty(mvm->trans, tfd_msk);
}
if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
@@ -1516,6 +1529,7 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
mvm->noa_duration = 0;
}
#endif
+ iwl_mvm_dealloc_int_sta(mvm, &mvmvif->mcast_sta);
iwl_mvm_dealloc_bcast_sta(mvm, vif);
goto out_release;
}
@@ -1952,7 +1966,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
IWL_MVM_SMPS_REQ_PROT,
IEEE80211_SMPS_DYNAMIC);
}
- } else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
+ } else if (mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
/*
* If update fails - SF might be running in associated
* mode while disassociated - which is forbidden.
@@ -1966,8 +1980,8 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
IWL_ERR(mvm, "failed to remove AP station\n");
if (mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
- mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
- mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
+ mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
+ mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
/* remove quota for this interface */
ret = iwl_mvm_update_quotas(mvm, false, NULL);
if (ret)
@@ -2098,11 +2112,15 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
if (ret)
goto out_remove;
+ ret = iwl_mvm_add_mcast_sta(mvm, vif);
+ if (ret)
+ goto out_unbind;
+
/* Send the bcast station. At this stage the TBTT and DTIM time events
* are added and applied to the scheduler */
ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
if (ret)
- goto out_unbind;
+ goto out_rm_mcast;
/* must be set before quota calculations */
mvmvif->ap_ibss_active = true;
@@ -2132,6 +2150,8 @@ out_quota_failed:
iwl_mvm_power_update_mac(mvm);
mvmvif->ap_ibss_active = false;
iwl_mvm_send_rm_bcast_sta(mvm, vif);
+out_rm_mcast:
+ iwl_mvm_rm_mcast_sta(mvm, vif);
out_unbind:
iwl_mvm_binding_remove_vif(mvm, vif);
out_remove:
@@ -2177,7 +2197,20 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL);
iwl_mvm_update_quotas(mvm, false, NULL);
+
+ /*
+	 * This is not very nice, but it is the simplest approach:
+	 * for older FWs, removing the mcast sta before the bcast station
+	 * may cause assert 0x2b00. This is fixed in later FW versions,
+	 * which stop beaconing when the bcast station is removed.
+	 * So make the removal order depend on the TLV.
+ */
+ if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
+ iwl_mvm_rm_mcast_sta(mvm, vif);
iwl_mvm_send_rm_bcast_sta(mvm, vif);
+ if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
+ iwl_mvm_rm_mcast_sta(mvm, vif);
iwl_mvm_binding_remove_vif(mvm, vif);
iwl_mvm_power_update_mac(mvm);
@@ -2343,6 +2376,9 @@ static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA)
continue;
+ if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE)
+ continue;
+
__set_bit(tid_data->txq_id, &txqs);
if (iwl_mvm_tid_queued(tid_data) == 0)
@@ -2368,7 +2404,7 @@ static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
*/
break;
case STA_NOTIFY_AWAKE:
- if (WARN_ON(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
+ if (WARN_ON(mvmsta->sta_id == IWL_MVM_INVALID_STA))
break;
if (txqs)
@@ -3711,7 +3747,8 @@ static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
int err;
u32 noa_duration;
- err = nla_parse(tb, IWL_MVM_TM_ATTR_MAX, data, len, iwl_mvm_tm_policy);
+ err = nla_parse(tb, IWL_MVM_TM_ATTR_MAX, data, len, iwl_mvm_tm_policy,
+ NULL);
if (err)
return err;
@@ -3938,7 +3975,7 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
mvmvif = iwl_mvm_vif_from_mac80211(vif);
/* flush the AP-station and all TDLS peers */
- for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
+ for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
lockdep_is_held(&mvm->mutex));
if (IS_ERR_OR_NULL(sta))
@@ -3964,7 +4001,7 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
/* this can take a while, and we may need/want other operations
* to succeed while doing this, so do it without the mutex held
*/
- iwl_trans_wait_tx_queue_empty(mvm->trans, msk);
+ iwl_trans_wait_tx_queues_empty(mvm->trans, msk);
}
}
@@ -4195,7 +4232,8 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
- if (!iwl_mvm_has_new_rx_api(mvm))
+ /* TODO - remove a000 disablement when we have RXQ config API */
+ if (!iwl_mvm_has_new_rx_api(mvm) || iwl_mvm_has_new_tx_api(mvm))
return;
notif->cookie = mvm->queue_sync_cookie;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 73a216524af2..4e74a6b90e70 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -380,6 +380,8 @@ struct iwl_mvm_vif {
bool associated;
u8 ap_assoc_sta_count;
+ u16 cab_queue;
+
bool uploaded;
bool ap_ibss_active;
bool pm_enabled;
@@ -407,6 +409,7 @@ struct iwl_mvm_vif {
struct iwl_mvm_time_event_data hs_time_event_data;
struct iwl_mvm_int_sta bcast_sta;
+ struct iwl_mvm_int_sta mcast_sta;
/*
* Assigned while mac80211 has the interface in a channel context,
@@ -603,10 +606,15 @@ enum iwl_mvm_tdls_cs_state {
IWL_MVM_TDLS_SW_ACTIVE,
};
+#define MAX_NUM_LMAC 2
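+
+/*
+ * On CDB hardware the TX FIFO and RXF1 sizes are tracked per LMAC;
+ * RXF2 remains a single shared FIFO, kept outside the lmac[] array.
+ */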
struct iwl_mvm_shared_mem_cfg {
+ int num_lmacs;
int num_txfifo_entries;
- u32 txfifo_size[TX_FIFO_MAX_NUM];
- u32 rxfifo_size[RX_FIFO_MAX_NUM];
+ struct {
+ u32 txfifo_size[TX_FIFO_MAX_NUM];
+ u32 rxfifo1_size;
+ } lmac[MAX_NUM_LMAC];
+ u32 rxfifo2_size;
u32 internal_txfifo_addr;
u32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
};
@@ -625,6 +633,7 @@ struct iwl_mvm_shared_mem_cfg {
* @reorder_timer: timer for frames are in the reorder buffer. For AMSDU
* it is the time of last received sub-frame
* @removed: prevent timer re-arming
+ * @valid: reordering is valid for this queue
* @lock: protect reorder buffer internal state
* @mvm: mvm pointer, needed for frame timer context
*/
@@ -640,6 +649,7 @@ struct iwl_mvm_reorder_buffer {
unsigned long reorder_time[IEEE80211_MAX_AMPDU_BUF];
struct timer_list reorder_timer;
bool removed;
+ bool valid;
spinlock_t lock;
struct iwl_mvm *mvm;
} ____cacheline_aligned_in_smp;
@@ -707,8 +717,25 @@ enum iwl_mvm_queue_status {
};
#define IWL_MVM_DQA_QUEUE_TIMEOUT (5 * HZ)
+#define IWL_MVM_INVALID_QUEUE 0xFFFF
+
#define IWL_MVM_NUM_CIPHERS 10
+#ifdef CONFIG_ACPI
+#define IWL_MVM_SAR_TABLE_SIZE 10
+#define IWL_MVM_SAR_PROFILE_NUM 4
+#define IWL_MVM_GEO_TABLE_SIZE 18
+
+struct iwl_mvm_sar_profile {
+ bool enabled;
+ u8 table[IWL_MVM_SAR_TABLE_SIZE];
+};
+
+struct iwl_mvm_geo_table {
+ u8 values[IWL_MVM_GEO_TABLE_SIZE];
+};
+#endif
+
struct iwl_mvm {
/* for logger access */
struct device *dev;
@@ -761,9 +788,9 @@ struct iwl_mvm {
u64 on_time_scan;
} radio_stats, accu_radio_stats;
+ u8 hw_queue_to_mac80211[IWL_MAX_TVQM_QUEUES];
+
struct {
- /* Map to HW queue */
- u32 hw_queue_to_mac80211;
u8 hw_queue_refcount;
u8 ra_sta_id; /* The RA this queue is mapped to, if exists */
bool reserved; /* Is this the TXQ reserved for a STA */
@@ -975,7 +1002,10 @@ struct iwl_mvm {
#endif
/* Tx queues */
- u8 aux_queue;
+ u16 aux_queue;
+ u16 probe_queue;
+ u16 p2p_dev_queue;
+
u8 first_agg_queue;
u8 last_agg_queue;
@@ -1018,7 +1048,7 @@ struct iwl_mvm {
} peer;
} tdls_cs;
- struct iwl_mvm_shared_mem_cfg shared_mem_cfg;
+ struct iwl_mvm_shared_mem_cfg smem_cfg;
u32 ciphers[IWL_MVM_NUM_CIPHERS];
struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS];
@@ -1035,6 +1065,9 @@ struct iwl_mvm {
bool drop_bcn_ap_mode;
struct delayed_work cs_tx_unblock_dwork;
+#ifdef CONFIG_ACPI
+ struct iwl_mvm_sar_profile sar_profiles[IWL_MVM_SAR_PROFILE_NUM];
+#endif
};
/* Extract MVM priv from op_mode and _hw */
@@ -1222,13 +1255,25 @@ static inline bool iwl_mvm_is_cdb_supported(struct iwl_mvm *mvm)
{
/*
* TODO:
- * The issue of how to determine CDB support is still not well defined.
- * It may be that it will be for all next HW devices and it may be per
- * FW compilation and it may also differ between different devices.
- * For now take a ride on the new TX API and get back to it when
- * it is well defined.
+	 * How to determine the CDB APIs and their usage is still not fully
+	 * defined.
+	 * There are separate FW compilations for CDB and non-CDB hardware,
+	 * but there may also be a runtime check.
+	 * For now there is a TLV for checking the compilation mode, but a
+	 * runtime check will also have to be added here - once defined.
*/
- return iwl_mvm_has_new_tx_api(mvm);
+ return fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CDB_SUPPORT);
+}
+
+static inline struct agg_tx_status*
+iwl_mvm_get_agg_status(struct iwl_mvm *mvm,
+ struct iwl_mvm_tx_resp *tx_resp)
+{
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return &tx_resp->v6.status;
+ else
+ return &tx_resp->v3.status;
}
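+
+/*
+ * The TX response layout differs between the old (v3) and the new (v6)
+ * TX API, so the aggregation status must be fetched via the helper
+ * above rather than read at a fixed offset.
+ */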
static inline bool iwl_mvm_is_tt_in_fw(struct iwl_mvm *mvm)
@@ -1271,7 +1316,6 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm);
******************/
/* uCode */
int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm);
-int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm);
/* Utils */
int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
@@ -1389,6 +1433,8 @@ int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
int queue);
void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
@@ -1668,6 +1714,9 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
unsigned int wdg_timeout);
+int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
+ u8 sta_id, u8 tid, unsigned int timeout);
+
/*
* Disable a TXQ.
* Note that in non-DQA mode the %mac80211_queue and %tid params are ignored.
@@ -1701,7 +1750,8 @@ void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
{
- iwl_free_fw_paging(mvm);
+ if (!iwl_mvm_has_new_tx_api(mvm))
+ iwl_free_fw_paging(mvm);
mvm->ucode_loaded = false;
iwl_trans_stop_device(mvm->trans);
}
@@ -1781,6 +1831,7 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
u32 size);
void iwl_mvm_reorder_timer_expired(unsigned long data);
struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm);
+bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm);
void iwl_mvm_inactivity_check(struct iwl_mvm *mvm);
@@ -1797,4 +1848,14 @@ int iwl_mvm_send_lqm_cmd(struct ieee80211_vif *vif,
u32 duration, u32 timeout);
bool iwl_mvm_lqm_active(struct iwl_mvm *mvm);
+#ifdef CONFIG_ACPI
+int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b);
+#else
+static inline
+int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
+{
+ return -ENOENT;
+}
+#endif /* CONFIG_ACPI */
+
#endif /* __IWL_MVM_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
index eade099b6dbf..283c41df622c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
@@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -817,6 +817,11 @@ void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
+ if (iwl_mvm_is_vif_assoc(mvm) && notif->source_id == MCC_SOURCE_WIFI) {
+ IWL_DEBUG_LAR(mvm, "Ignore mcc update while associated\n");
+ return;
+ }
+
if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm)))
return;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 4cd72d4cdc47..9ffff6ed8133 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -302,6 +302,8 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER_SYNC),
RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler,
RX_HANDLER_ASYNC_LOCKED),
+ RX_HANDLER_GRP(DEBUG_GROUP, MFU_ASSERT_DUMP_NTF,
+ iwl_mvm_mfu_assert_dump_notif, RX_HANDLER_SYNC),
RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF,
iwl_mvm_rx_stored_beacon_notif, RX_HANDLER_SYNC),
RX_HANDLER_GRP(DATA_PATH_GROUP, MU_GROUP_MGMT_NOTIF,
@@ -426,6 +428,7 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
*/
static const struct iwl_hcmd_names iwl_mvm_system_names[] = {
HCMD_NAME(SHARED_MEM_CFG_CMD),
+ HCMD_NAME(INIT_EXTENDED_CFG_CMD),
};
/* Please keep this array *SORTED* by hex value.
@@ -444,6 +447,7 @@ static const struct iwl_hcmd_names iwl_mvm_phy_names[] = {
HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE),
HCMD_NAME(CTDP_CONFIG_CMD),
HCMD_NAME(TEMP_REPORTING_THRESHOLDS_CMD),
+ HCMD_NAME(GEO_TX_POWER_LIMIT),
HCMD_NAME(CT_KILL_NOTIFICATION),
HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE),
};
@@ -452,6 +456,7 @@ static const struct iwl_hcmd_names iwl_mvm_phy_names[] = {
* Access is done through binary search
*/
static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
+ HCMD_NAME(DQA_ENABLE_CMD),
HCMD_NAME(UPDATE_MU_GROUPS_CMD),
HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD),
HCMD_NAME(STA_PM_NOTIF),
@@ -462,6 +467,13 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
/* Please keep this array *SORTED* by hex value.
* Access is done through binary search
*/
+static const struct iwl_hcmd_names iwl_mvm_debug_names[] = {
+ HCMD_NAME(MFU_ASSERT_DUMP_NTF),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = {
HCMD_NAME(STORED_BEACON_NTF),
};
@@ -602,6 +614,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
}
} else {
mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE;
+ mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
+ mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
mvm->first_agg_queue = IWL_MVM_DQA_MIN_DATA_QUEUE;
mvm->last_agg_queue = IWL_MVM_DQA_MAX_DATA_QUEUE;
}
@@ -732,10 +746,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mutex_lock(&mvm->mutex);
iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
- if (iwl_mvm_has_new_tx_api(mvm))
- err = iwl_run_unified_mvm_ucode(mvm, true);
- else
- err = iwl_run_init_mvm_ucode(mvm, true);
+ err = iwl_run_init_mvm_ucode(mvm, true);
if (!err || !iwlmvm_mod_params.init_dbg)
iwl_mvm_stop_device(mvm);
iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE);
@@ -1033,7 +1044,7 @@ static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
unsigned long mq;
spin_lock_bh(&mvm->queue_info_lock);
- mq = mvm->queue_info[hw_queue].hw_queue_to_mac80211;
+ mq = mvm->hw_queue_to_mac80211[hw_queue];
spin_unlock_bh(&mvm->queue_info_lock);
iwl_mvm_stop_mac_queues(mvm, mq);
@@ -1063,7 +1074,7 @@ static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
unsigned long mq;
spin_lock_bh(&mvm->queue_info_lock);
- mq = mvm->queue_info[hw_queue].hw_queue_to_mac80211;
+ mq = mvm->hw_queue_to_mac80211[hw_queue];
spin_unlock_bh(&mvm->queue_info_lock);
iwl_mvm_start_mac_queues(mvm, mq);
@@ -1256,7 +1267,7 @@ static bool iwl_mvm_disallow_offloading(struct iwl_mvm *mvm,
u8 tid;
if (WARN_ON(vif->type != NL80211_IFTYPE_STATION ||
- mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT))
+ mvmvif->ap_sta_id == IWL_MVM_INVALID_STA))
return false;
mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
@@ -1344,7 +1355,7 @@ static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm,
struct ieee80211_sta *ap_sta;
struct iwl_mvm_sta *mvm_ap_sta;
- if (iter_data->ap_sta_id == IWL_MVM_STATION_COUNT)
+ if (iter_data->ap_sta_id == IWL_MVM_INVALID_STA)
return;
rcu_read_lock();
@@ -1414,7 +1425,7 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
mvm->d0i3_offloading = !d0i3_iter_data.disable_offloading;
} else {
WARN_ON_ONCE(d0i3_iter_data.vif_count > 1);
- mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
+ mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
mvm->d0i3_offloading = false;
}
@@ -1427,7 +1438,7 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
return ret;
/* configure wowlan configuration only if needed */
- if (mvm->d0i3_ap_sta_id != IWL_MVM_STATION_COUNT) {
+ if (mvm->d0i3_ap_sta_id != IWL_MVM_INVALID_STA) {
/* wake on beacons only if beacon storing isn't supported */
if (!fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_BEACON_STORING))
@@ -1504,7 +1515,7 @@ void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq)
spin_lock_bh(&mvm->d0i3_tx_lock);
- if (mvm->d0i3_ap_sta_id == IWL_MVM_STATION_COUNT)
+ if (mvm->d0i3_ap_sta_id == IWL_MVM_INVALID_STA)
goto out;
IWL_DEBUG_RPM(mvm, "re-enqueue packets\n");
@@ -1542,7 +1553,7 @@ out:
}
clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
wake_up(&mvm->d0i3_exit_waitq);
- mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
+ mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
if (wake_queues)
ieee80211_wake_queues(mvm->hw);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
index 95138830b9f8..d59efe804356 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
@@ -7,6 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -250,12 +251,30 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
struct cfg80211_chan_def *chandef,
u8 chains_static, u8 chains_dynamic)
{
+ enum iwl_phy_ctxt_action action = FW_CTXT_ACTION_MODIFY;
+
lockdep_assert_held(&mvm->mutex);
+ /* In CDB mode we cannot modify PHY context between bands so... */
+ if (iwl_mvm_has_new_tx_api(mvm) &&
+ ctxt->channel->band != chandef->chan->band) {
+ int ret;
+
+		/* ... remove it here ... */
+ ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
+ chains_static, chains_dynamic,
+ FW_CTXT_ACTION_REMOVE, 0);
+ if (ret)
+ return ret;
+
+ /* ... and proceed to add it again */
+ action = FW_CTXT_ACTION_ADD;
+ }
+
ctxt->channel = chandef->chan;
return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
chains_static, chains_dynamic,
- FW_CTXT_ACTION_MODIFY, 0);
+ action, 0);
}
void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index ce907c58ebf6..7788eefcd2bd 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -826,7 +826,7 @@ static u32 ucode_rate_from_rs_rate(struct iwl_mvm *mvm,
if (is_siso(rate) && rate->stbc) {
/* To enable STBC we need to set both a flag and ANT_AB */
ucode_rate |= RATE_MCS_ANT_AB_MSK;
- ucode_rate |= RATE_MCS_VHT_STBC_MSK;
+ ucode_rate |= RATE_MCS_STBC_MSK;
}
ucode_rate |= rate->bw;
@@ -873,7 +873,7 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
rate->sgi = true;
if (ucode_rate & RATE_MCS_LDPC_MSK)
rate->ldpc = true;
- if (ucode_rate & RATE_MCS_VHT_STBC_MSK)
+ if (ucode_rate & RATE_MCS_STBC_MSK)
rate->stbc = true;
if (ucode_rate & RATE_MCS_BF_MSK)
rate->bfer = true;
@@ -3641,13 +3641,12 @@ int rs_pretty_print_rate(char *buf, const u32 rate)
bw = "BAD BW";
}
- return sprintf(buf, "%s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s%s\n",
+ return sprintf(buf, "%s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s\n",
type, rs_pretty_ant(ant), bw, mcs, nss,
(rate & RATE_MCS_SGI_MSK) ? "SGI " : "NGI ",
- (rate & RATE_MCS_HT_STBC_MSK) ? "STBC " : "",
+ (rate & RATE_MCS_STBC_MSK) ? "STBC " : "",
(rate & RATE_MCS_LDPC_MSK) ? "LDPC " : "",
- (rate & RATE_MCS_BF_MSK) ? "BF " : "",
- (rate & RATE_MCS_ZLF_MSK) ? "ZLF " : "");
+ (rate & RATE_MCS_BF_MSK) ? "BF " : "");
}
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index 20473df79c94..fd1dd06c4f18 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -104,7 +104,20 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
u8 crypt_len,
struct iwl_rx_cmd_buffer *rxb)
{
- unsigned int hdrlen, fraglen;
+ unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ unsigned int fraglen;
+
+ /*
+	 * The 'hdrlen' bytes (plus the 8 bytes for the SNAP header and
+	 * the crypt_len, though those are all multiples of 4) are
+	 * stripped during the 802.3 conversion, and we want the *end* of
+	 * the header - which becomes the start of the IP header - to be
+	 * aligned when it gets pulled in.
+ * The beginning of the skb->data is aligned on at least a 4-byte
+ * boundary after allocation. Everything here is aligned at least
+ * on a 2-byte boundary so we can just take hdrlen & 3 and pad by
+ * the result.
+ */
+ skb_reserve(skb, hdrlen & 3);
/* If frame is small enough to fit in skb->head, pull it completely.
* If not, only pull ieee80211_hdr (including crypto if present, and
@@ -118,8 +131,7 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
* If the latter changes (there are efforts in the standards group
* to do so) we should revisit this and ieee80211_data_to_8023().
*/
- hdrlen = (len <= skb_tailroom(skb)) ? len :
- sizeof(*hdr) + crypt_len + 8;
+ hdrlen = (len <= skb_tailroom(skb)) ? len : hdrlen + crypt_len + 8;
memcpy(skb_put(skb, hdrlen), hdr, hdrlen);
fraglen = len - hdrlen;
@@ -339,7 +351,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
id >>= RX_MDPU_RES_STATUS_STA_ID_SHIFT;
- if (!WARN_ON_ONCE(id >= IWL_MVM_STATION_COUNT)) {
+ if (!WARN_ON_ONCE(id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) {
sta = rcu_dereference(mvm->fw_id_to_mac_id[id]);
if (IS_ERR(sta))
sta = NULL;
@@ -398,7 +410,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
/* set the preamble flag if appropriate */
if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_SHORT_PREAMBLE))
- rx_status->flag |= RX_FLAG_SHORTPRE;
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_AGG)) {
/*
@@ -415,42 +427,49 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
case RATE_MCS_CHAN_WIDTH_20:
break;
case RATE_MCS_CHAN_WIDTH_40:
- rx_status->flag |= RX_FLAG_40MHZ;
+ rx_status->bw = RATE_INFO_BW_40;
break;
case RATE_MCS_CHAN_WIDTH_80:
- rx_status->vht_flag |= RX_VHT_FLAG_80MHZ;
+ rx_status->bw = RATE_INFO_BW_80;
break;
case RATE_MCS_CHAN_WIDTH_160:
- rx_status->vht_flag |= RX_VHT_FLAG_160MHZ;
+ rx_status->bw = RATE_INFO_BW_160;
break;
}
if (rate_n_flags & RATE_MCS_SGI_MSK)
- rx_status->flag |= RX_FLAG_SHORT_GI;
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
if (rate_n_flags & RATE_HT_MCS_GF_MSK)
- rx_status->flag |= RX_FLAG_HT_GF;
+ rx_status->enc_flags |= RX_ENC_FLAG_HT_GF;
if (rate_n_flags & RATE_MCS_LDPC_MSK)
- rx_status->flag |= RX_FLAG_LDPC;
+ rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
if (rate_n_flags & RATE_MCS_HT_MSK) {
- u8 stbc = (rate_n_flags & RATE_MCS_HT_STBC_MSK) >>
+ u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
RATE_MCS_STBC_POS;
- rx_status->flag |= RX_FLAG_HT;
+ rx_status->encoding = RX_ENC_HT;
rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
- rx_status->flag |= stbc << RX_FLAG_STBC_SHIFT;
+ rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
} else if (rate_n_flags & RATE_MCS_VHT_MSK) {
- u8 stbc = (rate_n_flags & RATE_MCS_VHT_STBC_MSK) >>
+ u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
RATE_MCS_STBC_POS;
- rx_status->vht_nss =
+ rx_status->nss =
((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
RATE_VHT_MCS_NSS_POS) + 1;
rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
- rx_status->flag |= RX_FLAG_VHT;
- rx_status->flag |= stbc << RX_FLAG_STBC_SHIFT;
+ rx_status->encoding = RX_ENC_VHT;
+ rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
if (rate_n_flags & RATE_MCS_BF_MSK)
- rx_status->vht_flag |= RX_VHT_FLAG_BF;
+ rx_status->enc_flags |= RX_ENC_FLAG_BF;
} else {
- rx_status->rate_idx =
- iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
- rx_status->band);
+ int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
+ rx_status->band);
+
+ if (WARN(rate < 0 || rate > 0xFF,
+ "Invalid rate flags 0x%x, band %d,\n",
+ rate_n_flags, rx_status->band)) {
+ kfree_skb(skb);
+ return;
+ }
+ rx_status->rate_idx = rate;
}
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -637,6 +656,9 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
.mvm = mvm,
};
int expected_size;
+ int i;
+ u8 *energy;
+ __le32 *bytes, *air_time;
if (iwl_mvm_is_cdb_supported(mvm))
expected_size = sizeof(*stats);
@@ -645,8 +667,11 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
else
expected_size = sizeof(struct iwl_notif_statistics_v10);
- if (iwl_rx_packet_payload_len(pkt) != expected_size)
- goto invalid;
+ if (iwl_rx_packet_payload_len(pkt) != expected_size) {
+ IWL_ERR(mvm, "received invalid statistics size (%d)!\n",
+ iwl_rx_packet_payload_len(pkt));
+ return;
+ }
data.mac_id = stats->rx.general.mac_id;
data.beacon_filter_average_energy =
@@ -662,38 +687,6 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
le64_to_cpu(stats->general.common.on_time_scan);
data.general = &stats->general;
- if (iwl_mvm_has_new_rx_api(mvm)) {
- int i;
- u8 *energy;
- __le32 *bytes, *air_time;
-
- if (!iwl_mvm_is_cdb_supported(mvm)) {
- struct iwl_notif_statistics_v11 *v11 =
- (void *)&pkt->data;
-
- energy = (void *)&v11->load_stats.avg_energy;
- bytes = (void *)&v11->load_stats.byte_count;
- air_time = (void *)&v11->load_stats.air_time;
- } else {
- energy = (void *)&stats->load_stats.avg_energy;
- bytes = (void *)&stats->load_stats.byte_count;
- air_time = (void *)&stats->load_stats.air_time;
- }
-
- rcu_read_lock();
- for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
- struct iwl_mvm_sta *sta;
-
- if (!energy[i])
- continue;
-
- sta = iwl_mvm_sta_from_staid_rcu(mvm, i);
- if (!sta)
- continue;
- sta->avg_energy = energy[i];
- }
- rcu_read_unlock();
- }
iwl_mvm_rx_stats_check_trigger(mvm, pkt);
@@ -701,10 +694,36 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_stat_iterator,
&data);
- return;
- invalid:
- IWL_ERR(mvm, "received invalid statistics size (%d)!\n",
- iwl_rx_packet_payload_len(pkt));
+
+ if (!iwl_mvm_has_new_rx_api(mvm))
+ return;
+
+ if (!iwl_mvm_is_cdb_supported(mvm)) {
+ struct iwl_notif_statistics_v11 *v11 =
+ (void *)&pkt->data;
+
+ energy = (void *)&v11->load_stats.avg_energy;
+ bytes = (void *)&v11->load_stats.byte_count;
+ air_time = (void *)&v11->load_stats.air_time;
+ } else {
+ energy = (void *)&stats->load_stats.avg_energy;
+ bytes = (void *)&stats->load_stats.byte_count;
+ air_time = (void *)&stats->load_stats.air_time;
+ }
+
+ rcu_read_lock();
+ for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+ struct iwl_mvm_sta *sta;
+
+ if (!energy[i])
+ continue;
+
+ sta = iwl_mvm_sta_from_staid_rcu(mvm, i);
+ if (!sta)
+ continue;
+ sta->avg_energy = energy[i];
+ }
+ rcu_read_unlock();
}
void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index d79e9c2a2654..966cd7543629 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -29,7 +29,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -462,6 +462,7 @@ void iwl_mvm_reorder_timer_expired(unsigned long data)
int i;
u16 sn = 0, index = 0;
bool expired = false;
+ bool cont = false;
spin_lock(&buf->lock);
@@ -473,12 +474,21 @@ void iwl_mvm_reorder_timer_expired(unsigned long data)
for (i = 0; i < buf->buf_size ; i++) {
index = (buf->head_sn + i) % buf->buf_size;
- if (skb_queue_empty(&buf->entries[index]))
+ if (skb_queue_empty(&buf->entries[index])) {
+ /*
+			 * If there is a hole and the next frame didn't
+			 * expire, we want to break and not advance SN.
+ */
+ cont = false;
continue;
- if (!time_after(jiffies, buf->reorder_time[index] +
- RX_REORDER_BUF_TIMEOUT_MQ))
+ }
+ if (!cont && !time_after(jiffies, buf->reorder_time[index] +
+ RX_REORDER_BUF_TIMEOUT_MQ))
break;
+
expired = true;
+		/* continue until the next hole after these expired frames */
+ cont = true;
sn = ieee80211_sn_add(buf->head_sn, i + 1);
}
@@ -626,9 +636,13 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
return false;
baid_data = rcu_dereference(mvm->baid_map[baid]);
- if (WARN(!baid_data,
- "Received baid %d, but no data exists for this BAID\n", baid))
+ if (!baid_data) {
+ WARN(!(reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN),
+ "Received baid %d, but no data exists for this BAID\n",
+ baid);
return false;
+ }
+
if (WARN(tid != baid_data->tid || mvm_sta->sta_id != baid_data->sta_id,
"baid 0x%x is mapped to sta:%d tid:%d, but was received for sta:%d tid:%d\n",
baid, baid_data->sta_id, baid_data->tid, mvm_sta->sta_id,
@@ -643,6 +657,14 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
spin_lock_bh(&buffer->lock);
+ if (!buffer->valid) {
+ if (reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN) {
+ spin_unlock_bh(&buffer->lock);
+ return false;
+ }
+ buffer->valid = true;
+ }
+
if (ieee80211_is_back_req(hdr->frame_control)) {
iwl_mvm_release_frames(mvm, sta, napi, buffer, nssn);
goto drop;
@@ -727,7 +749,8 @@ drop:
return true;
}
-static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm, u8 baid)
+static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm,
+ u32 reorder_data, u8 baid)
{
unsigned long now = jiffies;
unsigned long timeout;
@@ -736,8 +759,10 @@ static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm, u8 baid)
rcu_read_lock();
data = rcu_dereference(mvm->baid_map[baid]);
- if (WARN_ON(!data))
+ if (!data) {
+ WARN_ON(!(reorder_data & IWL_RX_MPDU_REORDER_BA_OLD_SN));
goto out;
+ }
if (!data->timeout)
goto out;
@@ -799,7 +824,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
}
/* set the preamble flag if appropriate */
if (phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE)
- rx_status->flag |= RX_FLAG_SHORTPRE;
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
if (likely(!(phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) {
rx_status->mactime = le64_to_cpu(desc->tsf_on_air_rise);
@@ -831,7 +856,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
if (le16_to_cpu(desc->status) & IWL_RX_MPDU_STATUS_SRC_STA_FOUND) {
u8 id = desc->sta_id_flags & IWL_RX_MPDU_SIF_STA_ID_MASK;
- if (!WARN_ON_ONCE(id >= IWL_MVM_STATION_COUNT)) {
+ if (!WARN_ON_ONCE(id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) {
sta = rcu_dereference(mvm->fw_id_to_mac_id[id]);
if (IS_ERR(sta))
sta = NULL;
@@ -893,26 +918,39 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
if (iwl_mvm_is_nonagg_dup(sta, queue, rx_status, hdr, desc)) {
kfree_skb(skb);
- rcu_read_unlock();
- return;
+ goto out;
}
/*
* Our hardware de-aggregates AMSDUs but copies the mac header
	 * as-is to the de-aggregated MPDUs. We need to turn off the
* AMSDU bit in the QoS control ourselves.
+	 * In addition, HW reverses addr3 and addr4 - reverse them back.
*/
if ((desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU) &&
!WARN_ON(!ieee80211_is_data_qos(hdr->frame_control))) {
+ int i;
u8 *qc = ieee80211_get_qos_ctl(hdr);
+ u8 mac_addr[ETH_ALEN];
*qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
- if (!(desc->amsdu_info &
- IWL_RX_MPDU_AMSDU_LAST_SUBFRAME))
- rx_status->flag |= RX_FLAG_AMSDU_MORE;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ mac_addr[i] = hdr->addr3[ETH_ALEN - i - 1];
+ ether_addr_copy(hdr->addr3, mac_addr);
+
+ if (ieee80211_has_a4(hdr->frame_control)) {
+ for (i = 0; i < ETH_ALEN; i++)
+ mac_addr[i] =
+ hdr->addr4[ETH_ALEN - i - 1];
+ ether_addr_copy(hdr->addr4, mac_addr);
+ }
+ }
+ if (baid != IWL_RX_REORDER_DATA_INVALID_BAID) {
+ u32 reorder_data = le32_to_cpu(desc->reorder_data);
+
+ iwl_mvm_agg_rx_received(mvm, reorder_data, baid);
}
- if (baid != IWL_RX_REORDER_DATA_INVALID_BAID)
- iwl_mvm_agg_rx_received(mvm, baid);
}
/* Set up the HT phy flags */
@@ -920,42 +958,50 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
case RATE_MCS_CHAN_WIDTH_20:
break;
case RATE_MCS_CHAN_WIDTH_40:
- rx_status->flag |= RX_FLAG_40MHZ;
+ rx_status->bw = RATE_INFO_BW_40;
break;
case RATE_MCS_CHAN_WIDTH_80:
- rx_status->vht_flag |= RX_VHT_FLAG_80MHZ;
+ rx_status->bw = RATE_INFO_BW_80;
break;
case RATE_MCS_CHAN_WIDTH_160:
- rx_status->vht_flag |= RX_VHT_FLAG_160MHZ;
+ rx_status->bw = RATE_INFO_BW_160;
break;
}
if (rate_n_flags & RATE_MCS_SGI_MSK)
- rx_status->flag |= RX_FLAG_SHORT_GI;
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
if (rate_n_flags & RATE_HT_MCS_GF_MSK)
- rx_status->flag |= RX_FLAG_HT_GF;
+ rx_status->enc_flags |= RX_ENC_FLAG_HT_GF;
if (rate_n_flags & RATE_MCS_LDPC_MSK)
- rx_status->flag |= RX_FLAG_LDPC;
+ rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
if (rate_n_flags & RATE_MCS_HT_MSK) {
- u8 stbc = (rate_n_flags & RATE_MCS_HT_STBC_MSK) >>
+ u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
RATE_MCS_STBC_POS;
- rx_status->flag |= RX_FLAG_HT;
+ rx_status->encoding = RX_ENC_HT;
rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
- rx_status->flag |= stbc << RX_FLAG_STBC_SHIFT;
+ rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
} else if (rate_n_flags & RATE_MCS_VHT_MSK) {
- u8 stbc = (rate_n_flags & RATE_MCS_VHT_STBC_MSK) >>
+ u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
RATE_MCS_STBC_POS;
- rx_status->vht_nss =
+ rx_status->nss =
((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
RATE_VHT_MCS_NSS_POS) + 1;
rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
- rx_status->flag |= RX_FLAG_VHT;
- rx_status->flag |= stbc << RX_FLAG_STBC_SHIFT;
+ rx_status->encoding = RX_ENC_VHT;
+ rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
if (rate_n_flags & RATE_MCS_BF_MSK)
- rx_status->vht_flag |= RX_VHT_FLAG_BF;
+ rx_status->enc_flags |= RX_ENC_FLAG_BF;
} else {
- rx_status->rate_idx =
- iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
- rx_status->band);
+ int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
+ rx_status->band);
+
+ if (WARN(rate < 0 || rate > 0xFF,
+ "Invalid rate flags 0x%x, band %d,\n",
+ rate_n_flags, rx_status->band)) {
+ kfree_skb(skb);
+ goto out;
+ }
+		rx_status->rate_idx = rate;
}
/* management stuff on default queue */
@@ -974,6 +1020,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
iwl_mvm_create_skb(skb, hdr, len, crypt_len, rxb);
if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc))
iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta);
+out:
rcu_read_unlock();
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 0a64efa844b7..8d1b994ae79f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -81,44 +81,30 @@ enum iwl_mvm_traffic_load {
IWL_MVM_TRAFFIC_HIGH,
};
+#define IWL_SCAN_DWELL_ACTIVE 10
+#define IWL_SCAN_DWELL_PASSIVE 110
+#define IWL_SCAN_DWELL_FRAGMENTED 44
+#define IWL_SCAN_DWELL_EXTENDED 90
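+
+/*
+ * Every scan type used identical dwell values, so they are hoisted
+ * into the constants above; only suspend_time and max_out_time remain
+ * per scan type below.
+ */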
+
struct iwl_mvm_scan_timing_params {
- u32 dwell_active;
- u32 dwell_passive;
- u32 dwell_fragmented;
- u32 dwell_extended;
u32 suspend_time;
u32 max_out_time;
};
static struct iwl_mvm_scan_timing_params scan_timing[] = {
[IWL_SCAN_TYPE_UNASSOC] = {
- .dwell_active = 10,
- .dwell_passive = 110,
- .dwell_fragmented = 44,
- .dwell_extended = 90,
.suspend_time = 0,
.max_out_time = 0,
},
[IWL_SCAN_TYPE_WILD] = {
- .dwell_active = 10,
- .dwell_passive = 110,
- .dwell_fragmented = 44,
- .dwell_extended = 90,
.suspend_time = 30,
.max_out_time = 120,
},
[IWL_SCAN_TYPE_MILD] = {
- .dwell_active = 10,
- .dwell_passive = 110,
- .dwell_fragmented = 44,
- .dwell_extended = 90,
.suspend_time = 120,
.max_out_time = 120,
},
[IWL_SCAN_TYPE_FRAGMENTED] = {
- .dwell_active = 10,
- .dwell_passive = 110,
- .dwell_fragmented = 44,
.suspend_time = 95,
.max_out_time = 44,
},
@@ -294,34 +280,15 @@ int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm)
return max_ie_len;
}
-static u8 *iwl_mvm_dump_channel_list(struct iwl_scan_results_notif *res,
- int num_res, u8 *buf, size_t buf_size)
-{
- int i;
- u8 *pos = buf, *end = buf + buf_size;
-
- for (i = 0; pos < end && i < num_res; i++)
- pos += snprintf(pos, end - pos, " %u", res[i].channel);
-
- /* terminate the string in case the buffer was too short */
- *(buf + buf_size - 1) = '\0';
-
- return buf;
-}
-
void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_lmac_scan_complete_notif *notif = (void *)pkt->data;
- u8 buf[256];
IWL_DEBUG_SCAN(mvm,
- "Scan offload iteration complete: status=0x%x scanned channels=%d channels list: %s\n",
- notif->status, notif->scanned_channels,
- iwl_mvm_dump_channel_list(notif->results,
- notif->scanned_channels, buf,
- sizeof(buf)));
+ "Scan offload iteration complete: status=0x%x scanned channels=%d\n",
+ notif->status, notif->scanned_channels);
if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_FOUND) {
IWL_DEBUG_SCAN(mvm, "Pass all scheduled scan results found\n");
@@ -743,10 +710,10 @@ static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm,
struct iwl_scan_req_lmac *cmd,
struct iwl_mvm_scan_params *params)
{
- cmd->active_dwell = scan_timing[params->type].dwell_active;
- cmd->passive_dwell = scan_timing[params->type].dwell_passive;
- cmd->fragmented_dwell = scan_timing[params->type].dwell_fragmented;
- cmd->extended_dwell = scan_timing[params->type].dwell_extended;
+ cmd->active_dwell = IWL_SCAN_DWELL_ACTIVE;
+ cmd->passive_dwell = IWL_SCAN_DWELL_PASSIVE;
+ cmd->fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
+ cmd->extended_dwell = IWL_SCAN_DWELL_EXTENDED;
cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time);
cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time);
cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
@@ -944,13 +911,12 @@ static __le32 iwl_mvm_scan_config_rates(struct iwl_mvm *mvm)
}
static void iwl_mvm_fill_scan_dwell(struct iwl_mvm *mvm,
- struct iwl_scan_dwell *dwell,
- struct iwl_mvm_scan_timing_params *timing)
+ struct iwl_scan_dwell *dwell)
{
- dwell->active = timing->dwell_active;
- dwell->passive = timing->dwell_passive;
- dwell->fragmented = timing->dwell_fragmented;
- dwell->extended = timing->dwell_extended;
+ dwell->active = IWL_SCAN_DWELL_ACTIVE;
+ dwell->passive = IWL_SCAN_DWELL_PASSIVE;
+ dwell->fragmented = IWL_SCAN_DWELL_FRAGMENTED;
+ dwell->extended = IWL_SCAN_DWELL_EXTENDED;
}
static void iwl_mvm_fill_channels(struct iwl_mvm *mvm, u8 *channels)
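
Since the four dwell values were identical across scan types, the patch hoists them into shared constants and shrinks the timing table to the genuinely per-type fields. A compilable sketch of the reshaped data, using simplified stand-in types rather than the driver's structs:

#include <stdint.h>
#include <stdio.h>

#define IWL_SCAN_DWELL_ACTIVE		10
#define IWL_SCAN_DWELL_PASSIVE		110
#define IWL_SCAN_DWELL_FRAGMENTED	44
#define IWL_SCAN_DWELL_EXTENDED		90

/* simplified stand-in for struct iwl_scan_dwell */
struct scan_dwell {
	uint8_t active, passive, fragmented, extended;
};

/* only the genuinely per-type parameters remain in the table */
struct scan_timing_params {
	uint32_t suspend_time, max_out_time;
};

static const struct scan_timing_params scan_timing[] = {
	{ .suspend_time = 0,   .max_out_time = 0   },	/* unassoc */
	{ .suspend_time = 30,  .max_out_time = 120 },	/* wild */
	{ .suspend_time = 120, .max_out_time = 120 },	/* mild */
	{ .suspend_time = 95,  .max_out_time = 44  },	/* fragmented */
};

static void fill_scan_dwell(struct scan_dwell *dwell)
{
	dwell->active = IWL_SCAN_DWELL_ACTIVE;
	dwell->passive = IWL_SCAN_DWELL_PASSIVE;
	dwell->fragmented = IWL_SCAN_DWELL_FRAGMENTED;
	dwell->extended = IWL_SCAN_DWELL_EXTENDED;
}

int main(void)
{
	struct scan_dwell d;

	fill_scan_dwell(&d);
	printf("active=%u passive=%u suspend(wild)=%u\n",
	       (unsigned)d.active, (unsigned)d.passive,
	       (unsigned)scan_timing[1].suspend_time);
	return 0;
}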
@@ -966,11 +932,11 @@ static void iwl_mvm_fill_channels(struct iwl_mvm *mvm, u8 *channels)
channels[j] = band->channels[i].hw_value;
}
-static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
- u32 flags, u8 channel_flags)
+static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config,
+ u32 flags, u8 channel_flags)
{
enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false);
- struct iwl_scan_config *cfg = config;
+ struct iwl_scan_config_v1 *cfg = config;
cfg->flags = cpu_to_le32(flags);
cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
@@ -979,7 +945,7 @@ static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
cfg->out_of_channel_time = cpu_to_le32(scan_timing[type].max_out_time);
cfg->suspend_time = cpu_to_le32(scan_timing[type].suspend_time);
- iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell, &scan_timing[type]);
+ iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell);
memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
@@ -989,11 +955,11 @@ static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
iwl_mvm_fill_channels(mvm, cfg->channel_array);
}
-static void iwl_mvm_fill_scan_config_cdb(struct iwl_mvm *mvm, void *config,
- u32 flags, u8 channel_flags)
+static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
+ u32 flags, u8 channel_flags)
{
enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false);
- struct iwl_scan_config_cdb *cfg = config;
+ struct iwl_scan_config *cfg = config;
cfg->flags = cpu_to_le32(flags);
cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
@@ -1001,12 +967,16 @@ static void iwl_mvm_fill_scan_config_cdb(struct iwl_mvm *mvm, void *config,
cfg->legacy_rates = iwl_mvm_scan_config_rates(mvm);
cfg->out_of_channel_time[0] =
cpu_to_le32(scan_timing[type].max_out_time);
- cfg->out_of_channel_time[1] =
- cpu_to_le32(scan_timing[type].max_out_time);
cfg->suspend_time[0] = cpu_to_le32(scan_timing[type].suspend_time);
- cfg->suspend_time[1] = cpu_to_le32(scan_timing[type].suspend_time);
- iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell, &scan_timing[type]);
+ if (iwl_mvm_is_cdb_supported(mvm)) {
+ cfg->suspend_time[1] =
+ cpu_to_le32(scan_timing[type].suspend_time);
+ cfg->out_of_channel_time[1] =
+ cpu_to_le32(scan_timing[type].max_out_time);
+ }
+
+ iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell);
memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
@@ -1033,16 +1003,13 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels))
return -ENOBUFS;
- if (type == mvm->scan_type) {
- IWL_DEBUG_SCAN(mvm,
- "Ignoring UMAC scan config of the same type\n");
+ if (type == mvm->scan_type)
return 0;
- }
- if (iwl_mvm_is_cdb_supported(mvm))
- cmd_size = sizeof(struct iwl_scan_config_cdb);
- else
+ if (iwl_mvm_has_new_tx_api(mvm))
cmd_size = sizeof(struct iwl_scan_config);
+ else
+ cmd_size = sizeof(struct iwl_scan_config_v1);
cmd_size += mvm->fw->ucode_capa.n_scan_channels;
cfg = kzalloc(cmd_size, GFP_KERNEL);
@@ -1068,13 +1035,13 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
IWL_CHANNEL_FLAG_EBS_ADD |
IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;
- if (iwl_mvm_is_cdb_supported(mvm)) {
+ if (iwl_mvm_has_new_tx_api(mvm)) {
flags |= (type == IWL_SCAN_TYPE_FRAGMENTED) ?
SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED :
SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED;
- iwl_mvm_fill_scan_config_cdb(mvm, cfg, flags, channel_flags);
- } else {
iwl_mvm_fill_scan_config(mvm, cfg, flags, channel_flags);
+ } else {
+ iwl_mvm_fill_scan_config_v1(mvm, cfg, flags, channel_flags);
}
cmd.data[0] = cfg;
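
The command buffer is now sized by firmware generation: the full scan-config struct for new-TX-API firmware, the v1 layout otherwise, plus the appended channel array either way. A small illustrative model; the struct contents here are placeholders, not the real wire format:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* placeholder layouts; the real structs carry many more fields */
struct scan_config_v1 { unsigned flags, tx_chains; };
struct scan_config    { unsigned flags, tx_chains, extra[4]; };

static void *alloc_scan_config(bool new_tx_api, size_t n_scan_channels,
			       size_t *cmd_size)
{
	*cmd_size = new_tx_api ? sizeof(struct scan_config)
			       : sizeof(struct scan_config_v1);
	*cmd_size += n_scan_channels;	/* channel array is appended */
	return calloc(1, *cmd_size);
}

int main(void)
{
	size_t sz;
	void *cfg = alloc_scan_config(true, 51, &sz);

	printf("allocated %zu bytes\n", sz);
	free(cfg);
	return 0;
}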
@@ -1113,22 +1080,26 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
cmd->passive_dwell = params->measurement_dwell;
cmd->extended_dwell = params->measurement_dwell;
} else {
- cmd->active_dwell = timing->dwell_active;
- cmd->passive_dwell = timing->dwell_passive;
- cmd->extended_dwell = timing->dwell_extended;
+ cmd->active_dwell = IWL_SCAN_DWELL_ACTIVE;
+ cmd->passive_dwell = IWL_SCAN_DWELL_PASSIVE;
+ cmd->extended_dwell = IWL_SCAN_DWELL_EXTENDED;
}
- cmd->fragmented_dwell = timing->dwell_fragmented;
-
- if (iwl_mvm_is_cdb_supported(mvm)) {
- cmd->cdb.max_out_time[0] = cpu_to_le32(timing->max_out_time);
- cmd->cdb.suspend_time[0] = cpu_to_le32(timing->suspend_time);
- cmd->cdb.max_out_time[1] = cpu_to_le32(timing->max_out_time);
- cmd->cdb.suspend_time[1] = cpu_to_le32(timing->suspend_time);
- cmd->cdb.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
+ cmd->fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
+
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ cmd->v6.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
+ cmd->v6.max_out_time[0] = cpu_to_le32(timing->max_out_time);
+ cmd->v6.suspend_time[0] = cpu_to_le32(timing->suspend_time);
+ if (iwl_mvm_is_cdb_supported(mvm)) {
+ cmd->v6.max_out_time[1] =
+ cpu_to_le32(timing->max_out_time);
+ cmd->v6.suspend_time[1] =
+ cpu_to_le32(timing->suspend_time);
+ }
} else {
- cmd->no_cdb.max_out_time = cpu_to_le32(timing->max_out_time);
- cmd->no_cdb.suspend_time = cpu_to_le32(timing->suspend_time);
- cmd->no_cdb.scan_priority =
+ cmd->v1.max_out_time = cpu_to_le32(timing->max_out_time);
+ cmd->v1.suspend_time = cpu_to_le32(timing->suspend_time);
+ cmd->v1.scan_priority =
cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
}
@@ -1207,8 +1178,8 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int type)
{
struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
- void *cmd_data = iwl_mvm_is_cdb_supported(mvm) ?
- (void *)&cmd->cdb.data : (void *)&cmd->no_cdb.data;
+ void *cmd_data = iwl_mvm_has_new_tx_api(mvm) ?
+ (void *)&cmd->v6.data : (void *)&cmd->v1.data;
struct iwl_scan_req_umac_tail *sec_part = cmd_data +
sizeof(struct iwl_scan_channel_cfg_umac) *
mvm->fw->ucode_capa.n_scan_channels;
@@ -1245,12 +1216,12 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
- if (iwl_mvm_is_cdb_supported(mvm)) {
- cmd->cdb.channel_flags = channel_flags;
- cmd->cdb.n_channels = params->n_channels;
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ cmd->v6.channel_flags = channel_flags;
+ cmd->v6.n_channels = params->n_channels;
} else {
- cmd->no_cdb.channel_flags = channel_flags;
- cmd->no_cdb.n_channels = params->n_channels;
+ cmd->v1.channel_flags = channel_flags;
+ cmd->v1.n_channels = params->n_channels;
}
iwl_scan_build_ssids(params, sec_part->direct_scan, &ssid_bitmap);
@@ -1607,16 +1578,12 @@ void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data;
- u8 buf[256];
mvm->scan_start = le64_to_cpu(notif->start_tsf);
IWL_DEBUG_SCAN(mvm,
- "UMAC Scan iteration complete: status=0x%x scanned_channels=%d channels list: %s\n",
- notif->status, notif->scanned_channels,
- iwl_mvm_dump_channel_list(notif->results,
- notif->scanned_channels, buf,
- sizeof(buf)));
+ "UMAC Scan iteration complete: status=0x%x scanned_channels=%d\n",
+ notif->status, notif->scanned_channels);
if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_FOUND) {
IWL_DEBUG_SCAN(mvm, "Pass all scheduled scan results found\n");
@@ -1692,10 +1659,10 @@ static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
int iwl_mvm_scan_size(struct iwl_mvm *mvm)
{
- int base_size = IWL_SCAN_REQ_UMAC_SIZE;
+ int base_size = IWL_SCAN_REQ_UMAC_SIZE_V1;
- if (iwl_mvm_is_cdb_supported(mvm))
- base_size = IWL_SCAN_REQ_UMAC_SIZE_CDB;
+ if (iwl_mvm_has_new_tx_api(mvm))
+ base_size = IWL_SCAN_REQ_UMAC_SIZE;
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
return base_size +
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
index 101fb04a8573..539b06bf0803 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
@@ -235,7 +235,7 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id,
iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL);
break;
case SF_FULL_ON:
- if (sta_id == IWL_MVM_STATION_COUNT) {
+ if (sta_id == IWL_MVM_INVALID_STA) {
IWL_ERR(mvm,
"No station: Cannot switch SF to FULL_ON\n");
return -EINVAL;
@@ -276,12 +276,12 @@ int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *changed_vif,
bool remove_vif)
{
enum iwl_sf_state new_state;
- u8 sta_id = IWL_MVM_STATION_COUNT;
+ u8 sta_id = IWL_MVM_INVALID_STA;
struct iwl_mvm_vif *mvmvif = NULL;
struct iwl_mvm_active_iface_iterator_data data = {
.ignore_vif = changed_vif,
.sta_vif_state = SF_UNINIT,
- .sta_vif_ap_sta_id = IWL_MVM_STATION_COUNT,
+ .sta_vif_ap_sta_id = IWL_MVM_INVALID_STA,
};
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 9d28db7f56aa..f5c786ddc526 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
*
* Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -77,9 +77,11 @@
*/
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
- return iwl_mvm_has_new_rx_api(mvm) ?
- sizeof(struct iwl_mvm_add_sta_cmd) :
- sizeof(struct iwl_mvm_add_sta_cmd_v7);
+ if (iwl_mvm_has_new_rx_api(mvm) ||
+ fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
+ return sizeof(struct iwl_mvm_add_sta_cmd);
+ else
+ return sizeof(struct iwl_mvm_add_sta_cmd_v7);
}
static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
@@ -98,7 +100,7 @@ static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
reserved_ids = BIT(0);
/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
- for (sta_id = 0; sta_id < IWL_MVM_STATION_COUNT; sta_id++) {
+ for (sta_id = 0; sta_id < ARRAY_SIZE(mvm->fw_id_to_mac_id); sta_id++) {
if (BIT(sta_id) & reserved_ids)
continue;
@@ -106,7 +108,7 @@ static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
lockdep_is_held(&mvm->mutex)))
return sta_id;
}
- return IWL_MVM_STATION_COUNT;
+ return IWL_MVM_INVALID_STA;
}
/* send station add/update command to firmware */
@@ -126,12 +128,21 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
u32 status;
u32 agg_size = 0, mpdu_dens = 0;
+ if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
+ add_sta_cmd.station_type = mvm_sta->sta_type;
+
if (!update || (flags & STA_MODIFY_QUEUES)) {
- add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);
- if (flags & STA_MODIFY_QUEUES)
- add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
+ if (!iwl_mvm_has_new_tx_api(mvm)) {
+ add_sta_cmd.tfd_queue_msk =
+ cpu_to_le32(mvm_sta->tfd_queue_msk);
+
+ if (flags & STA_MODIFY_QUEUES)
+ add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
+ } else {
+ WARN_ON(flags & STA_MODIFY_QUEUES);
+ }
}
switch (sta->bandwidth) {
@@ -209,13 +220,15 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;
if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
- add_sta_cmd.uapsd_trigger_acs |= BIT(AC_BK);
+ add_sta_cmd.uapsd_acs |= BIT(AC_BK);
if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
- add_sta_cmd.uapsd_trigger_acs |= BIT(AC_BE);
+ add_sta_cmd.uapsd_acs |= BIT(AC_BE);
if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
- add_sta_cmd.uapsd_trigger_acs |= BIT(AC_VI);
+ add_sta_cmd.uapsd_acs |= BIT(AC_VI);
if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
- add_sta_cmd.uapsd_trigger_acs |= BIT(AC_VO);
+ add_sta_cmd.uapsd_acs |= BIT(AC_VO);
+ add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
+ add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
}
status = ADD_STA_SUCCESS;
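
The rewritten uAPSD block gathers the trigger-enabled ACs in the low nibble and mirrors them into the high nibble with uapsd_acs |= uapsd_acs << 4, so one byte carries both the trigger- and delivery-enabled sets; sp_length becomes max_sp doubled, with 128 standing for an unlimited service period. A worked model of just that arithmetic (the AC numbering here is illustrative):

#include <stdint.h>
#include <stdio.h>

enum { AC_BK, AC_BE, AC_VI, AC_VO };

int main(void)
{
	uint8_t uapsd_acs = 0;
	uint8_t max_sp = 2;	/* mac80211 encoding: 2 means 4 frames per SP */
	uint8_t sp_length;

	uapsd_acs |= 1u << AC_BE;	/* BE and VO are uAPSD-enabled */
	uapsd_acs |= 1u << AC_VO;

	/* low nibble: trigger-enabled ACs; high nibble: delivery-enabled */
	uapsd_acs |= uapsd_acs << 4;

	sp_length = max_sp ? max_sp * 2 : 128;	/* 0 means "all frames" */

	printf("uapsd_acs=0x%02x sp_length=%u\n", uapsd_acs, sp_length);
	/* prints: uapsd_acs=0xaa sp_length=4 */
	return 0;
}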
@@ -337,6 +350,9 @@ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
u8 sta_id;
int ret;
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return -EINVAL;
+
spin_lock_bh(&mvm->queue_info_lock);
sta_id = mvm->queue_info[queue].ra_sta_id;
spin_unlock_bh(&mvm->queue_info_lock);
@@ -387,6 +403,9 @@ static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
lockdep_assert_held(&mvm->mutex);
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return -EINVAL;
+
spin_lock_bh(&mvm->queue_info_lock);
sta_id = mvm->queue_info[queue].ra_sta_id;
tid_bitmap = mvm->queue_info[queue].tid_bitmap;
@@ -426,6 +445,9 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
lockdep_assert_held(&mvm->mutex);
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return -EINVAL;
+
spin_lock_bh(&mvm->queue_info_lock);
sta_id = mvm->queue_info[queue].ra_sta_id;
tid_bitmap = mvm->queue_info[queue].tid_bitmap;
@@ -447,7 +469,7 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
disable_agg_tids |= BIT(tid);
- mvmsta->tid_data[tid].txq_id = IEEE80211_INVAL_HW_QUEUE;
+ mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
}
mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
@@ -468,6 +490,9 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
lockdep_assert_held(&mvm->mutex);
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return -EINVAL;
+
spin_lock_bh(&mvm->queue_info_lock);
txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
sta_id = mvm->queue_info[queue].ra_sta_id;
@@ -475,6 +500,8 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
spin_unlock_bh(&mvm->queue_info_lock);
mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
+ if (WARN_ON(!mvmsta))
+ return -EINVAL;
disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
/* Disable the queue */
@@ -512,6 +539,8 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
int i;
lockdep_assert_held(&mvm->queue_info_lock);
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return -EINVAL;
memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));
@@ -596,6 +625,9 @@ int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
unsigned long mq;
int ret;
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return -EINVAL;
+
/*
* If the AC is lower than current one - FIFO needs to be redirected to
* the lowest one of the streams in the queue. Check if this is needed
@@ -617,7 +649,7 @@ int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
cmd.tid = mvm->queue_info[queue].txq_tid;
- mq = mvm->queue_info[queue].hw_queue_to_mac80211;
+ mq = mvm->hw_queue_to_mac80211[queue];
shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1);
spin_unlock_bh(&mvm->queue_info_lock);
@@ -626,7 +658,7 @@ int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
/* Stop MAC queues and wait for this queue to empty */
iwl_mvm_stop_mac_queues(mvm, mq);
- ret = iwl_trans_wait_tx_queue_empty(mvm->trans, BIT(queue));
+ ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
if (ret) {
IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
queue);
@@ -677,6 +709,37 @@ out:
return ret;
}
+static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta, u8 ac,
+ int tid)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ unsigned int wdg_timeout =
+ iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
+ u8 mac_queue = mvmsta->vif->hw_queue[ac];
+ int queue = -1;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Allocating queue for sta %d on tid %d\n",
+ mvmsta->sta_id, tid);
+ queue = iwl_mvm_tvqm_enable_txq(mvm, mac_queue, mvmsta->sta_id, tid,
+ wdg_timeout);
+ if (queue < 0)
+ return queue;
+
+ IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);
+
+ spin_lock_bh(&mvmsta->lock);
+ mvmsta->tid_data[tid].txq_id = queue;
+ mvmsta->tid_data[tid].is_tid_active = true;
+ mvmsta->tfd_queue_msk |= BIT(queue);
+ spin_unlock_bh(&mvmsta->lock);
+
+ return 0;
+}
+
static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
struct ieee80211_sta *sta, u8 ac, int tid,
struct ieee80211_hdr *hdr)
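
On the TVQM path the firmware hands out a fresh queue per station/TID instead of the driver choosing from a shared pool, so the new helper only requests a queue and records the result under the station lock. A minimal userspace model of that flow; every function here is a hypothetical stand-in:

#include <stdio.h>

#define INVALID_QUEUE (-1)

/* stand-in: the real driver sends a queue-config command and the
 * firmware returns a freshly allocated queue id */
static int tvqm_enable_txq(int sta_id, int tid)
{
	static int next_queue = 10;

	(void)sta_id;
	(void)tid;
	return next_queue++;
}

struct tid_data { int txq_id; int active; };

static int sta_alloc_queue_tvqm(struct tid_data *tid_data, int sta_id,
				int tid)
{
	int queue = tvqm_enable_txq(sta_id, tid);

	if (queue < 0)
		return queue;

	/* in the driver this update runs under the station spinlock */
	tid_data->txq_id = queue;
	tid_data->active = 1;
	return 0;
}

int main(void)
{
	struct tid_data td = { .txq_id = INVALID_QUEUE, .active = 0 };

	if (!sta_alloc_queue_tvqm(&td, 3, 0))
		printf("sta 3 tid 0 -> queue %d\n", td.txq_id);
	return 0;
}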
@@ -702,6 +765,9 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
+
spin_lock_bh(&mvmsta->lock);
tfd_queue_mask = mvmsta->tfd_queue_msk;
spin_unlock_bh(&mvmsta->lock);
@@ -880,6 +946,9 @@ static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue)
lockdep_assert_held(&mvm->mutex);
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return;
+
spin_lock_bh(&mvm->queue_info_lock);
tid_bitmap = mvm->queue_info[queue].tid_bitmap;
spin_unlock_bh(&mvm->queue_info_lock);
@@ -917,6 +986,10 @@ static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
int ssn;
int ret = true;
+ /* queue sharing is disabled on new TX path */
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return;
+
lockdep_assert_held(&mvm->mutex);
spin_lock_bh(&mvm->queue_info_lock);
@@ -1014,7 +1087,7 @@ static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
ac = iwl_mvm_tid_to_ac_queue(tid);
mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;
- if (tid_data->txq_id == IEEE80211_INVAL_HW_QUEUE &&
+ if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE &&
iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
IWL_ERR(mvm,
"Can't alloc TXQ for sta %d tid %d - dropping frame\n",
@@ -1059,8 +1132,12 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
mutex_lock(&mvm->mutex);
+ /* No queue reconfiguration in TVQM mode */
+ if (iwl_mvm_has_new_tx_api(mvm))
+ goto alloc_queues;
+
/* Reconfigure queues requiring reconfiguration */
- for (queue = 0; queue < IWL_MAX_HW_QUEUES; queue++) {
+ for (queue = 0; queue < ARRAY_SIZE(mvm->queue_info); queue++) {
bool reconfig;
bool change_owner;
@@ -1088,6 +1165,7 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
iwl_mvm_change_queue_owner(mvm, queue);
}
+alloc_queues:
/* Go over all stations with deferred traffic */
for_each_set_bit(sta_id, mvm->sta_deferred_frames,
IWL_MVM_STATION_COUNT) {
@@ -1116,6 +1194,10 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
int queue;
bool using_inactive_queue = false, same_sta = false;
+ /* queue reserving is disabled on new TX path */
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return 0;
+
/*
* Check for inactive queues, so we don't reach a situation where we
* can't add a STA due to a shortage in queues that doesn't really exist
@@ -1191,7 +1273,7 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
int ac;
u8 mac_queue;
- if (txq_id == IEEE80211_INVAL_HW_QUEUE)
+ if (txq_id == IWL_MVM_INVALID_QUEUE)
continue;
skb_queue_head_init(&tid_data->deferred_tx_frames);
@@ -1199,20 +1281,31 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
ac = tid_to_mac80211_ac[i];
mac_queue = mvm_sta->vif->hw_queue[ac];
- cfg.tid = i;
- cfg.fifo = iwl_mvm_ac_to_tx_fifo[ac];
- cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
- txq_id == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Re-mapping sta %d tid %d\n",
+ mvm_sta->sta_id, i);
+ txq_id = iwl_mvm_tvqm_enable_txq(mvm, mac_queue,
+ mvm_sta->sta_id,
+ i, wdg_timeout);
+ tid_data->txq_id = txq_id;
+ } else {
+ u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
- IWL_DEBUG_TX_QUEUES(mvm,
- "Re-mapping sta %d tid %d to queue %d\n",
- mvm_sta->sta_id, i, txq_id);
+ cfg.tid = i;
+ cfg.fifo = iwl_mvm_ac_to_tx_fifo[ac];
+ cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
+ txq_id ==
+ IWL_MVM_DQA_BSS_CLIENT_QUEUE);
- iwl_mvm_enable_txq(mvm, txq_id, mac_queue,
- IEEE80211_SEQ_TO_SN(tid_data->seq_number),
- &cfg, wdg_timeout);
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Re-mapping sta %d tid %d to queue %d\n",
+ mvm_sta->sta_id, i, txq_id);
- mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
+ iwl_mvm_enable_txq(mvm, txq_id, mac_queue, seq, &cfg,
+ wdg_timeout);
+ mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
+ }
}
atomic_set(&mvm->pending_frames[mvm_sta->sta_id], 0);
@@ -1235,7 +1328,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
else
sta_id = mvm_sta->sta_id;
- if (sta_id == IWL_MVM_STATION_COUNT)
+ if (sta_id == IWL_MVM_INVALID_STA)
return -ENOSPC;
spin_lock_init(&mvm_sta->lock);
@@ -1254,6 +1347,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
mvm_sta->tx_protection = 0;
mvm_sta->tt_tx_protection = false;
+ mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;
/* HW restart, don't assume the memory has been zeroed */
atomic_set(&mvm->pending_frames[sta_id], 0);
@@ -1287,7 +1381,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
* Mark all queues for this STA as unallocated and defer TX
* frames until the queue is allocated
*/
- mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
+ mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
}
mvm_sta->deferred_traffic_tid_map = 0;
@@ -1303,7 +1397,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
mvm_sta->dup_data = dup_data;
}
- if (iwl_mvm_is_dqa_supported(mvm)) {
+ if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) {
ret = iwl_mvm_reserve_sta_stream(mvm, sta,
ieee80211_vif_type_p2p(vif));
if (ret)
@@ -1317,10 +1411,10 @@ update_fw:
if (vif->type == NL80211_IFTYPE_STATION) {
if (!sta->tdls) {
- WARN_ON(mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT);
+ WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
mvmvif->ap_sta_id = sta_id;
} else {
- WARN_ON(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT);
+ WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
}
}
@@ -1486,13 +1580,13 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
- if (mvm_sta->tid_data[i].txq_id == IEEE80211_INVAL_HW_QUEUE)
+ if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
continue;
ac = iwl_mvm_tid_to_ac_queue(i);
iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
vif->hw_queue[ac], i, 0);
- mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
+ mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
}
}
@@ -1520,8 +1614,8 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, 0);
if (ret)
return ret;
- ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
- mvm_sta->tfd_queue_msk);
+ ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
+ mvm_sta->tfd_queue_msk);
if (ret)
return ret;
ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
@@ -1571,11 +1665,11 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
return ret;
/* unassoc - go ahead - remove the AP STA now */
- mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
+ mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
/* clear d0i3_ap_sta_id if no longer relevant */
if (mvm->d0i3_ap_sta_id == sta_id)
- mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
+ mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
}
}
@@ -1584,7 +1678,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
* before the STA is removed.
*/
if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
- mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
+ mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
cancel_delayed_work(&mvm->tdls_cs.dwork);
}
@@ -1637,27 +1731,28 @@ int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
struct iwl_mvm_int_sta *sta,
- u32 qmask, enum nl80211_iftype iftype)
+ u32 qmask, enum nl80211_iftype iftype,
+ enum iwl_sta_type type)
{
if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
- if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_STATION_COUNT))
+ if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
return -ENOSPC;
}
sta->tfd_queue_msk = qmask;
+ sta->type = type;
/* put a non-NULL value so iterating over the stations won't stop */
rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
return 0;
}
-static void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm,
- struct iwl_mvm_int_sta *sta)
+void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
{
RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
- sta->sta_id = IWL_MVM_STATION_COUNT;
+ sta->sta_id = IWL_MVM_INVALID_STA;
}
static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
@@ -1675,8 +1770,11 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
cmd.sta_id = sta->sta_id;
cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
color));
+ if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
+ cmd.station_type = sta->type;
- cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
+ if (!iwl_mvm_has_new_tx_api(mvm))
+ cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
cmd.tid_disable_tx = cpu_to_le16(0xffff);
if (addr)
@@ -1701,27 +1799,19 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
return ret;
}
-int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
+static void iwl_mvm_enable_aux_queue(struct iwl_mvm *mvm)
{
unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
mvm->cfg->base_params->wd_timeout :
IWL_WATCHDOG_DISABLED;
- int ret;
-
- lockdep_assert_held(&mvm->mutex);
-
- /* Map Aux queue to fifo - needs to happen before adding Aux station */
- if (!iwl_mvm_is_dqa_supported(mvm))
- iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue,
- IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
-
- /* Allocate aux station and assign to it the aux queue */
- ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
- NL80211_IFTYPE_UNSPECIFIED);
- if (ret)
- return ret;
- if (iwl_mvm_is_dqa_supported(mvm)) {
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ int queue = iwl_mvm_tvqm_enable_txq(mvm, mvm->aux_queue,
+ mvm->aux_sta.sta_id,
+ IWL_MAX_TID_COUNT,
+ wdg_timeout);
+ mvm->aux_queue = queue;
+ } else if (iwl_mvm_is_dqa_supported(mvm)) {
struct iwl_trans_txq_scd_cfg cfg = {
.fifo = IWL_MVM_TX_FIFO_MCAST,
.sta_id = mvm->aux_sta.sta_id,
@@ -1732,14 +1822,44 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
iwl_mvm_enable_txq(mvm, mvm->aux_queue, mvm->aux_queue, 0, &cfg,
wdg_timeout);
+ } else {
+ iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue,
+ IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
}
+}
- ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
- MAC_INDEX_AUX, 0);
+int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
+{
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+ /* Allocate the aux station and assign the aux queue to it */
+ ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
+ NL80211_IFTYPE_UNSPECIFIED,
+ IWL_STA_AUX_ACTIVITY);
if (ret)
+ return ret;
+
+ /* Map Aux queue to fifo - needs to happen before adding Aux station */
+ if (!iwl_mvm_has_new_tx_api(mvm))
+ iwl_mvm_enable_aux_queue(mvm);
+
+ ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
+ MAC_INDEX_AUX, 0);
+ if (ret) {
iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
- return ret;
+ return ret;
+ }
+
+ /*
+ * For a000 firmware and on, we cannot add a queue to a station unknown
+ * to the firmware, so enable the queue here - after the station was added
+ */
+ if (iwl_mvm_has_new_tx_api(mvm))
+ iwl_mvm_enable_aux_queue(mvm);
+
+ return 0;
}
int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
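
The reordered aux-station setup encodes a firmware constraint: pre-a000 firmware expects the queue to be mapped before ADD_STA, while a000 and later reject queue configuration for a station the firmware does not yet know, so the enable moves after ADD_STA succeeds. A sketch of that ordering decision, with hypothetical helpers:

#include <stdbool.h>
#include <stdio.h>

static void enable_aux_queue(void) { puts("aux queue enabled"); }
static int add_sta_to_fw(void)     { puts("ADD_STA sent"); return 0; }

static int add_aux_sta(bool new_tx_api)
{
	int ret;

	/* older firmware: map the queue before the station exists */
	if (!new_tx_api)
		enable_aux_queue();

	ret = add_sta_to_fw();
	if (ret)
		return ret;

	/* a000+: the station must be known before its queue is added */
	if (new_tx_api)
		enable_aux_queue();
	return 0;
}

int main(void)
{
	return add_aux_sta(true);
}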
@@ -1790,39 +1910,39 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
const u8 *baddr = _baddr;
+ int queue;
int ret;
+ unsigned int wdg_timeout =
+ iwl_mvm_get_wd_timeout(mvm, vif, false, false);
+ struct iwl_trans_txq_scd_cfg cfg = {
+ .fifo = IWL_MVM_TX_FIFO_VO,
+ .sta_id = mvmvif->bcast_sta.sta_id,
+ .tid = IWL_MAX_TID_COUNT,
+ .aggregate = false,
+ .frame_limit = IWL_FRAME_LIMIT,
+ };
lockdep_assert_held(&mvm->mutex);
- if (iwl_mvm_is_dqa_supported(mvm)) {
- struct iwl_trans_txq_scd_cfg cfg = {
- .fifo = IWL_MVM_TX_FIFO_VO,
- .sta_id = mvmvif->bcast_sta.sta_id,
- .tid = IWL_MAX_TID_COUNT,
- .aggregate = false,
- .frame_limit = IWL_FRAME_LIMIT,
- };
- unsigned int wdg_timeout =
- iwl_mvm_get_wd_timeout(mvm, vif, false, false);
- int queue;
-
+ if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) {
if (vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_ADHOC)
- queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
+ queue = mvm->probe_queue;
else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
- queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
+ queue = mvm->p2p_dev_queue;
else if (WARN(1, "Missing required TXQ for adding bcast STA\n"))
return -EINVAL;
- iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0, &cfg,
- wdg_timeout);
bsta->tfd_queue_msk |= BIT(queue);
+
+ iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0,
+ &cfg, wdg_timeout);
}
if (vif->type == NL80211_IFTYPE_ADHOC)
baddr = vif->bss_conf.bssid;
- if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_STATION_COUNT))
+ if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
return -ENOSPC;
ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
@@ -1831,27 +1951,21 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
return ret;
/*
- * In AP vif type, we also need to enable the cab_queue. However, we
- * have to enable it after the ADD_STA command is sent, otherwise the
- * FW will throw an assert once we send the ADD_STA command (it'll
- * detect a mismatch in the tfd_queue_msk, as we can't add the
- * enabled-cab_queue to the mask)
+ * For a000 firmware and on, we cannot add a queue to a station unknown
+ * to the firmware, so enable the queue here - after the station was added
*/
- if (iwl_mvm_is_dqa_supported(mvm) &&
- (vif->type == NL80211_IFTYPE_AP ||
- vif->type == NL80211_IFTYPE_ADHOC)) {
- struct iwl_trans_txq_scd_cfg cfg = {
- .fifo = IWL_MVM_TX_FIFO_MCAST,
- .sta_id = mvmvif->bcast_sta.sta_id,
- .tid = IWL_MAX_TID_COUNT,
- .aggregate = false,
- .frame_limit = IWL_FRAME_LIMIT,
- };
- unsigned int wdg_timeout =
- iwl_mvm_get_wd_timeout(mvm, vif, false, false);
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ queue = iwl_mvm_tvqm_enable_txq(mvm, vif->hw_queue[0],
+ bsta->sta_id,
+ IWL_MAX_TID_COUNT,
+ wdg_timeout);
+
+ if (vif->type == NL80211_IFTYPE_AP)
+ mvm->probe_queue = queue;
+ else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ mvm->p2p_dev_queue = queue;
- iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue,
- 0, &cfg, wdg_timeout);
+ bsta->tfd_queue_msk |= BIT(queue);
}
return 0;
@@ -1869,24 +1983,18 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue,
IWL_MAX_TID_COUNT, 0);
- if (mvmvif->bcast_sta.tfd_queue_msk &
- BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE)) {
- iwl_mvm_disable_txq(mvm,
- IWL_MVM_DQA_AP_PROBE_RESP_QUEUE,
+ if (mvmvif->bcast_sta.tfd_queue_msk & BIT(mvm->probe_queue)) {
+ iwl_mvm_disable_txq(mvm, mvm->probe_queue,
vif->hw_queue[0], IWL_MAX_TID_COUNT,
0);
- mvmvif->bcast_sta.tfd_queue_msk &=
- ~BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE);
+ mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(mvm->probe_queue);
}
- if (mvmvif->bcast_sta.tfd_queue_msk &
- BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE)) {
- iwl_mvm_disable_txq(mvm,
- IWL_MVM_DQA_P2P_DEVICE_QUEUE,
+ if (mvmvif->bcast_sta.tfd_queue_msk & BIT(mvm->p2p_dev_queue)) {
+ iwl_mvm_disable_txq(mvm, mvm->p2p_dev_queue,
vif->hw_queue[0], IWL_MAX_TID_COUNT,
0);
- mvmvif->bcast_sta.tfd_queue_msk &=
- ~BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE);
+ mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(mvm->p2p_dev_queue);
}
}
@@ -1928,7 +2036,8 @@ int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
}
return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask,
- ieee80211_vif_type_p2p(vif));
+ ieee80211_vif_type_p2p(vif),
+ IWL_STA_GENERAL_PURPOSE);
}
/* Allocate a new station entry for the broadcast station to the given vif,
@@ -1982,6 +2091,101 @@ int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
return ret;
}
+/*
+ * Allocate a new station entry for the multicast station to the given vif,
+ * and send it to the FW.
+ * Note that each AP/GO mac should have its own multicast station.
+ *
+ * @mvm: the mvm component
+ * @vif: the interface to which the multicast station is added
+ */
+int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta;
+ static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
+ const u8 *maddr = _maddr;
+ struct iwl_trans_txq_scd_cfg cfg = {
+ .fifo = IWL_MVM_TX_FIFO_MCAST,
+ .sta_id = msta->sta_id,
+ .tid = IWL_MAX_TID_COUNT,
+ .aggregate = false,
+ .frame_limit = IWL_FRAME_LIMIT,
+ };
+ unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!iwl_mvm_is_dqa_supported(mvm))
+ return 0;
+
+ if (WARN_ON(vif->type != NL80211_IFTYPE_AP))
+ return -ENOTSUPP;
+
+ /*
+ * While in previous FWs we had to exclude the cab queue from the TFD
+ * queue mask, now it is needed like any other queue.
+ */
+ if (!iwl_mvm_has_new_tx_api(mvm) &&
+ fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
+ iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
+ &cfg, timeout);
+ msta->tfd_queue_msk |= BIT(vif->cab_queue);
+ }
+ ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
+ mvmvif->id, mvmvif->color);
+ if (ret) {
+ iwl_mvm_dealloc_int_sta(mvm, msta);
+ return ret;
+ }
+
+ /*
+ * Enable cab queue after the ADD_STA command is sent.
+ * This is needed for a000 firmware, which won't accept an SCD_QUEUE_CFG
+ * command with an unknown station id, and for FW that doesn't support
+ * the station API, since the cab queue is not included in the
+ * tfd_queue_mask.
+ */
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue,
+ msta->sta_id,
+ IWL_MAX_TID_COUNT,
+ timeout);
+ mvmvif->cab_queue = queue;
+ } else if (!fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_STA_TYPE)) {
+ iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
+ &cfg, timeout);
+ }
+
+ return 0;
+}
+
+/*
+ * Send the FW a request to remove the station from its internal data
+ * structures, and in addition remove it from the local data structure.
+ */
+int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!iwl_mvm_is_dqa_supported(mvm))
+ return 0;
+
+ iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue,
+ IWL_MAX_TID_COUNT, 0);
+
+ ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
+ if (ret)
+ IWL_WARN(mvm, "Failed sending remove station\n");
+
+ return ret;
+}
+
#define IWL_MAX_RX_BA_SESSIONS 16
static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
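
The new multicast-station path enables the cab queue at one of three points relative to ADD_STA: before it on the old TX path with station-type firmware (so the queue is part of tfd_queue_msk), after it via TVQM on a000+, or after it with the legacy enable when the firmware lacks the station-type API. A condensed model of that decision, with hypothetical predicates:

#include <stdbool.h>
#include <stdio.h>

static void enable_cab_queue(const char *how) { printf("enable: %s\n", how); }
static void add_sta(void)                     { puts("ADD_STA"); }

static void add_mcast_sta(bool new_tx_api, bool has_sta_type_api)
{
	/* old TX path + station-type fw: the queue joins the tfd mask */
	if (!new_tx_api && has_sta_type_api)
		enable_cab_queue("before ADD_STA (in tfd_queue_msk)");

	add_sta();

	if (new_tx_api)			/* a000+: fw allocates via TVQM */
		enable_cab_queue("after ADD_STA (TVQM)");
	else if (!has_sta_type_api)	/* legacy fw: old-style enable */
		enable_cab_queue("after ADD_STA (legacy)");
}

int main(void)
{
	add_mcast_sta(false, true);
	return 0;
}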
@@ -2059,6 +2263,7 @@ static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
reorder_buf->mvm = mvm;
reorder_buf->queue = i;
reorder_buf->sta_id = sta_id;
+ reorder_buf->valid = false;
for (j = 0; j < reorder_buf->buf_size; j++)
__skb_queue_head_init(&reorder_buf->entries[j]);
}
@@ -2226,7 +2431,9 @@ int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
cmd.sta_id = mvm_sta->sta_id;
cmd.add_modify = STA_MODE_MODIFY;
- cmd.modify_mask = STA_MODIFY_QUEUES | STA_MODIFY_TID_DISABLE_TX;
+ if (!iwl_mvm_has_new_tx_api(mvm))
+ cmd.modify_mask = STA_MODIFY_QUEUES;
+ cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
@@ -2310,10 +2517,18 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
* one and mark it as reserved
* 3. In DQA mode, but no traffic yet on this TID: same treatment as in
* non-DQA mode, since the TXQ hasn't yet been allocated
+ * Case 3 is not supported for the new TX path, as it is not expected to
+ * happen and aggregation will be offloaded soon anyway
*/
txq_id = mvmsta->tid_data[tid].txq_id;
- if (iwl_mvm_is_dqa_supported(mvm) &&
- unlikely(mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_SHARED)) {
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ if (txq_id == IWL_MVM_INVALID_QUEUE) {
+ ret = -ENXIO;
+ goto release_locks;
+ }
+ } else if (iwl_mvm_is_dqa_supported(mvm) &&
+ unlikely(mvm->queue_info[txq_id].status ==
+ IWL_MVM_QUEUE_SHARED)) {
ret = -ENXIO;
IWL_DEBUG_TX_QUEUES(mvm,
"Can't start tid %d agg on shared queue!\n",
@@ -2409,6 +2624,20 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
tid_data->amsdu_in_ampdu_allowed = amsdu;
spin_unlock_bh(&mvmsta->lock);
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ /*
+ * If there were no queue, iwl_mvm_sta_tx_agg_start() would have
+ * failed, so there is no need to check the queue's status
+ */
+ if (buf_size < mvmsta->max_agg_bufsize)
+ return -ENOTSUPP;
+
+ ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
+ if (ret)
+ return -EIO;
+ goto out;
+ }
+
cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
spin_lock_bh(&mvm->queue_info_lock);
@@ -2430,8 +2659,8 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
* If reconfiguring an existing queue, it first must be
* drained
*/
- ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
- BIT(queue));
+ ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
+ BIT(queue));
if (ret) {
IWL_ERR(mvm,
"Error draining queue before reconfig\n");
@@ -2466,6 +2695,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
spin_unlock_bh(&mvm->queue_info_lock);
+out:
/*
* Even though in theory the peer could have different
* aggregation reorder buffer sizes for different sessions,
@@ -2483,6 +2713,27 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, false);
}
+static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvmsta,
+ u16 txq_id)
+{
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return;
+
+ spin_lock_bh(&mvm->queue_info_lock);
+ /*
+ * The TXQ is marked as reserved only if no traffic came through yet.
+ * This means no traffic has been sent on this TID (agg'd or not), so
+ * we no longer have use for the queue. Since it hasn't even been
+ * allocated through iwl_mvm_enable_txq, we can just mark it back as
+ * free.
+ */
+ if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
+ mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
+
+ spin_unlock_bh(&mvm->queue_info_lock);
+}
+
int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid)
{
@@ -2509,18 +2760,7 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
mvmsta->agg_tids &= ~BIT(tid);
- spin_lock_bh(&mvm->queue_info_lock);
- /*
- * The TXQ is marked as reserved only if no traffic came through yet
- * This means no traffic has been sent on this TID (agg'd or not), so
- * we no longer have use for the queue. Since it hasn't even been
- * allocated through iwl_mvm_enable_txq, so we can just mark it back as
- * free.
- */
- if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
- mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
-
- spin_unlock_bh(&mvm->queue_info_lock);
+ iwl_mvm_unreserve_agg_queue(mvm, mvmsta, txq_id);
switch (tid_data->state) {
case IWL_AGG_ON:
@@ -2600,24 +2840,14 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
mvmsta->agg_tids &= ~BIT(tid);
spin_unlock_bh(&mvmsta->lock);
- spin_lock_bh(&mvm->queue_info_lock);
- /*
- * The TXQ is marked as reserved only if no traffic came through yet
- * This means no traffic has been sent on this TID (agg'd or not), so
- * we no longer have use for the queue. Since it hasn't even been
- * allocated through iwl_mvm_enable_txq, so we can just mark it back as
- * free.
- */
- if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
- mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
- spin_unlock_bh(&mvm->queue_info_lock);
+ iwl_mvm_unreserve_agg_queue(mvm, mvmsta, txq_id);
if (old_state >= IWL_AGG_ON) {
iwl_mvm_drain_sta(mvm, mvmsta, true);
if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
- iwl_trans_wait_tx_queue_empty(mvm->trans,
- mvmsta->tfd_queue_msk);
+ iwl_trans_wait_tx_queues_empty(mvm->trans,
+ mvmsta->tfd_queue_msk);
iwl_mvm_drain_sta(mvm, mvmsta, false);
iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
@@ -2675,7 +2905,7 @@ static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
* station ID, then use AP's station ID.
*/
if (vif->type == NL80211_IFTYPE_STATION &&
- mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
+ mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
u8 sta_id = mvmvif->ap_sta_id;
sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
@@ -2697,68 +2927,97 @@ static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvm_sta,
- struct ieee80211_key_conf *keyconf, bool mcast,
+ struct ieee80211_key_conf *key, bool mcast,
u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
u8 key_offset)
{
- struct iwl_mvm_add_sta_key_cmd cmd = {};
+ union {
+ struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
+ struct iwl_mvm_add_sta_key_cmd cmd;
+ } u = {};
__le16 key_flags;
int ret;
u32 status;
u16 keyidx;
- int i;
- u8 sta_id = mvm_sta->sta_id;
+ u64 pn = 0;
+ int i, size;
+ bool new_api = fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
- keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
+ keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
STA_KEY_FLG_KEYID_MSK;
key_flags = cpu_to_le16(keyidx);
key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);
- switch (keyconf->cipher) {
+ switch (key->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
- cmd.tkip_rx_tsc_byte2 = tkip_iv32;
- for (i = 0; i < 5; i++)
- cmd.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
- memcpy(cmd.key, keyconf->key, keyconf->keylen);
+ if (new_api) {
+ memcpy((void *)&u.cmd.tx_mic_key,
+ &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
+ IWL_MIC_KEY_SIZE);
+
+ memcpy((void *)&u.cmd.rx_mic_key,
+ &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
+ IWL_MIC_KEY_SIZE);
+ pn = atomic64_read(&key->tx_pn);
+
+ } else {
+ u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
+ for (i = 0; i < 5; i++)
+ u.cmd_v1.tkip_rx_ttak[i] =
+ cpu_to_le16(tkip_p1k[i]);
+ }
+ memcpy(u.cmd.common.key, key->key, key->keylen);
break;
case WLAN_CIPHER_SUITE_CCMP:
key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
- memcpy(cmd.key, keyconf->key, keyconf->keylen);
+ memcpy(u.cmd.common.key, key->key, key->keylen);
+ if (new_api)
+ pn = atomic64_read(&key->tx_pn);
break;
case WLAN_CIPHER_SUITE_WEP104:
key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
/* fall through */
case WLAN_CIPHER_SUITE_WEP40:
key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
- memcpy(cmd.key + 3, keyconf->key, keyconf->keylen);
+ memcpy(u.cmd.common.key + 3, key->key, key->keylen);
break;
case WLAN_CIPHER_SUITE_GCMP_256:
key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
/* fall through */
case WLAN_CIPHER_SUITE_GCMP:
key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
- memcpy(cmd.key, keyconf->key, keyconf->keylen);
+ memcpy(u.cmd.common.key, key->key, key->keylen);
+ if (new_api)
+ pn = atomic64_read(&key->tx_pn);
break;
default:
key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
- memcpy(cmd.key, keyconf->key, keyconf->keylen);
+ memcpy(u.cmd.common.key, key->key, key->keylen);
}
if (mcast)
key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
- cmd.key_offset = key_offset;
- cmd.key_flags = key_flags;
- cmd.sta_id = sta_id;
+ u.cmd.common.key_offset = key_offset;
+ u.cmd.common.key_flags = key_flags;
+ u.cmd.common.sta_id = mvm_sta->sta_id;
+
+ if (new_api) {
+ u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
+ size = sizeof(u.cmd);
+ } else {
+ size = sizeof(u.cmd_v1);
+ }
status = ADD_STA_SUCCESS;
if (cmd_flags & CMD_ASYNC)
- ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC,
- sizeof(cmd), &cmd);
+ ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
+ &u.cmd);
else
- ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
- &cmd, &status);
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
+ &u.cmd, &status);
switch (status) {
case ADD_STA_SUCCESS:
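
Both key-command paths now build the payload inside a union of the v1 and current layouts; because the fields they share occupy a common prefix, the code fills the newer struct's common part and merely selects the number of bytes to send by the TKIP-MIC-keys API flag. A self-contained illustration of the trick; these struct layouts are simplified, not the real firmware format:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* fields shared by both versions, kept at the same offsets */
struct key_cmd_common {
	uint16_t key_flags;
	uint8_t key[32];
	uint8_t sta_id, key_offset;
};

struct key_cmd_v1 {			/* older layout */
	struct key_cmd_common common;
	uint8_t tkip_rx_tsc_byte2;
	uint16_t tkip_rx_ttak[5];
};

struct key_cmd {			/* current layout */
	struct key_cmd_common common;
	uint8_t rx_mic_key[8], tx_mic_key[8];
	uint64_t transmit_seq_cnt;
};

int main(void)
{
	bool new_api = true;	/* stands in for the TKIP_MIC_KEYS API flag */
	union {
		struct key_cmd_v1 cmd_v1;
		struct key_cmd cmd;
	} u = { 0 };
	size_t size;

	/* the common prefix is valid whichever size ends up being sent */
	u.cmd.common.sta_id = 5;
	u.cmd.common.key_offset = 1;
	size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);

	printf("sending %zu bytes for sta %u\n", size,
	       (unsigned)u.cmd.common.sta_id);
	return 0;
}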
@@ -2858,7 +3117,7 @@ static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
return sta->addr;
if (vif->type == NL80211_IFTYPE_STATION &&
- mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
+ mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
u8 sta_id = mvmvif->ap_sta_id;
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
lockdep_is_held(&mvm->mutex));
@@ -2911,9 +3170,14 @@ static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
struct ieee80211_key_conf *keyconf,
bool mcast)
{
- struct iwl_mvm_add_sta_key_cmd cmd = {};
+ union {
+ struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
+ struct iwl_mvm_add_sta_key_cmd cmd;
+ } u = {};
+ bool new_api = fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
__le16 key_flags;
- int ret;
+ int ret, size;
u32 status;
key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
@@ -2924,13 +3188,19 @@ static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
if (mcast)
key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
- cmd.key_flags = key_flags;
- cmd.key_offset = keyconf->hw_key_idx;
- cmd.sta_id = sta_id;
+ /*
+ * The fields assigned here are in the same location at the start
+ * of the command, so we can do this union trick.
+ */
+ u.cmd.common.key_flags = key_flags;
+ u.cmd.common.key_offset = keyconf->hw_key_idx;
+ u.cmd.common.sta_id = sta_id;
+
+ size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);
status = ADD_STA_SUCCESS;
- ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
- &cmd, &status);
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
+ &status);
switch (status) {
case ADD_STA_SUCCESS:
@@ -3044,7 +3314,7 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
{
bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
struct iwl_mvm_sta *mvm_sta;
- u8 sta_id = IWL_MVM_STATION_COUNT;
+ u8 sta_id = IWL_MVM_INVALID_STA;
int ret, i;
lockdep_assert_held(&mvm->mutex);
@@ -3207,13 +3477,13 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
/* Note: this is ignored by firmware not supporting GO uAPSD */
if (more_data)
- cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_MOREDATA);
+ cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;
if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
mvmsta->next_status_eosp = true;
- cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_PS_POLL);
+ cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
} else {
- cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_UAPSD);
+ cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
}
/* block the Tx queues until the FW updated the sleep Tx count */
@@ -3290,6 +3560,27 @@ void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
spin_unlock_bh(&mvm_sta->lock);
}
+static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
+ struct iwl_mvm_vif *mvmvif,
+ struct iwl_mvm_int_sta *sta,
+ bool disable)
+{
+ u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
+ struct iwl_mvm_add_sta_cmd cmd = {
+ .add_modify = STA_MODE_MODIFY,
+ .sta_id = sta->sta_id,
+ .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
+ .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
+ .mac_id_n_color = cpu_to_le32(id),
+ };
+ int ret;
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, 0,
+ iwl_mvm_add_sta_cmd_size(mvm), &cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
+}
+
void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
struct iwl_mvm_vif *mvmvif,
bool disable)
@@ -3301,7 +3592,7 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
/* Block/unblock all the stations of the given mvmvif */
- for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
+ for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
lockdep_is_held(&mvm->mutex));
if (IS_ERR_OR_NULL(sta))
@@ -3314,6 +3605,22 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
}
+
+ if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
+ return;
+
+ /* Need to block/unblock also multicast station */
+ if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA)
+ iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
+ &mvmvif->mcast_sta, disable);
+
+ /*
+ * Only unblock the broadcast station (FW blocks it for immediate
+ * quiet, not the driver)
+ */
+ if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA)
+ iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
+ &mvmvif->bcast_sta, disable);
}
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index 1927ce607798..2716cb5483bf 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -380,6 +380,7 @@ struct iwl_mvm_rxq_dup_data {
* @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send ampdus for
* tid.
* @max_agg_bufsize: the maximal size of the AGG buffer for this station
+ * @sta_type: station type
* @bt_reduced_txpower: is reduced tx power enabled for this station
* @next_status_eosp: the next reclaimed packet is a PS-Poll response and
* we need to signal the EOSP
@@ -416,6 +417,7 @@ struct iwl_mvm_sta {
u32 mac_id_n_color;
u16 tid_disable_agg;
u8 max_agg_bufsize;
+ enum iwl_sta_type sta_type;
bool bt_reduced_txpower;
bool next_status_eosp;
spinlock_t lock;
@@ -453,10 +455,12 @@ iwl_mvm_sta_from_mac80211(struct ieee80211_sta *sta)
* struct iwl_mvm_int_sta - representation of an internal station (auxiliary or
* broadcast)
* @sta_id: the index of the station in the fw (will be replaced by id_n_color)
+ * @type: station type
* @tfd_queue_msk: the tfd queues used by the station
*/
struct iwl_mvm_int_sta {
u32 sta_id;
+ enum iwl_sta_type type;
u32 tfd_queue_msk;
};
@@ -532,10 +536,14 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
struct iwl_mvm_int_sta *sta,
- u32 qmask, enum nl80211_iftype iftype);
+ u32 qmask, enum nl80211_iftype iftype,
+ enum iwl_sta_type type);
void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta);
int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
index 9f160fc58cd0..df7cd87199ea 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
@@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,6 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -78,7 +80,7 @@ void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm)
lockdep_assert_held(&mvm->mutex);
- for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
+ for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
lockdep_is_held(&mvm->mutex));
if (!sta || IS_ERR(sta) || !sta->tdls)
@@ -101,7 +103,7 @@ int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
lockdep_assert_held(&mvm->mutex);
- for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
+ for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
lockdep_is_held(&mvm->mutex));
if (!sta || IS_ERR(sta) || !sta->tdls)
@@ -145,7 +147,7 @@ static void iwl_mvm_tdls_config(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
/* populate TDLS peer data */
cnt = 0;
- for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
+ for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
lockdep_is_held(&mvm->mutex));
if (IS_ERR_OR_NULL(sta) || !sta->tdls)
@@ -251,7 +253,7 @@ static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm,
iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG);
if (state == IWL_MVM_TDLS_SW_IDLE)
- mvm->tdls_cs.cur_sta_id = IWL_MVM_STATION_COUNT;
+ mvm->tdls_cs.cur_sta_id = IWL_MVM_INVALID_STA;
}
void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
@@ -305,7 +307,7 @@ iwl_mvm_tdls_check_action(struct iwl_mvm *mvm,
/* get the existing peer if it's there */
if (mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE &&
- mvm->tdls_cs.cur_sta_id != IWL_MVM_STATION_COUNT) {
+ mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) {
struct ieee80211_sta *sta = rcu_dereference_protected(
mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
lockdep_is_held(&mvm->mutex));
@@ -523,7 +525,7 @@ void iwl_mvm_tdls_ch_switch_work(struct work_struct *work)
iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
/* station might be gone, in that case do nothing */
- if (mvm->tdls_cs.peer.sta_id == IWL_MVM_STATION_COUNT)
+ if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA)
goto out;
sta = rcu_dereference_protected(
@@ -573,7 +575,7 @@ iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
sta->addr, chandef->chan->center_freq, chandef->width);
/* we only support a single peer for channel switching */
- if (mvm->tdls_cs.peer.sta_id != IWL_MVM_STATION_COUNT) {
+ if (mvm->tdls_cs.peer.sta_id != IWL_MVM_INVALID_STA) {
IWL_DEBUG_TDLS(mvm,
"Existing peer. Can't start switch with %pM\n",
sta->addr);
@@ -633,7 +635,7 @@ void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
IWL_DEBUG_TDLS(mvm, "TDLS cancel channel switch with %pM\n", sta->addr);
/* we only support a single peer for channel switching */
- if (mvm->tdls_cs.peer.sta_id == IWL_MVM_STATION_COUNT) {
+ if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA) {
IWL_DEBUG_TDLS(mvm, "No ch switch peer - %pM\n", sta->addr);
goto out;
}
@@ -654,7 +656,7 @@ void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE)
wait_for_phy = true;
- mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
+ mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
dev_kfree_skb(mvm->tdls_cs.peer.skb);
mvm->tdls_cs.peer.skb = NULL;
@@ -697,7 +699,7 @@ iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
if (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE &&
params->status != 0 &&
mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
- mvm->tdls_cs.cur_sta_id != IWL_MVM_STATION_COUNT) {
+ mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) {
struct ieee80211_sta *cur_sta;
/* make sure it's the same peer */
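The tdls.c hunks above are part of a tree-wide cleanup in this merge: IWL_MVM_STATION_COUNT no longer doubles as a "no station" marker (the dedicated IWL_MVM_INVALID_STA takes that role), and loops over fw_id_to_mac_id are bounded with ARRAY_SIZE() so the bound can't drift from the array definition. A minimal sketch of the idiom, with hypothetical names standing in for the driver's types:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
#define INVALID_ID 0xffu		/* sentinel distinct from any valid index */

static const char *id_map[16];		/* stand-in for fw_id_to_mac_id */

int main(void)
{
	unsigned int cur = INVALID_ID;	/* "no current entry" */
	size_t i;

	/* bound derived from the array itself, not a separate constant */
	for (i = 0; i < ARRAY_SIZE(id_map); i++)
		if (id_map[i])
			cur = (unsigned int)i;

	if (cur == INVALID_ID)
		printf("no entry in use\n");
	return 0;
}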
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tof.c b/drivers/net/wireless/intel/iwlwifi/mvm/tof.c
index a1947d6f3a2c..16ce8a56b5b9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tof.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tof.c
@@ -80,7 +80,7 @@ void iwl_mvm_tof_init(struct iwl_mvm *mvm)
if (IWL_MVM_TOF_IS_RESPONDER) {
tof_data->responder_cfg.sub_grp_cmd_id =
cpu_to_le32(TOF_RESPONDER_CONFIG_CMD);
- tof_data->responder_cfg.sta_id = IWL_MVM_STATION_COUNT;
+ tof_data->responder_cfg.sta_id = IWL_MVM_INVALID_STA;
}
#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index bec7d9c46087..f9cbd197246f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -356,7 +356,7 @@ static void iwl_mvm_tt_tx_protection(struct iwl_mvm *mvm, bool enable)
struct iwl_mvm_sta *mvmsta;
int i, err;
- for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
+ for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
mvmsta = iwl_mvm_sta_from_staid_protected(mvm, i);
if (!mvmsta)
continue;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 1ba0a6f55503..bcaceb64a6e8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -475,6 +475,39 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
memset(dev_cmd, 0, sizeof(*dev_cmd));
dev_cmd->hdr.cmd = TX_CMD;
+
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload;
+ u16 offload_assist = iwl_mvm_tx_csum(mvm, skb, hdr, info);
+
+ /* padding is inserted later in transport */
+ /* FIXME - the check for AMSDU may need to be removed */
+ if (ieee80211_hdrlen(hdr->frame_control) % 4 &&
+ !(offload_assist & BIT(TX_CMD_OFFLD_AMSDU)))
+ offload_assist |= BIT(TX_CMD_OFFLD_PAD);
+
+ cmd->offload_assist |= cpu_to_le16(offload_assist);
+
+ /* Total # bytes to be transmitted */
+ cmd->len = cpu_to_le16((u16)skb->len);
+
+ /* Copy MAC header from skb into command buffer */
+ memcpy(cmd->hdr, hdr, hdrlen);
+
+ if (!info->control.hw_key)
+ cmd->flags |= cpu_to_le32(IWL_TX_FLAGS_ENCRYPT_DIS);
+
+ /* For data packets rate info comes from the fw */
+ if (ieee80211_is_data(hdr->frame_control) && sta)
+ goto out;
+
+ cmd->flags |= cpu_to_le32(IWL_TX_FLAGS_CMD_RATE);
+ cmd->rate_n_flags =
+ cpu_to_le32(iwl_mvm_get_tx_rate(mvm, info, sta));
+
+ goto out;
+ }
+
tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
if (info->control.hw_key)
@@ -484,6 +517,10 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control);
+ /* Copy MAC header from skb into command buffer */
+ memcpy(tx_cmd->hdr, hdr, hdrlen);
+
+out:
return dev_cmd;
}
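With this hunk, command construction is fully contained in iwl_mvm_set_tx_params(): on the new TX API it fills struct iwl_tx_cmd_gen2 (length, offload_assist, and a driver-chosen rate only for non-data frames), otherwise the legacy struct iwl_tx_cmd, and in both cases the MAC header is copied into the command here so the callers no longer have to (their memcpy calls are deleted further down). A hedged sketch of the dispatch-by-capability shape, with simplified, hypothetical layouts:

#include <stdio.h>
#include <string.h>

/* hypothetical stand-ins; only the dispatch pattern mirrors the patch */
struct tx_cmd_v1 { unsigned short len; unsigned char hdr[24]; };
struct tx_cmd_v2 { unsigned int flags; unsigned short len; unsigned char hdr[24]; };

static int new_tx_api = 1;		/* stands in for iwl_mvm_has_new_tx_api() */

static void set_tx_params(void *payload, const void *hdr, size_t hdrlen,
			  unsigned short frame_len)
{
	if (new_tx_api) {
		struct tx_cmd_v2 *cmd = payload;

		cmd->len = frame_len;
		memcpy(cmd->hdr, hdr, hdrlen);	/* header copied here... */
		return;
	}

	struct tx_cmd_v1 *cmd = payload;

	cmd->len = frame_len;
	memcpy(cmd->hdr, hdr, hdrlen);		/* ...in both paths */
}

int main(void)
{
	struct tx_cmd_v2 payload = {0};
	unsigned char mac_hdr[24] = {0xd0};

	set_tx_params(&payload, mac_hdr, sizeof(mac_hdr), 100);
	printf("hdr[0] = 0x%02x\n", payload.hdr[0]);
	return 0;
}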
@@ -514,21 +551,21 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
*/
if (ieee80211_is_probe_resp(fc) || ieee80211_is_auth(fc) ||
ieee80211_is_deauth(fc))
- return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
+ return mvm->probe_queue;
if (info->hw_queue == info->control.vif->cab_queue)
return info->hw_queue;
WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC,
"fc=0x%02x", le16_to_cpu(fc));
- return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
+ return mvm->probe_queue;
case NL80211_IFTYPE_P2P_DEVICE:
if (ieee80211_is_mgmt(fc))
- return IWL_MVM_DQA_P2P_DEVICE_QUEUE;
+ return mvm->p2p_dev_queue;
if (info->hw_queue == info->control.vif->cab_queue)
return info->hw_queue;
WARN_ON_ONCE(1);
- return IWL_MVM_DQA_P2P_DEVICE_QUEUE;
+ return mvm->p2p_dev_queue;
default:
WARN_ONCE(1, "Not a ctrl vif, no available queue\n");
return -1;
@@ -541,7 +578,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
struct ieee80211_tx_info info;
struct iwl_device_cmd *dev_cmd;
- struct iwl_tx_cmd *tx_cmd;
u8 sta_id;
int hdrlen = ieee80211_hdrlen(hdr->frame_control);
int queue;
@@ -594,11 +630,13 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
if (queue < 0)
return -1;
+ if (queue == info.control.vif->cab_queue)
+ queue = mvmvif->cab_queue;
} else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
is_multicast_ether_addr(hdr->addr1)) {
u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id);
- if (ap_sta_id != IWL_MVM_STATION_COUNT)
+ if (ap_sta_id != IWL_MVM_INVALID_STA)
sta_id = ap_sta_id;
} else if (iwl_mvm_is_dqa_supported(mvm) &&
info.control.vif->type == NL80211_IFTYPE_STATION &&
@@ -616,11 +654,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
/* From now on, we cannot access info->control */
iwl_mvm_skb_prepare_status(skb, dev_cmd);
- tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
-
- /* Copy MAC header from skb into command buffer */
- memcpy(tx_cmd->hdr, hdr, hdrlen);
-
if (iwl_trans_tx(mvm->trans, skb, dev_cmd, queue)) {
iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
return -1;
@@ -713,7 +746,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
* fifo to be able to send bursts.
*/
max_amsdu_len = min_t(unsigned int, max_amsdu_len,
- mvm->shared_mem_cfg.txfifo_size[txf] - 256);
+ mvm->smem_cfg.lmac[0].txfifo_size[txf] - 256);
if (unlikely(dbg_max_amsdu_len))
max_amsdu_len = min_t(unsigned int, max_amsdu_len,
@@ -862,6 +895,9 @@ static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id)
unsigned long now = jiffies;
int tid;
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return false;
+
for_each_set_bit(tid, &queue_tid_bitmap, IWL_MAX_TID_COUNT + 1) {
if (time_before(mvm->queue_info[txq_id].last_frame_time[tid] +
IWL_MVM_DQA_QUEUE_TIMEOUT, now))
@@ -881,11 +917,10 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct iwl_mvm_sta *mvmsta;
struct iwl_device_cmd *dev_cmd;
- struct iwl_tx_cmd *tx_cmd;
__le16 fc;
u16 seq_number = 0;
u8 tid = IWL_MAX_TID_COUNT;
- u8 txq_id = info->hw_queue;
+ u16 txq_id = info->hw_queue;
bool is_ampdu = false;
int hdrlen;
@@ -896,7 +931,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
if (WARN_ON_ONCE(!mvmsta))
return -1;
- if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
+ if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA))
return -1;
dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen,
@@ -904,8 +939,6 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
if (!dev_cmd)
goto drop;
- tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
-
/*
* we handle that entirely ourselves -- for uAPSD the firmware
* will always send a notification, and for PS-Poll responses
@@ -926,18 +959,27 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
goto drop_unlock_sta;
- seq_number = mvmsta->tid_data[tid].seq_number;
- seq_number &= IEEE80211_SCTL_SEQ;
- hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
- hdr->seq_ctrl |= cpu_to_le16(seq_number);
is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
if (WARN_ON_ONCE(is_ampdu &&
mvmsta->tid_data[tid].state != IWL_AGG_ON))
goto drop_unlock_sta;
+
+ seq_number = mvmsta->tid_data[tid].seq_number;
+ seq_number &= IEEE80211_SCTL_SEQ;
+
+ if (!iwl_mvm_has_new_tx_api(mvm)) {
+ struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
+
+ hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+ hdr->seq_ctrl |= cpu_to_le16(seq_number);
+ /* update the tx_cmd hdr as it was already copied */
+ tx_cmd->hdr->seq_ctrl = hdr->seq_ctrl;
+ }
}
if (iwl_mvm_is_dqa_supported(mvm) || is_ampdu)
txq_id = mvmsta->tid_data[tid].txq_id;
+
if (sta->tdls && !iwl_mvm_is_dqa_supported(mvm)) {
/* default to TID 0 for non-QoS packets */
u8 tdls_tid = tid == IWL_MAX_TID_COUNT ? 0 : tid;
@@ -945,17 +987,14 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
txq_id = mvmsta->hw_queue[tid_to_mac80211_ac[tdls_tid]];
}
- /* Copy MAC header from skb into command buffer */
- memcpy(tx_cmd->hdr, hdr, hdrlen);
-
WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
/* Check if TXQ needs to be allocated or re-activated */
- if (unlikely(txq_id == IEEE80211_INVAL_HW_QUEUE ||
+ if (unlikely(txq_id == IWL_MVM_INVALID_QUEUE ||
!mvmsta->tid_data[tid].is_tid_active) &&
iwl_mvm_is_dqa_supported(mvm)) {
/* If TXQ needs to be allocated... */
- if (txq_id == IEEE80211_INVAL_HW_QUEUE) {
+ if (txq_id == IWL_MVM_INVALID_QUEUE) {
iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);
/*
@@ -967,6 +1006,9 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
return 0;
}
+ /* queue should always be active in new TX path */
+ WARN_ON(iwl_mvm_has_new_tx_api(mvm));
+
/* If we are here - TXQ exists and needs to be re-activated */
spin_lock(&mvm->queue_info_lock);
mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
@@ -977,7 +1019,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
txq_id);
}
- if (iwl_mvm_is_dqa_supported(mvm)) {
+ if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) {
/* Keep track of the time of the last frame for this RA/TID */
mvm->queue_info[txq_id].last_frame_time[tid] = jiffies;
@@ -1036,7 +1078,7 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
if (WARN_ON_ONCE(!mvmsta))
return -1;
- if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
+ if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA))
return -1;
memcpy(&info, skb->cb, sizeof(info));
@@ -1245,6 +1287,26 @@ static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm,
}
}
+/**
+ * iwl_mvm_get_scd_ssn - returns the SSN of the SCD
+ * @tx_resp: the Tx response from the fw (agg or non-agg)
+ *
+ * When the fw sends an AMPDU, it fetches the MPDUs one after the other. Since
+ * it can't know in advance how many MPDUs will make it into the current batch,
+ * it writes the agg Tx response while it is still fetching MPDUs, and so it
+ * also can't know in advance what the SSN of the SCD will be at the end of
+ * the batch. The SSN of the SCD is therefore written at the end of the whole
+ * struct, at a variable offset. This function copes with the variable offset
+ * and returns the SSN of the SCD.
+ */
+static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm *mvm,
+ struct iwl_mvm_tx_resp *tx_resp)
+{
+ return le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) +
+ tx_resp->frame_count) & 0xfff;
+}
+
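As the new kernel-doc above explains, the SSN trails a frame_count-sized array of status entries, so its offset varies per response. A toy, self-contained version of that variable-offset read (the real firmware layouts differ):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* toy layout: one count byte, frame_count 4-byte status words, then the SSN */
static uint32_t read_trailing_ssn(const uint8_t *resp)
{
	uint8_t frame_count = resp[0];
	uint32_t ssn;

	/* step over the count byte plus frame_count status words */
	memcpy(&ssn, resp + 1 + 4u * frame_count, sizeof(ssn));
	return ssn & 0xfff;			/* same 12-bit mask as above */
}

int main(void)
{
	uint8_t resp[1 + 4 * 2 + 4] = {2};	/* two frames in this batch */
	uint32_t ssn = 0x1234;

	memcpy(resp + 1 + 4 * 2, &ssn, sizeof(ssn));
	printf("ssn = 0x%x\n", (unsigned)read_trailing_ssn(resp));	/* 0x234 */
	return 0;
}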
static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
{
@@ -1254,8 +1316,10 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
- u32 status = le16_to_cpu(tx_resp->status.status);
- u16 ssn = iwl_mvm_get_scd_ssn(tx_resp);
+ struct agg_tx_status *agg_status =
+ iwl_mvm_get_agg_status(mvm, tx_resp);
+ u32 status = le16_to_cpu(agg_status->status);
+ u16 ssn = iwl_mvm_get_scd_ssn(mvm, tx_resp);
struct iwl_mvm_sta *mvmsta;
struct sk_buff_head skbs;
u8 skb_freed = 0;
@@ -1264,6 +1328,9 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
__skb_queue_head_init(&skbs);
+ if (iwl_mvm_has_new_tx_api(mvm))
+ txq_id = le16_to_cpu(tx_resp->v6.tx_queue);
+
seq_ctl = le16_to_cpu(tx_resp->seq_ctl);
/* we can free until ssn % q.n_bd not inclusive */
@@ -1388,7 +1455,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
if (!IS_ERR(sta)) {
mvmsta = iwl_mvm_sta_from_mac80211(sta);
- if (tid != IWL_TID_NON_QOS) {
+ if (tid != IWL_TID_NON_QOS && tid != IWL_MGMT_TID) {
struct iwl_mvm_tid_data *tid_data =
&mvmsta->tid_data[tid];
bool send_eosp_ndp = false;
@@ -1520,7 +1587,8 @@ static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
{
struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
- struct agg_tx_status *frame_status = &tx_resp->status;
+ struct agg_tx_status *frame_status =
+ iwl_mvm_get_agg_status(mvm, tx_resp);
int i;
for (i = 0; i < tx_resp->frame_count; i++) {
@@ -1722,6 +1790,9 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
ba_info.status.status_driver_data[0] =
(void *)(uintptr_t)ba_res->reduced_txp;
+ if (!le16_to_cpu(ba_res->tfd_cnt))
+ goto out;
+
/*
* TODO:
* When supporting multi TID aggregations - we need to move
@@ -1730,12 +1801,16 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
* This will go together with SN and AddBA offload and cannot
* be handled properly for now.
*/
- WARN_ON(le16_to_cpu(ba_res->tfd_cnt) != 1);
- iwl_mvm_tx_reclaim(mvm, sta_id, ba_res->ra_tid[0].tid,
- (int)ba_res->tfd[0].q_num,
+ WARN_ON(le16_to_cpu(ba_res->ra_tid_cnt) != 1);
+ tid = ba_res->ra_tid[0].tid;
+ if (tid == IWL_MGMT_TID)
+ tid = IWL_MAX_TID_COUNT;
+ iwl_mvm_tx_reclaim(mvm, sta_id, tid,
+ (int)(le16_to_cpu(ba_res->tfd[0].q_num)),
le16_to_cpu(ba_res->tfd[0].tfd_index),
&ba_info, le32_to_cpu(ba_res->tx_rate));
+out:
IWL_DEBUG_TX_REPLY(mvm,
"BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n",
sta_id, le32_to_cpu(ba_res->flags),
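Two behavioral fixes land in the BA-notification hunk above: an early out when the firmware reports zero TFDs (nothing to reclaim, though the debug print after the out label still runs), and a remap of the firmware's management TID to IWL_MAX_TID_COUNT, the slot the driver uses for management frames. A compact sketch of that control flow, with illustrative constants:

#include <stdio.h>

#define MAX_TID_COUNT	8		/* driver-side slot for mgmt frames */
#define MGMT_TID	15		/* firmware-side TID for mgmt frames */

static void handle_ba_notif(unsigned int tfd_cnt, unsigned int tid)
{
	if (!tfd_cnt)
		goto out;		/* nothing to reclaim */

	if (tid == MGMT_TID)
		tid = MAX_TID_COUNT;	/* remap for driver bookkeeping */

	printf("reclaiming frames for tid %u\n", tid);
out:
	printf("BA notification logged\n");	/* runs on both paths */
}

int main(void)
{
	handle_ba_notif(0, 3);		/* early out */
	handle_ba_notif(1, MGMT_TID);	/* remapped to 8 */
	return 0;
}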
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index dedea96a8e0f..8f4f176e204e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright (C) 2015 Intel Deutschland GmbH
+ * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,6 +34,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -591,6 +592,10 @@ int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq)
lockdep_assert_held(&mvm->queue_info_lock);
+ /* This should not be hit with new TX path */
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return -ENOSPC;
+
/* Start by looking for a free queue */
for (i = minq; i <= maxq; i++)
if (mvm->queue_info[i].hw_queue_refcount == 0 &&
@@ -627,6 +632,9 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
};
int ret;
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return -EINVAL;
+
spin_lock_bh(&mvm->queue_info_lock);
if (WARN(mvm->queue_info[queue].hw_queue_refcount == 0,
"Trying to reconfig unallocated queue %d\n", queue)) {
@@ -644,50 +652,94 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
return ret;
}
-void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
- u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
- unsigned int wdg_timeout)
+static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
+ int mac80211_queue, u8 sta_id, u8 tid)
{
bool enable_queue = true;
spin_lock_bh(&mvm->queue_info_lock);
/* Make sure this TID isn't already enabled */
- if (mvm->queue_info[queue].tid_bitmap & BIT(cfg->tid)) {
+ if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
spin_unlock_bh(&mvm->queue_info_lock);
IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
- queue, cfg->tid);
- return;
+ queue, tid);
+ return false;
}
/* Update mappings and refcounts */
if (mvm->queue_info[queue].hw_queue_refcount > 0)
enable_queue = false;
- mvm->queue_info[queue].hw_queue_to_mac80211 |= BIT(mac80211_queue);
+ mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
+
mvm->queue_info[queue].hw_queue_refcount++;
- mvm->queue_info[queue].tid_bitmap |= BIT(cfg->tid);
- mvm->queue_info[queue].ra_sta_id = cfg->sta_id;
+ mvm->queue_info[queue].tid_bitmap |= BIT(tid);
+ mvm->queue_info[queue].ra_sta_id = sta_id;
if (enable_queue) {
- if (cfg->tid != IWL_MAX_TID_COUNT)
+ if (tid != IWL_MAX_TID_COUNT)
mvm->queue_info[queue].mac80211_ac =
- tid_to_mac80211_ac[cfg->tid];
+ tid_to_mac80211_ac[tid];
else
mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;
- mvm->queue_info[queue].txq_tid = cfg->tid;
+ mvm->queue_info[queue].txq_tid = tid;
}
IWL_DEBUG_TX_QUEUES(mvm,
"Enabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",
queue, mvm->queue_info[queue].hw_queue_refcount,
- mvm->queue_info[queue].hw_queue_to_mac80211);
+ mvm->hw_queue_to_mac80211[queue]);
spin_unlock_bh(&mvm->queue_info_lock);
+ return enable_queue;
+}
+
+int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
+ u8 sta_id, u8 tid, unsigned int timeout)
+{
+ struct iwl_tx_queue_cfg_cmd cmd = {
+ .flags = cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
+ .sta_id = sta_id,
+ .tid = tid,
+ };
+ int queue;
+
+ if (cmd.tid == IWL_MAX_TID_COUNT)
+ cmd.tid = IWL_MGMT_TID;
+ queue = iwl_trans_txq_alloc(mvm->trans, (void *)&cmd,
+ SCD_QUEUE_CFG, timeout);
+
+ if (queue < 0) {
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Failed allocating TXQ for sta %d tid %d, ret: %d\n",
+ sta_id, tid, queue);
+ return queue;
+ }
+
+ IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
+ queue, sta_id, tid);
+
+ mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Enabling TXQ #%d (mac80211 map:0x%x)\n",
+ queue, mvm->hw_queue_to_mac80211[queue]);
+
+ return queue;
+}
+
+void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
+ u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
+ unsigned int wdg_timeout)
+{
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return;
+
/* Send the enabling command if we need to */
- if (enable_queue) {
+ if (iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue,
+ cfg->sta_id, cfg->tid)) {
struct iwl_scd_txq_cfg_cmd cmd = {
.scd_queue = queue,
.action = SCD_CFG_ENABLE_QUEUE,
@@ -701,7 +753,8 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL,
wdg_timeout);
- WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd),
+ WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0,
+ sizeof(struct iwl_scd_txq_cfg_cmd),
&cmd),
"Failed to configure queue %d on FIFO %d\n", queue,
cfg->fifo);
@@ -718,6 +771,16 @@ int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
bool remove_mac_queue = true;
int ret;
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ spin_lock_bh(&mvm->queue_info_lock);
+ mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac80211_queue);
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ iwl_trans_txq_free(mvm->trans, queue);
+
+ return 0;
+ }
+
spin_lock_bh(&mvm->queue_info_lock);
if (WARN_ON(mvm->queue_info[queue].hw_queue_refcount == 0)) {
@@ -744,7 +807,7 @@ int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
}
if (remove_mac_queue)
- mvm->queue_info[queue].hw_queue_to_mac80211 &=
+ mvm->hw_queue_to_mac80211[queue] &=
~BIT(mac80211_queue);
mvm->queue_info[queue].hw_queue_refcount--;
@@ -757,7 +820,7 @@ int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
"Disabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",
queue,
mvm->queue_info[queue].hw_queue_refcount,
- mvm->queue_info[queue].hw_queue_to_mac80211);
+ mvm->hw_queue_to_mac80211[queue]);
/* If the queue is still enabled - nothing left to do in this func */
if (cmd.action == SCD_CFG_ENABLE_QUEUE) {
@@ -771,16 +834,16 @@ int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
/* Make sure queue info is correct even though we overwrite it */
WARN(mvm->queue_info[queue].hw_queue_refcount ||
mvm->queue_info[queue].tid_bitmap ||
- mvm->queue_info[queue].hw_queue_to_mac80211,
+ mvm->hw_queue_to_mac80211[queue],
"TXQ #%d info out-of-sync - refcount=%d, mac map=0x%x, tid=0x%x\n",
queue, mvm->queue_info[queue].hw_queue_refcount,
- mvm->queue_info[queue].hw_queue_to_mac80211,
+ mvm->hw_queue_to_mac80211[queue],
mvm->queue_info[queue].tid_bitmap);
/* If we are here - the queue is freed and we can zero out these vals */
mvm->queue_info[queue].hw_queue_refcount = 0;
mvm->queue_info[queue].tid_bitmap = 0;
- mvm->queue_info[queue].hw_queue_to_mac80211 = 0;
+ mvm->hw_queue_to_mac80211[queue] = 0;
/* Regardless if this is a reserved TXQ for a STA - mark it as false */
mvm->queue_info[queue].reserved = false;
@@ -789,11 +852,11 @@ int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
iwl_trans_txq_disable(mvm->trans, queue, false);
ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
- sizeof(cmd), &cmd);
+ sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);
+
if (ret)
IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
queue, ret);
-
return ret;
}
@@ -816,7 +879,7 @@ int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init)
.data = { lq, },
};
- if (WARN_ON(lq->sta_id == IWL_MVM_STATION_COUNT))
+ if (WARN_ON(lq->sta_id == IWL_MVM_INVALID_STA))
return -EINVAL;
return iwl_mvm_send_cmd(mvm, &cmd);
@@ -1006,6 +1069,35 @@ struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm)
return bss_iter_data.vif;
}
+struct iwl_sta_iter_data {
+ bool assoc;
+};
+
+static void iwl_mvm_sta_iface_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_sta_iter_data *data = _data;
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ if (vif->bss_conf.assoc)
+ data->assoc = true;
+}
+
+bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm)
+{
+ struct iwl_sta_iter_data data = {
+ .assoc = false,
+ };
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_sta_iface_iterator,
+ &data);
+ return data.assoc;
+}
+
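iwl_mvm_is_vif_assoc() above follows mac80211's usual iterator-with-closure shape: a small data struct collects the answer while a callback visits each active interface. The same pattern in miniature, with the atomic interface iterator replaced by a plain loop for illustration:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct vif { int is_station; bool assoc; };

/* callback: record whether any station interface is associated */
static void sta_iface_iter(void *data, const struct vif *vif)
{
	bool *any_assoc = data;

	if (vif->is_station && vif->assoc)
		*any_assoc = true;
}

int main(void)
{
	const struct vif vifs[] = { {0, false}, {1, true} };
	bool assoc = false;
	size_t i;

	/* mac80211 would walk the active interfaces atomically here */
	for (i = 0; i < sizeof(vifs) / sizeof(vifs[0]); i++)
		sta_iface_iter(&assoc, &vifs[i]);

	printf("any station associated: %d\n", assoc);
	return 0;
}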
unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
bool tdls, bool cmd_q)
@@ -1088,6 +1180,9 @@ static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
lockdep_assert_held(&mvmsta->lock);
lockdep_assert_held(&mvm->queue_info_lock);
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return;
+
/* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
/* If some TFDs are still queued - don't mark TID as inactive */
@@ -1114,8 +1209,8 @@ static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]];
- mvmsta->tid_data[tid].txq_id = IEEE80211_INVAL_HW_QUEUE;
- mvm->queue_info[queue].hw_queue_to_mac80211 &= ~BIT(mac_queue);
+ mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
+ mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac_queue);
mvm->queue_info[queue].hw_queue_refcount--;
mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
mvmsta->tid_data[tid].is_tid_active = false;
@@ -1135,7 +1230,7 @@ static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
*/
tid_bitmap = mvm->queue_info[queue].tid_bitmap;
for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
- mvm->queue_info[queue].hw_queue_to_mac80211 |=
+ mvm->hw_queue_to_mac80211[queue] |=
BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]);
}
@@ -1154,6 +1249,9 @@ void iwl_mvm_inactivity_check(struct iwl_mvm *mvm)
unsigned long now = jiffies;
int i;
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return;
+
spin_lock_bh(&mvm->queue_info_lock);
for (i = 0; i < IWL_MAX_HW_QUEUES; i++)
if (mvm->queue_info[i].hw_queue_refcount > 0)
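The recurring substitution in utils.c - queue_info[queue].hw_queue_to_mac80211 becoming hw_queue_to_mac80211[queue] - pulls the queue-to-mac80211 bitmap out of the per-queue DQA bookkeeping and into a standalone array, so the mapping stays usable on the new TX path, which bypasses the legacy queue_info accounting entirely (note the early returns added above whenever iwl_mvm_has_new_tx_api() is set). Schematically, with illustrative sizes:

#include <stdio.h>

/* before: the mapping only existed inside legacy per-queue bookkeeping */
struct queue_info_old {
	int hw_queue_refcount;
	unsigned int tid_bitmap;
	unsigned int hw_queue_to_mac80211;
};

/* after: the mapping lives on its own, valid for legacy and new TX paths */
struct mvm_like {
	struct queue_info_old queue_info[32];		/* legacy path only */
	unsigned long hw_queue_to_mac80211[512];	/* both paths */
};

int main(void)
{
	static struct mvm_like mvm;

	mvm.hw_queue_to_mac80211[3] |= 1UL << 5;	/* queue 3 feeds mac80211 q5 */
	printf("map[3] = 0x%lx\n", mvm.hw_queue_to_mac80211[3]);
	return 0;
}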
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
new file mode 100644
index 000000000000..b1f43397bb59
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
@@ -0,0 +1,281 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include "iwl-trans.h"
+#include "iwl-fh.h"
+#include "iwl-context-info.h"
+#include "internal.h"
+#include "iwl-prph.h"
+
+static int iwl_pcie_get_num_sections(const struct fw_img *fw,
+ int start)
+{
+ int i = 0;
+
+ while (start < fw->num_sec &&
+ fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
+ fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
+ start++;
+ i++;
+ }
+
+ return i;
+}
+
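iwl_pcie_get_num_sections() simply counts sections until it hits a separator entry, which is why the callers below index the image with +1 and +2: the counts deliberately exclude the separators between the lmac, umac, and paging groups. A toy walk over that layout, with an illustrative separator value:

#include <stdio.h>

#define SEP 0xFFFFCCCCu		/* illustrative separator marker */

/* [lmac0 lmac1] SEP [umac0 umac1 umac2] SEP [page0] */
static unsigned int secs[] = { 1, 2, SEP, 3, 4, 5, SEP, 6 };

static int num_sections(int start)
{
	int n = 0;

	while (start < (int)(sizeof(secs) / sizeof(secs[0])) &&
	       secs[start] != SEP) {
		start++;
		n++;
	}
	return n;
}

int main(void)
{
	int lmac = num_sections(0);
	int umac = num_sections(lmac + 1);		/* +1 skips a separator */
	int paging = num_sections(lmac + umac + 2);	/* +2 skips both */

	printf("lmac=%d umac=%d paging=%d\n", lmac, umac, paging);	/* 2 3 1 */
	return 0;
}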
+static int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
+ const struct fw_desc *sec,
+ struct iwl_dram_data *dram)
+{
+ dram->block = dma_alloc_coherent(trans->dev, sec->len,
+ &dram->physical,
+ GFP_KERNEL);
+ if (!dram->block)
+ return -ENOMEM;
+
+ dram->size = sec->len;
+ memcpy(dram->block, sec->data, sec->len);
+
+ return 0;
+}
+
+static void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
+ int i;
+
+ if (!dram->fw) {
+ WARN_ON(dram->fw_cnt);
+ return;
+ }
+
+ for (i = 0; i < dram->fw_cnt; i++)
+ dma_free_coherent(trans->dev, dram->fw[i].size,
+ dram->fw[i].block, dram->fw[i].physical);
+
+ kfree(dram->fw);
+ dram->fw_cnt = 0;
+}
+
+void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
+ int i;
+
+ if (!dram->paging) {
+ WARN_ON(dram->paging_cnt);
+ return;
+ }
+
+ /* free paging */
+ for (i = 0; i < dram->paging_cnt; i++)
+ dma_free_coherent(trans->dev, dram->paging[i].size,
+ dram->paging[i].block,
+ dram->paging[i].physical);
+
+ kfree(dram->paging);
+ dram->paging_cnt = 0;
+}
+
+static int iwl_pcie_ctxt_info_init_fw_sec(struct iwl_trans *trans,
+ const struct fw_img *fw,
+ struct iwl_context_info *ctxt_info)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
+ struct iwl_context_info_dram *ctxt_dram = &ctxt_info->dram;
+ int i, ret, lmac_cnt, umac_cnt, paging_cnt;
+
+ lmac_cnt = iwl_pcie_get_num_sections(fw, 0);
+ /* add 1 due to separator */
+ umac_cnt = iwl_pcie_get_num_sections(fw, lmac_cnt + 1);
+ /* add 2 due to separators */
+ paging_cnt = iwl_pcie_get_num_sections(fw, lmac_cnt + umac_cnt + 2);
+
+ dram->fw = kcalloc(umac_cnt + lmac_cnt, sizeof(*dram->fw), GFP_KERNEL);
+ if (!dram->fw)
+ return -ENOMEM;
+ dram->paging = kcalloc(paging_cnt, sizeof(*dram->paging), GFP_KERNEL);
+ if (!dram->paging)
+ return -ENOMEM;
+
+ /* initialize lmac sections */
+ for (i = 0; i < lmac_cnt; i++) {
+ ret = iwl_pcie_ctxt_info_alloc_dma(trans, &fw->sec[i],
+ &dram->fw[dram->fw_cnt]);
+ if (ret)
+ return ret;
+ ctxt_dram->lmac_img[i] =
+ cpu_to_le64(dram->fw[dram->fw_cnt].physical);
+ dram->fw_cnt++;
+ }
+
+ /* initialize umac sections */
+ for (i = 0; i < umac_cnt; i++) {
+ /* access FW with +1 to make up for lmac separator */
+ ret = iwl_pcie_ctxt_info_alloc_dma(trans,
+ &fw->sec[dram->fw_cnt + 1],
+ &dram->fw[dram->fw_cnt]);
+ if (ret)
+ return ret;
+ ctxt_dram->umac_img[i] =
+ cpu_to_le64(dram->fw[dram->fw_cnt].physical);
+ dram->fw_cnt++;
+ }
+
+ /*
+ * Initialize paging.
+ * Paging memory isn't stored in dram->fw the way the umac and lmac
+ * sections are - it is kept separately, because the timing of its
+ * release is different: fw memory can be released on alive, while
+ * paging memory can be freed only when the device goes down.
+ * Given that, the logic here for accessing the fw image is a bit
+ * different - fw_cnt isn't changing, so the loop counter is added to it.
+ */
+ for (i = 0; i < paging_cnt; i++) {
+ /* access FW with +2 to make up for lmac & umac separators */
+ int fw_idx = dram->fw_cnt + i + 2;
+
+ ret = iwl_pcie_ctxt_info_alloc_dma(trans, &fw->sec[fw_idx],
+ &dram->paging[i]);
+ if (ret)
+ return ret;
+
+ ctxt_dram->virtual_img[i] =
+ cpu_to_le64(dram->paging[i].physical);
+ dram->paging_cnt++;
+ }
+
+ return 0;
+}
+
+int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
+ const struct fw_img *fw)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_context_info *ctxt_info;
+ struct iwl_context_info_rbd_cfg *rx_cfg;
+ u32 control_flags = 0;
+ int ret;
+
+ ctxt_info = dma_alloc_coherent(trans->dev, sizeof(*ctxt_info),
+ &trans_pcie->ctxt_info_dma_addr,
+ GFP_KERNEL);
+ if (!ctxt_info)
+ return -ENOMEM;
+
+ ctxt_info->version.version = 0;
+ ctxt_info->version.mac_id =
+ cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV));
+ /* size is in DWs */
+ ctxt_info->version.size = cpu_to_le16(sizeof(*ctxt_info) / 4);
+
+ BUILD_BUG_ON(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) > 0xF);
+ control_flags = IWL_CTXT_INFO_RB_SIZE_4K |
+ IWL_CTXT_INFO_TFD_FORMAT_LONG |
+ RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) <<
+ IWL_CTXT_INFO_RB_CB_SIZE_POS;
+ ctxt_info->control.control_flags = cpu_to_le32(control_flags);
+
+ /* initialize RX default queue */
+ rx_cfg = &ctxt_info->rbd_cfg;
+ rx_cfg->free_rbd_addr = cpu_to_le64(trans_pcie->rxq->bd_dma);
+ rx_cfg->used_rbd_addr = cpu_to_le64(trans_pcie->rxq->used_bd_dma);
+ rx_cfg->status_wr_ptr = cpu_to_le64(trans_pcie->rxq->rb_stts_dma);
+
+ /* initialize TX command queue */
+ ctxt_info->hcmd_cfg.cmd_queue_addr =
+ cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr);
+ ctxt_info->hcmd_cfg.cmd_queue_size =
+ TFD_QUEUE_CB_SIZE(TFD_QUEUE_SIZE_MAX);
+
+ /* allocate ucode sections in dram and set addresses */
+ ret = iwl_pcie_ctxt_info_init_fw_sec(trans, fw, ctxt_info);
+ if (ret) {
+ dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info),
+ ctxt_info, trans_pcie->ctxt_info_dma_addr);
+ return ret;
+ }
+
+ trans_pcie->ctxt_info = ctxt_info;
+
+ iwl_enable_interrupts(trans);
+
+ /* Configure debug, if exists */
+ if (trans->dbg_dest_tlv)
+ iwl_pcie_apply_destination(trans);
+
+ /* kick FW self load */
+ iwl_write64(trans, CSR_CTXT_INFO_BA, trans_pcie->ctxt_info_dma_addr);
+ iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1);
+
+ /* Context info will be released upon alive or failure to get one */
+
+ return 0;
+}
+
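Taken together, iwl_pcie_ctxt_info_init() replaces the older driver-driven, section-by-section firmware load: everything the device needs to boot is staged in coherent DMA memory, the device is told where that memory lives, and it loads itself. Condensed, the flow above is:

/*
 * ctxt_info = dma_alloc_coherent(...);             version, flags, RX/TX rings
 * iwl_pcie_ctxt_info_init_fw_sec(trans, fw, ...);  DMA copies of fw sections
 * iwl_write64(trans, CSR_CTXT_INFO_BA, dma_addr);  point the HW at the context
 * iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1);     kick the FW self load
 */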
+void iwl_pcie_ctxt_info_free(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (!trans_pcie->ctxt_info)
+ return;
+
+ dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info),
+ trans_pcie->ctxt_info,
+ trans_pcie->ctxt_info_dma_addr);
+ trans_pcie->ctxt_info_dma_addr = 0;
+ trans_pcie->ctxt_info = NULL;
+
+ iwl_pcie_ctxt_info_free_fw_img(trans);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index ba8a81cb0e2b..e51760e752d4 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -501,6 +501,10 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x24FD, 0x0930, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0950, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0850, iwl8265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x1014, iwl8265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x3E02, iwl8275_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x3E01, iwl8275_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x1012, iwl8275_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0012, iwl8275_2ac_cfg)},
/* 9000 Series */
@@ -533,7 +537,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg)},
/* a000 Series */
- {IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg_hr_cdb)},
+ {IWL_PCI_DEVICE(0x2722, 0x0A10, iwla000_2ac_cfg_hr)},
#endif /* CONFIG_IWLMVM */
{0}
@@ -667,18 +672,11 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
iwl_trans->cfg = cfg_7265d;
}
- if (iwl_trans->cfg->rf_id) {
- if (cfg == &iwl9460_2ac_cfg &&
- iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_LC) {
- cfg = &iwl9000lc_2ac_cfg;
- iwl_trans->cfg = cfg;
- }
-
- if (cfg == &iwla000_2ac_cfg_hr &&
- iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_JF) {
- cfg = &iwla000_2ac_cfg_jf;
- iwl_trans->cfg = cfg;
- }
+ if (iwl_trans->cfg->rf_id &&
+ (cfg == &iwla000_2ac_cfg_hr || cfg == &iwla000_2ac_cfg_hr_cdb) &&
+ iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_JF) {
+ cfg = &iwla000_2ac_cfg_jf;
+ iwl_trans->cfg = cfg;
}
#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index 10937309641a..fd4faaaa1484 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -2,7 +2,7 @@
*
* Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -205,11 +205,11 @@ struct iwl_cmd_meta {
* into the buffer regardless of whether it should be mapped or not.
* This indicates how big the first TB must be to include the scratch buffer
* and the assigned PN.
- * Since PN location is 16 bytes at offset 24, it's 40 now.
+ * Since the PN is 8 bytes long at offset 12, that makes 12 + 8 = 20 now.
* If we make it bigger, then allocations will be bigger and copies slower, so
* that's probably not useful.
*/
-#define IWL_FIRST_TB_SIZE 40
+#define IWL_FIRST_TB_SIZE 20
#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)
struct iwl_pcie_txq_entry {
@@ -237,11 +237,11 @@ struct iwl_pcie_first_tb_buf {
* @stuck_timer: timer that fires if queue gets stuck
* @trans_pcie: pointer back to transport (for timer)
* @need_update: indicates need to update read/write index
- * @active: stores if queue is active
* @ampdu: true if this queue is an ampdu queue for a specific RA/TID
* @wd_timeout: queue watchdog timeout (jiffies) - per queue
* @frozen: tx stuck queue timer is frozen
* @frozen_expiry_remainder: remember how long until the timer fires
+ * @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
* @write_ptr: 1-st empty entry (index) host_w
* @read_ptr: last used entry (index) host_r
* @dma_addr: physical addr for BD's
@@ -277,11 +277,11 @@ struct iwl_txq {
struct iwl_trans_pcie *trans_pcie;
bool need_update;
bool frozen;
- u8 active;
bool ampdu;
int block;
unsigned long wd_timeout;
struct sk_buff_head overflow_q;
+ struct iwl_dma_ptr bc_tbl;
int write_ptr;
int read_ptr;
@@ -315,11 +315,43 @@ enum iwl_shared_irq_flags {
};
/**
+ * struct iwl_dram_data
+ * @physical: DMA (physical) address of the block/page
+ * @block: pointer to the allocated block/page
+ * @size: size of the block/page
+ */
+struct iwl_dram_data {
+ dma_addr_t physical;
+ void *block;
+ int size;
+};
+
+/**
+ * struct iwl_self_init_dram - dram data used by self init process
+ * @fw: lmac and umac dram data
+ * @fw_cnt: total number of items in array
+ * @paging: paging dram data
+ * @paging_cnt: total number of items in array
+ */
+struct iwl_self_init_dram {
+ struct iwl_dram_data *fw;
+ int fw_cnt;
+ struct iwl_dram_data *paging;
+ int paging_cnt;
+};
+
+/**
* struct iwl_trans_pcie - PCIe transport specific data
* @rxq: all the RX queue data
* @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
* @global_table: table mapping received VID from hw to rxb
* @rba: allocator for RX replenishing
+ * @ctxt_info: context information for FW self init
+ * @ctxt_info_dma_addr: dma addr of context information
+ * @init_dram: DRAM data of firmware image (including paging).
+ * Context information addresses will be taken from here.
+ * This is the driver's local copy for keeping track of size and
+ * count for allocating and freeing the memory.
* @trans: pointer to the generic transport area
* @scd_base_addr: scheduler sram base address in SRAM
* @scd_bc_tbls: pointer to the byte count table of the scheduler
@@ -357,6 +389,9 @@ struct iwl_trans_pcie {
struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE];
struct iwl_rb_allocator rba;
+ struct iwl_context_info *ctxt_info;
+ dma_addr_t ctxt_info_dma_addr;
+ struct iwl_self_init_dram init_dram;
struct iwl_trans *trans;
struct net_device napi_dev;
@@ -378,9 +413,10 @@ struct iwl_trans_pcie {
struct iwl_dma_ptr scd_bc_tbls;
struct iwl_dma_ptr kw;
- struct iwl_txq *txq;
- unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
- unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
+ struct iwl_txq *txq_memory;
+ struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
+ unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
+ unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
/* PCI bus related data */
struct pci_dev *pci_dev;
@@ -454,6 +490,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans);
* RX
******************************************************/
int iwl_pcie_rx_init(struct iwl_trans *trans);
+int iwl_pcie_gen2_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
@@ -474,6 +511,7 @@ void iwl_pcie_disable_ict(struct iwl_trans *trans);
* TX / HCMD
******************************************************/
int iwl_pcie_tx_init(struct iwl_trans *trans);
+int iwl_pcie_gen2_tx_init(struct iwl_trans *trans);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
@@ -484,7 +522,6 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
bool configure_scd);
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
bool shared_mode);
-dma_addr_t iwl_trans_pcie_get_txq_byte_table(struct iwl_trans *trans, int txq);
void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
struct iwl_txq *txq);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
@@ -616,6 +653,12 @@ static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
}
}
+static inline void *iwl_pcie_get_tfd(struct iwl_trans_pcie *trans_pcie,
+ struct iwl_txq *txq, int idx)
+{
+ return txq->tfds + trans_pcie->tfd_size * idx;
+}
+
static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -719,4 +762,41 @@ int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans);
void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable);
+/* common functions that are used by gen2 transport */
+void iwl_pcie_apm_config(struct iwl_trans *trans);
+int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
+void iwl_pcie_synchronize_irqs(struct iwl_trans *trans);
+bool iwl_trans_check_hw_rf_kill(struct iwl_trans *trans);
+void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
+int iwl_queue_space(const struct iwl_txq *q);
+int iwl_pcie_apm_stop_master(struct iwl_trans *trans);
+void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
+int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
+ int slots_num, bool cmd_queue);
+int iwl_pcie_txq_alloc(struct iwl_trans *trans,
+ struct iwl_txq *txq, int slots_num, bool cmd_queue);
+int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
+ struct iwl_dma_ptr *ptr, size_t size);
+void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
+void iwl_pcie_apply_destination(struct iwl_trans *trans);
+
+/* transport gen 2 exported functions */
+int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
+ const struct fw_img *fw, bool run_in_rfkill);
+void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
+int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
+ struct iwl_tx_queue_cfg_cmd *cmd,
+ int cmd_id,
+ unsigned int timeout);
+void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue);
+int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_device_cmd *dev_cmd, int txq_id);
+int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
+ struct iwl_host_cmd *cmd);
+void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans,
+ bool low_power);
+void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power);
+void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id);
+void iwl_pcie_gen2_tx_free(struct iwl_trans *trans);
+void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans);
#endif /* __iwl_trans_int_pcie_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index de94dfdf2ec9..1da2de205cdf 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -2,7 +2,7 @@
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -880,7 +880,7 @@ static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
return 0;
}
-int iwl_pcie_rx_init(struct iwl_trans *trans)
+static int _iwl_pcie_rx_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *def_rxq;
@@ -958,20 +958,40 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq);
+ return 0;
+}
+
+int iwl_pcie_rx_init(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int ret = _iwl_pcie_rx_init(trans);
+
+ if (ret)
+ return ret;
+
if (trans->cfg->mq_rx_supported)
iwl_pcie_rx_mq_hw_init(trans);
else
- iwl_pcie_rx_hw_init(trans, def_rxq);
+ iwl_pcie_rx_hw_init(trans, trans_pcie->rxq);
- iwl_pcie_rxq_restock(trans, def_rxq);
+ iwl_pcie_rxq_restock(trans, trans_pcie->rxq);
- spin_lock(&def_rxq->lock);
- iwl_pcie_rxq_inc_wr_ptr(trans, def_rxq);
- spin_unlock(&def_rxq->lock);
+ spin_lock(&trans_pcie->rxq->lock);
+ iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq);
+ spin_unlock(&trans_pcie->rxq->lock);
return 0;
}
+int iwl_pcie_gen2_rx_init(struct iwl_trans *trans)
+{
+ /*
+ * We don't configure the RFH.
+ * Restock will be done at alive, after the firmware has configured the RFH.
+ */
+ return _iwl_pcie_rx_init(trans);
+}
+
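iwl_pcie_gen2_rx_init() stopping short of RFH programming is deliberate, and it pairs with the ALIVE handling added to both interrupt handlers further down in rx.c: on gen2 devices only the firmware knows how to configure the RFH, so the driver allocates buffers up front but restocks the hardware only once the ALIVE interrupt confirms the RFH is ready. In brief:

/*
 * init (here):  allocate RBs and rings, leave the RFH untouched
 * ALIVE ISR:    firmware has configured the RFH
 *               -> iwl_pcie_rxmq_restock(trans, trans_pcie->rxq)
 */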
void iwl_pcie_rx_free(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1074,7 +1094,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
bool emergency)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
bool page_stolen = false;
int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
u32 offset = 0;
@@ -1127,7 +1147,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
* Ucode should set SEQ_RX_FRAME bit if ucode-originated,
* but apparently a few don't get set; catch them here. */
reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
- if (reclaim) {
+ if (reclaim && !pkt->hdr.group_id) {
int i;
for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
@@ -1393,17 +1413,17 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
return;
}
- iwl_pcie_dump_csr(trans);
- iwl_dump_fh(trans, NULL);
-
local_bh_disable();
/* The STATUS_FW_ERROR bit is set in this function. This must happen
* before we wake up the command caller, to ensure a proper cleanup. */
iwl_trans_fw_error(trans);
local_bh_enable();
- for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
- del_timer(&trans_pcie->txq[i].stuck_timer);
+ for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
+ if (!trans_pcie->txq[i])
+ continue;
+ del_timer(&trans_pcie->txq[i]->stuck_timer);
+ }
clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
wake_up(&trans_pcie->wait_command_queue);
@@ -1597,6 +1617,13 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
if (inta & CSR_INT_BIT_ALIVE) {
IWL_DEBUG_ISR(trans, "Alive interrupt\n");
isr_stats->alive++;
+ if (trans->cfg->gen2) {
+ /*
+ * We can restock, since firmware configured
+ * the RFH
+ */
+ iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
+ }
}
}
@@ -1933,6 +1960,10 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
IWL_DEBUG_ISR(trans, "Alive interrupt\n");
isr_stats->alive++;
+ if (trans->cfg->gen2) {
+ /* We can restock, since firmware configured the RFH */
+ iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
+ }
}
/* uCode wakes up after power-down sleep */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
new file mode 100644
index 000000000000..ac60a282d6de
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -0,0 +1,374 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include "iwl-trans.h"
+#include "iwl-context-info.h"
+#include "internal.h"
+
+/*
+ * Start up NIC's basic functionality after it has been reset
+ * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
+ * NOTE: This does not load uCode nor start the embedded processor
+ */
+static int iwl_pcie_gen2_apm_init(struct iwl_trans *trans)
+{
+ int ret = 0;
+
+ IWL_DEBUG_INFO(trans, "Init card's basic functions\n");
+
+ /*
+ * Use "set_bit" below rather than "write", to preserve any hardware
+ * bits already set by default after reset.
+ */
+
+ /*
+ * Disable L0s without affecting L1;
+ * don't wait for ICH L0s (ICH bug W/A)
+ */
+ iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
+ CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
+
+ /* Set FH wait threshold to maximum (HW error during stress W/A) */
+ iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
+
+ /*
+ * Enable HAP INTA (interrupt from management bus) to
+ * wake device's PCI Express link L1a -> L0s
+ */
+ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
+
+ iwl_pcie_apm_config(trans);
+
+ /*
+ * Set "initialization complete" bit to move adapter from
+ * D0U* --> D0A* (powered-up active) state.
+ */
+ iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+ /*
+ * Wait for clock stabilization; once stabilized, access to
+ * device-internal resources is supported, e.g. iwl_write_prph()
+ * and accesses to uCode SRAM.
+ */
+ ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
+ if (ret < 0) {
+ IWL_DEBUG_INFO(trans, "Failed to init the card\n");
+ return ret;
+ }
+
+ set_bit(STATUS_DEVICE_ENABLED, &trans->status);
+
+ return 0;
+}
+
+static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
+{
+ IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");
+
+ if (op_mode_leave) {
+ if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
+ iwl_pcie_gen2_apm_init(trans);
+
+ /* inform ME that we are leaving */
+ iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+ CSR_RESET_LINK_PWR_MGMT_DISABLED);
+ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_PREPARE |
+ CSR_HW_IF_CONFIG_REG_ENABLE_PME);
+ mdelay(1);
+ iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+ CSR_RESET_LINK_PWR_MGMT_DISABLED);
+ mdelay(5);
+ }
+
+ clear_bit(STATUS_DEVICE_ENABLED, &trans->status);
+
+ /* Stop device's DMA activity */
+ iwl_pcie_apm_stop_master(trans);
+
+ /* Reset the entire device */
+ iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+ usleep_range(1000, 2000);
+
+ /*
+ * Clear "initialization complete" bit to move adapter from
+ * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
+ */
+ iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+}
+
+void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ bool hw_rfkill, was_hw_rfkill;
+
+ lockdep_assert_held(&trans_pcie->mutex);
+
+ if (trans_pcie->is_down)
+ return;
+
+ trans_pcie->is_down = true;
+
+ was_hw_rfkill = iwl_is_rfkill_set(trans);
+
+ /* tell the device to stop sending interrupts */
+ iwl_disable_interrupts(trans);
+
+ /* device going down, Stop using ICT table */
+ iwl_pcie_disable_ict(trans);
+
+ /*
+ * If a HW restart happens during firmware loading,
+ * then the firmware loading might call this function
+ * and later it might be called again due to the
+ * restart. So don't process again if the device is
+ * already dead.
+ */
+ if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
+ IWL_DEBUG_INFO(trans,
+ "DEVICE_ENABLED bit was set and is now cleared\n");
+ iwl_pcie_gen2_tx_stop(trans);
+ iwl_pcie_rx_stop(trans);
+ }
+
+ iwl_pcie_ctxt_info_free_paging(trans);
+ iwl_pcie_ctxt_info_free(trans);
+
+ /* Make sure (redundant) we've released our request to stay awake */
+ iwl_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+
+ /* Stop the device, and put it in low power state */
+ iwl_pcie_gen2_apm_stop(trans, false);
+
+ /* stop and reset the on-board processor */
+ iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+ usleep_range(1000, 2000);
+
+ /*
+ * Upon stop, the IVAR table gets erased, so msi-x won't
+ * work. This causes a bug in RF-KILL flows, since the interrupt
+ * that enables radio won't fire on the correct irq, and the
+ * driver won't be able to handle the interrupt.
+ * Configure the IVAR table again after reset.
+ */
+ iwl_pcie_conf_msix_hw(trans_pcie);
+
+ /*
+ * Upon stop, the APM issues an interrupt if HW RF kill is set.
+ * This is a bug in certain versions of the hardware.
+ * Certain devices also keep sending the HW RF kill interrupt all
+ * the time, unless it is ACKed, even when the interrupt should be
+ * masked. Re-ACK all the interrupts here.
+ */
+ iwl_disable_interrupts(trans);
+
+ /* clear all status bits */
+ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+ clear_bit(STATUS_INT_ENABLED, &trans->status);
+ clear_bit(STATUS_TPOWER_PMI, &trans->status);
+ clear_bit(STATUS_RFKILL, &trans->status);
+
+ /*
+ * Even if we stop the HW, we still want the RF kill
+ * interrupt
+ */
+ iwl_enable_rfkill_int(trans);
+
+ /*
+ * Check again since the RF kill state may have changed while
+ * all the interrupts were disabled, in this case we couldn't
+ * receive the RF kill interrupt and update the state in the
+ * op_mode.
+ * Don't call the op_mode if the rfkill state hasn't changed.
+ * This allows the op_mode to call stop_device from the rfkill
+ * notification without endless recursion. Under very rare
+ * circumstances, we might have a small recursion if the rfkill
+ * state changed exactly now while we were called from stop_device.
+ * This is very unlikely but can happen and is supported.
+ */
+ hw_rfkill = iwl_is_rfkill_set(trans);
+ if (hw_rfkill)
+ set_bit(STATUS_RFKILL, &trans->status);
+ else
+ clear_bit(STATUS_RFKILL, &trans->status);
+ if (hw_rfkill != was_hw_rfkill)
+ iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+
+ /* re-take ownership to prevent other users from stealing the device */
+ iwl_pcie_prepare_card_hw(trans);
+}
+
+void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ mutex_lock(&trans_pcie->mutex);
+ _iwl_trans_pcie_gen2_stop_device(trans, low_power);
+ mutex_unlock(&trans_pcie->mutex);
+}
+
+static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ /* TODO: most of the logic can be removed in A0 - but not in Z0 */
+ spin_lock(&trans_pcie->irq_lock);
+ iwl_pcie_gen2_apm_init(trans);
+ spin_unlock(&trans_pcie->irq_lock);
+
+ iwl_op_mode_nic_config(trans->op_mode);
+
+ /* Allocate the RX queue, or reset if it is already allocated */
+ if (iwl_pcie_gen2_rx_init(trans))
+ return -ENOMEM;
+
+ /* Allocate or reset and init all Tx and Command queues */
+ if (iwl_pcie_gen2_tx_init(trans))
+ return -ENOMEM;
+
+ /* enable shadow regs in HW */
+ iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
+ IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
+
+ return 0;
+}
+
+void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ iwl_pcie_reset_ict(trans);
+
+ /* make sure all queues are not stopped/used */
+ memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
+ memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+
+ /* now that we got the ALIVE notification we can free the fw image
+ * and the context info. Paging memory cannot be freed, since the FW
+ * will still use it.
+ */
+ iwl_pcie_ctxt_info_free(trans);
+}
+
+int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
+ const struct fw_img *fw, bool run_in_rfkill)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ bool hw_rfkill;
+ int ret;
+
+ /* This may fail if AMT took ownership of the device */
+ if (iwl_pcie_prepare_card_hw(trans)) {
+ IWL_WARN(trans, "Exit HW not ready\n");
+ return -EIO;
+ }
+
+ iwl_enable_rfkill_int(trans);
+
+ iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
+
+ /*
+ * We enabled the RF-Kill interrupt and the handler may very
+ * well be running. Disable the interrupts to make sure no other
+ * interrupt can be fired.
+ */
+ iwl_disable_interrupts(trans);
+
+ /* Make sure it finished running */
+ iwl_pcie_synchronize_irqs(trans);
+
+ mutex_lock(&trans_pcie->mutex);
+
+ /* If platform's RF_KILL switch is NOT set to KILL */
+ hw_rfkill = iwl_trans_check_hw_rf_kill(trans);
+ if (hw_rfkill && !run_in_rfkill) {
+ ret = -ERFKILL;
+ goto out;
+ }
+
+ /* Someone called stop_device, don't try to start_fw */
+ if (trans_pcie->is_down) {
+ IWL_WARN(trans,
+ "Can't start_fw since the HW hasn't been started\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ /* make sure rfkill handshake bits are cleared */
+ iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+ iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
+ CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+ /* clear (again), then enable host interrupts */
+ iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
+
+ ret = iwl_pcie_gen2_nic_init(trans);
+ if (ret) {
+ IWL_ERR(trans, "Unable to init nic\n");
+ goto out;
+ }
+
+ ret = iwl_pcie_ctxt_info_init(trans, fw);
+ if (ret)
+ goto out;
+
+ /* re-check RF-Kill state since we may have missed the interrupt */
+ hw_rfkill = iwl_trans_check_hw_rf_kill(trans);
+ if (hw_rfkill && !run_in_rfkill)
+ ret = -ERFKILL;
+
+out:
+ mutex_unlock(&trans_pcie->mutex);
+ return ret;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 7f05fc56587a..70acf850a9f1 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -7,7 +7,7 @@
*
* Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
*
* Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -201,7 +201,7 @@ static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT 0x041
-static void iwl_pcie_apm_config(struct iwl_trans *trans)
+void iwl_pcie_apm_config(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u16 lctl;
@@ -448,7 +448,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
}
-static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
+int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
{
int ret = 0;
@@ -567,7 +567,7 @@ static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
}
/* Note: returns standard 0/-ERROR code */
-static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
+int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
{
int ret;
int t = 0;
@@ -636,29 +636,6 @@ static void iwl_pcie_load_firmware_chunk_fh(struct iwl_trans *trans,
FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
}
-static void iwl_pcie_load_firmware_chunk_tfh(struct iwl_trans *trans,
- u32 dst_addr, dma_addr_t phy_addr,
- u32 byte_cnt)
-{
- /* Stop DMA channel */
- iwl_write32(trans, TFH_SRV_DMA_CHNL0_CTRL, 0);
-
- /* Configure SRAM address */
- iwl_write32(trans, TFH_SRV_DMA_CHNL0_SRAM_ADDR,
- dst_addr);
-
- /* Configure DRAM address - 64 bit */
- iwl_write64(trans, TFH_SRV_DMA_CHNL0_DRAM_ADDR, phy_addr);
-
- /* Configure byte count to transfer */
- iwl_write32(trans, TFH_SRV_DMA_CHNL0_BC, byte_cnt);
-
- /* Enable the DRAM2SRAM to start */
- iwl_write32(trans, TFH_SRV_DMA_CHNL0_CTRL, TFH_SRV_DMA_SNOOP |
- TFH_SRV_DMA_TO_DRIVER |
- TFH_SRV_DMA_START);
-}
-
static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans,
u32 dst_addr, dma_addr_t phy_addr,
u32 byte_cnt)
@@ -672,12 +649,8 @@ static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans,
if (!iwl_trans_grab_nic_access(trans, &flags))
return -EIO;
- if (trans->cfg->use_tfh)
- iwl_pcie_load_firmware_chunk_tfh(trans, dst_addr, phy_addr,
- byte_cnt);
- else
- iwl_pcie_load_firmware_chunk_fh(trans, dst_addr, phy_addr,
- byte_cnt);
+ iwl_pcie_load_firmware_chunk_fh(trans, dst_addr, phy_addr,
+ byte_cnt);
iwl_trans_release_nic_access(trans, &flags);
ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
@@ -747,47 +720,6 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
return ret;
}
-/*
- * Driver Takes the ownership on secure machine before FW load
- * and prevent race with the BT load.
- * W/A for ROM bug. (should be remove in the next Si step)
- */
-static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
-{
- u32 val, loop = 1000;
-
- /*
- * Check the RSA semaphore is accessible.
- * If the HW isn't locked and the rsa semaphore isn't accessible,
- * we are in trouble.
- */
- val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
- if (val & (BIT(1) | BIT(17))) {
- IWL_DEBUG_INFO(trans,
- "can't access the RSA semaphore it is write protected\n");
- return 0;
- }
-
- /* take ownership on the AUX IF */
- iwl_write_prph(trans, WFPM_CTRL_REG, WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK);
- iwl_write_prph(trans, AUX_MISC_MASTER1_EN, AUX_MISC_MASTER1_EN_SBE_MSK);
-
- do {
- iwl_write_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS, 0x1);
- val = iwl_read_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS);
- if (val == 0x1) {
- iwl_write_prph(trans, RSA_ENABLE, 0);
- return 0;
- }
-
- udelay(10);
- loop--;
- } while (loop > 0);
-
- IWL_ERR(trans, "Failed to take ownership on secure machine\n");
- return -EIO;
-}
-
static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
const struct fw_img *image,
int cpu,
@@ -828,15 +760,10 @@ static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
return ret;
/* Notify ucode of loaded section number and status */
- if (trans->cfg->use_tfh) {
- val = iwl_read_prph(trans, UREG_UCODE_LOAD_STATUS);
- val = val | (sec_num << shift_param);
- iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS, val);
- } else {
- val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
- val = val | (sec_num << shift_param);
- iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);
- }
+ val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
+ val = val | (sec_num << shift_param);
+ iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);
+
sec_num = (sec_num << 1) | 0x1;
}
@@ -904,7 +831,7 @@ static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
return 0;
}
-static void iwl_pcie_apply_destination(struct iwl_trans *trans)
+void iwl_pcie_apply_destination(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
const struct iwl_fw_dbg_dest_tlv *dest = trans->dbg_dest_tlv;
@@ -1042,10 +969,15 @@ static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
if (trans->dbg_dest_tlv)
iwl_pcie_apply_destination(trans);
- /* TODO: remove in the next Si step */
- ret = iwl_pcie_rsa_race_bug_wa(trans);
- if (ret)
- return ret;
+ IWL_DEBUG_POWER(trans, "Original WFPM value = 0x%08X\n",
+ iwl_read_prph(trans, WFPM_GP2));
+
+ /*
+ * Set a default value. On resume, reading the values that were
+ * zeroed can provide debug data on the resume flow.
+ * This is for debugging only and has no functional impact.
+ */
+ iwl_write_prph(trans, WFPM_GP2, 0x01010101);
/* configure the ucode to be ready to get the secured image */
/* release CPU reset */
@@ -1062,7 +994,7 @@ static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
&first_ucode_section);
}
-static bool iwl_trans_check_hw_rf_kill(struct iwl_trans *trans)
+bool iwl_trans_check_hw_rf_kill(struct iwl_trans *trans)
{
bool hw_rfkill = iwl_is_rfkill_set(trans);
@@ -1147,7 +1079,7 @@ static void iwl_pcie_map_rx_causes(struct iwl_trans *trans)
iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val);
}
-static void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie)
+void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie)
{
struct iwl_trans *trans = trans_pcie->trans;
@@ -1299,7 +1231,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
iwl_pcie_prepare_card_hw(trans);
}
-static void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
+void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1423,8 +1355,12 @@ void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
lockdep_assert_held(&trans_pcie->mutex);
- if (iwl_op_mode_hw_rf_kill(trans->op_mode, state))
- _iwl_trans_pcie_stop_device(trans, true);
+ if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) {
+ if (trans->cfg->gen2)
+ _iwl_trans_pcie_gen2_stop_device(trans, true);
+ else
+ _iwl_trans_pcie_stop_device(trans, true);
+ }
}
static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
@@ -1527,6 +1463,9 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
}
}
+ IWL_DEBUG_POWER(trans, "WFPM value upon resume = 0x%08X\n",
+ iwl_read_prph(trans, WFPM_GP2));
+
val = iwl_read32(trans, CSR_RESET);
if (val & CSR_RESET_REG_FLAG_NEVO_RESET)
*status = IWL_D3_STATUS_RESET;
@@ -1828,7 +1767,10 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
iwl_pcie_synchronize_irqs(trans);
- iwl_pcie_tx_free(trans);
+ if (trans->cfg->gen2)
+ iwl_pcie_gen2_tx_free(trans);
+ else
+ iwl_pcie_tx_free(trans);
iwl_pcie_rx_free(trans);
if (trans_pcie->msix_enabled) {
@@ -1998,7 +1940,7 @@ static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans,
int queue;
for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
- struct iwl_txq *txq = &trans_pcie->txq[queue];
+ struct iwl_txq *txq = trans_pcie->txq[queue];
unsigned long now;
spin_lock_bh(&txq->lock);
@@ -2050,7 +1992,7 @@ static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
int i;
for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
- struct iwl_txq *txq = &trans_pcie->txq[i];
+ struct iwl_txq *txq = trans_pcie->txq[i];
if (i == trans_pcie->cmd_queue)
continue;
@@ -2075,48 +2017,32 @@ static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- u32 scd_sram_addr;
- u8 buf[16];
- int cnt;
+ u32 txq_id = txq->id;
+ u32 status;
+ bool active;
+ u8 fifo;
- IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
- txq->read_ptr, txq->write_ptr);
-
- if (trans->cfg->use_tfh)
+ if (trans->cfg->use_tfh) {
+ IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
+ txq->read_ptr, txq->write_ptr);
/* TODO: access new SCD registers and dump them */
return;
+ }
- scd_sram_addr = trans_pcie->scd_base_addr +
- SCD_TX_STTS_QUEUE_OFFSET(txq->id);
- iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
-
- iwl_print_hex_error(trans, buf, sizeof(buf));
-
- for (cnt = 0; cnt < FH_TCSR_CHNL_NUM; cnt++)
- IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", cnt,
- iwl_read_direct32(trans, FH_TX_TRB_REG(cnt)));
-
- for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
- u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(cnt));
- u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
- bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
- u32 tbl_dw =
- iwl_trans_read_mem32(trans, trans_pcie->scd_base_addr +
- SCD_TRANS_TBL_OFFSET_QUEUE(cnt));
-
- if (cnt & 0x1)
- tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
- else
- tbl_dw = tbl_dw & 0x0000FFFF;
+ status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id));
+ fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
+ active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
- IWL_ERR(trans,
- "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
- cnt, active ? "" : "in", fifo, tbl_dw,
- iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt)) &
- (TFD_QUEUE_SIZE_MAX - 1),
- iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));
- }
+ IWL_ERR(trans,
+ "Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n",
+ txq_id, active ? "" : "in", fifo,
+ jiffies_to_msecs(txq->wd_timeout),
+ txq->read_ptr, txq->write_ptr,
+ iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
+ (TFD_QUEUE_SIZE_MAX - 1),
+ iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
+ (TFD_QUEUE_SIZE_MAX - 1),
+ iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
}
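
As context for the decode above, a minimal userspace sketch of pulling the FIFO number and the active flag out of an SCD queue status word; the bit positions here are assumptions standing in for SCD_QUEUE_STTS_REG_POS_TXF and SCD_QUEUE_STTS_REG_POS_ACTIVE, and the status value is made up:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define POS_TXF		0	/* assumed SCD_QUEUE_STTS_REG_POS_TXF */
#define POS_ACTIVE	3	/* assumed SCD_QUEUE_STTS_REG_POS_ACTIVE */

int main(void)
{
	uint32_t status = 0x0000000b;	/* made-up status register value */
	uint8_t fifo = (status >> POS_TXF) & 0x7;
	bool active = status & (1u << POS_ACTIVE);

	printf("fifo=%u active=%d\n", (unsigned)fifo, active);
	return 0;
}
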
static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
@@ -2139,7 +2065,7 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
continue;
IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt);
- txq = &trans_pcie->txq[cnt];
+ txq = trans_pcie->txq[cnt];
wr_ptr = ACCESS_ONCE(txq->write_ptr);
while (txq->read_ptr != ACCESS_ONCE(txq->write_ptr) &&
@@ -2330,7 +2256,7 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
bufsz = sizeof(char) * 75 * trans->cfg->base_params->num_of_queues;
- if (!trans_pcie->txq)
+ if (!trans_pcie->txq_memory)
return -EAGAIN;
buf = kzalloc(bufsz, GFP_KERNEL);
@@ -2338,7 +2264,7 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
return -ENOMEM;
for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
- txq = &trans_pcie->txq[cnt];
+ txq = trans_pcie->txq[cnt];
pos += scnprintf(buf + pos, bufsz - pos,
"hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d frozen=%d%s\n",
cnt, txq->read_ptr, txq->write_ptr,
@@ -2755,7 +2681,7 @@ static struct iwl_trans_dump_data
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_fw_error_dump_data *data;
- struct iwl_txq *cmdq = &trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_txq *cmdq = trans_pcie->txq[trans_pcie->cmd_queue];
struct iwl_fw_error_dump_txcmd *txcmd;
struct iwl_trans_dump_data *dump_data;
u32 len, num_rbs;
@@ -2890,21 +2816,43 @@ static void iwl_trans_pcie_resume(struct iwl_trans *trans)
}
#endif /* CONFIG_PM_SLEEP */
+#define IWL_TRANS_COMMON_OPS \
+ .op_mode_leave = iwl_trans_pcie_op_mode_leave, \
+ .write8 = iwl_trans_pcie_write8, \
+ .write32 = iwl_trans_pcie_write32, \
+ .read32 = iwl_trans_pcie_read32, \
+ .read_prph = iwl_trans_pcie_read_prph, \
+ .write_prph = iwl_trans_pcie_write_prph, \
+ .read_mem = iwl_trans_pcie_read_mem, \
+ .write_mem = iwl_trans_pcie_write_mem, \
+ .configure = iwl_trans_pcie_configure, \
+ .set_pmi = iwl_trans_pcie_set_pmi, \
+ .grab_nic_access = iwl_trans_pcie_grab_nic_access, \
+ .release_nic_access = iwl_trans_pcie_release_nic_access, \
+ .set_bits_mask = iwl_trans_pcie_set_bits_mask, \
+ .ref = iwl_trans_pcie_ref, \
+ .unref = iwl_trans_pcie_unref, \
+ .dump_data = iwl_trans_pcie_dump_data, \
+ .wait_tx_queues_empty = iwl_trans_pcie_wait_txq_empty, \
+ .d3_suspend = iwl_trans_pcie_d3_suspend, \
+ .d3_resume = iwl_trans_pcie_d3_resume
+
+#ifdef CONFIG_PM_SLEEP
+#define IWL_TRANS_PM_OPS \
+ .suspend = iwl_trans_pcie_suspend, \
+ .resume = iwl_trans_pcie_resume,
+#else
+#define IWL_TRANS_PM_OPS
+#endif /* CONFIG_PM_SLEEP */
+
static const struct iwl_trans_ops trans_ops_pcie = {
+ IWL_TRANS_COMMON_OPS,
+ IWL_TRANS_PM_OPS
.start_hw = iwl_trans_pcie_start_hw,
- .op_mode_leave = iwl_trans_pcie_op_mode_leave,
.fw_alive = iwl_trans_pcie_fw_alive,
.start_fw = iwl_trans_pcie_start_fw,
.stop_device = iwl_trans_pcie_stop_device,
- .d3_suspend = iwl_trans_pcie_d3_suspend,
- .d3_resume = iwl_trans_pcie_d3_resume,
-
-#ifdef CONFIG_PM_SLEEP
- .suspend = iwl_trans_pcie_suspend,
- .resume = iwl_trans_pcie_resume,
-#endif /* CONFIG_PM_SLEEP */
-
.send_cmd = iwl_trans_pcie_send_hcmd,
.tx = iwl_trans_pcie_tx,
@@ -2913,31 +2861,27 @@ static const struct iwl_trans_ops trans_ops_pcie = {
.txq_disable = iwl_trans_pcie_txq_disable,
.txq_enable = iwl_trans_pcie_txq_enable,
- .get_txq_byte_table = iwl_trans_pcie_get_txq_byte_table,
-
.txq_set_shared_mode = iwl_trans_pcie_txq_set_shared_mode,
- .wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,
.freeze_txq_timer = iwl_trans_pcie_freeze_txq_timer,
.block_txq_ptrs = iwl_trans_pcie_block_txq_ptrs,
+};
+
+static const struct iwl_trans_ops trans_ops_pcie_gen2 = {
+ IWL_TRANS_COMMON_OPS,
+ IWL_TRANS_PM_OPS
+ .start_hw = iwl_trans_pcie_start_hw,
+ .fw_alive = iwl_trans_pcie_gen2_fw_alive,
+ .start_fw = iwl_trans_pcie_gen2_start_fw,
+ .stop_device = iwl_trans_pcie_gen2_stop_device,
- .write8 = iwl_trans_pcie_write8,
- .write32 = iwl_trans_pcie_write32,
- .read32 = iwl_trans_pcie_read32,
- .read_prph = iwl_trans_pcie_read_prph,
- .write_prph = iwl_trans_pcie_write_prph,
- .read_mem = iwl_trans_pcie_read_mem,
- .write_mem = iwl_trans_pcie_write_mem,
- .configure = iwl_trans_pcie_configure,
- .set_pmi = iwl_trans_pcie_set_pmi,
- .grab_nic_access = iwl_trans_pcie_grab_nic_access,
- .release_nic_access = iwl_trans_pcie_release_nic_access,
- .set_bits_mask = iwl_trans_pcie_set_bits_mask,
-
- .ref = iwl_trans_pcie_ref,
- .unref = iwl_trans_pcie_unref,
-
- .dump_data = iwl_trans_pcie_dump_data,
+ .send_cmd = iwl_trans_pcie_gen2_send_hcmd,
+
+ .tx = iwl_trans_pcie_gen2_tx,
+ .reclaim = iwl_trans_pcie_reclaim,
+
+ .txq_alloc = iwl_trans_pcie_dyn_txq_alloc,
+ .txq_free = iwl_trans_pcie_dyn_txq_free,
};
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
@@ -2952,8 +2896,12 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
if (ret)
return ERR_PTR(ret);
- trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
- &pdev->dev, cfg, &trans_ops_pcie, 0);
+ if (cfg->gen2)
+ trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
+ &pdev->dev, cfg, &trans_ops_pcie_gen2);
+ else
+ trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
+ &pdev->dev, cfg, &trans_ops_pcie);
if (!trans)
return ERR_PTR(-ENOMEM);
@@ -3028,7 +2976,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
* PCI Tx retries from interfering with C3 CPU state */
pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
- trans->dev = &pdev->dev;
trans_pcie->pci_dev = pdev;
iwl_disable_interrupts(trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
new file mode 100644
index 000000000000..9fb46a6f47cf
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -0,0 +1,1018 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <linux/pm_runtime.h>
+
+#include "iwl-debug.h"
+#include "iwl-csr.h"
+#include "iwl-io.h"
+#include "internal.h"
+#include "mvm/fw-api.h"
+
+ /*
+ * iwl_pcie_gen2_tx_stop - Stop all Tx DMA channels
+ */
+void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int txq_id;
+
+ /*
+ * This function can be called before the op_mode disabled the
+ * queues. This happens when we have an rfkill interrupt.
+ * Since we stop Tx altogether - mark the queues as stopped.
+ */
+ memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
+ memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+
+ /* Unmap DMA from host system and free skb's */
+ for (txq_id = 0; txq_id < ARRAY_SIZE(trans_pcie->txq); txq_id++) {
+ if (!trans_pcie->txq[txq_id])
+ continue;
+ iwl_pcie_gen2_txq_unmap(trans, txq_id);
+ }
+}
+
+/*
+ * iwl_pcie_txq_update_byte_tbl - Set up entry in Tx byte-count array
+ */
+static void iwl_pcie_gen2_update_byte_tbl(struct iwl_txq *txq, u16 byte_cnt,
+ int num_tbs)
+{
+ struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
+ int write_ptr = txq->write_ptr;
+ u8 filled_tfd_size, num_fetch_chunks;
+ u16 len = byte_cnt;
+ __le16 bc_ent;
+
+ len = DIV_ROUND_UP(len, 4);
+
+ if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
+ return;
+
+ filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
+ num_tbs * sizeof(struct iwl_tfh_tb);
+ /*
+ * filled_tfd_size contains the number of filled bytes in the TFD.
+ * Dividing it by 64 will give the number of chunks to fetch
+ * to SRAM - 0 for one chunk, 1 for 2, and so on.
+ * If, for example, the TFD contains only 3 TBs then 32 bytes
+ * of the TFD are used, and only one chunk of 64 bytes should
+ * be fetched.
+ */
+ num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
+
+ bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
+ scd_bc_tbl->tfd_offset[write_ptr] = bc_ent;
+}
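
For reference, a self-contained sketch of the byte-count entry math in iwl_pcie_gen2_update_byte_tbl(): the length is rounded up to dword units, the filled TFD size is divided into 64-byte fetch chunks, and both are packed into one 16-bit entry. The header and per-TB sizes below are illustrative stand-ins, not the real iwl_tfh_tfd layout:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* illustrative stand-ins, not the real iwl_tfh_tfd layout */
#define TB_OFFSET	8	/* bytes before the TB array in the TFD */
#define TB_SIZE		8	/* bytes per TB entry */

static unsigned short bc_entry(unsigned int byte_cnt, unsigned int num_tbs)
{
	unsigned int len = DIV_ROUND_UP(byte_cnt, 4);	/* dword units */
	unsigned int filled = TB_OFFSET + num_tbs * TB_SIZE;
	unsigned int chunks = DIV_ROUND_UP(filled, 64) - 1;

	return (unsigned short)(len | (chunks << 12));
}

int main(void)
{
	/* 1500 bytes over 3 TBs -> 32 filled bytes -> one 64-byte chunk */
	printf("bc_ent=0x%04x\n", bc_entry(1500, 3));
	return 0;
}
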
+
+/*
+ * iwl_pcie_gen2_txq_inc_wr_ptr - Send new write index to hardware
+ */
+static void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans,
+ struct iwl_txq *txq)
+{
+ lockdep_assert_held(&txq->lock);
+
+ IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);
+
+ /*
+ * if not in power-save mode, uCode will never sleep when we're
+ * trying to tx (during RFKILL, we're not trying to tx).
+ */
+ iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
+}
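
A small sketch of the doorbell encoding used by iwl_pcie_gen2_txq_inc_wr_ptr(), assuming only the layout visible in the code (queue index in the low 16 bits, queue id shifted into the high 16); the values are examples:

#include <stdio.h>

int main(void)
{
	unsigned int write_ptr = 42, txq_id = 5;
	unsigned int doorbell = write_ptr | (txq_id << 16);

	printf("doorbell=0x%08x -> ptr=%u qid=%u\n", doorbell,
	       doorbell & 0xFFFF, doorbell >> 16);
	return 0;
}
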
+
+static u8 iwl_pcie_gen2_get_num_tbs(struct iwl_trans *trans,
+ struct iwl_tfh_tfd *tfd)
+{
+ return le16_to_cpu(tfd->num_tbs) & 0x1f;
+}
+
+static void iwl_pcie_gen2_tfd_unmap(struct iwl_trans *trans,
+ struct iwl_cmd_meta *meta,
+ struct iwl_tfh_tfd *tfd)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int i, num_tbs;
+
+ /* Sanity check on number of chunks */
+ num_tbs = iwl_pcie_gen2_get_num_tbs(trans, tfd);
+
+ if (num_tbs >= trans_pcie->max_tbs) {
+ IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
+ return;
+ }
+
+ /* first TB is never freed - it's the bidirectional DMA data */
+ for (i = 1; i < num_tbs; i++) {
+ if (meta->tbs & BIT(i))
+ dma_unmap_page(trans->dev,
+ le64_to_cpu(tfd->tbs[i].addr),
+ le16_to_cpu(tfd->tbs[i].tb_len),
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(trans->dev,
+ le64_to_cpu(tfd->tbs[i].addr),
+ le16_to_cpu(tfd->tbs[i].tb_len),
+ DMA_TO_DEVICE);
+ }
+
+ tfd->num_tbs = 0;
+}
+
+static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
+ * idx is bounded by n_window
+ */
+ int rd_ptr = txq->read_ptr;
+ int idx = get_cmd_index(txq, rd_ptr);
+
+ lockdep_assert_held(&txq->lock);
+
+ /* We have only q->n_window txq->entries, but we use
+ * TFD_QUEUE_SIZE_MAX tfds
+ */
+ iwl_pcie_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
+ iwl_pcie_get_tfd(trans_pcie, txq, rd_ptr));
+
+ /* free SKB */
+ if (txq->entries) {
+ struct sk_buff *skb;
+
+ skb = txq->entries[idx].skb;
+
+ /* Can be called from irqs-disabled context
+ * If skb is not NULL, it means that the whole queue is being
+ * freed and that the queue is not empty - free the skb
+ */
+ if (skb) {
+ iwl_op_mode_free_skb(trans->op_mode, skb);
+ txq->entries[idx].skb = NULL;
+ }
+ }
+}
+
+static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans,
+ struct iwl_tfh_tfd *tfd, dma_addr_t addr,
+ u16 len)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int idx = iwl_pcie_gen2_get_num_tbs(trans, tfd);
+ struct iwl_tfh_tb *tb = &tfd->tbs[idx];
+
+ /* Each TFD can point to a maximum of max_tbs Tx buffers */
+ if (le16_to_cpu(tfd->num_tbs) >= trans_pcie->max_tbs) {
+ IWL_ERR(trans, "Error can not send more than %d chunks\n",
+ trans_pcie->max_tbs);
+ return -EINVAL;
+ }
+
+ put_unaligned_le64(addr, &tb->addr);
+ tb->tb_len = cpu_to_le16(len);
+
+ tfd->num_tbs = cpu_to_le16(idx + 1);
+
+ return idx;
+}
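
The TB append pattern above (the current count doubles as the insertion index, gets bounds-checked, then incremented) in a standalone sketch; the 25-entry limit and the entry layout are assumptions, not the real TFH descriptor:

#include <stdint.h>
#include <stdio.h>

#define MAX_TBS 25	/* assumed stand-in for trans_pcie->max_tbs */

struct tb { uint64_t addr; uint16_t len; };
struct tfd { uint16_t num_tbs; struct tb tbs[MAX_TBS]; };

static int tfd_set_tb(struct tfd *tfd, uint64_t addr, uint16_t len)
{
	int idx = tfd->num_tbs;

	if (idx >= MAX_TBS)
		return -1;	/* no room for another chunk */

	tfd->tbs[idx].addr = addr;
	tfd->tbs[idx].len = len;
	tfd->num_tbs = idx + 1;
	return idx;		/* index of the TB just written */
}

int main(void)
{
	struct tfd tfd = { 0 };

	printf("first TB at index %d\n", tfd_set_tb(&tfd, 0x1000, 64));
	return 0;
}
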
+
+static
+struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
+ struct iwl_txq *txq,
+ struct iwl_device_cmd *dev_cmd,
+ struct sk_buff *skb,
+ struct iwl_cmd_meta *out_meta)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct iwl_tfh_tfd *tfd =
+ iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr);
+ dma_addr_t tb_phys;
+ int i, len, tb1_len, tb2_len, hdr_len;
+ void *tb1_addr;
+
+ memset(tfd, 0, sizeof(*tfd));
+
+ tb_phys = iwl_pcie_get_first_tb_dma(txq, txq->write_ptr);
+ /* The first TB points to bi-directional DMA data */
+ memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
+ IWL_FIRST_TB_SIZE);
+
+ iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
+
+ /* there must be data left over for TB1 or this code must be changed */
+ BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);
+
+ /*
+ * The second TB (tb1) points to the remainder of the TX command
+ * and the 802.11 header - dword aligned size
+ * (This calculation modifies the TX command, so do it before the
+ * setup of the first TB)
+ */
+ len = sizeof(struct iwl_tx_cmd_gen2) + sizeof(struct iwl_cmd_header) +
+ ieee80211_hdrlen(hdr->frame_control) - IWL_FIRST_TB_SIZE;
+
+ tb1_len = ALIGN(len, 4);
+
+ /* map the data for TB1 */
+ tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
+ tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+ goto out_err;
+ iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
+
+ /* set up TFD's third entry to point to remainder of skb's head */
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ tb2_len = skb_headlen(skb) - hdr_len;
+
+ if (tb2_len > 0) {
+ tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
+ tb2_len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+ goto out_err;
+ iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len);
+ }
+
+ /* set up the remaining entries to point to the data */
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ int tb_idx;
+
+ if (!skb_frag_size(frag))
+ continue;
+
+ tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
+ skb_frag_size(frag), DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+ goto out_err;
+ tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys,
+ skb_frag_size(frag));
+
+ out_meta->tbs |= BIT(tb_idx);
+ }
+
+ trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
+ IWL_FIRST_TB_SIZE + tb1_len,
+ skb->data + hdr_len, tb2_len);
+ trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len,
+ skb->len - hdr_len);
+
+ return tfd;
+
+out_err:
+ iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
+ return NULL;
+}
+
+int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_device_cmd *dev_cmd, int txq_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
+ struct iwl_cmd_meta *out_meta;
+ struct iwl_txq *txq = trans_pcie->txq[txq_id];
+ void *tfd;
+
+ if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
+ "TX on unused queue %d\n", txq_id))
+ return -EINVAL;
+
+ if (skb_is_nonlinear(skb) &&
+ skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie) &&
+ __skb_linearize(skb))
+ return -ENOMEM;
+
+ spin_lock(&txq->lock);
+
+ /* Set up driver data for this TFD */
+ txq->entries[txq->write_ptr].skb = skb;
+ txq->entries[txq->write_ptr].cmd = dev_cmd;
+
+ dev_cmd->hdr.sequence =
+ cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
+ INDEX_TO_SEQ(txq->write_ptr)));
+
+ /* Set up first empty entry in queue's array of Tx/cmd buffers */
+ out_meta = &txq->entries[txq->write_ptr].meta;
+ out_meta->flags = 0;
+
+ tfd = iwl_pcie_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
+ if (!tfd) {
+ spin_unlock(&txq->lock);
+ return -1;
+ }
+
+ /* Set up entry for this TFD in Tx byte-count array */
+ iwl_pcie_gen2_update_byte_tbl(txq, le16_to_cpu(tx_cmd->len),
+ iwl_pcie_gen2_get_num_tbs(trans, tfd));
+
+ /* start timer if queue currently empty */
+ if (txq->read_ptr == txq->write_ptr) {
+ if (txq->wd_timeout)
+ mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
+ IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", txq->id);
+ iwl_trans_ref(trans);
+ }
+
+ /* Tell device the write index *just past* this latest filled TFD */
+ txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
+ iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
+ if (iwl_queue_space(txq) < txq->high_mark)
+ iwl_stop_queue(trans, txq);
+
+ /*
+ * At this point the frame is "transmitted" successfully
+ * and we will get a TX status notification eventually.
+ */
+ spin_unlock(&txq->lock);
+ return 0;
+}
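
The Tx path above advances write_ptr with iwl_queue_inc_wrap(), whose body is not in this hunk; given the power-of-two masking visible elsewhere in the patch, it presumably reduces to a mask-based increment, sketched here with an assumed queue size of 256:

#include <stdio.h>

#define QUEUE_SIZE 256	/* assumed TFD_QUEUE_SIZE_MAX; power of two */

static int inc_wrap(int index)
{
	return (index + 1) & (QUEUE_SIZE - 1);
}

int main(void)
{
	int write_ptr = QUEUE_SIZE - 1;

	write_ptr = inc_wrap(write_ptr);	/* wraps back to 0 */
	printf("write_ptr after wrap: %d\n", write_ptr);
	return 0;
}
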
+
+/*************** HOST COMMAND QUEUE FUNCTIONS *****/
+
+/*
+ * iwl_pcie_gen2_enqueue_hcmd - enqueue a uCode command
+ * @trans: transport context pointer
+ * @cmd: a pointer to the ucode command structure
+ *
+ * The function returns < 0 values to indicate the operation
+ * failed. On success, it returns the index (>= 0) of the command in
+ * the command queue.
+ */
+static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
+ struct iwl_host_cmd *cmd)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_device_cmd *out_cmd;
+ struct iwl_cmd_meta *out_meta;
+ unsigned long flags;
+ void *dup_buf = NULL;
+ dma_addr_t phys_addr;
+ int idx, i, cmd_pos;
+ u16 copy_size, cmd_size, tb0_size;
+ bool had_nocopy = false;
+ u8 group_id = iwl_cmd_groupid(cmd->id);
+ const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
+ u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
+ struct iwl_tfh_tfd *tfd =
+ iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr);
+
+ memset(tfd, 0, sizeof(*tfd));
+
+ copy_size = sizeof(struct iwl_cmd_header_wide);
+ cmd_size = sizeof(struct iwl_cmd_header_wide);
+
+ for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
+ cmddata[i] = cmd->data[i];
+ cmdlen[i] = cmd->len[i];
+
+ if (!cmd->len[i])
+ continue;
+
+ /* need at least IWL_FIRST_TB_SIZE copied */
+ if (copy_size < IWL_FIRST_TB_SIZE) {
+ int copy = IWL_FIRST_TB_SIZE - copy_size;
+
+ if (copy > cmdlen[i])
+ copy = cmdlen[i];
+ cmdlen[i] -= copy;
+ cmddata[i] += copy;
+ copy_size += copy;
+ }
+
+ if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
+ had_nocopy = true;
+ if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
+ idx = -EINVAL;
+ goto free_dup_buf;
+ }
+ } else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
+ /*
+ * This is also a chunk that isn't copied
+ * to the static buffer so set had_nocopy.
+ */
+ had_nocopy = true;
+
+ /* only allowed once */
+ if (WARN_ON(dup_buf)) {
+ idx = -EINVAL;
+ goto free_dup_buf;
+ }
+
+ dup_buf = kmemdup(cmddata[i], cmdlen[i],
+ GFP_ATOMIC);
+ if (!dup_buf)
+ return -ENOMEM;
+ } else {
+ /* NOCOPY must not be followed by normal! */
+ if (WARN_ON(had_nocopy)) {
+ idx = -EINVAL;
+ goto free_dup_buf;
+ }
+ copy_size += cmdlen[i];
+ }
+ cmd_size += cmd->len[i];
+ }
+
+ /*
+ * If any of the command structures end up being larger than the
+ * TFD_MAX_PAYLOAD_SIZE and they aren't dynamically allocated into
+ * separate TFDs, then we will need to increase the size of the buffers
+ */
+ if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
+ "Command %s (%#x) is too large (%d bytes)\n",
+ iwl_get_cmd_string(trans, cmd->id), cmd->id, copy_size)) {
+ idx = -EINVAL;
+ goto free_dup_buf;
+ }
+
+ spin_lock_bh(&txq->lock);
+
+ if (iwl_queue_space(txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+ spin_unlock_bh(&txq->lock);
+
+ IWL_ERR(trans, "No space in command queue\n");
+ iwl_op_mode_cmd_queue_full(trans->op_mode);
+ idx = -ENOSPC;
+ goto free_dup_buf;
+ }
+
+ idx = get_cmd_index(txq, txq->write_ptr);
+ out_cmd = txq->entries[idx].cmd;
+ out_meta = &txq->entries[idx].meta;
+
+ /* re-initialize to NULL */
+ memset(out_meta, 0, sizeof(*out_meta));
+ if (cmd->flags & CMD_WANT_SKB)
+ out_meta->source = cmd;
+
+ /* set up the header */
+ out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
+ out_cmd->hdr_wide.group_id = group_id;
+ out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
+ out_cmd->hdr_wide.length =
+ cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide));
+ out_cmd->hdr_wide.reserved = 0;
+ out_cmd->hdr_wide.sequence =
+ cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
+ INDEX_TO_SEQ(txq->write_ptr));
+
+ cmd_pos = sizeof(struct iwl_cmd_header_wide);
+ copy_size = sizeof(struct iwl_cmd_header_wide);
+
+ /* and copy the data that needs to be copied */
+ for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
+ int copy;
+
+ if (!cmd->len[i])
+ continue;
+
+ /* copy everything if not nocopy/dup */
+ if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
+ IWL_HCMD_DFL_DUP))) {
+ copy = cmd->len[i];
+
+ memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
+ cmd_pos += copy;
+ copy_size += copy;
+ continue;
+ }
+
+ /*
+ * Otherwise we need at least IWL_FIRST_TB_SIZE copied
+ * in total (for bi-directional DMA), but copy up to what
+ * we can fit into the payload for debug dump purposes.
+ */
+ copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);
+
+ memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
+ cmd_pos += copy;
+
+ /* However, keep copy_size accurate - we need it below */
+ if (copy_size < IWL_FIRST_TB_SIZE) {
+ copy = IWL_FIRST_TB_SIZE - copy_size;
+
+ if (copy > cmd->len[i])
+ copy = cmd->len[i];
+ copy_size += copy;
+ }
+ }
+
+ IWL_DEBUG_HC(trans,
+ "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
+ iwl_get_cmd_string(trans, cmd->id), group_id,
+ out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
+ cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue);
+
+ /* start the TFD with the minimum copy bytes */
+ tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
+ memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size);
+ iwl_pcie_gen2_set_tb(trans, tfd, iwl_pcie_get_first_tb_dma(txq, idx),
+ tb0_size);
+
+ /* map first command fragment, if any remains */
+ if (copy_size > tb0_size) {
+ phys_addr = dma_map_single(trans->dev,
+ ((u8 *)&out_cmd->hdr) + tb0_size,
+ copy_size - tb0_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(trans->dev, phys_addr)) {
+ idx = -ENOMEM;
+ iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
+ goto out;
+ }
+ iwl_pcie_gen2_set_tb(trans, tfd, phys_addr,
+ copy_size - tb0_size);
+ }
+
+ /* map the remaining (adjusted) nocopy/dup fragments */
+ for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
+ const void *data = cmddata[i];
+
+ if (!cmdlen[i])
+ continue;
+ if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
+ IWL_HCMD_DFL_DUP)))
+ continue;
+ if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
+ data = dup_buf;
+ phys_addr = dma_map_single(trans->dev, (void *)data,
+ cmdlen[i], DMA_TO_DEVICE);
+ if (dma_mapping_error(trans->dev, phys_addr)) {
+ idx = -ENOMEM;
+ iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
+ goto out;
+ }
+ iwl_pcie_gen2_set_tb(trans, tfd, phys_addr, cmdlen[i]);
+ }
+
+ BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
+ out_meta->flags = cmd->flags;
+ if (WARN_ON_ONCE(txq->entries[idx].free_buf))
+ kzfree(txq->entries[idx].free_buf);
+ txq->entries[idx].free_buf = dup_buf;
+
+ trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);
+
+ /* start timer if queue currently empty */
+ if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
+ mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
+
+ spin_lock_irqsave(&trans_pcie->reg_lock, flags);
+ if (!(cmd->flags & CMD_SEND_IN_IDLE) &&
+ !trans_pcie->ref_cmd_in_flight) {
+ trans_pcie->ref_cmd_in_flight = true;
+ IWL_DEBUG_RPM(trans, "set ref_cmd_in_flight - ref\n");
+ iwl_trans_ref(trans);
+ }
+ /* Increment and update queue's write index */
+ txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
+ iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
+ spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+
+out:
+ spin_unlock_bh(&txq->lock);
+free_dup_buf:
+ if (idx < 0)
+ kfree(dup_buf);
+ return idx;
+}
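
A sketch of the tb0 split logic in the enqueue path above: the first IWL_FIRST_TB_SIZE bytes of the copied command always land in the pre-mapped first-TB buffer, and only the remainder gets its own DMA mapping. The value 20 for IWL_FIRST_TB_SIZE is an assumption for illustration:

#include <stdio.h>

#define IWL_FIRST_TB_SIZE 20	/* assumed value, for illustration only */

int main(void)
{
	unsigned int copy_size = 48;	/* bytes that must be copied */
	unsigned int tb0_size = copy_size < IWL_FIRST_TB_SIZE ?
				copy_size : IWL_FIRST_TB_SIZE;

	printf("tb0: %u bytes, separately mapped: %u bytes\n",
	       tb0_size, copy_size - tb0_size);
	return 0;
}
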
+
+#define HOST_COMPLETE_TIMEOUT (2 * HZ)
+
+static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans,
+ struct iwl_host_cmd *cmd)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
+ struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
+ int cmd_idx;
+ int ret;
+
+ IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str);
+
+ if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
+ &trans->status),
+ "Command %s: a command is already active!\n", cmd_str))
+ return -EIO;
+
+ IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);
+
+ if (pm_runtime_suspended(&trans_pcie->pci_dev->dev)) {
+ ret = wait_event_timeout(trans_pcie->d0i3_waitq,
+ pm_runtime_active(&trans_pcie->pci_dev->dev),
+ msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT));
+ if (!ret) {
+ IWL_ERR(trans, "Timeout exiting D0i3 before hcmd\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ cmd_idx = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
+ if (cmd_idx < 0) {
+ ret = cmd_idx;
+ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+ IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
+ cmd_str, ret);
+ return ret;
+ }
+
+ ret = wait_event_timeout(trans_pcie->wait_command_queue,
+ !test_bit(STATUS_SYNC_HCMD_ACTIVE,
+ &trans->status),
+ HOST_COMPLETE_TIMEOUT);
+ if (!ret) {
+ IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
+ cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
+
+ IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
+ txq->read_ptr, txq->write_ptr);
+
+ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+ IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
+ cmd_str);
+ ret = -ETIMEDOUT;
+
+ iwl_force_nmi(trans);
+ iwl_trans_fw_error(trans);
+
+ goto cancel;
+ }
+
+ if (test_bit(STATUS_FW_ERROR, &trans->status)) {
+ IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
+ dump_stack();
+ ret = -EIO;
+ goto cancel;
+ }
+
+ if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
+ test_bit(STATUS_RFKILL, &trans->status)) {
+ IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
+ ret = -ERFKILL;
+ goto cancel;
+ }
+
+ if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
+ IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str);
+ ret = -EIO;
+ goto cancel;
+ }
+
+ return 0;
+
+cancel:
+ if (cmd->flags & CMD_WANT_SKB) {
+ /*
+ * Cancel the CMD_WANT_SKB flag for the cmd in the
+ * TX cmd queue. Otherwise, if the response comes
+ * in later, it could write to an invalid
+ * address (cmd->meta.source).
+ */
+ txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
+ }
+
+ if (cmd->resp_pkt) {
+ iwl_free_resp(cmd);
+ cmd->resp_pkt = NULL;
+ }
+
+ return ret;
+}
+
+int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
+ struct iwl_host_cmd *cmd)
+{
+ if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
+ test_bit(STATUS_RFKILL, &trans->status)) {
+ IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
+ cmd->id);
+ return -ERFKILL;
+ }
+
+ if (cmd->flags & CMD_ASYNC) {
+ int ret;
+
+ /* An asynchronous command cannot expect an SKB to be set. */
+ if (WARN_ON(cmd->flags & CMD_WANT_SKB))
+ return -EINVAL;
+
+ ret = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
+ if (ret < 0) {
+ IWL_ERR(trans,
+ "Error sending %s: enqueue_hcmd failed: %d\n",
+ iwl_get_cmd_string(trans, cmd->id), ret);
+ return ret;
+ }
+ return 0;
+ }
+
+ return iwl_pcie_gen2_send_hcmd_sync(trans, cmd);
+}
+
+/*
+ * iwl_pcie_gen2_txq_unmap - Unmap any remaining DMA mappings and free skb's
+ */
+void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txq[txq_id];
+
+ spin_lock_bh(&txq->lock);
+ while (txq->write_ptr != txq->read_ptr) {
+ IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
+ txq_id, txq->read_ptr);
+
+ iwl_pcie_gen2_free_tfd(trans, txq);
+ txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr);
+
+ if (txq->read_ptr == txq->write_ptr) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&trans_pcie->reg_lock, flags);
+ if (txq_id != trans_pcie->cmd_queue) {
+ IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n",
+ txq->id);
+ iwl_trans_unref(trans);
+ } else if (trans_pcie->ref_cmd_in_flight) {
+ trans_pcie->ref_cmd_in_flight = false;
+ IWL_DEBUG_RPM(trans,
+ "clear ref_cmd_in_flight\n");
+ iwl_trans_unref(trans);
+ }
+ spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+ }
+ }
+ spin_unlock_bh(&txq->lock);
+
+ /* just in case - this queue may have been stopped */
+ iwl_wake_queue(trans, txq);
+}
+
+static void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
+ struct iwl_txq *txq)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct device *dev = trans->dev;
+
+ /* De-alloc circular buffer of TFDs */
+ if (txq->tfds) {
+ dma_free_coherent(dev,
+ trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX,
+ txq->tfds, txq->dma_addr);
+ dma_free_coherent(dev,
+ sizeof(*txq->first_tb_bufs) * txq->n_window,
+ txq->first_tb_bufs, txq->first_tb_dma);
+ }
+
+ kfree(txq->entries);
+ iwl_pcie_free_dma_ptr(trans, &txq->bc_tbl);
+ kfree(txq);
+}
+
+/*
+ * iwl_pcie_gen2_txq_free - Deallocate DMA queue.
+ * @txq_id: transmit queue to deallocate.
+ *
+ * Empty the queue by removing and destroying all BDs.
+ * Free all buffers and the queue memory itself.
+ */
+static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txq[txq_id];
+ int i;
+
+ if (WARN_ON(!txq))
+ return;
+
+ iwl_pcie_gen2_txq_unmap(trans, txq_id);
+
+ /* De-alloc array of command/tx buffers */
+ if (txq_id == trans_pcie->cmd_queue)
+ for (i = 0; i < txq->n_window; i++) {
+ kzfree(txq->entries[i].cmd);
+ kzfree(txq->entries[i].free_buf);
+ }
+ del_timer_sync(&txq->stuck_timer);
+
+ iwl_pcie_gen2_txq_free_memory(trans, txq);
+
+ trans_pcie->txq[txq_id] = NULL;
+
+ clear_bit(txq_id, trans_pcie->queue_used);
+}
+
+int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
+ struct iwl_tx_queue_cfg_cmd *cmd,
+ int cmd_id,
+ unsigned int timeout)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_tx_queue_cfg_rsp *rsp;
+ struct iwl_txq *txq;
+ struct iwl_host_cmd hcmd = {
+ .id = cmd_id,
+ .len = { sizeof(*cmd) },
+ .data = { cmd, },
+ .flags = CMD_WANT_SKB,
+ };
+ int ret, qid;
+
+ txq = kzalloc(sizeof(*txq), GFP_KERNEL);
+ if (!txq)
+ return -ENOMEM;
+ ret = iwl_pcie_alloc_dma_ptr(trans, &txq->bc_tbl,
+ sizeof(struct iwlagn_scd_bc_tbl));
+ if (ret) {
+ IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
+ kfree(txq);
+ return -ENOMEM;
+ }
+
+ ret = iwl_pcie_txq_alloc(trans, txq, TFD_TX_CMD_SLOTS, false);
+ if (ret) {
+ IWL_ERR(trans, "Tx queue alloc failed\n");
+ goto error;
+ }
+ ret = iwl_pcie_txq_init(trans, txq, TFD_TX_CMD_SLOTS, false);
+ if (ret) {
+ IWL_ERR(trans, "Tx queue init failed\n");
+ goto error;
+ }
+
+ txq->wd_timeout = msecs_to_jiffies(timeout);
+
+ cmd->tfdq_addr = cpu_to_le64(txq->dma_addr);
+ cmd->byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
+ cmd->cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(TFD_QUEUE_SIZE_MAX));
+
+ ret = iwl_trans_send_cmd(trans, &hcmd);
+ if (ret)
+ goto error;
+
+ if (WARN_ON(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp))) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ rsp = (void *)hcmd.resp_pkt->data;
+ qid = le16_to_cpu(rsp->queue_number);
+
+ if (qid >= ARRAY_SIZE(trans_pcie->txq)) {
+ WARN_ONCE(1, "queue index %d unsupported", qid);
+ ret = -EIO;
+ goto error;
+ }
+
+ if (test_and_set_bit(qid, trans_pcie->queue_used)) {
+ WARN_ONCE(1, "queue %d already used", qid);
+ ret = -EIO;
+ goto error;
+ }
+
+ txq->id = qid;
+ trans_pcie->txq[qid] = txq;
+
+ /* Place first TFD at index corresponding to start sequence number */
+ txq->read_ptr = le16_to_cpu(rsp->write_pointer);
+ txq->write_ptr = le16_to_cpu(rsp->write_pointer);
+ iwl_write_direct32(trans, HBUS_TARG_WRPTR,
+ (txq->write_ptr) | (qid << 16));
+ IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);
+
+ return qid;
+
+error:
+ iwl_pcie_gen2_txq_free_memory(trans, txq);
+ return ret;
+}
+
+void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ /*
+ * Upon HW Rfkill - we stop the device, and then stop the queues
+ * in the op_mode. Just for the sake of the simplicity of the op_mode,
+ * allow the op_mode to call txq_disable after it already called
+ * stop_device.
+ */
+ if (!test_and_clear_bit(queue, trans_pcie->queue_used)) {
+ WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
+ "queue %d not used", queue);
+ return;
+ }
+
+ iwl_pcie_gen2_txq_unmap(trans, queue);
+
+ IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
+}
+
+void iwl_pcie_gen2_tx_free(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int i;
+
+ memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+
+ /* Free all TX queues */
+ for (i = 0; i < ARRAY_SIZE(trans_pcie->txq); i++) {
+ if (!trans_pcie->txq[i])
+ continue;
+
+ iwl_pcie_gen2_txq_free(trans, i);
+ }
+}
+
+int iwl_pcie_gen2_tx_init(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *cmd_queue;
+ int txq_id = trans_pcie->cmd_queue, ret;
+
+ /* alloc and init the command queue */
+ if (!trans_pcie->txq[txq_id]) {
+ cmd_queue = kzalloc(sizeof(*cmd_queue), GFP_KERNEL);
+ if (!cmd_queue) {
+ IWL_ERR(trans, "Not enough memory for command queue\n");
+ return -ENOMEM;
+ }
+ trans_pcie->txq[txq_id] = cmd_queue;
+ ret = iwl_pcie_txq_alloc(trans, cmd_queue, TFD_CMD_SLOTS, true);
+ if (ret) {
+ IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
+ goto error;
+ }
+ } else {
+ cmd_queue = trans_pcie->txq[txq_id];
+ }
+
+ ret = iwl_pcie_txq_init(trans, cmd_queue, TFD_CMD_SLOTS, true);
+ if (ret) {
+ IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
+ goto error;
+ }
+ trans_pcie->txq[txq_id]->id = txq_id;
+ set_bit(txq_id, trans_pcie->queue_used);
+
+ return 0;
+
+error:
+ iwl_pcie_gen2_tx_free(trans);
+ return ret;
+}
+
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 911cf9868107..386950a2d616 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -2,7 +2,7 @@
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -71,7 +71,7 @@
*
***************************************************/
-static int iwl_queue_space(const struct iwl_txq *q)
+int iwl_queue_space(const struct iwl_txq *q)
{
unsigned int max;
unsigned int used;
@@ -102,10 +102,9 @@ static int iwl_queue_space(const struct iwl_txq *q)
/*
* iwl_queue_init - Initialize queue's high/low-water and read/write indexes
*/
-static int iwl_queue_init(struct iwl_txq *q, int slots_num, u32 id)
+static int iwl_queue_init(struct iwl_txq *q, int slots_num)
{
q->n_window = slots_num;
- q->id = id;
/* slots_num must be power-of-two size, otherwise
* get_cmd_index is broken. */
@@ -126,8 +125,8 @@ static int iwl_queue_init(struct iwl_txq *q, int slots_num, u32 id)
return 0;
}
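
The comment above notes that slots_num must be a power of two or get_cmd_index() breaks; get_cmd_index() itself is not in this hunk, but the constraint suggests a mask-based reduction, sketched here under that assumption:

#include <stdio.h>

/* assumed mask-based form; only valid when n_window is a power of two */
static int get_cmd_index(int n_window, int ptr)
{
	return ptr & (n_window - 1);
}

int main(void)
{
	printf("ptr 37 in a 32-slot window -> index %d\n",
	       get_cmd_index(32, 37));
	return 0;
}
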
-static int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
- struct iwl_dma_ptr *ptr, size_t size)
+int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
+ struct iwl_dma_ptr *ptr, size_t size)
{
if (WARN_ON(ptr->addr))
return -EINVAL;
@@ -140,8 +139,7 @@ static int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
return 0;
}
-static void iwl_pcie_free_dma_ptr(struct iwl_trans *trans,
- struct iwl_dma_ptr *ptr)
+void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr)
{
if (unlikely(!ptr->addr))
return;
@@ -164,9 +162,6 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
}
spin_unlock(&txq->lock);
- IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->id,
- jiffies_to_msecs(txq->wd_timeout));
-
iwl_trans_pcie_log_scd_error(trans, txq);
iwl_force_nmi(trans);
@@ -188,6 +183,7 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
__le16 bc_ent;
struct iwl_tx_cmd *tx_cmd =
(void *)txq->entries[txq->write_ptr].cmd->payload;
+ u8 sta_id = tx_cmd->sta_id;
scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
@@ -210,26 +206,7 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
return;
- if (trans->cfg->use_tfh) {
- u8 filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
- num_tbs * sizeof(struct iwl_tfh_tb);
- /*
- * filled_tfd_size contains the number of filled bytes in the
- * TFD.
- * Dividing it by 64 will give the number of chunks to fetch
- * to SRAM- 0 for one chunk, 1 for 2 and so on.
- * If, for example, TFD contains only 3 TBs then 32 bytes
- * of the TFD are used, and only one chunk of 64 bytes should
- * be fetched
- */
- u8 num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
-
- bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
- } else {
- u8 sta_id = tx_cmd->sta_id;
-
- bc_ent = cpu_to_le16(len | (sta_id << 12));
- }
+ bc_ent = cpu_to_le16(len | (sta_id << 12));
scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
@@ -319,23 +296,17 @@ void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
int i;
for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
- struct iwl_txq *txq = &trans_pcie->txq[i];
+ struct iwl_txq *txq = trans_pcie->txq[i];
spin_lock_bh(&txq->lock);
- if (trans_pcie->txq[i].need_update) {
+ if (txq->need_update) {
iwl_pcie_txq_inc_wr_ptr(trans, txq);
- trans_pcie->txq[i].need_update = false;
+ txq->need_update = false;
}
spin_unlock_bh(&txq->lock);
}
}
-static inline void *iwl_pcie_get_tfd(struct iwl_trans_pcie *trans_pcie,
- struct iwl_txq *txq, int idx)
-{
- return txq->tfds + trans_pcie->tfd_size * idx;
-}
-
static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_trans *trans,
void *_tfd, u8 idx)
{
@@ -368,28 +339,17 @@ static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_trans *trans,
static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd,
u8 idx, dma_addr_t addr, u16 len)
{
- if (trans->cfg->use_tfh) {
- struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
- struct iwl_tfh_tb *tb = &tfd_fh->tbs[idx];
+ struct iwl_tfd *tfd_fh = (void *)tfd;
+ struct iwl_tfd_tb *tb = &tfd_fh->tbs[idx];
- put_unaligned_le64(addr, &tb->addr);
- tb->tb_len = cpu_to_le16(len);
+ u16 hi_n_len = len << 4;
- tfd_fh->num_tbs = cpu_to_le16(idx + 1);
- } else {
- struct iwl_tfd *tfd_fh = (void *)tfd;
- struct iwl_tfd_tb *tb = &tfd_fh->tbs[idx];
-
- u16 hi_n_len = len << 4;
-
- put_unaligned_le32(addr, &tb->lo);
- if (sizeof(dma_addr_t) > sizeof(u32))
- hi_n_len |= ((addr >> 16) >> 16) & 0xF;
+ put_unaligned_le32(addr, &tb->lo);
+ hi_n_len |= iwl_get_dma_hi_addr(addr);
- tb->hi_n_len = cpu_to_le16(hi_n_len);
+ tb->hi_n_len = cpu_to_le16(hi_n_len);
- tfd_fh->num_tbs = idx + 1;
- }
+ tfd_fh->num_tbs = idx + 1;
}
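
The legacy (non-TFH) TB encoding above packs a 36-bit DMA address and a 12-bit length into lo/hi_n_len: the low 32 address bits go in tb->lo, while the length (shifted left by 4) and address bits 32-35 share tb->hi_n_len. A standalone sketch of that packing, with example values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t addr = 0x3abcdef00ULL;	/* example 36-bit DMA address */
	uint16_t len = 128;		/* TB length, fits in 12 bits */

	uint32_t lo = (uint32_t)addr;
	uint16_t hi_n_len = (uint16_t)(len << 4) |
			    (uint16_t)((addr >> 32) & 0xF);

	printf("lo=0x%08x hi_n_len=0x%04x\n", (unsigned)lo,
	       (unsigned)hi_n_len);
	return 0;
}
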
static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_trans *trans, void *_tfd)
@@ -460,7 +420,7 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
* Does NOT advance any TFD circular buffer read/write indexes
* Does NOT free the TFD itself (which is within circular buffer)
*/
-static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
+void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
* idx is bounded by n_window
@@ -522,9 +482,8 @@ static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
return num_tbs;
}
-static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
- struct iwl_txq *txq, int slots_num,
- u32 txq_id)
+int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
+ int slots_num, bool cmd_queue)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
size_t tfd_sz = trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX;
@@ -547,7 +506,7 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
if (!txq->entries)
goto error;
- if (txq_id == trans_pcie->cmd_queue)
+ if (cmd_queue)
for (i = 0; i < slots_num; i++) {
txq->entries[i].cmd =
kmalloc(sizeof(struct iwl_device_cmd),
@@ -573,13 +532,11 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
if (!txq->first_tb_bufs)
goto err_free_tfds;
- txq->id = txq_id;
-
return 0;
err_free_tfds:
dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
error:
- if (txq->entries && txq_id == trans_pcie->cmd_queue)
+ if (txq->entries && cmd_queue)
for (i = 0; i < slots_num; i++)
kfree(txq->entries[i].cmd);
kfree(txq->entries);
@@ -589,10 +546,9 @@ error:
}
-static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
- int slots_num, u32 txq_id)
+int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
+ int slots_num, bool cmd_queue)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ret;
txq->need_update = false;
@@ -602,13 +558,13 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
/* Initialize queue's high/low-water marks, and head/tail indexes */
- ret = iwl_queue_init(txq, slots_num, txq_id);
+ ret = iwl_queue_init(txq, slots_num);
if (ret)
return ret;
spin_lock_init(&txq->lock);
- if (txq_id == trans_pcie->cmd_queue) {
+ if (cmd_queue) {
static struct lock_class_key iwl_pcie_cmd_queue_lock_class;
lockdep_set_class(&txq->lock, &iwl_pcie_cmd_queue_lock_class);
@@ -616,18 +572,6 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
__skb_queue_head_init(&txq->overflow_q);
- /*
- * Tell nic where to find circular buffer of Tx Frame Descriptors for
- * given Tx queue, and enable the DMA channel used for that queue.
- * Circular buffer (TFD queue in DRAM) physical base address */
- if (trans->cfg->use_tfh)
- iwl_write_direct64(trans,
- FH_MEM_CBBC_QUEUE(trans, txq_id),
- txq->dma_addr);
- else
- iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
- txq->dma_addr >> 8);
-
return 0;
}
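
The BUILD_BUG_ON above enforces that TFD_QUEUE_SIZE_MAX is a power of two, which is what lets indexes wrap with a cheap mask instead of a modulo, as in the ssn & (TFD_QUEUE_SIZE_MAX - 1) computation in iwl_trans_pcie_reclaim further down. A minimal sketch under that assumption (QUEUE_SIZE is a hypothetical stand-in):

/* Power-of-two ring index math (sketch). */
#define QUEUE_SIZE 256			/* must be a power of two */

static inline int ring_next(int idx)
{
	return (idx + 1) & (QUEUE_SIZE - 1);	/* 255 wraps to 0 */
}

static inline int ring_used(int write_ptr, int read_ptr)
{
	/* elements in flight, correct across wrap-around */
	return (write_ptr - read_ptr) & (QUEUE_SIZE - 1);
}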
@@ -672,7 +616,7 @@ static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans_pcie->txq[txq_id];
spin_lock_bh(&txq->lock);
while (txq->write_ptr != txq->read_ptr) {
@@ -704,7 +648,6 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
}
}
- txq->active = false;
while (!skb_queue_empty(&txq->overflow_q)) {
struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);
@@ -729,7 +672,7 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans_pcie->txq[txq_id];
struct device *dev = trans->dev;
int i;
@@ -780,9 +723,6 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
- if (trans->cfg->use_tfh)
- return;
-
trans_pcie->scd_base_addr =
iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
@@ -832,9 +772,16 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int txq_id;
+ /*
+ * We should never get here in gen2 trans mode; return early to avoid
+ * invalid accesses.
+ */
+ if (WARN_ON_ONCE(trans->cfg->gen2))
+ return;
+
for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
txq_id++) {
- struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans_pcie->txq[txq_id];
if (trans->cfg->use_tfh)
iwl_write_direct64(trans,
FH_MEM_CBBC_QUEUE(trans, txq_id),
@@ -914,7 +861,7 @@ int iwl_pcie_tx_stop(struct iwl_trans *trans)
memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
/* This can happen: start_hw, stop_device */
- if (!trans_pcie->txq)
+ if (!trans_pcie->txq_memory)
return 0;
/* Unmap DMA from host system and free skb's */
@@ -935,15 +882,20 @@ void iwl_pcie_tx_free(struct iwl_trans *trans)
int txq_id;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+
/* Tx queues */
- if (trans_pcie->txq) {
+ if (trans_pcie->txq_memory) {
for (txq_id = 0;
- txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
+ txq_id < trans->cfg->base_params->num_of_queues;
+ txq_id++) {
iwl_pcie_txq_free(trans, txq_id);
+ trans_pcie->txq[txq_id] = NULL;
+ }
}
- kfree(trans_pcie->txq);
- trans_pcie->txq = NULL;
+ kfree(trans_pcie->txq_memory);
+ trans_pcie->txq_memory = NULL;
iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);
@@ -965,7 +917,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
/* It is not allowed to alloc twice, so warn when this happens.
* We cannot rely on the previous allocation, so free and fail */
- if (WARN_ON(trans_pcie->txq)) {
+ if (WARN_ON(trans_pcie->txq_memory)) {
ret = -EINVAL;
goto error;
}
@@ -984,9 +936,9 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
goto error;
}
- trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
- sizeof(struct iwl_txq), GFP_KERNEL);
- if (!trans_pcie->txq) {
+ trans_pcie->txq_memory = kcalloc(trans->cfg->base_params->num_of_queues,
+ sizeof(struct iwl_txq), GFP_KERNEL);
+ if (!trans_pcie->txq_memory) {
IWL_ERR(trans, "Not enough memory for txq\n");
ret = -ENOMEM;
goto error;
@@ -995,14 +947,17 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
/* Alloc and init all Tx queues, including the command queue (#4/#9) */
for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
txq_id++) {
- slots_num = (txq_id == trans_pcie->cmd_queue) ?
- TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
- ret = iwl_pcie_txq_alloc(trans, &trans_pcie->txq[txq_id],
- slots_num, txq_id);
+ bool cmd_queue = (txq_id == trans_pcie->cmd_queue);
+
+ slots_num = cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+ trans_pcie->txq[txq_id] = &trans_pcie->txq_memory[txq_id];
+ ret = iwl_pcie_txq_alloc(trans, trans_pcie->txq[txq_id],
+ slots_num, cmd_queue);
if (ret) {
IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
goto error;
}
+ trans_pcie->txq[txq_id]->id = txq_id;
}
return 0;
@@ -1012,6 +967,7 @@ error:
return ret;
}
+
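
Both allocation loops now pass a cmd_queue flag instead of the queue id, so iwl_pcie_txq_alloc/iwl_pcie_txq_init no longer need to know the command queue's number and the caller assigns txq->id itself. A condensed, self-contained sketch of that caller-side wiring (every type and constant is a stand-in for the driver's own):

#include <stdbool.h>

#define NUM_QUEUES	31
#define CMD_SLOTS	32
#define TX_SLOTS	256

struct txq { int id; };

struct pcie {
	int cmd_queue;
	struct txq *txq[NUM_QUEUES];	   /* per-queue pointers */
	struct txq txq_memory[NUM_QUEUES]; /* contiguous backing array */
};

/* stands in for iwl_pcie_txq_alloc(trans, txq, slots_num, cmd_queue) */
static int txq_alloc(struct txq *q, int slots, bool cmd_queue)
{
	(void)q; (void)slots; (void)cmd_queue;	/* real work elided */
	return 0;
}

static int tx_alloc(struct pcie *p)
{
	for (int i = 0; i < NUM_QUEUES; i++) {
		bool cmd_queue = (i == p->cmd_queue);
		int slots = cmd_queue ? CMD_SLOTS : TX_SLOTS;

		p->txq[i] = &p->txq_memory[i];
		if (txq_alloc(p->txq[i], slots, cmd_queue))
			return -1;
		p->txq[i]->id = i;	/* id assigned by the caller now */
	}
	return 0;
}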
int iwl_pcie_tx_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1019,7 +975,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
int txq_id, slots_num;
bool alloc = false;
- if (!trans_pcie->txq) {
+ if (!trans_pcie->txq_memory) {
ret = iwl_pcie_tx_alloc(trans);
if (ret)
goto error;
@@ -1040,22 +996,24 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
/* Alloc and init all Tx queues, including the command queue (#4/#9) */
for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
txq_id++) {
- slots_num = (txq_id == trans_pcie->cmd_queue) ?
- TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
- ret = iwl_pcie_txq_init(trans, &trans_pcie->txq[txq_id],
- slots_num, txq_id);
+ bool cmd_queue = (txq_id == trans_pcie->cmd_queue);
+
+ slots_num = cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+ ret = iwl_pcie_txq_init(trans, trans_pcie->txq[txq_id],
+ slots_num, cmd_queue);
if (ret) {
IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
goto error;
}
- }
- if (trans->cfg->use_tfh) {
- iwl_write_direct32(trans, TFH_TRANSFER_MODE,
- TFH_TRANSFER_MAX_PENDING_REQ |
- TFH_CHUNK_SIZE_128 |
- TFH_CHUNK_SPLIT_MODE);
- return 0;
+ /*
+ * Tell the NIC where to find the circular buffer of TFDs for a
+ * given Tx queue, and enable the DMA channel used for that
+ * queue. The value written is the physical base address of the
+ * circular buffer (the TFD queue in DRAM), shifted right by 8.
+ */
+ iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
+ trans_pcie->txq[txq_id]->dma_addr >> 8);
}
iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
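
The FH_MEM_CBBC_QUEUE write above stores dma_addr >> 8, which presumes the TFD ring base has its low 8 bits clear (dma_alloc_coherent returns at least page-aligned memory, so this holds). A tiny sketch of that encoding, with hypothetical names:

#include <stdint.h>

/* Hypothetical model of a register that stores addr >> 8; valid only
 * when the buffer base is 256-byte aligned. */
static uint32_t cbbc_encode(uint64_t dma_addr)
{
	return (uint32_t)(dma_addr >> 8);	/* low 8 bits assumed zero */
}

static uint64_t cbbc_decode(uint32_t reg)
{
	return (uint64_t)reg << 8;
}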
@@ -1100,7 +1058,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
struct sk_buff_head *skbs)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans_pcie->txq[txq_id];
int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1);
int last_to_free;
@@ -1110,7 +1068,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
spin_lock_bh(&txq->lock);
- if (!txq->active) {
+ if (!test_bit(txq_id, trans_pcie->queue_used)) {
IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
txq_id, ssn);
goto out;
@@ -1257,7 +1215,7 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans_pcie->txq[txq_id];
unsigned long flags;
int nfreed = 0;
@@ -1324,15 +1282,12 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
unsigned int wdg_timeout)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans_pcie->txq[txq_id];
int fifo = -1;
if (test_and_set_bit(txq_id, trans_pcie->queue_used))
WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
- if (cfg && trans->cfg->use_tfh)
- WARN_ONCE(1, "Expected no calls to SCD configuration");
-
txq->wd_timeout = msecs_to_jiffies(wdg_timeout);
if (cfg) {
@@ -1414,27 +1369,17 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
"Activate queue %d WrPtr: %d\n",
txq_id, ssn & 0xff);
}
-
- txq->active = true;
}
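
With txq->active removed, queue liveness lives only in the trans_pcie->queue_used bitmap: set in txq_enable via test_and_set_bit, tested in iwl_trans_pcie_reclaim, and cleared when the queue is torn down. A pared-down sketch of that bitmap pattern, using the same kernel bitops (the surrounding struct is hypothetical):

#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/types.h>

/* Sketch: shared bitmap instead of a per-queue active flag. */
struct ctx {
	unsigned long queue_used[BITS_TO_LONGS(32)];
};

static void queue_enable(struct ctx *c, int qid)
{
	if (test_and_set_bit(qid, c->queue_used))
		WARN_ONCE(1, "queue %d already used\n", qid);
}

static bool queue_is_active(struct ctx *c, int qid)
{
	return test_bit(qid, c->queue_used);
}

static void queue_disable(struct ctx *c, int qid)
{
	clear_bit(qid, c->queue_used);
}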
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
bool shared_mode)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans_pcie->txq[txq_id];
txq->ampdu = !shared_mode;
}
-dma_addr_t iwl_trans_pcie_get_txq_byte_table(struct iwl_trans *trans, int txq)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- return trans_pcie->scd_bc_tbls.dma +
- txq * sizeof(struct iwlagn_scd_bc_tbl);
-}
-
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
bool configure_scd)
{
@@ -1443,8 +1388,8 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
SCD_TX_STTS_QUEUE_OFFSET(txq_id);
static const u32 zero_val[4] = {};
- trans_pcie->txq[txq_id].frozen_expiry_remainder = 0;
- trans_pcie->txq[txq_id].frozen = false;
+ trans_pcie->txq[txq_id]->frozen_expiry_remainder = 0;
+ trans_pcie->txq[txq_id]->frozen = false;
/*
* Upon HW Rfkill - we stop the device, and then stop the queues
@@ -1458,9 +1403,6 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
return;
}
- if (configure_scd && trans->cfg->use_tfh)
- WARN_ONCE(1, "Expected no calls to SCD configuration");
-
if (configure_scd) {
iwl_scd_txq_set_inactive(trans, txq_id);
@@ -1469,7 +1411,7 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
}
iwl_pcie_txq_unmap(trans, txq_id);
- trans_pcie->txq[txq_id].ampdu = false;
+ trans_pcie->txq[txq_id]->ampdu = false;
IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}
@@ -1489,7 +1431,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
struct iwl_device_cmd *out_cmd;
struct iwl_cmd_meta *out_meta;
unsigned long flags;
@@ -1774,16 +1716,15 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
struct iwl_device_cmd *cmd;
struct iwl_cmd_meta *meta;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
/* If a Tx command is being handled and it isn't in the actual
* command queue, then a command routing bug has been introduced
* in the queue management code. */
if (WARN(txq_id != trans_pcie->cmd_queue,
"wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
- txq_id, trans_pcie->cmd_queue, sequence,
- trans_pcie->txq[trans_pcie->cmd_queue].read_ptr,
- trans_pcie->txq[trans_pcie->cmd_queue].write_ptr)) {
+ txq_id, trans_pcie->cmd_queue, sequence, txq->read_ptr,
+ txq->write_ptr)) {
iwl_print_hex_error(trans, pkt, 32);
return;
}
@@ -1867,6 +1808,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
int cmd_idx;
int ret;
@@ -1907,8 +1849,6 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
&trans->status),
HOST_COMPLETE_TIMEOUT);
if (!ret) {
- struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
-
IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
iwl_get_cmd_string(trans, cmd->id),
jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
@@ -1959,8 +1899,7 @@ cancel:
* in later, it will possibly set an invalid
* address (cmd->meta.source).
*/
- trans_pcie->txq[trans_pcie->cmd_queue].
- entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
+ txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
}
if (cmd->resp_pkt) {
@@ -2314,7 +2253,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
u16 wifi_seq;
bool amsdu;
- txq = &trans_pcie->txq[txq_id];
+ txq = trans_pcie->txq[txq_id];
if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
"TX on unused queue %d\n", txq_id))