Diffstat (limited to 'net')
-rw-r--r--net/bluetooth/hci_conn.c6
-rw-r--r--net/bluetooth/hci_core.c36
-rw-r--r--net/bluetooth/hci_event.c7
-rw-r--r--net/bluetooth/hci_sync.c77
-rw-r--r--net/bluetooth/l2cap_core.c1
-rw-r--r--net/bluetooth/mgmt.c148
-rw-r--r--net/bluetooth/smp.c151
-rw-r--r--net/bridge/br_fdb.c6
-rw-r--r--net/bridge/br_multicast.c4
-rw-r--r--net/bridge/br_netfilter_hooks.c6
-rw-r--r--net/can/bcm.c4
-rw-r--r--net/core/dev.c27
-rw-r--r--net/core/link_watch.c4
-rw-r--r--net/core/net-sysfs.c4
-rw-r--r--net/core/netpoll.c2
-rw-r--r--net/core/pktgen.c4
-rw-r--r--net/core/rtnetlink.c2
-rw-r--r--net/dsa/tag.c5
-rw-r--r--net/dsa/tag.h135
-rw-r--r--net/dsa/tag_ocelot.c37
-rw-r--r--net/ethtool/cmis_fw_update.c8
-rw-r--r--net/ethtool/ioctl.c64
-rw-r--r--net/ethtool/rss.c8
-rw-r--r--net/ipv4/fou_core.c29
-rw-r--r--net/ipv4/netfilter/iptable_nat.c18
-rw-r--r--net/ipv4/tcp.c18
-rw-r--r--net/ipv4/tcp_ao.c43
-rw-r--r--net/ipv4/tcp_bpf.c2
-rw-r--r--net/ipv4/tcp_input.c5
-rw-r--r--net/ipv4/tcp_ipv4.c14
-rw-r--r--net/ipv4/tcp_offload.c3
-rw-r--r--net/ipv4/udp_offload.c11
-rw-r--r--net/ipv6/ila/ila.h1
-rw-r--r--net/ipv6/ila/ila_main.c6
-rw-r--r--net/ipv6/ila/ila_xlat.c13
-rw-r--r--net/ipv6/ip6_output.c10
-rw-r--r--net/ipv6/ip6_tunnel.c12
-rw-r--r--net/ipv6/ndisc.c34
-rw-r--r--net/ipv6/netfilter/ip6table_nat.c14
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c4
-rw-r--r--net/iucv/af_iucv.c4
-rw-r--r--net/iucv/iucv.c4
-rw-r--r--net/kcm/kcmsock.c4
-rw-r--r--net/l2tp/l2tp_core.c15
-rw-r--r--net/mac80211/cfg.c7
-rw-r--r--net/mac80211/mlme.c2
-rw-r--r--net/mac80211/tx.c9
-rw-r--r--net/mac80211/util.c2
-rw-r--r--net/mctp/test/route-test.c2
-rw-r--r--net/mptcp/diag.c2
-rw-r--r--net/mptcp/fastopen.c4
-rw-r--r--net/mptcp/mib.c2
-rw-r--r--net/mptcp/mib.h2
-rw-r--r--net/mptcp/options.c55
-rw-r--r--net/mptcp/pm.c57
-rw-r--r--net/mptcp/pm_netlink.c306
-rw-r--r--net/mptcp/pm_userspace.c18
-rw-r--r--net/mptcp/protocol.c83
-rw-r--r--net/mptcp/protocol.h16
-rw-r--r--net/mptcp/sched.c4
-rw-r--r--net/mptcp/sockopt.c4
-rw-r--r--net/mptcp/subflow.c80
-rw-r--r--net/netfilter/nf_flow_table_inet.c3
-rw-r--r--net/netfilter/nf_flow_table_ip.c3
-rw-r--r--net/netfilter/nf_flow_table_offload.c2
-rw-r--r--net/netfilter/nf_tables_api.c147
-rw-r--r--net/netfilter/nfnetlink.c5
-rw-r--r--net/netfilter/nfnetlink_queue.c35
-rw-r--r--net/netfilter/nft_counter.c9
-rw-r--r--net/openvswitch/datapath.c2
-rw-r--r--net/sched/act_ct.c4
-rw-r--r--net/sched/sch_cake.c11
-rw-r--r--net/sched/sch_fq.c4
-rw-r--r--net/sched/sch_netem.c56
-rw-r--r--net/sctp/input.c19
-rw-r--r--net/sctp/sm_statefuns.c22
-rw-r--r--net/smc/af_smc.c7
-rw-r--r--net/smc/smc.h3
-rw-r--r--net/smc/smc_inet.c8
-rw-r--r--net/smc/smc_stats.h2
-rw-r--r--net/socket.c4
-rw-r--r--net/sunrpc/svc.c2
-rw-r--r--net/sunrpc/xprtrdma/ib_client.c6
-rw-r--r--net/vmw_vsock/af_vsock.c50
-rw-r--r--net/vmw_vsock/vsock_bpf.c4
-rw-r--r--net/wireless/scan.c11
-rw-r--r--net/wireless/sme.c1
87 files changed, 1360 insertions, 725 deletions
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 8e48ccd2af30..c82502e213a8 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -2952,5 +2952,9 @@ int hci_abort_conn(struct hci_conn *conn, u8 reason)
return 0;
}
- return hci_cmd_sync_queue_once(hdev, abort_conn_sync, conn, NULL);
+ /* Run immediately if on cmd_sync_work since this may be called
+ * as a result to MGMT_OP_DISCONNECT/MGMT_OP_UNPAIR which does
+ * already queue its callback on cmd_sync_work.
+ */
+ return hci_cmd_sync_run_once(hdev, abort_conn_sync, conn, NULL);
}
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 8a4ebd93adfc..d6976db02c06 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -119,13 +119,6 @@ void hci_discovery_set_state(struct hci_dev *hdev, int state)
case DISCOVERY_STARTING:
break;
case DISCOVERY_FINDING:
- /* If discovery was not started then it was initiated by the
- * MGMT interface so no MGMT event shall be generated either
- */
- if (old_state != DISCOVERY_STARTING) {
- hdev->discovery.state = old_state;
- return;
- }
mgmt_discovering(hdev, 1);
break;
case DISCOVERY_RESOLVING:
@@ -2413,10 +2406,16 @@ static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
/* To avoid a potential race with hci_unregister_dev. */
hci_dev_hold(hdev);
- if (action == PM_SUSPEND_PREPARE)
+ switch (action) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
ret = hci_suspend_dev(hdev);
- else if (action == PM_POST_SUSPEND)
+ break;
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
ret = hci_resume_dev(hdev);
+ break;
+ }
if (ret)
bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
@@ -3671,19 +3670,19 @@ static void hci_sched_le(struct hci_dev *hdev)
{
struct hci_chan *chan;
struct sk_buff *skb;
- int quote, cnt, tmp;
+ int quote, *cnt, tmp;
BT_DBG("%s", hdev->name);
if (!hci_conn_num(hdev, LE_LINK))
return;
- cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
+ cnt = hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
- __check_timeout(hdev, cnt, LE_LINK);
+ __check_timeout(hdev, *cnt, LE_LINK);
- tmp = cnt;
- while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
+ tmp = *cnt;
+ while (*cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
u32 priority = (skb_peek(&chan->data_q))->priority;
while (quote-- && (skb = skb_peek(&chan->data_q))) {
BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
@@ -3698,7 +3697,7 @@ static void hci_sched_le(struct hci_dev *hdev)
hci_send_frame(hdev, skb);
hdev->le_last_tx = jiffies;
- cnt--;
+ (*cnt)--;
chan->sent++;
chan->conn->sent++;
@@ -3708,12 +3707,7 @@ static void hci_sched_le(struct hci_dev *hdev)
}
}
- if (hdev->le_pkts)
- hdev->le_cnt = cnt;
- else
- hdev->acl_cnt = cnt;
-
- if (cnt != tmp)
+ if (*cnt != tmp)
hci_prio_recalculate(hdev, LE_LINK);
}
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index dce8035ca799..1c82dcdf6e8f 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1721,9 +1721,10 @@ static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
switch (enable) {
case LE_SCAN_ENABLE:
hci_dev_set_flag(hdev, HCI_LE_SCAN);
- if (hdev->le_scan_type == LE_SCAN_ACTIVE)
+ if (hdev->le_scan_type == LE_SCAN_ACTIVE) {
clear_pending_adv_report(hdev);
- hci_discovery_set_state(hdev, DISCOVERY_FINDING);
+ hci_discovery_set_state(hdev, DISCOVERY_FINDING);
+ }
break;
case LE_SCAN_DISABLE:
@@ -5919,7 +5920,7 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
* while we have an existing one in peripheral role.
*/
if (hdev->conn_hash.le_num_peripheral > 0 &&
- (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
+ (test_bit(HCI_QUIRK_BROKEN_LE_STATES, &hdev->quirks) ||
!(hdev->le_states[3] & 0x10)))
return NULL;
diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
index cd2ed16da8a4..5533e6f561b3 100644
--- a/net/bluetooth/hci_sync.c
+++ b/net/bluetooth/hci_sync.c
@@ -112,7 +112,7 @@ static void hci_cmd_sync_add(struct hci_request *req, u16 opcode, u32 plen,
skb_queue_tail(&req->cmd_q, skb);
}
-static int hci_cmd_sync_run(struct hci_request *req)
+static int hci_req_sync_run(struct hci_request *req)
{
struct hci_dev *hdev = req->hdev;
struct sk_buff *skb;
@@ -169,7 +169,7 @@ struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
hdev->req_status = HCI_REQ_PEND;
- err = hci_cmd_sync_run(&req);
+ err = hci_req_sync_run(&req);
if (err < 0)
return ERR_PTR(err);
@@ -782,6 +782,44 @@ int hci_cmd_sync_queue_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
}
EXPORT_SYMBOL(hci_cmd_sync_queue_once);
+/* Run HCI command:
+ *
+ * - hdev must be running
+ * - if on cmd_sync_work then run immediately otherwise queue
+ */
+int hci_cmd_sync_run(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy)
+{
+ /* Only queue command if hdev is running which means it had been opened
+ * and is either on init phase or is already up.
+ */
+ if (!test_bit(HCI_RUNNING, &hdev->flags))
+ return -ENETDOWN;
+
+ /* If on cmd_sync_work then run immediately otherwise queue */
+ if (current_work() == &hdev->cmd_sync_work)
+ return func(hdev, data);
+
+ return hci_cmd_sync_submit(hdev, func, data, destroy);
+}
+EXPORT_SYMBOL(hci_cmd_sync_run);
+
+/* Run HCI command entry once:
+ *
+ * - Lookup if an entry already exist and only if it doesn't creates a new entry
+ * and run it.
+ * - if on cmd_sync_work then run immediately otherwise queue
+ */
+int hci_cmd_sync_run_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy)
+{
+ if (hci_cmd_sync_lookup_entry(hdev, func, data, destroy))
+ return 0;
+
+ return hci_cmd_sync_run(hdev, func, data, destroy);
+}
+EXPORT_SYMBOL(hci_cmd_sync_run_once);
+
/* Lookup HCI command entry:
*
* - Return first entry that matches by function callback or data or
@@ -2976,6 +3014,27 @@ static int hci_passive_scan_sync(struct hci_dev *hdev)
*/
filter_policy = hci_update_accept_list_sync(hdev);
+ /* If suspended and filter_policy set to 0x00 (no acceptlist) then
+ * passive scanning cannot be started since that would require the host
+ * to be woken up to process the reports.
+ */
+ if (hdev->suspended && !filter_policy) {
+ /* Check if accept list is empty then there is no need to scan
+ * while suspended.
+ */
+ if (list_empty(&hdev->le_accept_list))
+ return 0;
+
+ /* If there are devices is the accept_list that means some
+ * devices could not be programmed which in non-suspended case
+ * means filter_policy needs to be set to 0x00 so the host needs
+ * to filter, but since this is treating suspended case we
+ * can ignore device needing host to filter to allow devices in
+ * the acceptlist to be able to wakeup the system.
+ */
+ filter_policy = 0x01;
+ }
+
/* When the controller is using random resolvable addresses and
* with that having LE privacy enabled, then controllers with
* Extended Scanner Filter Policies support can now enable support
@@ -2998,6 +3057,20 @@ static int hci_passive_scan_sync(struct hci_dev *hdev)
} else if (hci_is_adv_monitoring(hdev)) {
window = hdev->le_scan_window_adv_monitor;
interval = hdev->le_scan_int_adv_monitor;
+
+ /* Disable duplicates filter when scanning for advertisement
+ * monitor for the following reasons.
+ *
+ * For HW pattern filtering (ex. MSFT), Realtek and Qualcomm
+ * controllers ignore RSSI_Sampling_Period when the duplicates
+ * filter is enabled.
+ *
+ * For SW pattern filtering, when we're not doing interleaved
+ * scanning, it is necessary to disable duplicates filter,
+ * otherwise hosts can only receive one advertisement and it's
+ * impossible to know if a peer is still in range.
+ */
+ filter_dups = LE_SCAN_FILTER_DUP_DISABLE;
} else {
window = hdev->le_scan_window;
interval = hdev->le_scan_interval;
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index c3c26bbb5dda..9988ba382b68 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -6774,6 +6774,7 @@ static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
bt_cb(skb)->l2cap.psm = psm;
if (!chan->ops->recv(chan, skb)) {
+ l2cap_chan_unlock(chan);
l2cap_chan_put(chan);
return;
}
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 40d4887c7f79..279902e8bd8a 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -2830,16 +2830,6 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
key_count);
- for (i = 0; i < key_count; i++) {
- struct mgmt_link_key_info *key = &cp->keys[i];
-
- /* Considering SMP over BREDR/LE, there is no need to check addr_type */
- if (key->type > 0x08)
- return mgmt_cmd_status(sk, hdev->id,
- MGMT_OP_LOAD_LINK_KEYS,
- MGMT_STATUS_INVALID_PARAMS);
- }
-
hci_dev_lock(hdev);
hci_link_keys_clear(hdev);
@@ -2864,6 +2854,19 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
continue;
}
+ if (key->addr.type != BDADDR_BREDR) {
+ bt_dev_warn(hdev,
+ "Invalid link address type %u for %pMR",
+ key->addr.type, &key->addr.bdaddr);
+ continue;
+ }
+
+ if (key->type > 0x08) {
+ bt_dev_warn(hdev, "Invalid link key type %u for %pMR",
+ key->type, &key->addr.bdaddr);
+ continue;
+ }
+
/* Always ignore debug keys and require a new pairing if
* the user wants to use them.
*/
@@ -2921,7 +2924,12 @@ static int unpair_device_sync(struct hci_dev *hdev, void *data)
if (!conn)
return 0;
- return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
+ /* Disregard any possible error since the likes of hci_abort_conn_sync
+ * will clean up the connection no matter the error.
+ */
+ hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
+
+ return 0;
}
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
@@ -3053,13 +3061,44 @@ unlock:
return err;
}
+static void disconnect_complete(struct hci_dev *hdev, void *data, int err)
+{
+ struct mgmt_pending_cmd *cmd = data;
+
+ cmd->cmd_complete(cmd, mgmt_status(err));
+ mgmt_pending_free(cmd);
+}
+
+static int disconnect_sync(struct hci_dev *hdev, void *data)
+{
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_cp_disconnect *cp = cmd->param;
+ struct hci_conn *conn;
+
+ if (cp->addr.type == BDADDR_BREDR)
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
+ &cp->addr.bdaddr);
+ else
+ conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
+ le_addr_type(cp->addr.type));
+
+ if (!conn)
+ return -ENOTCONN;
+
+ /* Disregard any possible error since the likes of hci_abort_conn_sync
+ * will clean up the connection no matter the error.
+ */
+ hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
+
+ return 0;
+}
+
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
u16 len)
{
struct mgmt_cp_disconnect *cp = data;
struct mgmt_rp_disconnect rp;
struct mgmt_pending_cmd *cmd;
- struct hci_conn *conn;
int err;
bt_dev_dbg(hdev, "sock %p", sk);
@@ -3082,27 +3121,7 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
goto failed;
}
- if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
- err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
- MGMT_STATUS_BUSY, &rp, sizeof(rp));
- goto failed;
- }
-
- if (cp->addr.type == BDADDR_BREDR)
- conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
- &cp->addr.bdaddr);
- else
- conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
- le_addr_type(cp->addr.type));
-
- if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
- err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
- MGMT_STATUS_NOT_CONNECTED, &rp,
- sizeof(rp));
- goto failed;
- }
-
- cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
+ cmd = mgmt_pending_new(sk, MGMT_OP_DISCONNECT, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto failed;
@@ -3110,9 +3129,10 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
cmd->cmd_complete = generic_cmd_complete;
- err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
+ err = hci_cmd_sync_queue(hdev, disconnect_sync, cmd,
+ disconnect_complete);
if (err < 0)
- mgmt_pending_remove(cmd);
+ mgmt_pending_free(cmd);
failed:
hci_dev_unlock(hdev);
@@ -3456,6 +3476,10 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
* will be kept and this function does nothing.
*/
p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
+ if (!p) {
+ err = -EIO;
+ goto unlock;
+ }
if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
p->auto_connect = HCI_AUTO_CONN_DISABLED;
@@ -7068,7 +7092,6 @@ static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
for (i = 0; i < irk_count; i++) {
struct mgmt_irk_info *irk = &cp->irks[i];
- u8 addr_type = le_addr_type(irk->addr.type);
if (hci_is_blocked_key(hdev,
HCI_BLOCKED_KEY_TYPE_IRK,
@@ -7078,12 +7101,8 @@ static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
continue;
}
- /* When using SMP over BR/EDR, the addr type should be set to BREDR */
- if (irk->addr.type == BDADDR_BREDR)
- addr_type = BDADDR_BREDR;
-
hci_add_irk(hdev, &irk->addr.bdaddr,
- addr_type, irk->val,
+ le_addr_type(irk->addr.type), irk->val,
BDADDR_ANY);
}
@@ -7148,15 +7167,6 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
bt_dev_dbg(hdev, "key_count %u", key_count);
- for (i = 0; i < key_count; i++) {
- struct mgmt_ltk_info *key = &cp->keys[i];
-
- if (!ltk_is_valid(key))
- return mgmt_cmd_status(sk, hdev->id,
- MGMT_OP_LOAD_LONG_TERM_KEYS,
- MGMT_STATUS_INVALID_PARAMS);
- }
-
hci_dev_lock(hdev);
hci_smp_ltks_clear(hdev);
@@ -7164,7 +7174,6 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
for (i = 0; i < key_count; i++) {
struct mgmt_ltk_info *key = &cp->keys[i];
u8 type, authenticated;
- u8 addr_type = le_addr_type(key->addr.type);
if (hci_is_blocked_key(hdev,
HCI_BLOCKED_KEY_TYPE_LTK,
@@ -7174,6 +7183,12 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
continue;
}
+ if (!ltk_is_valid(key)) {
+ bt_dev_warn(hdev, "Invalid LTK for %pMR",
+ &key->addr.bdaddr);
+ continue;
+ }
+
switch (key->type) {
case MGMT_LTK_UNAUTHENTICATED:
authenticated = 0x00;
@@ -7199,12 +7214,8 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
continue;
}
- /* When using SMP over BR/EDR, the addr type should be set to BREDR */
- if (key->addr.type == BDADDR_BREDR)
- addr_type = BDADDR_BREDR;
-
hci_add_ltk(hdev, &key->addr.bdaddr,
- addr_type, type, authenticated,
+ le_addr_type(key->addr.type), type, authenticated,
key->val, key->enc_size, key->ediv, key->rand);
}
@@ -9498,7 +9509,7 @@ void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
ev.store_hint = persistent;
bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
- ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
+ ev.key.addr.type = BDADDR_BREDR;
ev.key.type = key->type;
memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
ev.key.pin_len = key->pin_len;
@@ -9549,7 +9560,7 @@ void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
ev.store_hint = persistent;
bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
- ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
+ ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
ev.key.type = mgmt_ltk_type(key);
ev.key.enc_size = key->enc_size;
ev.key.ediv = key->ediv;
@@ -9578,7 +9589,7 @@ void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
bacpy(&ev.rpa, &irk->rpa);
bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
- ev.irk.addr.type = link_to_bdaddr(irk->link_type, irk->addr_type);
+ ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
memcpy(ev.irk.val, irk->val, sizeof(irk->val));
mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
@@ -9607,7 +9618,7 @@ void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
ev.store_hint = persistent;
bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
- ev.key.addr.type = link_to_bdaddr(csrk->link_type, csrk->bdaddr_type);
+ ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
ev.key.type = csrk->type;
memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
@@ -9685,18 +9696,6 @@ void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
mgmt_event_skb(skb, NULL);
}
-static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
-{
- struct sock **sk = data;
-
- cmd->cmd_complete(cmd, 0);
-
- *sk = cmd->sk;
- sock_hold(*sk);
-
- mgmt_pending_remove(cmd);
-}
-
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
struct hci_dev *hdev = data;
@@ -9740,8 +9739,6 @@ void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
if (link_type != ACL_LINK && link_type != LE_LINK)
return;
- mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
-
bacpy(&ev.addr.bdaddr, bdaddr);
ev.addr.type = link_to_bdaddr(link_type, addr_type);
ev.reason = reason;
@@ -9754,9 +9751,6 @@ void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
if (sk)
sock_put(sk);
-
- mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
- hdev);
}
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 1e7ea3a4b7ef..8b9724fd752a 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -914,7 +914,7 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
* Confirms and the responder Enters the passkey.
*/
if (smp->method == OVERLAP) {
- if (hcon->role == HCI_ROLE_MASTER)
+ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags))
smp->method = CFM_PASSKEY;
else
smp->method = REQ_PASSKEY;
@@ -964,7 +964,7 @@ static u8 smp_confirm(struct smp_chan *smp)
smp_send_cmd(smp->conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cp), &cp);
- if (conn->hcon->out)
+ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags))
SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM);
else
SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM);
@@ -980,7 +980,8 @@ static u8 smp_random(struct smp_chan *smp)
int ret;
bt_dev_dbg(conn->hcon->hdev, "conn %p %s", conn,
- conn->hcon->out ? "initiator" : "responder");
+ test_bit(SMP_FLAG_INITIATOR, &smp->flags) ? "initiator" :
+ "responder");
ret = smp_c1(smp->tk, smp->rrnd, smp->preq, smp->prsp,
hcon->init_addr_type, &hcon->init_addr,
@@ -994,7 +995,7 @@ static u8 smp_random(struct smp_chan *smp)
return SMP_CONFIRM_FAILED;
}
- if (hcon->out) {
+ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
u8 stk[16];
__le64 rand = 0;
__le16 ediv = 0;
@@ -1059,7 +1060,6 @@ static void smp_notify_keys(struct l2cap_conn *conn)
}
if (smp->remote_irk) {
- smp->remote_irk->link_type = hcon->type;
mgmt_new_irk(hdev, smp->remote_irk, persistent);
/* Now that user space can be considered to know the
@@ -1079,28 +1079,24 @@ static void smp_notify_keys(struct l2cap_conn *conn)
}
if (smp->csrk) {
- smp->csrk->link_type = hcon->type;
smp->csrk->bdaddr_type = hcon->dst_type;
bacpy(&smp->csrk->bdaddr, &hcon->dst);
mgmt_new_csrk(hdev, smp->csrk, persistent);
}
if (smp->responder_csrk) {
- smp->responder_csrk->link_type = hcon->type;
smp->responder_csrk->bdaddr_type = hcon->dst_type;
bacpy(&smp->responder_csrk->bdaddr, &hcon->dst);
mgmt_new_csrk(hdev, smp->responder_csrk, persistent);
}
if (smp->ltk) {
- smp->ltk->link_type = hcon->type;
smp->ltk->bdaddr_type = hcon->dst_type;
bacpy(&smp->ltk->bdaddr, &hcon->dst);
mgmt_new_ltk(hdev, smp->ltk, persistent);
}
if (smp->responder_ltk) {
- smp->responder_ltk->link_type = hcon->type;
smp->responder_ltk->bdaddr_type = hcon->dst_type;
bacpy(&smp->responder_ltk->bdaddr, &hcon->dst);
mgmt_new_ltk(hdev, smp->responder_ltk, persistent);
@@ -1120,8 +1116,6 @@ static void smp_notify_keys(struct l2cap_conn *conn)
key = hci_add_link_key(hdev, smp->conn->hcon, &hcon->dst,
smp->link_key, type, 0, &persistent);
if (key) {
- key->link_type = hcon->type;
- key->bdaddr_type = hcon->dst_type;
mgmt_new_link_key(hdev, key, persistent);
/* Don't keep debug keys around if the relevant
@@ -1256,14 +1250,15 @@ static void smp_distribute_keys(struct smp_chan *smp)
rsp = (void *) &smp->prsp[1];
/* The responder sends its keys first */
- if (hcon->out && (smp->remote_key_dist & KEY_DIST_MASK)) {
+ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags) &&
+ (smp->remote_key_dist & KEY_DIST_MASK)) {
smp_allow_key_dist(smp);
return;
}
req = (void *) &smp->preq[1];
- if (hcon->out) {
+ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
keydist = &rsp->init_key_dist;
*keydist &= req->init_key_dist;
} else {
@@ -1432,7 +1427,7 @@ static int sc_mackey_and_ltk(struct smp_chan *smp, u8 mackey[16], u8 ltk[16])
struct hci_conn *hcon = smp->conn->hcon;
u8 *na, *nb, a[7], b[7];
- if (hcon->out) {
+ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
na = smp->prnd;
nb = smp->rrnd;
} else {
@@ -1460,7 +1455,7 @@ static void sc_dhkey_check(struct smp_chan *smp)
a[6] = hcon->init_addr_type;
b[6] = hcon->resp_addr_type;
- if (hcon->out) {
+ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
local_addr = a;
remote_addr = b;
memcpy(io_cap, &smp->preq[1], 3);
@@ -1539,7 +1534,7 @@ static u8 sc_passkey_round(struct smp_chan *smp, u8 smp_op)
/* The round is only complete when the initiator
* receives pairing random.
*/
- if (!hcon->out) {
+ if (!test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM,
sizeof(smp->prnd), smp->prnd);
if (smp->passkey_round == 20)
@@ -1567,7 +1562,7 @@ static u8 sc_passkey_round(struct smp_chan *smp, u8 smp_op)
SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM);
- if (hcon->out) {
+ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM,
sizeof(smp->prnd), smp->prnd);
return 0;
@@ -1578,7 +1573,7 @@ static u8 sc_passkey_round(struct smp_chan *smp, u8 smp_op)
case SMP_CMD_PUBLIC_KEY:
default:
/* Initiating device starts the round */
- if (!hcon->out)
+ if (!test_bit(SMP_FLAG_INITIATOR, &smp->flags))
return 0;
bt_dev_dbg(hdev, "Starting passkey round %u",
@@ -1623,7 +1618,7 @@ static int sc_user_reply(struct smp_chan *smp, u16 mgmt_op, __le32 passkey)
}
/* Initiator sends DHKey check first */
- if (hcon->out) {
+ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
sc_dhkey_check(smp);
SMP_ALLOW_CMD(smp, SMP_CMD_DHKEY_CHECK);
} else if (test_and_clear_bit(SMP_FLAG_DHKEY_PENDING, &smp->flags)) {
@@ -1746,7 +1741,7 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
struct smp_cmd_pairing rsp, *req = (void *) skb->data;
struct l2cap_chan *chan = conn->smp;
struct hci_dev *hdev = conn->hcon->hdev;
- struct smp_chan *smp;
+ struct smp_chan *smp = chan->data;
u8 key_size, auth, sec_level;
int ret;
@@ -1755,16 +1750,14 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
if (skb->len < sizeof(*req))
return SMP_INVALID_PARAMS;
- if (conn->hcon->role != HCI_ROLE_SLAVE)
+ if (smp && test_bit(SMP_FLAG_INITIATOR, &smp->flags))
return SMP_CMD_NOTSUPP;
- if (!chan->data)
+ if (!smp) {
smp = smp_chan_create(conn);
- else
- smp = chan->data;
-
- if (!smp)
- return SMP_UNSPECIFIED;
+ if (!smp)
+ return SMP_UNSPECIFIED;
+ }
/* We didn't start the pairing, so match remote */
auth = req->auth_req & AUTH_REQ_MASK(hdev);
@@ -1946,7 +1939,7 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
if (skb->len < sizeof(*rsp))
return SMP_INVALID_PARAMS;
- if (conn->hcon->role != HCI_ROLE_MASTER)
+ if (!test_bit(SMP_FLAG_INITIATOR, &smp->flags))
return SMP_CMD_NOTSUPP;
skb_pull(skb, sizeof(*rsp));
@@ -2041,7 +2034,7 @@ static u8 sc_check_confirm(struct smp_chan *smp)
if (smp->method == REQ_PASSKEY || smp->method == DSP_PASSKEY)
return sc_passkey_round(smp, SMP_CMD_PAIRING_CONFIRM);
- if (conn->hcon->out) {
+ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd),
smp->prnd);
SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM);
@@ -2063,7 +2056,7 @@ static int fixup_sc_false_positive(struct smp_chan *smp)
u8 auth;
/* The issue is only observed when we're in responder role */
- if (hcon->out)
+ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags))
return SMP_UNSPECIFIED;
if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
@@ -2099,7 +2092,8 @@ static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)
struct hci_dev *hdev = hcon->hdev;
bt_dev_dbg(hdev, "conn %p %s", conn,
- hcon->out ? "initiator" : "responder");
+ test_bit(SMP_FLAG_INITIATOR, &smp->flags) ? "initiator" :
+ "responder");
if (skb->len < sizeof(smp->pcnf))
return SMP_INVALID_PARAMS;
@@ -2121,7 +2115,7 @@ static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)
return ret;
}
- if (conn->hcon->out) {
+ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd),
smp->prnd);
SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM);
@@ -2156,7 +2150,7 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
if (!test_bit(SMP_FLAG_SC, &smp->flags))
return smp_random(smp);
- if (hcon->out) {
+ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
pkax = smp->local_pk;
pkbx = smp->remote_pk;
na = smp->prnd;
@@ -2169,7 +2163,7 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
}
if (smp->method == REQ_OOB) {
- if (!hcon->out)
+ if (!test_bit(SMP_FLAG_INITIATOR, &smp->flags))
smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM,
sizeof(smp->prnd), smp->prnd);
SMP_ALLOW_CMD(smp, SMP_CMD_DHKEY_CHECK);
@@ -2180,7 +2174,7 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
if (smp->method == REQ_PASSKEY || smp->method == DSP_PASSKEY)
return sc_passkey_round(smp, SMP_CMD_PAIRING_RANDOM);
- if (hcon->out) {
+ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
u8 cfm[16];
err = smp_f4(smp->tfm_cmac, smp->remote_pk, smp->local_pk,
@@ -2221,7 +2215,7 @@ mackey_and_ltk:
return SMP_UNSPECIFIED;
if (smp->method == REQ_OOB) {
- if (hcon->out) {
+ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
sc_dhkey_check(smp);
SMP_ALLOW_CMD(smp, SMP_CMD_DHKEY_CHECK);
}
@@ -2295,10 +2289,27 @@ bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level,
return false;
}
+static void smp_send_pairing_req(struct smp_chan *smp, __u8 auth)
+{
+ struct smp_cmd_pairing cp;
+
+ if (smp->conn->hcon->type == ACL_LINK)
+ build_bredr_pairing_cmd(smp, &cp, NULL);
+ else
+ build_pairing_cmd(smp->conn, &cp, NULL, auth);
+
+ smp->preq[0] = SMP_CMD_PAIRING_REQ;
+ memcpy(&smp->preq[1], &cp, sizeof(cp));
+
+ smp_send_cmd(smp->conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp);
+ SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RSP);
+
+ set_bit(SMP_FLAG_INITIATOR, &smp->flags);
+}
+
static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
{
struct smp_cmd_security_req *rp = (void *) skb->data;
- struct smp_cmd_pairing cp;
struct hci_conn *hcon = conn->hcon;
struct hci_dev *hdev = hcon->hdev;
struct smp_chan *smp;
@@ -2347,16 +2358,20 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
skb_pull(skb, sizeof(*rp));
- memset(&cp, 0, sizeof(cp));
- build_pairing_cmd(conn, &cp, NULL, auth);
+ smp_send_pairing_req(smp, auth);
- smp->preq[0] = SMP_CMD_PAIRING_REQ;
- memcpy(&smp->preq[1], &cp, sizeof(cp));
+ return 0;
+}
- smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp);
- SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RSP);
+static void smp_send_security_req(struct smp_chan *smp, __u8 auth)
+{
+ struct smp_cmd_security_req cp;
- return 0;
+ cp.auth_req = auth;
+ smp_send_cmd(smp->conn, SMP_CMD_SECURITY_REQ, sizeof(cp), &cp);
+ SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_REQ);
+
+ clear_bit(SMP_FLAG_INITIATOR, &smp->flags);
}
int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
@@ -2427,23 +2442,11 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
authreq |= SMP_AUTH_MITM;
}
- if (hcon->role == HCI_ROLE_MASTER) {
- struct smp_cmd_pairing cp;
-
- build_pairing_cmd(conn, &cp, NULL, authreq);
- smp->preq[0] = SMP_CMD_PAIRING_REQ;
- memcpy(&smp->preq[1], &cp, sizeof(cp));
-
- smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp);
- SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RSP);
- } else {
- struct smp_cmd_security_req cp;
- cp.auth_req = authreq;
- smp_send_cmd(conn, SMP_CMD_SECURITY_REQ, sizeof(cp), &cp);
- SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_REQ);
- }
+ if (hcon->role == HCI_ROLE_MASTER)
+ smp_send_pairing_req(smp, authreq);
+ else
+ smp_send_security_req(smp, authreq);
- set_bit(SMP_FLAG_INITIATOR, &smp->flags);
ret = 0;
unlock:
@@ -2694,8 +2697,6 @@ static int smp_cmd_sign_info(struct l2cap_conn *conn, struct sk_buff *skb)
static u8 sc_select_method(struct smp_chan *smp)
{
- struct l2cap_conn *conn = smp->conn;
- struct hci_conn *hcon = conn->hcon;
struct smp_cmd_pairing *local, *remote;
u8 local_mitm, remote_mitm, local_io, remote_io, method;
@@ -2708,7 +2709,7 @@ static u8 sc_select_method(struct smp_chan *smp)
* the "struct smp_cmd_pairing" from them we need to skip the
* first byte which contains the opcode.
*/
- if (hcon->out) {
+ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
local = (void *) &smp->preq[1];
remote = (void *) &smp->prsp[1];
} else {
@@ -2777,7 +2778,7 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb)
/* Non-initiating device sends its public key after receiving
* the key from the initiating device.
*/
- if (!hcon->out) {
+ if (!test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
err = sc_send_public_key(smp);
if (err)
return err;
@@ -2839,7 +2840,7 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb)
}
if (smp->method == REQ_OOB) {
- if (hcon->out)
+ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags))
smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM,
sizeof(smp->prnd), smp->prnd);
@@ -2848,7 +2849,7 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb)
return 0;
}
- if (hcon->out)
+ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags))
SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM);
if (smp->method == REQ_PASSKEY) {
@@ -2863,7 +2864,7 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb)
/* The Initiating device waits for the non-initiating device to
* send the confirm value.
*/
- if (conn->hcon->out)
+ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags))
return 0;
err = smp_f4(smp->tfm_cmac, smp->local_pk, smp->remote_pk, smp->prnd,
@@ -2897,7 +2898,7 @@ static int smp_cmd_dhkey_check(struct l2cap_conn *conn, struct sk_buff *skb)
a[6] = hcon->init_addr_type;
b[6] = hcon->resp_addr_type;
- if (hcon->out) {
+ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
local_addr = a;
remote_addr = b;
memcpy(io_cap, &smp->prsp[1], 3);
@@ -2922,7 +2923,7 @@ static int smp_cmd_dhkey_check(struct l2cap_conn *conn, struct sk_buff *skb)
if (crypto_memneq(check->e, e, 16))
return SMP_DHKEY_CHECK_FAILED;
- if (!hcon->out) {
+ if (!test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
if (test_bit(SMP_FLAG_WAIT_USER, &smp->flags)) {
set_bit(SMP_FLAG_DHKEY_PENDING, &smp->flags);
return 0;
@@ -2934,7 +2935,7 @@ static int smp_cmd_dhkey_check(struct l2cap_conn *conn, struct sk_buff *skb)
sc_add_ltk(smp);
- if (hcon->out) {
+ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
hci_le_start_enc(hcon, 0, 0, smp->tk, smp->enc_key_size);
hcon->enc_key_size = smp->enc_key_size;
}
@@ -3083,7 +3084,6 @@ static void bredr_pairing(struct l2cap_chan *chan)
struct l2cap_conn *conn = chan->conn;
struct hci_conn *hcon = conn->hcon;
struct hci_dev *hdev = hcon->hdev;
- struct smp_cmd_pairing req;
struct smp_chan *smp;
bt_dev_dbg(hdev, "chan %p", chan);
@@ -3135,14 +3135,7 @@ static void bredr_pairing(struct l2cap_chan *chan)
bt_dev_dbg(hdev, "starting SMP over BR/EDR");
- /* Prepare and send the BR/EDR SMP Pairing Request */
- build_bredr_pairing_cmd(smp, &req, NULL);
-
- smp->preq[0] = SMP_CMD_PAIRING_REQ;
- memcpy(&smp->preq[1], &req, sizeof(req));
-
- smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(req), &req);
- SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RSP);
+ smp_send_pairing_req(smp, 0x00);
}
static void smp_resume_cb(struct l2cap_chan *chan)
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index c77591e63841..ad7a42b505ef 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -1469,12 +1469,10 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
modified = true;
}
- if (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags)) {
+ if (test_and_set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags)) {
/* Refresh entry */
fdb->used = jiffies;
- } else if (!test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags)) {
- /* Take over SW learned entry */
- set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags);
+ } else {
modified = true;
}
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 9a1cb5079a7a..b2ae0d2434d2 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -2045,16 +2045,14 @@ void br_multicast_del_port(struct net_bridge_port *port)
{
struct net_bridge *br = port->br;
struct net_bridge_port_group *pg;
- HLIST_HEAD(deleted_head);
struct hlist_node *n;
/* Take care of the remaining groups, only perm ones should be left */
spin_lock_bh(&br->multicast_lock);
hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
br_multicast_find_del_pg(br, pg);
- hlist_move_list(&br->mcast_gc_list, &deleted_head);
spin_unlock_bh(&br->multicast_lock);
- br_multicast_gc(&deleted_head);
+ flush_work(&br->mcast_gc_work);
br_multicast_port_ctx_deinit(&port->multicast_ctx);
free_percpu(port->mcast_stats);
}
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index 09f6a773a708..8f9c19d992ac 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -622,8 +622,12 @@ static unsigned int br_nf_local_in(void *priv,
if (likely(nf_ct_is_confirmed(ct)))
return NF_ACCEPT;
+ if (WARN_ON_ONCE(refcount_read(&nfct->use) != 1)) {
+ nf_reset_ct(skb);
+ return NF_ACCEPT;
+ }
+
WARN_ON_ONCE(skb_shared(skb));
- WARN_ON_ONCE(refcount_read(&nfct->use) != 1);
/* We can't call nf_confirm here, it would create a dependency
* on nf_conntrack module.
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 27d5fcf0eac9..46d3ec3aa44b 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -1470,6 +1470,10 @@ static void bcm_notify(struct bcm_sock *bo, unsigned long msg,
/* remove device reference, if this is our bound device */
if (bo->bound && bo->ifindex == dev->ifindex) {
+#if IS_ENABLED(CONFIG_PROC_FS)
+ if (sock_net(sk)->can.bcmproc_dir && bo->bcm_proc_read)
+ remove_proc_entry(bo->procname, sock_net(sk)->can.bcmproc_dir);
+#endif
bo->bound = 0;
bo->ifindex = 0;
notify_enodev = 1;
diff --git a/net/core/dev.c b/net/core/dev.c
index 6ea1d20676fb..f66e61407883 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5150,6 +5150,7 @@ int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff **pskb)
bpf_net_ctx_clear(bpf_net_ctx);
return XDP_DROP;
}
+ bpf_net_ctx_clear(bpf_net_ctx);
}
return XDP_PASS;
out_redir:
@@ -9911,6 +9912,15 @@ static void netdev_sync_lower_features(struct net_device *upper,
}
}
+static bool netdev_has_ip_or_hw_csum(netdev_features_t features)
+{
+ netdev_features_t ip_csum_mask = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ bool ip_csum = (features & ip_csum_mask) == ip_csum_mask;
+ bool hw_csum = features & NETIF_F_HW_CSUM;
+
+ return ip_csum || hw_csum;
+}
+
static netdev_features_t netdev_fix_features(struct net_device *dev,
netdev_features_t features)
{
@@ -9992,15 +10002,9 @@ static netdev_features_t netdev_fix_features(struct net_device *dev,
features &= ~NETIF_F_LRO;
}
- if (features & NETIF_F_HW_TLS_TX) {
- bool ip_csum = (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) ==
- (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
- bool hw_csum = features & NETIF_F_HW_CSUM;
-
- if (!ip_csum && !hw_csum) {
- netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n");
- features &= ~NETIF_F_HW_TLS_TX;
- }
+ if ((features & NETIF_F_HW_TLS_TX) && !netdev_has_ip_or_hw_csum(features)) {
+ netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n");
+ features &= ~NETIF_F_HW_TLS_TX;
}
if ((features & NETIF_F_HW_TLS_RX) && !(features & NETIF_F_RXCSUM)) {
@@ -10008,6 +10012,11 @@ static netdev_features_t netdev_fix_features(struct net_device *dev,
features &= ~NETIF_F_HW_TLS_RX;
}
+ if ((features & NETIF_F_GSO_UDP_L4) && !netdev_has_ip_or_hw_csum(features)) {
+ netdev_dbg(dev, "Dropping USO feature since no CSUM feature.\n");
+ features &= ~NETIF_F_GSO_UDP_L4;
+ }
+
return features;
}
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 8ec35194bfcb..ab150641142a 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -148,9 +148,9 @@ static void linkwatch_schedule_work(int urgent)
* override the existing timer.
*/
if (test_bit(LW_URGENT, &linkwatch_flags))
- mod_delayed_work(system_wq, &linkwatch_work, 0);
+ mod_delayed_work(system_unbound_wq, &linkwatch_work, 0);
else
- schedule_delayed_work(&linkwatch_work, delay);
+ queue_delayed_work(system_unbound_wq, &linkwatch_work, delay);
}
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 0e2084ce7b75..291fdf4a328b 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -235,7 +235,7 @@ static ssize_t speed_show(struct device *dev,
if (!rtnl_trylock())
return restart_syscall();
- if (netif_running(netdev) && netif_device_present(netdev)) {
+ if (netif_running(netdev)) {
struct ethtool_link_ksettings cmd;
if (!__ethtool_get_link_ksettings(netdev, &cmd))
@@ -1524,7 +1524,7 @@ static const struct attribute_group dql_group = {
};
#else
/* Fake declaration, all the code using it should be dead */
-extern const struct attribute_group dql_group;
+static const struct attribute_group dql_group = {};
#endif /* CONFIG_BQL */
#ifdef CONFIG_XPS
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 55bcacf67df3..d657b042d5a0 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -228,7 +228,6 @@ void netpoll_poll_disable(struct net_device *dev)
down(&ni->dev_lock);
srcu_read_unlock(&netpoll_srcu, idx);
}
-EXPORT_SYMBOL(netpoll_poll_disable);
void netpoll_poll_enable(struct net_device *dev)
{
@@ -239,7 +238,6 @@ void netpoll_poll_enable(struct net_device *dev)
up(&ni->dev_lock);
rcu_read_unlock();
}
-EXPORT_SYMBOL(netpoll_poll_enable);
static void refill_skbs(void)
{
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index ea55a758a475..197a50ef8e2e 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3654,7 +3654,7 @@ static int pktgen_thread_worker(void *arg)
struct pktgen_dev *pkt_dev = NULL;
int cpu = t->cpu;
- WARN_ON(smp_processor_id() != cpu);
+ WARN_ON_ONCE(smp_processor_id() != cpu);
init_waitqueue_head(&t->queue);
complete(&t->start_done);
@@ -3989,6 +3989,7 @@ static int __net_init pg_net_init(struct net *net)
goto remove;
}
+ cpus_read_lock();
for_each_online_cpu(cpu) {
int err;
@@ -3997,6 +3998,7 @@ static int __net_init pg_net_init(struct net *net)
pr_warn("Cannot create thread for cpu %d (%d)\n",
cpu, err);
}
+ cpus_read_unlock();
if (list_empty(&pn->pktgen_threads)) {
pr_err("Initialization failed for all threads\n");
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 87e67194f240..73fd7f543fd0 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -3288,7 +3288,7 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
if (ifm->ifi_index > 0)
dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
- dev = rtnl_dev_get(net, tb);
+ dev = rtnl_dev_get(tgt_net, tb);
else if (tb[IFLA_GROUP])
err = rtnl_group_dellink(tgt_net, nla_get_u32(tb[IFLA_GROUP]));
else
diff --git a/net/dsa/tag.c b/net/dsa/tag.c
index 6e402d49afd3..79ad105902d9 100644
--- a/net/dsa/tag.c
+++ b/net/dsa/tag.c
@@ -105,8 +105,9 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
p = netdev_priv(skb->dev);
- if (unlikely(cpu_dp->ds->untag_bridge_pvid)) {
- nskb = dsa_untag_bridge_pvid(skb);
+ if (unlikely(cpu_dp->ds->untag_bridge_pvid ||
+ cpu_dp->ds->untag_vlan_aware_bridge_pvid)) {
+ nskb = dsa_software_vlan_untag(skb);
if (!nskb) {
kfree_skb(skb);
return 0;
diff --git a/net/dsa/tag.h b/net/dsa/tag.h
index f6b9c73718df..d5707870906b 100644
--- a/net/dsa/tag.h
+++ b/net/dsa/tag.h
@@ -44,46 +44,81 @@ static inline struct net_device *dsa_conduit_find_user(struct net_device *dev,
return NULL;
}
-/* If under a bridge with vlan_filtering=0, make sure to send pvid-tagged
- * frames as untagged, since the bridge will not untag them.
+/**
+ * dsa_software_untag_vlan_aware_bridge: Software untagging for VLAN-aware bridge
+ * @skb: Pointer to received socket buffer (packet)
+ * @br: Pointer to bridge upper interface of ingress port
+ * @vid: Parsed VID from packet
+ *
+ * The bridge can process tagged packets. Software like STP/PTP may not. The
+ * bridge can also process untagged packets, to the same effect as if they were
+ * tagged with the PVID of the ingress port. So packets tagged with the PVID of
+ * the bridge port must be software-untagged, to support both use cases.
*/
-static inline struct sk_buff *dsa_untag_bridge_pvid(struct sk_buff *skb)
+static inline void dsa_software_untag_vlan_aware_bridge(struct sk_buff *skb,
+ struct net_device *br,
+ u16 vid)
{
- struct dsa_port *dp = dsa_user_to_port(skb->dev);
- struct net_device *br = dsa_port_bridge_dev_get(dp);
- struct net_device *dev = skb->dev;
- struct net_device *upper_dev;
- u16 vid, pvid, proto;
+ u16 pvid, proto;
int err;
- if (!br || br_vlan_enabled(br))
- return skb;
-
err = br_vlan_get_proto(br, &proto);
if (err)
- return skb;
+ return;
- /* Move VLAN tag from data to hwaccel */
- if (!skb_vlan_tag_present(skb) && skb->protocol == htons(proto)) {
- skb = skb_vlan_untag(skb);
- if (!skb)
- return NULL;
- }
+ err = br_vlan_get_pvid_rcu(skb->dev, &pvid);
+ if (err)
+ return;
- if (!skb_vlan_tag_present(skb))
- return skb;
+ if (vid == pvid && skb->vlan_proto == htons(proto))
+ __vlan_hwaccel_clear_tag(skb);
+}
- vid = skb_vlan_tag_get_id(skb);
+/**
+ * dsa_software_untag_vlan_unaware_bridge: Software untagging for VLAN-unaware bridge
+ * @skb: Pointer to received socket buffer (packet)
+ * @br: Pointer to bridge upper interface of ingress port
+ * @vid: Parsed VID from packet
+ *
+ * The bridge ignores all VLAN tags. Software like STP/PTP may not (it may run
+ * on the plain port, or on a VLAN upper interface). Maybe packets are coming
+ * to software as tagged with a driver-defined VID which is NOT equal to the
+ * PVID of the bridge port (since the bridge is VLAN-unaware, its configuration
+ * should NOT be committed to hardware). DSA needs a method for this private
+ * VID to be communicated by software to it, and if packets are tagged with it,
+ * software-untag them. Note: the private VID may be different per bridge, to
+ * support the FDB isolation use case.
+ *
+ * FIXME: this is currently implemented based on the broken assumption that
+ * the "private VID" used by the driver in VLAN-unaware mode is equal to the
+ * bridge PVID. It should not be, except for a coincidence; the bridge PVID is
+ * irrelevant to the data path in the VLAN-unaware mode. Thus, the VID that
+ * this function removes is wrong.
+ *
+ * All users of ds->untag_bridge_pvid should fix their drivers, if necessary,
+ * to make the two independent. Only then, if there still remains a need to
+ * strip the private VID from packets, then a new ds->ops->get_private_vid()
+ * API shall be introduced to communicate to DSA what this VID is, which needs
+ * to be stripped here.
+ */
+static inline void dsa_software_untag_vlan_unaware_bridge(struct sk_buff *skb,
+ struct net_device *br,
+ u16 vid)
+{
+ struct net_device *upper_dev;
+ u16 pvid, proto;
+ int err;
- /* We already run under an RCU read-side critical section since
- * we are called from netif_receive_skb_list_internal().
- */
- err = br_vlan_get_pvid_rcu(dev, &pvid);
+ err = br_vlan_get_proto(br, &proto);
if (err)
- return skb;
+ return;
- if (vid != pvid)
- return skb;
+ err = br_vlan_get_pvid_rcu(skb->dev, &pvid);
+ if (err)
+ return;
+
+ if (vid != pvid || skb->vlan_proto != htons(proto))
+ return;
/* The sad part about attempting to untag from DSA is that we
* don't know, unless we check, if the skb will end up in
@@ -95,10 +130,50 @@ static inline struct sk_buff *dsa_untag_bridge_pvid(struct sk_buff *skb)
* definitely keep the tag, to make sure it keeps working.
*/
upper_dev = __vlan_find_dev_deep_rcu(br, htons(proto), vid);
- if (upper_dev)
+ if (!upper_dev)
+ __vlan_hwaccel_clear_tag(skb);
+}
+
+/**
+ * dsa_software_vlan_untag: Software VLAN untagging in DSA receive path
+ * @skb: Pointer to socket buffer (packet)
+ *
+ * Receive path method for switches which cannot avoid tagging all packets
+ * towards the CPU port. Called when ds->untag_bridge_pvid (legacy) or
+ * ds->untag_vlan_aware_bridge_pvid is set to true.
+ *
+ * As a side effect of this method, any VLAN tag from the skb head is moved
+ * to hwaccel.
+ */
+static inline struct sk_buff *dsa_software_vlan_untag(struct sk_buff *skb)
+{
+ struct dsa_port *dp = dsa_user_to_port(skb->dev);
+ struct net_device *br = dsa_port_bridge_dev_get(dp);
+ u16 vid;
+
+ /* software untagging for standalone ports not yet necessary */
+ if (!br)
return skb;
- __vlan_hwaccel_clear_tag(skb);
+ /* Move VLAN tag from data to hwaccel */
+ if (!skb_vlan_tag_present(skb)) {
+ skb = skb_vlan_untag(skb);
+ if (!skb)
+ return NULL;
+ }
+
+ if (!skb_vlan_tag_present(skb))
+ return skb;
+
+ vid = skb_vlan_tag_get_id(skb);
+
+ if (br_vlan_enabled(br)) {
+ if (dp->ds->untag_vlan_aware_bridge_pvid)
+ dsa_software_untag_vlan_aware_bridge(skb, br, vid);
+ } else {
+ if (dp->ds->untag_bridge_pvid)
+ dsa_software_untag_vlan_unaware_bridge(skb, br, vid);
+ }
return skb;
}
diff --git a/net/dsa/tag_ocelot.c b/net/dsa/tag_ocelot.c
index e0e4300bfbd3..bf6608fc6be7 100644
--- a/net/dsa/tag_ocelot.c
+++ b/net/dsa/tag_ocelot.c
@@ -8,40 +8,6 @@
#define OCELOT_NAME "ocelot"
#define SEVILLE_NAME "seville"
-/* If the port is under a VLAN-aware bridge, remove the VLAN header from the
- * payload and move it into the DSA tag, which will make the switch classify
- * the packet to the bridge VLAN. Otherwise, leave the classified VLAN at zero,
- * which is the pvid of standalone and VLAN-unaware bridge ports.
- */
-static void ocelot_xmit_get_vlan_info(struct sk_buff *skb, struct dsa_port *dp,
- u64 *vlan_tci, u64 *tag_type)
-{
- struct net_device *br = dsa_port_bridge_dev_get(dp);
- struct vlan_ethhdr *hdr;
- u16 proto, tci;
-
- if (!br || !br_vlan_enabled(br)) {
- *vlan_tci = 0;
- *tag_type = IFH_TAG_TYPE_C;
- return;
- }
-
- hdr = skb_vlan_eth_hdr(skb);
- br_vlan_get_proto(br, &proto);
-
- if (ntohs(hdr->h_vlan_proto) == proto) {
- vlan_remove_tag(skb, &tci);
- *vlan_tci = tci;
- } else {
- rcu_read_lock();
- br_vlan_get_pvid_rcu(br, &tci);
- rcu_read_unlock();
- *vlan_tci = tci;
- }
-
- *tag_type = (proto != ETH_P_8021Q) ? IFH_TAG_TYPE_S : IFH_TAG_TYPE_C;
-}
-
static void ocelot_xmit_common(struct sk_buff *skb, struct net_device *netdev,
__be32 ifh_prefix, void **ifh)
{
@@ -53,7 +19,8 @@ static void ocelot_xmit_common(struct sk_buff *skb, struct net_device *netdev,
u32 rew_op = 0;
u64 qos_class;
- ocelot_xmit_get_vlan_info(skb, dp, &vlan_tci, &tag_type);
+ ocelot_xmit_get_vlan_info(skb, dsa_port_bridge_dev_get(dp), &vlan_tci,
+ &tag_type);
qos_class = netdev_get_num_tc(netdev) ?
netdev_get_prio_tc_map(netdev, skb->priority) : skb->priority;
diff --git a/net/ethtool/cmis_fw_update.c b/net/ethtool/cmis_fw_update.c
index ae4b4b28a601..655ff5224ffa 100644
--- a/net/ethtool/cmis_fw_update.c
+++ b/net/ethtool/cmis_fw_update.c
@@ -35,7 +35,10 @@ struct cmis_cdb_fw_mng_features_rpl {
__be16 resv7;
};
-#define CMIS_CDB_FW_WRITE_MECHANISM_LPL 0x01
+enum cmis_cdb_fw_write_mechanism {
+ CMIS_CDB_FW_WRITE_MECHANISM_LPL = 0x01,
+ CMIS_CDB_FW_WRITE_MECHANISM_BOTH = 0x11,
+};
static int
cmis_fw_update_fw_mng_features_get(struct ethtool_cmis_cdb *cdb,
@@ -64,7 +67,8 @@ cmis_fw_update_fw_mng_features_get(struct ethtool_cmis_cdb *cdb,
}
rpl = (struct cmis_cdb_fw_mng_features_rpl *)args.req.payload;
- if (!(rpl->write_mechanism == CMIS_CDB_FW_WRITE_MECHANISM_LPL)) {
+ if (!(rpl->write_mechanism == CMIS_CDB_FW_WRITE_MECHANISM_LPL ||
+ rpl->write_mechanism == CMIS_CDB_FW_WRITE_MECHANISM_BOTH)) {
ethnl_module_fw_flash_ntf_err(dev, ntf_params,
"Write LPL is not supported",
NULL);
diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
index 983fee76f5cf..ae041f51cd2d 100644
--- a/net/ethtool/ioctl.c
+++ b/net/ethtool/ioctl.c
@@ -442,6 +442,9 @@ int __ethtool_get_link_ksettings(struct net_device *dev,
if (!dev->ethtool_ops->get_link_ksettings)
return -EOPNOTSUPP;
+ if (!netif_device_present(dev))
+ return -ENODEV;
+
memset(link_ksettings, 0, sizeof(*link_ksettings));
return dev->ethtool_ops->get_link_ksettings(dev, link_ksettings);
}
@@ -1331,13 +1334,13 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
u32 rss_cfg_offset = offsetof(struct ethtool_rxfh, rss_config[0]);
const struct ethtool_ops *ops = dev->ethtool_ops;
u32 dev_indir_size = 0, dev_key_size = 0, i;
+ u32 user_indir_len = 0, indir_bytes = 0;
struct ethtool_rxfh_param rxfh_dev = {};
struct ethtool_rxfh_context *ctx = NULL;
struct netlink_ext_ack *extack = NULL;
struct ethtool_rxnfc rx_rings;
struct ethtool_rxfh rxfh;
bool locked = false; /* dev->ethtool->rss_lock taken */
- u32 indir_bytes = 0;
bool create = false;
u8 *rss_config;
int ret;
@@ -1369,23 +1372,25 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
return -EOPNOTSUPP;
create = rxfh.rss_context == ETH_RXFH_CONTEXT_ALLOC;
- /* If either indir, hash key or function is valid, proceed further.
- * Must request at least one change: indir size, hash key, function
- * or input transformation.
- */
if ((rxfh.indir_size &&
rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE &&
rxfh.indir_size != dev_indir_size) ||
- (rxfh.key_size && (rxfh.key_size != dev_key_size)) ||
+ (rxfh.key_size && rxfh.key_size != dev_key_size))
+ return -EINVAL;
+
+ /* Must request at least one change: indir size, hash key, function
+ * or input transformation.
+ * There's no need for any of it in case of context creation.
+ */
+ if (!create &&
(rxfh.indir_size == ETH_RXFH_INDIR_NO_CHANGE &&
rxfh.key_size == 0 && rxfh.hfunc == ETH_RSS_HASH_NO_CHANGE &&
rxfh.input_xfrm == RXH_XFRM_NO_CHANGE))
return -EINVAL;
- if (rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE)
- indir_bytes = dev_indir_size * sizeof(rxfh_dev.indir[0]);
+ indir_bytes = dev_indir_size * sizeof(rxfh_dev.indir[0]);
- rss_config = kzalloc(indir_bytes + rxfh.key_size, GFP_USER);
+ rss_config = kzalloc(indir_bytes + dev_key_size, GFP_USER);
if (!rss_config)
return -ENOMEM;
@@ -1400,6 +1405,7 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
*/
if (rxfh.indir_size &&
rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE) {
+ user_indir_len = indir_bytes;
rxfh_dev.indir = (u32 *)rss_config;
rxfh_dev.indir_size = dev_indir_size;
ret = ethtool_copy_validate_indir(rxfh_dev.indir,
@@ -1426,7 +1432,7 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
rxfh_dev.key_size = dev_key_size;
rxfh_dev.key = rss_config + indir_bytes;
if (copy_from_user(rxfh_dev.key,
- useraddr + rss_cfg_offset + indir_bytes,
+ useraddr + rss_cfg_offset + user_indir_len,
rxfh.key_size)) {
ret = -EFAULT;
goto out;
@@ -1449,12 +1455,13 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
}
if (ops->create_rxfh_context) {
- u32 limit = ops->rxfh_max_context_id ?: U32_MAX;
+ u32 limit = ops->rxfh_max_num_contexts ?: U32_MAX;
u32 ctx_id;
/* driver uses new API, core allocates ID */
ret = xa_alloc(&dev->ethtool->rss_ctx, &ctx_id, ctx,
- XA_LIMIT(1, limit), GFP_KERNEL_ACCOUNT);
+ XA_LIMIT(1, limit - 1),
+ GFP_KERNEL_ACCOUNT);
if (ret < 0) {
kfree(ctx);
goto out;
@@ -1474,16 +1481,21 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
rxfh_dev.input_xfrm = rxfh.input_xfrm;
if (rxfh.rss_context && ops->create_rxfh_context) {
- if (create)
+ if (create) {
ret = ops->create_rxfh_context(dev, ctx, &rxfh_dev,
extack);
- else if (rxfh_dev.rss_delete)
+ /* Make sure driver populates defaults */
+ WARN_ON_ONCE(!ret && !rxfh_dev.key &&
+ !memchr_inv(ethtool_rxfh_context_key(ctx),
+ 0, ctx->key_size));
+ } else if (rxfh_dev.rss_delete) {
ret = ops->remove_rxfh_context(dev, ctx,
rxfh.rss_context,
extack);
- else
+ } else {
ret = ops->modify_rxfh_context(dev, ctx, &rxfh_dev,
extack);
+ }
} else {
ret = ops->set_rxfh(dev, &rxfh_dev, extack);
}
@@ -1522,6 +1534,22 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
kfree(ctx);
goto out;
}
+
+ /* Fetch the defaults for the old API, in the new API drivers
+ * should write defaults into ctx themselves.
+ */
+ rxfh_dev.indir = (u32 *)rss_config;
+ rxfh_dev.indir_size = dev_indir_size;
+
+ rxfh_dev.key = rss_config + indir_bytes;
+ rxfh_dev.key_size = dev_key_size;
+
+ ret = ops->get_rxfh(dev, &rxfh_dev);
+ if (WARN_ON(ret)) {
+ xa_erase(&dev->ethtool->rss_ctx, rxfh.rss_context);
+ kfree(ctx);
+ goto out;
+ }
}
if (rxfh_dev.rss_delete) {
WARN_ON(xa_erase(&dev->ethtool->rss_ctx, rxfh.rss_context) != ctx);
@@ -1530,12 +1558,14 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
if (rxfh_dev.indir) {
for (i = 0; i < dev_indir_size; i++)
ethtool_rxfh_context_indir(ctx)[i] = rxfh_dev.indir[i];
- ctx->indir_configured = 1;
+ ctx->indir_configured =
+ rxfh.indir_size &&
+ rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE;
}
if (rxfh_dev.key) {
memcpy(ethtool_rxfh_context_key(ctx), rxfh_dev.key,
dev_key_size);
- ctx->key_configured = 1;
+ ctx->key_configured = !!rxfh.key_size;
}
if (rxfh_dev.hfunc != ETH_RSS_HASH_NO_CHANGE)
ctx->hfunc = rxfh_dev.hfunc;
diff --git a/net/ethtool/rss.c b/net/ethtool/rss.c
index 71679137eff2..5c4c4505ab9a 100644
--- a/net/ethtool/rss.c
+++ b/net/ethtool/rss.c
@@ -111,7 +111,8 @@ rss_reply_size(const struct ethnl_req_info *req_base,
const struct rss_reply_data *data = RSS_REPDATA(reply_base);
int len;
- len = nla_total_size(sizeof(u32)) + /* _RSS_HFUNC */
+ len = nla_total_size(sizeof(u32)) + /* _RSS_CONTEXT */
+ nla_total_size(sizeof(u32)) + /* _RSS_HFUNC */
nla_total_size(sizeof(u32)) + /* _RSS_INPUT_XFRM */
nla_total_size(sizeof(u32) * data->indir_size) + /* _RSS_INDIR */
nla_total_size(data->hkey_size); /* _RSS_HKEY */
@@ -124,6 +125,11 @@ rss_fill_reply(struct sk_buff *skb, const struct ethnl_req_info *req_base,
const struct ethnl_reply_data *reply_base)
{
const struct rss_reply_data *data = RSS_REPDATA(reply_base);
+ struct rss_req_info *request = RSS_REQINFO(req_base);
+
+ if (request->rss_context &&
+ nla_put_u32(skb, ETHTOOL_A_RSS_CONTEXT, request->rss_context))
+ return -EMSGSIZE;
if ((data->hfunc &&
nla_put_u32(skb, ETHTOOL_A_RSS_HFUNC, data->hfunc)) ||
diff --git a/net/ipv4/fou_core.c b/net/ipv4/fou_core.c
index 0abbc413e0fe..78b869b31492 100644
--- a/net/ipv4/fou_core.c
+++ b/net/ipv4/fou_core.c
@@ -50,7 +50,7 @@ struct fou_net {
static inline struct fou *fou_from_sock(struct sock *sk)
{
- return sk->sk_user_data;
+ return rcu_dereference_sk_user_data(sk);
}
static int fou_recv_pull(struct sk_buff *skb, struct fou *fou, size_t len)
@@ -233,9 +233,15 @@ static struct sk_buff *fou_gro_receive(struct sock *sk,
struct sk_buff *skb)
{
const struct net_offload __rcu **offloads;
- u8 proto = fou_from_sock(sk)->protocol;
+ struct fou *fou = fou_from_sock(sk);
const struct net_offload *ops;
struct sk_buff *pp = NULL;
+ u8 proto;
+
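+ /* The fou instance may already be gone if the socket is being torn
+ * down, so bail out instead of dereferencing a NULL pointer.
+ */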
+ if (!fou)
+ goto out;
+
+ proto = fou->protocol;
/* We can clear the encap_mark for FOU as we are essentially doing
* one of two possible things. We are either adding an L4 tunnel
@@ -263,14 +269,24 @@ static int fou_gro_complete(struct sock *sk, struct sk_buff *skb,
int nhoff)
{
const struct net_offload __rcu **offloads;
- u8 proto = fou_from_sock(sk)->protocol;
+ struct fou *fou = fou_from_sock(sk);
const struct net_offload *ops;
- int err = -ENOSYS;
+ u8 proto;
+ int err;
+
+ if (!fou) {
+ err = -ENOENT;
+ goto out;
+ }
+
+ proto = fou->protocol;
offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
ops = rcu_dereference(offloads[proto]);
- if (WARN_ON(!ops || !ops->callbacks.gro_complete))
+ if (WARN_ON(!ops || !ops->callbacks.gro_complete)) {
+ err = -ENOSYS;
goto out;
+ }
err = ops->callbacks.gro_complete(skb, nhoff);
@@ -320,6 +336,9 @@ static struct sk_buff *gue_gro_receive(struct sock *sk,
struct gro_remcsum grc;
u8 proto;
+ if (!fou)
+ goto out;
+
skb_gro_remcsum_init(&grc);
off = skb_gro_offset(skb);
diff --git a/net/ipv4/netfilter/iptable_nat.c b/net/ipv4/netfilter/iptable_nat.c
index 4d42d0756fd7..a5db7c67d61b 100644
--- a/net/ipv4/netfilter/iptable_nat.c
+++ b/net/ipv4/netfilter/iptable_nat.c
@@ -145,25 +145,27 @@ static struct pernet_operations iptable_nat_net_ops = {
static int __init iptable_nat_init(void)
{
- int ret = xt_register_template(&nf_nat_ipv4_table,
- iptable_nat_table_init);
+ int ret;
+ /* net->gen->ptr[iptable_nat_net_id] must be allocated
+ * before calling iptable_nat_table_init().
+ */
+ ret = register_pernet_subsys(&iptable_nat_net_ops);
if (ret < 0)
return ret;
- ret = register_pernet_subsys(&iptable_nat_net_ops);
- if (ret < 0) {
- xt_unregister_template(&nf_nat_ipv4_table);
- return ret;
- }
+ ret = xt_register_template(&nf_nat_ipv4_table,
+ iptable_nat_table_init);
+ if (ret < 0)
+ unregister_pernet_subsys(&iptable_nat_net_ops);
return ret;
}
static void __exit iptable_nat_exit(void)
{
- unregister_pernet_subsys(&iptable_nat_net_ops);
xt_unregister_template(&nf_nat_ipv4_table);
+ unregister_pernet_subsys(&iptable_nat_net_ops);
}
module_init(iptable_nat_init);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index e03a342c9162..831a18dc7aa6 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -4637,6 +4637,13 @@ int tcp_abort(struct sock *sk, int err)
/* Don't race with userspace socket closes such as tcp_close. */
lock_sock(sk);
+ /* Avoid closing the same socket twice. */
+ if (sk->sk_state == TCP_CLOSE) {
+ if (!has_current_bpf_ctx())
+ release_sock(sk);
+ return -ENOENT;
+ }
+
if (sk->sk_state == TCP_LISTEN) {
tcp_set_state(sk, TCP_CLOSE);
inet_csk_listen_stop(sk);
@@ -4646,16 +4653,13 @@ int tcp_abort(struct sock *sk, int err)
local_bh_disable();
bh_lock_sock(sk);
- if (!sock_flag(sk, SOCK_DEAD)) {
- if (tcp_need_reset(sk->sk_state))
- tcp_send_active_reset(sk, GFP_ATOMIC,
- SK_RST_REASON_NOT_SPECIFIED);
- tcp_done_with_error(sk, err);
- }
+ if (tcp_need_reset(sk->sk_state))
+ tcp_send_active_reset(sk, GFP_ATOMIC,
+ SK_RST_REASON_NOT_SPECIFIED);
+ tcp_done_with_error(sk, err);
bh_unlock_sock(sk);
local_bh_enable();
- tcp_write_queue_purge(sk);
if (!has_current_bpf_ctx())
release_sock(sk);
return 0;
diff --git a/net/ipv4/tcp_ao.c b/net/ipv4/tcp_ao.c
index 85531437890c..db6516092daf 100644
--- a/net/ipv4/tcp_ao.c
+++ b/net/ipv4/tcp_ao.c
@@ -267,32 +267,49 @@ static void tcp_ao_key_free_rcu(struct rcu_head *head)
kfree_sensitive(key);
}
-void tcp_ao_destroy_sock(struct sock *sk, bool twsk)
+static void tcp_ao_info_free_rcu(struct rcu_head *head)
{
- struct tcp_ao_info *ao;
+ struct tcp_ao_info *ao = container_of(head, struct tcp_ao_info, rcu);
struct tcp_ao_key *key;
struct hlist_node *n;
+ hlist_for_each_entry_safe(key, n, &ao->head, node) {
+ hlist_del(&key->node);
+ tcp_sigpool_release(key->tcp_sigpool_id);
+ kfree_sensitive(key);
+ }
+ kfree(ao);
+ static_branch_slow_dec_deferred(&tcp_ao_needed);
+}
+
+static void tcp_ao_sk_omem_free(struct sock *sk, struct tcp_ao_info *ao)
+{
+ size_t total_ao_sk_mem = 0;
+ struct tcp_ao_key *key;
+
+ hlist_for_each_entry(key, &ao->head, node)
+ total_ao_sk_mem += tcp_ao_sizeof_key(key);
+ atomic_sub(total_ao_sk_mem, &sk->sk_omem_alloc);
+}
+
+void tcp_ao_destroy_sock(struct sock *sk, bool twsk)
+{
+ struct tcp_ao_info *ao;
+
if (twsk) {
ao = rcu_dereference_protected(tcp_twsk(sk)->ao_info, 1);
- tcp_twsk(sk)->ao_info = NULL;
+ rcu_assign_pointer(tcp_twsk(sk)->ao_info, NULL);
} else {
ao = rcu_dereference_protected(tcp_sk(sk)->ao_info, 1);
- tcp_sk(sk)->ao_info = NULL;
+ rcu_assign_pointer(tcp_sk(sk)->ao_info, NULL);
}
if (!ao || !refcount_dec_and_test(&ao->refcnt))
return;
- hlist_for_each_entry_safe(key, n, &ao->head, node) {
- hlist_del_rcu(&key->node);
- if (!twsk)
- atomic_sub(tcp_ao_sizeof_key(key), &sk->sk_omem_alloc);
- call_rcu(&key->rcu, tcp_ao_key_free_rcu);
- }
-
- kfree_rcu(ao, rcu);
- static_branch_slow_dec_deferred(&tcp_ao_needed);
+ if (!twsk)
+ tcp_ao_sk_omem_free(sk, ao);
+ call_rcu(&ao->rcu, tcp_ao_info_free_rcu);
}
void tcp_ao_time_wait(struct tcp_timewait_sock *tcptw, struct tcp_sock *tp)
diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
index 53b0d62fd2c2..fe6178715ba0 100644
--- a/net/ipv4/tcp_bpf.c
+++ b/net/ipv4/tcp_bpf.c
@@ -577,7 +577,7 @@ out_err:
err = sk_stream_error(sk, msg->msg_flags, err);
release_sock(sk);
sk_psock_put(sk, psock);
- return copied ? copied : err;
+ return copied > 0 ? copied : err;
}
enum {
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 454362e359da..e37488d3453f 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -238,9 +238,14 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
*/
if (unlikely(len != icsk->icsk_ack.rcv_mss)) {
u64 val = (u64)skb->len << TCP_RMEM_TO_WIN_SCALE;
+ u8 old_ratio = tcp_sk(sk)->scaling_ratio;
do_div(val, skb->truesize);
tcp_sk(sk)->scaling_ratio = val ? val : 1;
+
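+ /* The receive window clamp is derived from sk_rcvbuf via the
+ * scaling ratio, so refresh it whenever the ratio changes.
+ */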
+ if (old_ratio != tcp_sk(sk)->scaling_ratio)
+ WRITE_ONCE(tcp_sk(sk)->window_clamp,
+ tcp_win_from_space(sk, sk->sk_rcvbuf));
}
icsk->icsk_ack.rcv_mss = min_t(unsigned int, len,
tcp_sk(sk)->advmss);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index fd17f25ff288..a4e510846905 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -97,6 +97,8 @@ static DEFINE_PER_CPU(struct sock_bh_locked, ipv4_tcp_sk) = {
.bh_lock = INIT_LOCAL_LOCK(bh_lock),
};
+static DEFINE_MUTEX(tcp_exit_batch_mutex);
+
static u32 tcp_v4_init_seq(const struct sk_buff *skb)
{
return secure_tcp_seq(ip_hdr(skb)->daddr,
@@ -3514,6 +3516,16 @@ static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
struct net *net;
+ /* Make sure concurrent calls to tcp_sk_exit_batch() from net_cleanup_work
+ * and from the failed setup_net() error unwinding path are serialized.
+ *
+ * Since tcp_twsk_purge() handles twsk in any dead netns, not just those in
+ * net_exit_list, the thread that dismantles a particular twsk must do so
+ * without another thread progressing to refcount_dec_and_test() of
+ * tcp_death_row.tw_refcount.
+ */
+ mutex_lock(&tcp_exit_batch_mutex);
+
tcp_twsk_purge(net_exit_list);
list_for_each_entry(net, net_exit_list, exit_list) {
@@ -3521,6 +3533,8 @@ static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
WARN_ON_ONCE(!refcount_dec_and_test(&net->ipv4.tcp_death_row.tw_refcount));
tcp_fastopen_ctx_destroy(net);
}
+
+ mutex_unlock(&tcp_exit_batch_mutex);
}
static struct pernet_operations __net_initdata tcp_sk_ops = {
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index 4b791e74529e..e4ad3311e148 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -140,6 +140,9 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
if (thlen < sizeof(*th))
goto out;
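+ /* Segmentation assumes the checksum starts at the TCP header; reject
+ * packets whose checksum start points elsewhere.
+ */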
+ if (unlikely(skb_checksum_start(skb) != skb_transport_header(skb)))
+ goto out;
+
if (!pskb_may_pull(skb, thlen))
goto out;
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index aa2e0a28ca61..d842303587af 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -278,6 +278,17 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
if (gso_skb->len <= sizeof(*uh) + mss)
return ERR_PTR(-EINVAL);
+ if (unlikely(skb_checksum_start(gso_skb) !=
+ skb_transport_header(gso_skb) &&
+ !(skb_shinfo(gso_skb)->gso_type & SKB_GSO_FRAGLIST)))
+ return ERR_PTR(-EINVAL);
+
+ /* We don't know if the egress device can segment and checksum the
+ * packet when IPv6 extension headers are present. Fall back to software GSO.
+ */
+ if (gso_skb->ip_summed != CHECKSUM_PARTIAL)
+ features &= ~(NETIF_F_GSO_UDP_L4 | NETIF_F_CSUM_MASK);
+
if (skb_gso_ok(gso_skb, features | NETIF_F_GSO_ROBUST)) {
/* Packet is from an untrusted source, reset gso_segs. */
skb_shinfo(gso_skb)->gso_segs = DIV_ROUND_UP(gso_skb->len - sizeof(*uh),
diff --git a/net/ipv6/ila/ila.h b/net/ipv6/ila/ila.h
index ad5f6f6ba333..85b92917849b 100644
--- a/net/ipv6/ila/ila.h
+++ b/net/ipv6/ila/ila.h
@@ -108,6 +108,7 @@ int ila_lwt_init(void);
void ila_lwt_fini(void);
int ila_xlat_init_net(struct net *net);
+void ila_xlat_pre_exit_net(struct net *net);
void ila_xlat_exit_net(struct net *net);
int ila_xlat_nl_cmd_add_mapping(struct sk_buff *skb, struct genl_info *info);
diff --git a/net/ipv6/ila/ila_main.c b/net/ipv6/ila/ila_main.c
index 69caed07315f..976c78efbae1 100644
--- a/net/ipv6/ila/ila_main.c
+++ b/net/ipv6/ila/ila_main.c
@@ -71,6 +71,11 @@ ila_xlat_init_fail:
return err;
}
+static __net_exit void ila_pre_exit_net(struct net *net)
+{
+ ila_xlat_pre_exit_net(net);
+}
+
static __net_exit void ila_exit_net(struct net *net)
{
ila_xlat_exit_net(net);
@@ -78,6 +83,7 @@ static __net_exit void ila_exit_net(struct net *net)
static struct pernet_operations ila_net_ops = {
.init = ila_init_net,
+ .pre_exit = ila_pre_exit_net,
.exit = ila_exit_net,
.id = &ila_net_id,
.size = sizeof(struct ila_net),
diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c
index 67e8c9440977..534a4498e280 100644
--- a/net/ipv6/ila/ila_xlat.c
+++ b/net/ipv6/ila/ila_xlat.c
@@ -619,6 +619,15 @@ int ila_xlat_init_net(struct net *net)
return 0;
}
+void ila_xlat_pre_exit_net(struct net *net)
+{
+ struct ila_net *ilan = net_generic(net, ila_net_id);
+
+ if (ilan->xlat.hooks_registered)
+ nf_unregister_net_hooks(net, ila_nf_hook_ops,
+ ARRAY_SIZE(ila_nf_hook_ops));
+}
+
void ila_xlat_exit_net(struct net *net)
{
struct ila_net *ilan = net_generic(net, ila_net_id);
@@ -626,10 +635,6 @@ void ila_xlat_exit_net(struct net *net)
rhashtable_free_and_destroy(&ilan->xlat.rhash_table, ila_free_cb, NULL);
free_bucket_spinlocks(ilan->xlat.locks);
-
- if (ilan->xlat.hooks_registered)
- nf_unregister_net_hooks(net, ila_nf_hook_ops,
- ARRAY_SIZE(ila_nf_hook_ops));
}
static int ila_xlat_addr(struct sk_buff *skb, bool sir2ila)
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index ab504d31f0cd..f26841f1490f 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -70,11 +70,15 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
/* Be paranoid, rather than too clever. */
if (unlikely(hh_len > skb_headroom(skb)) && dev->header_ops) {
+ /* Make sure idev stays alive */
+ rcu_read_lock();
skb = skb_expand_head(skb, hh_len);
if (!skb) {
IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
+ rcu_read_unlock();
return -ENOMEM;
}
+ rcu_read_unlock();
}
hdr = ipv6_hdr(skb);
@@ -283,11 +287,15 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
head_room += opt->opt_nflen + opt->opt_flen;
if (unlikely(head_room > skb_headroom(skb))) {
+ /* Make sure idev stays alive */
+ rcu_read_lock();
skb = skb_expand_head(skb, head_room);
if (!skb) {
IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
+ rcu_read_unlock();
return -ENOBUFS;
}
+ rcu_read_unlock();
}
if (opt) {
@@ -1956,6 +1964,7 @@ int ip6_send_skb(struct sk_buff *skb)
struct rt6_info *rt = dst_rt6_info(skb_dst(skb));
int err;
+ rcu_read_lock();
err = ip6_local_out(net, skb->sk, skb);
if (err) {
if (err > 0)
@@ -1965,6 +1974,7 @@ int ip6_send_skb(struct sk_buff *skb)
IPSTATS_MIB_OUTDISCARDS);
}
+ rcu_read_unlock();
return err;
}
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 9dee0c127955..87dfb565a9f8 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1507,7 +1507,8 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
tdev = __dev_get_by_index(t->net, p->link);
if (tdev) {
- dev->hard_header_len = tdev->hard_header_len + t_hlen;
+ dev->needed_headroom = tdev->hard_header_len +
+ tdev->needed_headroom + t_hlen;
mtu = min_t(unsigned int, tdev->mtu, IP6_MAX_MTU);
mtu = mtu - t_hlen;
@@ -1731,7 +1732,9 @@ ip6_tnl_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
{
struct ip6_tnl *tnl = netdev_priv(dev);
+ int t_hlen;
+ t_hlen = tnl->hlen + sizeof(struct ipv6hdr);
if (tnl->parms.proto == IPPROTO_IPV6) {
if (new_mtu < IPV6_MIN_MTU)
return -EINVAL;
@@ -1740,10 +1743,10 @@ int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
return -EINVAL;
}
if (tnl->parms.proto == IPPROTO_IPV6 || tnl->parms.proto == 0) {
- if (new_mtu > IP6_MAX_MTU - dev->hard_header_len)
+ if (new_mtu > IP6_MAX_MTU - dev->hard_header_len - t_hlen)
return -EINVAL;
} else {
- if (new_mtu > IP_MAX_MTU - dev->hard_header_len)
+ if (new_mtu > IP_MAX_MTU - dev->hard_header_len - t_hlen)
return -EINVAL;
}
WRITE_ONCE(dev->mtu, new_mtu);
@@ -1887,12 +1890,11 @@ ip6_tnl_dev_init_gen(struct net_device *dev)
t_hlen = t->hlen + sizeof(struct ipv6hdr);
dev->type = ARPHRD_TUNNEL6;
- dev->hard_header_len = LL_MAX_HEADER + t_hlen;
dev->mtu = ETH_DATA_LEN - t_hlen;
if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
dev->mtu -= 8;
dev->min_mtu = ETH_MIN_MTU;
- dev->max_mtu = IP6_MAX_MTU - dev->hard_header_len;
+ dev->max_mtu = IP6_MAX_MTU - dev->hard_header_len - t_hlen;
netdev_hold(dev, &t->dev_tracker, GFP_KERNEL);
netdev_lockdep_set_classes(dev);
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 70a0b2ad6bd7..b8eec1b6cc2c 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -227,6 +227,7 @@ struct ndisc_options *ndisc_parse_options(const struct net_device *dev,
return NULL;
memset(ndopts, 0, sizeof(*ndopts));
while (opt_len) {
+ bool unknown = false;
int l;
if (opt_len < sizeof(struct nd_opt_hdr))
return NULL;
@@ -262,22 +263,23 @@ struct ndisc_options *ndisc_parse_options(const struct net_device *dev,
break;
#endif
default:
- if (ndisc_is_useropt(dev, nd_opt)) {
- ndopts->nd_useropts_end = nd_opt;
- if (!ndopts->nd_useropts)
- ndopts->nd_useropts = nd_opt;
- } else {
- /*
- * Unknown options must be silently ignored,
- * to accommodate future extension to the
- * protocol.
- */
- ND_PRINTK(2, notice,
- "%s: ignored unsupported option; type=%d, len=%d\n",
- __func__,
- nd_opt->nd_opt_type,
- nd_opt->nd_opt_len);
- }
+ unknown = true;
+ }
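+ /* Collect user options even when the option type was handled above;
+ * only genuinely unknown options fall through to the log below.
+ */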
+ if (ndisc_is_useropt(dev, nd_opt)) {
+ ndopts->nd_useropts_end = nd_opt;
+ if (!ndopts->nd_useropts)
+ ndopts->nd_useropts = nd_opt;
+ } else if (unknown) {
+ /*
+ * Unknown options must be silently ignored,
+ * to accommodate future extension to the
+ * protocol.
+ */
+ ND_PRINTK(2, notice,
+ "%s: ignored unsupported option; type=%d, len=%d\n",
+ __func__,
+ nd_opt->nd_opt_type,
+ nd_opt->nd_opt_len);
}
next_opt:
opt_len -= l;
diff --git a/net/ipv6/netfilter/ip6table_nat.c b/net/ipv6/netfilter/ip6table_nat.c
index 52cf104e3478..e119d4f090cc 100644
--- a/net/ipv6/netfilter/ip6table_nat.c
+++ b/net/ipv6/netfilter/ip6table_nat.c
@@ -147,23 +147,27 @@ static struct pernet_operations ip6table_nat_net_ops = {
static int __init ip6table_nat_init(void)
{
- int ret = xt_register_template(&nf_nat_ipv6_table,
- ip6table_nat_table_init);
+ int ret;
+ /* net->gen->ptr[ip6table_nat_net_id] must be allocated
+ * before calling ip6table_nat_table_init().
+ */
+ ret = register_pernet_subsys(&ip6table_nat_net_ops);
if (ret < 0)
return ret;
- ret = register_pernet_subsys(&ip6table_nat_net_ops);
+ ret = xt_register_template(&nf_nat_ipv6_table,
+ ip6table_nat_table_init);
if (ret)
- xt_unregister_template(&nf_nat_ipv6_table);
+ unregister_pernet_subsys(&ip6table_nat_net_ops);
return ret;
}
static void __exit ip6table_nat_exit(void)
{
- unregister_pernet_subsys(&ip6table_nat_net_ops);
xt_unregister_template(&nf_nat_ipv6_table);
+ unregister_pernet_subsys(&ip6table_nat_net_ops);
}
module_init(ip6table_nat_init);
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 6f0844c9315d..4120e67a8ce6 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -154,6 +154,10 @@ static struct frag_queue *fq_find(struct net *net, __be32 id, u32 user,
};
struct inet_frag_queue *q;
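+ /* Only scope the reassembly queue lookup by inbound interface for
+ * multicast or link-local destinations, so fragments arriving on
+ * different devices still match the same queue.
+ */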
+ if (!(ipv6_addr_type(&hdr->daddr) & (IPV6_ADDR_MULTICAST |
+ IPV6_ADDR_LINKLOCAL)))
+ key.iif = 0;
+
q = inet_frag_find(nf_frag->fqdir, &key);
if (!q)
return NULL;
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index c3b0b610b0aa..c00323fa9eb6 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -335,8 +335,8 @@ static void iucv_sever_path(struct sock *sk, int with_user_data)
struct iucv_sock *iucv = iucv_sk(sk);
struct iucv_path *path = iucv->path;
- if (iucv->path) {
- iucv->path = NULL;
+ /* Whoever resets the path pointer must sever and free it. */
+ if (xchg(&iucv->path, NULL)) {
if (with_user_data) {
low_nmcpy(user_data, iucv->src_name);
high_nmcpy(user_data, iucv->dst_name);
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 1e42e13ad24e..d3e9efab7f4b 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -86,13 +86,15 @@ struct device *iucv_alloc_device(const struct attribute_group **attrs,
{
struct device *dev;
va_list vargs;
+ char buf[20];
int rc;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
goto out_error;
va_start(vargs, fmt);
- rc = dev_set_name(dev, fmt, vargs);
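+ /* dev_set_name() takes a variadic format string, not a va_list, so
+ * format into a local buffer first.
+ */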
+ vsnprintf(buf, sizeof(buf), fmt, vargs);
+ rc = dev_set_name(dev, "%s", buf);
va_end(vargs);
if (rc)
goto out_error;
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index 2f191e50d4fc..d4118c796290 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -755,6 +755,7 @@ static int kcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
!(msg->msg_flags & MSG_MORE) : !!(msg->msg_flags & MSG_EOR);
int err = -EPIPE;
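+ /* Serialise sendmsg() callers on this KCM socket so concurrent senders
+ * cannot corrupt a partially built message.
+ */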
+ mutex_lock(&kcm->tx_mutex);
lock_sock(sk);
/* Per tcp_sendmsg this should be in poll */
@@ -926,6 +927,7 @@ partial_message:
KCM_STATS_ADD(kcm->stats.tx_bytes, copied);
release_sock(sk);
+ mutex_unlock(&kcm->tx_mutex);
return copied;
out_error:
@@ -951,6 +953,7 @@ out_error:
sk->sk_write_space(sk);
release_sock(sk);
+ mutex_unlock(&kcm->tx_mutex);
return err;
}
@@ -1204,6 +1207,7 @@ static void init_kcm_sock(struct kcm_sock *kcm, struct kcm_mux *mux)
spin_unlock_bh(&mux->lock);
INIT_WORK(&kcm->tx_work, kcm_tx_work);
+ mutex_init(&kcm->tx_mutex);
spin_lock_bh(&mux->rx_lock);
kcm_rcv_ready(kcm);
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index c80ab3f26084..2e86f520f799 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -86,6 +86,11 @@
/* Default trace flags */
#define L2TP_DEFAULT_DEBUG_FLAGS 0
+#define L2TP_DEPTH_NESTING 2
+#if L2TP_DEPTH_NESTING == SINGLE_DEPTH_NESTING
+#error "L2TP requires its own lockdep subclass"
+#endif
+
/* Private data stored for received packets in the skb.
*/
struct l2tp_skb_cb {
@@ -1124,7 +1129,13 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, uns
IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED);
nf_reset_ct(skb);
- bh_lock_sock_nested(sk);
+ /* L2TP uses its own lockdep subclass to avoid lockdep splats caused by
+ * nested socket calls on the same lockdep socket class. This can
+ * happen when data from a user socket is routed over l2tp, which uses
+ * another userspace socket.
+ */
+ spin_lock_nested(&sk->sk_lock.slock, L2TP_DEPTH_NESTING);
+
if (sock_owned_by_user(sk)) {
kfree_skb(skb);
ret = NET_XMIT_DROP;
@@ -1176,7 +1187,7 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, uns
ret = l2tp_xmit_queue(tunnel, skb, &inet->cork.fl);
out_unlock:
- bh_unlock_sock(sk);
+ spin_unlock(&sk->sk_lock.slock);
return ret;
}
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 85cb71de370f..b02b84ce2130 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -114,7 +114,7 @@ static int ieee80211_set_mon_options(struct ieee80211_sub_if_data *sdata,
/* apply all changes now - no failures allowed */
- if (monitor_sdata)
+ if (monitor_sdata && ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
ieee80211_set_mu_mimo_follow(monitor_sdata, params);
if (params->flags) {
@@ -3053,6 +3053,9 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
+ if (!ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
+ return -EOPNOTSUPP;
+
sdata = wiphy_dereference(local->hw.wiphy,
local->monitor_sdata);
if (!sdata)
@@ -3115,7 +3118,7 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
if (has_monitor) {
sdata = wiphy_dereference(local->hw.wiphy,
local->monitor_sdata);
- if (sdata) {
+ if (sdata && ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) {
sdata->deflink.user_power_level = local->user_power_level;
if (txp_type != sdata->vif.bss_conf.txpower_type)
update_txp_type = true;
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 4779a18ab75d..f9526bbc3633 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -6664,7 +6664,7 @@ static bool ieee80211_mgd_ssid_mismatch(struct ieee80211_sub_if_data *sdata,
return true;
/* hidden SSID: zeroed out */
- if (memcmp(elems->ssid, zero_ssid, elems->ssid_len))
+ if (!memcmp(elems->ssid, zero_ssid, elems->ssid_len))
return false;
return memcmp(elems->ssid, cfg->ssid, cfg->ssid_len);
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 72a9ba8bc5fd..bca7b341dd77 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1768,7 +1768,7 @@ static bool __ieee80211_tx(struct ieee80211_local *local,
break;
}
sdata = rcu_dereference(local->monitor_sdata);
- if (sdata) {
+ if (sdata && ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) {
vif = &sdata->vif;
info->hw_queue =
vif->hw_queue[skb_get_queue_mapping(skb)];
@@ -3957,7 +3957,8 @@ begin:
break;
}
tx.sdata = rcu_dereference(local->monitor_sdata);
- if (tx.sdata) {
+ if (tx.sdata &&
+ ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) {
vif = &tx.sdata->vif;
info->hw_queue =
vif->hw_queue[skb_get_queue_mapping(skb)];
@@ -5347,8 +5348,10 @@ ieee80211_beacon_get_ap(struct ieee80211_hw *hw,
if (beacon->tail)
skb_put_data(skb, beacon->tail, beacon->tail_len);
- if (ieee80211_beacon_protect(skb, local, sdata, link) < 0)
+ if (ieee80211_beacon_protect(skb, local, sdata, link) < 0) {
+ dev_kfree_skb(skb);
return NULL;
+ }
ieee80211_beacon_get_finish(hw, vif, link, offs, beacon, skb,
chanctx_conf, csa_off_base);
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index ced19ce7c51a..c7ad9bc5973a 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -776,7 +776,7 @@ static void __iterate_interfaces(struct ieee80211_local *local,
sdata = rcu_dereference_check(local->monitor_sdata,
lockdep_is_held(&local->iflist_mtx) ||
lockdep_is_held(&local->hw.wiphy->mtx));
- if (sdata &&
+ if (sdata && ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF) &&
(iter_flags & IEEE80211_IFACE_ITER_RESUME_ALL || !active_only ||
sdata->flags & IEEE80211_SDATA_IN_DRIVER))
iterator(data, sdata->vif.addr, &sdata->vif);
diff --git a/net/mctp/test/route-test.c b/net/mctp/test/route-test.c
index 77e5dd422258..8551dab1d1e6 100644
--- a/net/mctp/test/route-test.c
+++ b/net/mctp/test/route-test.c
@@ -366,7 +366,7 @@ static void mctp_test_route_input_sk(struct kunit *test)
skb2 = skb_recv_datagram(sock->sk, MSG_DONTWAIT, &rc);
KUNIT_EXPECT_NOT_ERR_OR_NULL(test, skb2);
- KUNIT_EXPECT_EQ(test, skb->len, 1);
+ KUNIT_EXPECT_EQ(test, skb2->len, 1);
skb_free_datagram(sock->sk, skb2);
diff --git a/net/mptcp/diag.c b/net/mptcp/diag.c
index 3ae46b545d2c..2d3efb405437 100644
--- a/net/mptcp/diag.c
+++ b/net/mptcp/diag.c
@@ -94,7 +94,7 @@ static size_t subflow_get_info_size(const struct sock *sk)
nla_total_size(4) + /* MPTCP_SUBFLOW_ATTR_RELWRITE_SEQ */
nla_total_size_64bit(8) + /* MPTCP_SUBFLOW_ATTR_MAP_SEQ */
nla_total_size(4) + /* MPTCP_SUBFLOW_ATTR_MAP_SFSEQ */
- nla_total_size(2) + /* MPTCP_SUBFLOW_ATTR_SSN_OFFSET */
+ nla_total_size(4) + /* MPTCP_SUBFLOW_ATTR_SSN_OFFSET */
nla_total_size(2) + /* MPTCP_SUBFLOW_ATTR_MAP_DATALEN */
nla_total_size(4) + /* MPTCP_SUBFLOW_ATTR_FLAGS */
nla_total_size(1) + /* MPTCP_SUBFLOW_ATTR_ID_REM */
diff --git a/net/mptcp/fastopen.c b/net/mptcp/fastopen.c
index ad28da655f8b..a29ff901df75 100644
--- a/net/mptcp/fastopen.c
+++ b/net/mptcp/fastopen.c
@@ -68,12 +68,12 @@ void __mptcp_fastopen_gen_msk_ackseq(struct mptcp_sock *msk, struct mptcp_subflo
skb = skb_peek_tail(&sk->sk_receive_queue);
if (skb) {
WARN_ON_ONCE(MPTCP_SKB_CB(skb)->end_seq);
- pr_debug("msk %p moving seq %llx -> %llx end_seq %llx -> %llx", sk,
+ pr_debug("msk %p moving seq %llx -> %llx end_seq %llx -> %llx\n", sk,
MPTCP_SKB_CB(skb)->map_seq, MPTCP_SKB_CB(skb)->map_seq + msk->ack_seq,
MPTCP_SKB_CB(skb)->end_seq, MPTCP_SKB_CB(skb)->end_seq + msk->ack_seq);
MPTCP_SKB_CB(skb)->map_seq += msk->ack_seq;
MPTCP_SKB_CB(skb)->end_seq += msk->ack_seq;
}
- pr_debug("msk=%p ack_seq=%llx", msk, msk->ack_seq);
+ pr_debug("msk=%p ack_seq=%llx\n", msk, msk->ack_seq);
}
diff --git a/net/mptcp/mib.c b/net/mptcp/mib.c
index c30405e76833..7884217f33eb 100644
--- a/net/mptcp/mib.c
+++ b/net/mptcp/mib.c
@@ -19,7 +19,9 @@ static const struct snmp_mib mptcp_snmp_list[] = {
SNMP_MIB_ITEM("MPTCPRetrans", MPTCP_MIB_RETRANSSEGS),
SNMP_MIB_ITEM("MPJoinNoTokenFound", MPTCP_MIB_JOINNOTOKEN),
SNMP_MIB_ITEM("MPJoinSynRx", MPTCP_MIB_JOINSYNRX),
+ SNMP_MIB_ITEM("MPJoinSynBackupRx", MPTCP_MIB_JOINSYNBACKUPRX),
SNMP_MIB_ITEM("MPJoinSynAckRx", MPTCP_MIB_JOINSYNACKRX),
+ SNMP_MIB_ITEM("MPJoinSynAckBackupRx", MPTCP_MIB_JOINSYNACKBACKUPRX),
SNMP_MIB_ITEM("MPJoinSynAckHMacFailure", MPTCP_MIB_JOINSYNACKMAC),
SNMP_MIB_ITEM("MPJoinAckRx", MPTCP_MIB_JOINACKRX),
SNMP_MIB_ITEM("MPJoinAckHMacFailure", MPTCP_MIB_JOINACKMAC),
diff --git a/net/mptcp/mib.h b/net/mptcp/mib.h
index 2704afd0dfe4..66aa67f49d03 100644
--- a/net/mptcp/mib.h
+++ b/net/mptcp/mib.h
@@ -14,7 +14,9 @@ enum linux_mptcp_mib_field {
MPTCP_MIB_RETRANSSEGS, /* Segments retransmitted at the MPTCP-level */
MPTCP_MIB_JOINNOTOKEN, /* Received MP_JOIN but the token was not found */
MPTCP_MIB_JOINSYNRX, /* Received a SYN + MP_JOIN */
+ MPTCP_MIB_JOINSYNBACKUPRX, /* Received a SYN + MP_JOIN + backup flag */
MPTCP_MIB_JOINSYNACKRX, /* Received a SYN/ACK + MP_JOIN */
+ MPTCP_MIB_JOINSYNACKBACKUPRX, /* Received a SYN/ACK + MP_JOIN + backup flag */
MPTCP_MIB_JOINSYNACKMAC, /* HMAC was wrong on SYN/ACK + MP_JOIN */
MPTCP_MIB_JOINACKRX, /* Received an ACK + MP_JOIN */
MPTCP_MIB_JOINACKMAC, /* HMAC was wrong on ACK + MP_JOIN */
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index 8e8dcfbc2993..370c3836b771 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -117,7 +117,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
mp_opt->suboptions |= OPTION_MPTCP_CSUMREQD;
ptr += 2;
}
- pr_debug("MP_CAPABLE version=%x, flags=%x, optlen=%d sndr=%llu, rcvr=%llu len=%d csum=%u",
+ pr_debug("MP_CAPABLE version=%x, flags=%x, optlen=%d sndr=%llu, rcvr=%llu len=%d csum=%u\n",
version, flags, opsize, mp_opt->sndr_key,
mp_opt->rcvr_key, mp_opt->data_len, mp_opt->csum);
break;
@@ -131,7 +131,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
ptr += 4;
mp_opt->nonce = get_unaligned_be32(ptr);
ptr += 4;
- pr_debug("MP_JOIN bkup=%u, id=%u, token=%u, nonce=%u",
+ pr_debug("MP_JOIN bkup=%u, id=%u, token=%u, nonce=%u\n",
mp_opt->backup, mp_opt->join_id,
mp_opt->token, mp_opt->nonce);
} else if (opsize == TCPOLEN_MPTCP_MPJ_SYNACK) {
@@ -142,19 +142,19 @@ static void mptcp_parse_option(const struct sk_buff *skb,
ptr += 8;
mp_opt->nonce = get_unaligned_be32(ptr);
ptr += 4;
- pr_debug("MP_JOIN bkup=%u, id=%u, thmac=%llu, nonce=%u",
+ pr_debug("MP_JOIN bkup=%u, id=%u, thmac=%llu, nonce=%u\n",
mp_opt->backup, mp_opt->join_id,
mp_opt->thmac, mp_opt->nonce);
} else if (opsize == TCPOLEN_MPTCP_MPJ_ACK) {
mp_opt->suboptions |= OPTION_MPTCP_MPJ_ACK;
ptr += 2;
memcpy(mp_opt->hmac, ptr, MPTCPOPT_HMAC_LEN);
- pr_debug("MP_JOIN hmac");
+ pr_debug("MP_JOIN hmac\n");
}
break;
case MPTCPOPT_DSS:
- pr_debug("DSS");
+ pr_debug("DSS\n");
ptr++;
/* we must clear 'mpc_map' to be able to detect MP_CAPABLE
@@ -169,7 +169,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
mp_opt->ack64 = (flags & MPTCP_DSS_ACK64) != 0;
mp_opt->use_ack = (flags & MPTCP_DSS_HAS_ACK);
- pr_debug("data_fin=%d dsn64=%d use_map=%d ack64=%d use_ack=%d",
+ pr_debug("data_fin=%d dsn64=%d use_map=%d ack64=%d use_ack=%d\n",
mp_opt->data_fin, mp_opt->dsn64,
mp_opt->use_map, mp_opt->ack64,
mp_opt->use_ack);
@@ -207,7 +207,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
ptr += 4;
}
- pr_debug("data_ack=%llu", mp_opt->data_ack);
+ pr_debug("data_ack=%llu\n", mp_opt->data_ack);
}
if (mp_opt->use_map) {
@@ -231,7 +231,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
ptr += 2;
}
- pr_debug("data_seq=%llu subflow_seq=%u data_len=%u csum=%d:%u",
+ pr_debug("data_seq=%llu subflow_seq=%u data_len=%u csum=%d:%u\n",
mp_opt->data_seq, mp_opt->subflow_seq,
mp_opt->data_len, !!(mp_opt->suboptions & OPTION_MPTCP_CSUMREQD),
mp_opt->csum);
@@ -293,7 +293,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
mp_opt->ahmac = get_unaligned_be64(ptr);
ptr += 8;
}
- pr_debug("ADD_ADDR%s: id=%d, ahmac=%llu, echo=%d, port=%d",
+ pr_debug("ADD_ADDR%s: id=%d, ahmac=%llu, echo=%d, port=%d\n",
(mp_opt->addr.family == AF_INET6) ? "6" : "",
mp_opt->addr.id, mp_opt->ahmac, mp_opt->echo, ntohs(mp_opt->addr.port));
break;
@@ -309,7 +309,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
mp_opt->rm_list.nr = opsize - TCPOLEN_MPTCP_RM_ADDR_BASE;
for (i = 0; i < mp_opt->rm_list.nr; i++)
mp_opt->rm_list.ids[i] = *ptr++;
- pr_debug("RM_ADDR: rm_list_nr=%d", mp_opt->rm_list.nr);
+ pr_debug("RM_ADDR: rm_list_nr=%d\n", mp_opt->rm_list.nr);
break;
case MPTCPOPT_MP_PRIO:
@@ -318,7 +318,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
mp_opt->suboptions |= OPTION_MPTCP_PRIO;
mp_opt->backup = *ptr++ & MPTCP_PRIO_BKUP;
- pr_debug("MP_PRIO: prio=%d", mp_opt->backup);
+ pr_debug("MP_PRIO: prio=%d\n", mp_opt->backup);
break;
case MPTCPOPT_MP_FASTCLOSE:
@@ -329,7 +329,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
mp_opt->rcvr_key = get_unaligned_be64(ptr);
ptr += 8;
mp_opt->suboptions |= OPTION_MPTCP_FASTCLOSE;
- pr_debug("MP_FASTCLOSE: recv_key=%llu", mp_opt->rcvr_key);
+ pr_debug("MP_FASTCLOSE: recv_key=%llu\n", mp_opt->rcvr_key);
break;
case MPTCPOPT_RST:
@@ -343,7 +343,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
flags = *ptr++;
mp_opt->reset_transient = flags & MPTCP_RST_TRANSIENT;
mp_opt->reset_reason = *ptr;
- pr_debug("MP_RST: transient=%u reason=%u",
+ pr_debug("MP_RST: transient=%u reason=%u\n",
mp_opt->reset_transient, mp_opt->reset_reason);
break;
@@ -354,7 +354,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
ptr += 2;
mp_opt->suboptions |= OPTION_MPTCP_FAIL;
mp_opt->fail_seq = get_unaligned_be64(ptr);
- pr_debug("MP_FAIL: data_seq=%llu", mp_opt->fail_seq);
+ pr_debug("MP_FAIL: data_seq=%llu\n", mp_opt->fail_seq);
break;
default:
@@ -417,7 +417,7 @@ bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
*size = TCPOLEN_MPTCP_MPC_SYN;
return true;
} else if (subflow->request_join) {
- pr_debug("remote_token=%u, nonce=%u", subflow->remote_token,
+ pr_debug("remote_token=%u, nonce=%u\n", subflow->remote_token,
subflow->local_nonce);
opts->suboptions = OPTION_MPTCP_MPJ_SYN;
opts->join_id = subflow->local_id;
@@ -500,7 +500,7 @@ static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb,
*size = TCPOLEN_MPTCP_MPC_ACK;
}
- pr_debug("subflow=%p, local_key=%llu, remote_key=%llu map_len=%d",
+ pr_debug("subflow=%p, local_key=%llu, remote_key=%llu map_len=%d\n",
subflow, subflow->local_key, subflow->remote_key,
data_len);
@@ -509,7 +509,7 @@ static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb,
opts->suboptions = OPTION_MPTCP_MPJ_ACK;
memcpy(opts->hmac, subflow->hmac, MPTCPOPT_HMAC_LEN);
*size = TCPOLEN_MPTCP_MPJ_ACK;
- pr_debug("subflow=%p", subflow);
+ pr_debug("subflow=%p\n", subflow);
/* we can use the full delegate action helper only from BH context
* If we are in process context - sk is flushing the backlog at
@@ -675,7 +675,7 @@ static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff *
*size = len;
if (drop_other_suboptions) {
- pr_debug("drop other suboptions");
+ pr_debug("drop other suboptions\n");
opts->suboptions = 0;
/* note that e.g. DSS could have written into the memory
@@ -695,7 +695,7 @@ static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff *
} else {
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ECHOADDTX);
}
- pr_debug("addr_id=%d, ahmac=%llu, echo=%d, port=%d",
+ pr_debug("addr_id=%d, ahmac=%llu, echo=%d, port=%d\n",
opts->addr.id, opts->ahmac, echo, ntohs(opts->addr.port));
return true;
@@ -726,7 +726,7 @@ static bool mptcp_established_options_rm_addr(struct sock *sk,
opts->rm_list = rm_list;
for (i = 0; i < opts->rm_list.nr; i++)
- pr_debug("rm_list_ids[%d]=%d", i, opts->rm_list.ids[i]);
+ pr_debug("rm_list_ids[%d]=%d\n", i, opts->rm_list.ids[i]);
MPTCP_ADD_STATS(sock_net(sk), MPTCP_MIB_RMADDRTX, opts->rm_list.nr);
return true;
}
@@ -752,7 +752,7 @@ static bool mptcp_established_options_mp_prio(struct sock *sk,
opts->suboptions |= OPTION_MPTCP_PRIO;
opts->backup = subflow->request_bkup;
- pr_debug("prio=%d", opts->backup);
+ pr_debug("prio=%d\n", opts->backup);
return true;
}
@@ -794,7 +794,7 @@ static bool mptcp_established_options_fastclose(struct sock *sk,
opts->suboptions |= OPTION_MPTCP_FASTCLOSE;
opts->rcvr_key = READ_ONCE(msk->remote_key);
- pr_debug("FASTCLOSE key=%llu", opts->rcvr_key);
+ pr_debug("FASTCLOSE key=%llu\n", opts->rcvr_key);
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFASTCLOSETX);
return true;
}
@@ -816,7 +816,7 @@ static bool mptcp_established_options_mp_fail(struct sock *sk,
opts->suboptions |= OPTION_MPTCP_FAIL;
opts->fail_seq = subflow->map_seq;
- pr_debug("MP_FAIL fail_seq=%llu", opts->fail_seq);
+ pr_debug("MP_FAIL fail_seq=%llu\n", opts->fail_seq);
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFAILTX);
return true;
@@ -904,16 +904,16 @@ bool mptcp_synack_options(const struct request_sock *req, unsigned int *size,
opts->csum_reqd = subflow_req->csum_reqd;
opts->allow_join_id0 = subflow_req->allow_join_id0;
*size = TCPOLEN_MPTCP_MPC_SYNACK;
- pr_debug("subflow_req=%p, local_key=%llu",
+ pr_debug("subflow_req=%p, local_key=%llu\n",
subflow_req, subflow_req->local_key);
return true;
} else if (subflow_req->mp_join) {
opts->suboptions = OPTION_MPTCP_MPJ_SYNACK;
- opts->backup = subflow_req->backup;
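+ /* Advertise the locally requested backup flag; ->backup reflects
+ * what the peer requested.
+ */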
+ opts->backup = subflow_req->request_bkup;
opts->join_id = subflow_req->local_id;
opts->thmac = subflow_req->thmac;
opts->nonce = subflow_req->local_nonce;
- pr_debug("req=%p, bkup=%u, id=%u, thmac=%llu, nonce=%u",
+ pr_debug("req=%p, bkup=%u, id=%u, thmac=%llu, nonce=%u\n",
subflow_req, opts->backup, opts->join_id,
opts->thmac, opts->nonce);
*size = TCPOLEN_MPTCP_MPJ_SYNACK;
@@ -958,7 +958,8 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
if (subflow->remote_key_valid &&
(((mp_opt->suboptions & OPTION_MPTCP_DSS) && mp_opt->use_ack) ||
- ((mp_opt->suboptions & OPTION_MPTCP_ADD_ADDR) && !mp_opt->echo))) {
+ ((mp_opt->suboptions & OPTION_MPTCP_ADD_ADDR) &&
+ (!mp_opt->echo || subflow->mp_join)))) {
/* subflows are fully established as soon as we get any
* additional ack, including ADD_ADDR.
*/
diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
index 55406720c607..37f6dbcd8434 100644
--- a/net/mptcp/pm.c
+++ b/net/mptcp/pm.c
@@ -19,7 +19,7 @@ int mptcp_pm_announce_addr(struct mptcp_sock *msk,
{
u8 add_addr = READ_ONCE(msk->pm.addr_signal);
- pr_debug("msk=%p, local_id=%d, echo=%d", msk, addr->id, echo);
+ pr_debug("msk=%p, local_id=%d, echo=%d\n", msk, addr->id, echo);
lockdep_assert_held(&msk->pm.lock);
@@ -45,7 +45,7 @@ int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_
{
u8 rm_addr = READ_ONCE(msk->pm.addr_signal);
- pr_debug("msk=%p, rm_list_nr=%d", msk, rm_list->nr);
+ pr_debug("msk=%p, rm_list_nr=%d\n", msk, rm_list->nr);
if (rm_addr) {
MPTCP_ADD_STATS(sock_net((struct sock *)msk),
@@ -60,23 +60,13 @@ int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_
return 0;
}
-int mptcp_pm_remove_subflow(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list)
-{
- pr_debug("msk=%p, rm_list_nr=%d", msk, rm_list->nr);
-
- spin_lock_bh(&msk->pm.lock);
- mptcp_pm_nl_rm_subflow_received(msk, rm_list);
- spin_unlock_bh(&msk->pm.lock);
- return 0;
-}
-
/* path manager event handlers */
void mptcp_pm_new_connection(struct mptcp_sock *msk, const struct sock *ssk, int server_side)
{
struct mptcp_pm_data *pm = &msk->pm;
- pr_debug("msk=%p, token=%u side=%d", msk, READ_ONCE(msk->token), server_side);
+ pr_debug("msk=%p, token=%u side=%d\n", msk, READ_ONCE(msk->token), server_side);
WRITE_ONCE(pm->server_side, server_side);
mptcp_event(MPTCP_EVENT_CREATED, msk, ssk, GFP_ATOMIC);
@@ -100,7 +90,7 @@ bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
subflows_max = mptcp_pm_get_subflows_max(msk);
- pr_debug("msk=%p subflows=%d max=%d allow=%d", msk, pm->subflows,
+ pr_debug("msk=%p subflows=%d max=%d allow=%d\n", msk, pm->subflows,
subflows_max, READ_ONCE(pm->accept_subflow));
/* try to avoid acquiring the lock below */
@@ -124,7 +114,7 @@ bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
static bool mptcp_pm_schedule_work(struct mptcp_sock *msk,
enum mptcp_pm_status new_status)
{
- pr_debug("msk=%p status=%x new=%lx", msk, msk->pm.status,
+ pr_debug("msk=%p status=%x new=%lx\n", msk, msk->pm.status,
BIT(new_status));
if (msk->pm.status & BIT(new_status))
return false;
@@ -139,7 +129,7 @@ void mptcp_pm_fully_established(struct mptcp_sock *msk, const struct sock *ssk)
struct mptcp_pm_data *pm = &msk->pm;
bool announce = false;
- pr_debug("msk=%p", msk);
+ pr_debug("msk=%p\n", msk);
spin_lock_bh(&pm->lock);
@@ -163,14 +153,14 @@ void mptcp_pm_fully_established(struct mptcp_sock *msk, const struct sock *ssk)
void mptcp_pm_connection_closed(struct mptcp_sock *msk)
{
- pr_debug("msk=%p", msk);
+ pr_debug("msk=%p\n", msk);
}
void mptcp_pm_subflow_established(struct mptcp_sock *msk)
{
struct mptcp_pm_data *pm = &msk->pm;
- pr_debug("msk=%p", msk);
+ pr_debug("msk=%p\n", msk);
if (!READ_ONCE(pm->work_pending))
return;
@@ -222,7 +212,7 @@ void mptcp_pm_add_addr_received(const struct sock *ssk,
struct mptcp_sock *msk = mptcp_sk(subflow->conn);
struct mptcp_pm_data *pm = &msk->pm;
- pr_debug("msk=%p remote_id=%d accept=%d", msk, addr->id,
+ pr_debug("msk=%p remote_id=%d accept=%d\n", msk, addr->id,
READ_ONCE(pm->accept_addr));
mptcp_event_addr_announced(ssk, addr);
@@ -236,7 +226,9 @@ void mptcp_pm_add_addr_received(const struct sock *ssk,
} else {
__MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP);
}
- } else if (!READ_ONCE(pm->accept_addr)) {
+ /* id0 should not have a different address */
+ } else if ((addr->id == 0 && !mptcp_pm_nl_is_init_remote_addr(msk, addr)) ||
+ (addr->id > 0 && !READ_ONCE(pm->accept_addr))) {
mptcp_pm_announce_addr(msk, addr, true);
mptcp_pm_add_addr_send_ack(msk);
} else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) {
@@ -253,7 +245,7 @@ void mptcp_pm_add_addr_echoed(struct mptcp_sock *msk,
{
struct mptcp_pm_data *pm = &msk->pm;
- pr_debug("msk=%p", msk);
+ pr_debug("msk=%p\n", msk);
spin_lock_bh(&pm->lock);
@@ -277,7 +269,7 @@ void mptcp_pm_rm_addr_received(struct mptcp_sock *msk,
struct mptcp_pm_data *pm = &msk->pm;
u8 i;
- pr_debug("msk=%p remote_ids_nr=%d", msk, rm_list->nr);
+ pr_debug("msk=%p remote_ids_nr=%d\n", msk, rm_list->nr);
for (i = 0; i < rm_list->nr; i++)
mptcp_event_addr_removed(msk, rm_list->ids[i]);
@@ -309,19 +301,19 @@ void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq)
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
struct mptcp_sock *msk = mptcp_sk(subflow->conn);
- pr_debug("fail_seq=%llu", fail_seq);
+ pr_debug("fail_seq=%llu\n", fail_seq);
if (!READ_ONCE(msk->allow_infinite_fallback))
return;
if (!subflow->fail_tout) {
- pr_debug("send MP_FAIL response and infinite map");
+ pr_debug("send MP_FAIL response and infinite map\n");
subflow->send_mp_fail = 1;
subflow->send_infinite_map = 1;
tcp_send_ack(sk);
} else {
- pr_debug("MP_FAIL response received");
+ pr_debug("MP_FAIL response received\n");
WRITE_ONCE(subflow->fail_tout, 0);
}
}
@@ -426,15 +418,24 @@ int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
return mptcp_pm_nl_get_local_id(msk, &skc_local);
}
+bool mptcp_pm_is_backup(struct mptcp_sock *msk, struct sock_common *skc)
+{
+ struct mptcp_addr_info skc_local;
+
+ mptcp_local_address((struct sock_common *)skc, &skc_local);
+
+ if (mptcp_pm_is_userspace(msk))
+ return mptcp_userspace_pm_is_backup(msk, &skc_local);
+
+ return mptcp_pm_nl_is_backup(msk, &skc_local);
+}
+
int mptcp_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk, unsigned int id,
u8 *flags, int *ifindex)
{
*flags = 0;
*ifindex = 0;
- if (!id)
- return 0;
-
if (mptcp_pm_is_userspace(msk))
return mptcp_userspace_pm_get_flags_and_ifindex_by_id(msk, id, flags, ifindex);
return mptcp_pm_nl_get_flags_and_ifindex_by_id(msk, id, flags, ifindex);
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index ea9e5817b9e9..f891bc714668 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -130,12 +130,15 @@ static bool lookup_subflow_by_daddr(const struct list_head *list,
{
struct mptcp_subflow_context *subflow;
struct mptcp_addr_info cur;
- struct sock_common *skc;
list_for_each_entry(subflow, list, node) {
- skc = (struct sock_common *)mptcp_subflow_tcp_sock(subflow);
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
- remote_address(skc, &cur);
+ if (!((1 << inet_sk_state_load(ssk)) &
+ (TCPF_ESTABLISHED | TCPF_SYN_SENT | TCPF_SYN_RECV)))
+ continue;
+
+ remote_address((struct sock_common *)ssk, &cur);
if (mptcp_addresses_equal(&cur, daddr, daddr->port))
return true;
}
@@ -143,11 +146,13 @@ static bool lookup_subflow_by_daddr(const struct list_head *list,
return false;
}
-static struct mptcp_pm_addr_entry *
+static bool
select_local_address(const struct pm_nl_pernet *pernet,
- const struct mptcp_sock *msk)
+ const struct mptcp_sock *msk,
+ struct mptcp_pm_addr_entry *new_entry)
{
- struct mptcp_pm_addr_entry *entry, *ret = NULL;
+ struct mptcp_pm_addr_entry *entry;
+ bool found = false;
msk_owned_by_me(msk);
@@ -159,17 +164,21 @@ select_local_address(const struct pm_nl_pernet *pernet,
if (!test_bit(entry->addr.id, msk->pm.id_avail_bitmap))
continue;
- ret = entry;
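+ /* Copy the entry while still inside the RCU read-side critical
+ * section; the entry pointer must not be used once it ends.
+ */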
+ *new_entry = *entry;
+ found = true;
break;
}
rcu_read_unlock();
- return ret;
+
+ return found;
}
-static struct mptcp_pm_addr_entry *
-select_signal_address(struct pm_nl_pernet *pernet, const struct mptcp_sock *msk)
+static bool
+select_signal_address(struct pm_nl_pernet *pernet, const struct mptcp_sock *msk,
+ struct mptcp_pm_addr_entry *new_entry)
{
- struct mptcp_pm_addr_entry *entry, *ret = NULL;
+ struct mptcp_pm_addr_entry *entry;
+ bool found = false;
rcu_read_lock();
/* do not keep any additional per socket state, just signal
@@ -184,11 +193,13 @@ select_signal_address(struct pm_nl_pernet *pernet, const struct mptcp_sock *msk)
if (!(entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL))
continue;
- ret = entry;
+ *new_entry = *entry;
+ found = true;
break;
}
rcu_read_unlock();
- return ret;
+
+ return found;
}
unsigned int mptcp_pm_get_add_addr_signal_max(const struct mptcp_sock *msk)
@@ -279,7 +290,7 @@ static void mptcp_pm_add_timer(struct timer_list *timer)
struct mptcp_sock *msk = entry->sock;
struct sock *sk = (struct sock *)msk;
- pr_debug("msk=%p", msk);
+ pr_debug("msk=%p\n", msk);
if (!msk)
return;
@@ -298,7 +309,7 @@ static void mptcp_pm_add_timer(struct timer_list *timer)
spin_lock_bh(&msk->pm.lock);
if (!mptcp_pm_should_add_signal_addr(msk)) {
- pr_debug("retransmit ADD_ADDR id=%d", entry->addr.id);
+ pr_debug("retransmit ADD_ADDR id=%d\n", entry->addr.id);
mptcp_pm_announce_addr(msk, &entry->addr, false);
mptcp_pm_add_addr_send_ack(msk);
entry->retrans_times++;
@@ -348,7 +359,7 @@ bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk,
add_entry = mptcp_lookup_anno_list_by_saddr(msk, addr);
if (add_entry) {
- if (mptcp_pm_is_kernel(msk))
+ if (WARN_ON_ONCE(mptcp_pm_is_kernel(msk)))
return false;
sk_reset_timer(sk, &add_entry->add_timer,
@@ -379,7 +390,7 @@ void mptcp_pm_free_anno_list(struct mptcp_sock *msk)
struct sock *sk = (struct sock *)msk;
LIST_HEAD(free_list);
- pr_debug("msk=%p", msk);
+ pr_debug("msk=%p\n", msk);
spin_lock_bh(&msk->pm.lock);
list_splice_init(&msk->pm.anno_list, &free_list);
@@ -465,13 +476,12 @@ static void __mptcp_pm_send_ack(struct mptcp_sock *msk, struct mptcp_subflow_con
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
bool slow;
- pr_debug("send ack for %s",
+ pr_debug("send ack for %s\n",
prio ? "mp_prio" : (mptcp_pm_should_add_signal(msk) ? "add_addr" : "rm_addr"));
slow = lock_sock_fast(ssk);
if (prio) {
subflow->send_mp_prio = 1;
- subflow->backup = backup;
subflow->request_bkup = backup;
}
@@ -514,8 +524,9 @@ __lookup_addr(struct pm_nl_pernet *pernet, const struct mptcp_addr_info *info)
static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
{
struct sock *sk = (struct sock *)msk;
- struct mptcp_pm_addr_entry *local;
+ struct mptcp_pm_addr_entry local;
unsigned int add_addr_signal_max;
+ bool signal_and_subflow = false;
unsigned int local_addr_max;
struct pm_nl_pernet *pernet;
unsigned int subflows_max;
@@ -556,8 +567,6 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
/* check first for announce */
if (msk->pm.add_addr_signaled < add_addr_signal_max) {
- local = select_signal_address(pernet, msk);
-
/* due to racing events on both ends we can reach here while
* previous add address is still running: if we invoke now
* mptcp_pm_announce_addr(), that will fail and the
@@ -568,16 +577,30 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
if (msk->pm.addr_signal & BIT(MPTCP_ADD_ADDR_SIGNAL))
return;
- if (local) {
- if (mptcp_pm_alloc_anno_list(msk, &local->addr)) {
- __clear_bit(local->addr.id, msk->pm.id_avail_bitmap);
- msk->pm.add_addr_signaled++;
- mptcp_pm_announce_addr(msk, &local->addr, false);
- mptcp_pm_nl_addr_send_ack(msk);
- }
- }
+ if (!select_signal_address(pernet, msk, &local))
+ goto subflow;
+
+ /* If the allocation fails, we are under memory pressure; it is not
+ * worth continuing or trying to create subflows.
+ */
+ if (!mptcp_pm_alloc_anno_list(msk, &local.addr))
+ return;
+
+ __clear_bit(local.addr.id, msk->pm.id_avail_bitmap);
+ msk->pm.add_addr_signaled++;
+
+ /* Special case for ID0: set the correct ID */
+ if (local.addr.id == msk->mpc_endpoint_id)
+ local.addr.id = 0;
+
+ mptcp_pm_announce_addr(msk, &local.addr, false);
+ mptcp_pm_nl_addr_send_ack(msk);
+
+ if (local.flags & MPTCP_PM_ADDR_FLAG_SUBFLOW)
+ signal_and_subflow = true;
}
+subflow:
/* check if should create a new subflow */
while (msk->pm.local_addr_used < local_addr_max &&
msk->pm.subflows < subflows_max) {
@@ -585,21 +608,28 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
bool fullmesh;
int i, nr;
- local = select_local_address(pernet, msk);
- if (!local)
+ if (signal_and_subflow)
+ signal_and_subflow = false;
+ else if (!select_local_address(pernet, msk, &local))
break;
- fullmesh = !!(local->flags & MPTCP_PM_ADDR_FLAG_FULLMESH);
+ fullmesh = !!(local.flags & MPTCP_PM_ADDR_FLAG_FULLMESH);
+
+ __clear_bit(local.addr.id, msk->pm.id_avail_bitmap);
+
+ /* Special case for ID0: set the correct ID */
+ if (local.addr.id == msk->mpc_endpoint_id)
+ local.addr.id = 0;
+ else /* local_addr_used is not decremented for ID 0 */
+ msk->pm.local_addr_used++;
- msk->pm.local_addr_used++;
- __clear_bit(local->addr.id, msk->pm.id_avail_bitmap);
- nr = fill_remote_addresses_vec(msk, &local->addr, fullmesh, addrs);
+ nr = fill_remote_addresses_vec(msk, &local.addr, fullmesh, addrs);
if (nr == 0)
continue;
spin_unlock_bh(&msk->pm.lock);
for (i = 0; i < nr; i++)
- __mptcp_subflow_connect(sk, &local->addr, &addrs[i]);
+ __mptcp_subflow_connect(sk, &local.addr, &addrs[i]);
spin_lock_bh(&msk->pm.lock);
}
mptcp_pm_nl_check_work_pending(msk);
@@ -624,6 +654,7 @@ static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk,
{
struct sock *sk = (struct sock *)msk;
struct mptcp_pm_addr_entry *entry;
+ struct mptcp_addr_info mpc_addr;
struct pm_nl_pernet *pernet;
unsigned int subflows_max;
int i = 0;
@@ -631,6 +662,8 @@ static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk,
pernet = pm_nl_get_pernet_from_msk(msk);
subflows_max = mptcp_pm_get_subflows_max(msk);
+ mptcp_local_address((struct sock_common *)msk, &mpc_addr);
+
rcu_read_lock();
list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
if (!(entry->flags & MPTCP_PM_ADDR_FLAG_FULLMESH))
@@ -641,7 +674,13 @@ static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk,
if (msk->pm.subflows < subflows_max) {
msk->pm.subflows++;
- addrs[i++] = entry->addr;
+ addrs[i] = entry->addr;
+
+ /* Special case for ID0: set the correct ID */
+ if (mptcp_addresses_equal(&entry->addr, &mpc_addr, entry->addr.port))
+ addrs[i].id = 0;
+
+ i++;
}
}
rcu_read_unlock();
@@ -683,7 +722,7 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
add_addr_accept_max = mptcp_pm_get_add_addr_accept_max(msk);
subflows_max = mptcp_pm_get_subflows_max(msk);
- pr_debug("accepted %d:%d remote family %d",
+ pr_debug("accepted %d:%d remote family %d\n",
msk->pm.add_addr_accepted, add_addr_accept_max,
msk->pm.remote.family);
@@ -712,13 +751,24 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
spin_lock_bh(&msk->pm.lock);
if (sf_created) {
- msk->pm.add_addr_accepted++;
+ /* add_addr_accepted is not decremented for ID 0 */
+ if (remote.id)
+ msk->pm.add_addr_accepted++;
if (msk->pm.add_addr_accepted >= add_addr_accept_max ||
msk->pm.subflows >= subflows_max)
WRITE_ONCE(msk->pm.accept_addr, false);
}
}
+bool mptcp_pm_nl_is_init_remote_addr(struct mptcp_sock *msk,
+ const struct mptcp_addr_info *remote)
+{
+ struct mptcp_addr_info mpc_remote;
+
+ remote_address((struct sock_common *)msk, &mpc_remote);
+ return mptcp_addresses_equal(&mpc_remote, remote, remote->port);
+}
+
void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk)
{
struct mptcp_subflow_context *subflow;
@@ -730,9 +780,12 @@ void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk)
!mptcp_pm_should_rm_signal(msk))
return;
- subflow = list_first_entry_or_null(&msk->conn_list, typeof(*subflow), node);
- if (subflow)
- mptcp_pm_send_ack(msk, subflow, false, false);
+ mptcp_for_each_subflow(msk, subflow) {
+ if (__mptcp_subflow_active(subflow)) {
+ mptcp_pm_send_ack(msk, subflow, false, false);
+ break;
+ }
+ }
}
int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
@@ -742,7 +795,7 @@ int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
{
struct mptcp_subflow_context *subflow;
- pr_debug("bkup=%d", bkup);
+ pr_debug("bkup=%d\n", bkup);
mptcp_for_each_subflow(msk, subflow) {
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
@@ -765,11 +818,6 @@ int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
return -EINVAL;
}
-static bool mptcp_local_id_match(const struct mptcp_sock *msk, u8 local_id, u8 id)
-{
- return local_id == id || (!local_id && msk->mpc_endpoint_id == id);
-}
-
static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
const struct mptcp_rm_list *rm_list,
enum linux_mptcp_mib_field rm_type)
@@ -778,7 +826,7 @@ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
struct sock *sk = (struct sock *)msk;
u8 i;
- pr_debug("%s rm_list_nr %d",
+ pr_debug("%s rm_list_nr %d\n",
rm_type == MPTCP_MIB_RMADDR ? "address" : "subflow", rm_list->nr);
msk_owned_by_me(msk);
@@ -802,12 +850,14 @@ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
int how = RCV_SHUTDOWN | SEND_SHUTDOWN;
u8 id = subflow_get_local_id(subflow);
+ if (inet_sk_state_load(ssk) == TCP_CLOSE)
+ continue;
if (rm_type == MPTCP_MIB_RMADDR && remote_id != rm_id)
continue;
- if (rm_type == MPTCP_MIB_RMSUBFLOW && !mptcp_local_id_match(msk, id, rm_id))
+ if (rm_type == MPTCP_MIB_RMSUBFLOW && id != rm_id)
continue;
- pr_debug(" -> %s rm_list_ids[%d]=%u local_id=%u remote_id=%u mpc_id=%u",
+ pr_debug(" -> %s rm_list_ids[%d]=%u local_id=%u remote_id=%u mpc_id=%u\n",
rm_type == MPTCP_MIB_RMADDR ? "address" : "subflow",
i, rm_id, id, remote_id, msk->mpc_endpoint_id);
spin_unlock_bh(&msk->pm.lock);
@@ -817,25 +867,27 @@ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
mptcp_close_ssk(sk, ssk, subflow);
spin_lock_bh(&msk->pm.lock);
- removed = true;
+ removed |= subflow->request_join;
if (rm_type == MPTCP_MIB_RMSUBFLOW)
__MPTCP_INC_STATS(sock_net(sk), rm_type);
}
- if (rm_type == MPTCP_MIB_RMSUBFLOW)
- __set_bit(rm_id ? rm_id : msk->mpc_endpoint_id, msk->pm.id_avail_bitmap);
- else if (rm_type == MPTCP_MIB_RMADDR)
+
+ if (rm_type == MPTCP_MIB_RMADDR)
__MPTCP_INC_STATS(sock_net(sk), rm_type);
+
if (!removed)
continue;
if (!mptcp_pm_is_kernel(msk))
continue;
- if (rm_type == MPTCP_MIB_RMADDR) {
- msk->pm.add_addr_accepted--;
- WRITE_ONCE(msk->pm.accept_addr, true);
- } else if (rm_type == MPTCP_MIB_RMSUBFLOW) {
- msk->pm.local_addr_used--;
+ if (rm_type == MPTCP_MIB_RMADDR && rm_id &&
+ !WARN_ON_ONCE(msk->pm.add_addr_accepted == 0)) {
+ /* Note: if the subflow has been closed before, this
+ * add_addr_accepted counter will not be decremented.
+ */
+ if (--msk->pm.add_addr_accepted < mptcp_pm_get_add_addr_accept_max(msk))
+ WRITE_ONCE(msk->pm.accept_addr, true);
}
}
}
@@ -845,8 +897,8 @@ static void mptcp_pm_nl_rm_addr_received(struct mptcp_sock *msk)
mptcp_pm_nl_rm_addr_or_subflow(msk, &msk->pm.rm_list_rx, MPTCP_MIB_RMADDR);
}
-void mptcp_pm_nl_rm_subflow_received(struct mptcp_sock *msk,
- const struct mptcp_rm_list *rm_list)
+static void mptcp_pm_nl_rm_subflow_received(struct mptcp_sock *msk,
+ const struct mptcp_rm_list *rm_list)
{
mptcp_pm_nl_rm_addr_or_subflow(msk, rm_list, MPTCP_MIB_RMSUBFLOW);
}
@@ -862,7 +914,7 @@ void mptcp_pm_nl_work(struct mptcp_sock *msk)
spin_lock_bh(&msk->pm.lock);
- pr_debug("msk=%p status=%x", msk, pm->status);
+ pr_debug("msk=%p status=%x\n", msk, pm->status);
if (pm->status & BIT(MPTCP_PM_ADD_ADDR_RECEIVED)) {
pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED);
mptcp_pm_nl_add_addr_received(msk);
@@ -1102,6 +1154,24 @@ int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc
return ret;
}
+bool mptcp_pm_nl_is_backup(struct mptcp_sock *msk, struct mptcp_addr_info *skc)
+{
+ struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
+ struct mptcp_pm_addr_entry *entry;
+ bool backup = false;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
+ if (mptcp_addresses_equal(&entry->addr, skc, entry->addr.port)) {
+ backup = !!(entry->flags & MPTCP_PM_ADDR_FLAG_BACKUP);
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return backup;
+}
+
#define MPTCP_PM_CMD_GRP_OFFSET 0
#define MPTCP_PM_EV_GRP_OFFSET 1
@@ -1262,20 +1332,27 @@ static struct pm_nl_pernet *genl_info_pm_nl(struct genl_info *info)
return pm_nl_get_pernet(genl_info_net(info));
}
-static int mptcp_nl_add_subflow_or_signal_addr(struct net *net)
+static int mptcp_nl_add_subflow_or_signal_addr(struct net *net,
+ struct mptcp_addr_info *addr)
{
struct mptcp_sock *msk;
long s_slot = 0, s_num = 0;
while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
struct sock *sk = (struct sock *)msk;
+ struct mptcp_addr_info mpc_addr;
if (!READ_ONCE(msk->fully_established) ||
mptcp_pm_is_userspace(msk))
goto next;
+ /* if the endp linked to the init sf is re-added with a != ID */
+ mptcp_local_address((struct sock_common *)msk, &mpc_addr);
+
lock_sock(sk);
spin_lock_bh(&msk->pm.lock);
+ if (mptcp_addresses_equal(addr, &mpc_addr, addr->port))
+ msk->mpc_endpoint_id = addr->id;
mptcp_pm_create_subflow_or_signal_addr(msk);
spin_unlock_bh(&msk->pm.lock);
release_sock(sk);
@@ -1311,8 +1388,8 @@ int mptcp_pm_nl_add_addr_doit(struct sk_buff *skb, struct genl_info *info)
if (ret < 0)
return ret;
- if (addr.addr.port && !(addr.flags & MPTCP_PM_ADDR_FLAG_SIGNAL)) {
- GENL_SET_ERR_MSG(info, "flags must have signal when using port");
+ if (addr.addr.port && !address_use_port(&addr)) {
+ GENL_SET_ERR_MSG(info, "flags must have signal and not subflow when using port");
return -EINVAL;
}
@@ -1348,7 +1425,7 @@ int mptcp_pm_nl_add_addr_doit(struct sk_buff *skb, struct genl_info *info)
goto out_free;
}
- mptcp_nl_add_subflow_or_signal_addr(sock_net(skb->sk));
+ mptcp_nl_add_subflow_or_signal_addr(sock_net(skb->sk), &entry->addr);
return 0;
out_free:
@@ -1363,6 +1440,10 @@ int mptcp_pm_nl_get_flags_and_ifindex_by_id(struct mptcp_sock *msk, unsigned int
struct sock *sk = (struct sock *)msk;
struct net *net = sock_net(sk);
+ /* No entries with ID 0 */
+ if (id == 0)
+ return 0;
+
rcu_read_lock();
entry = __lookup_addr_by_id(pm_nl_get_pernet(net), id);
if (entry) {
@@ -1389,6 +1470,12 @@ static bool remove_anno_list_by_saddr(struct mptcp_sock *msk,
return false;
}
+static u8 mptcp_endp_get_local_id(struct mptcp_sock *msk,
+ const struct mptcp_addr_info *addr)
+{
+ return msk->mpc_endpoint_id == addr->id ? 0 : addr->id;
+}
+
static bool mptcp_pm_remove_anno_addr(struct mptcp_sock *msk,
const struct mptcp_addr_info *addr,
bool force)
@@ -1396,28 +1483,38 @@ static bool mptcp_pm_remove_anno_addr(struct mptcp_sock *msk,
struct mptcp_rm_list list = { .nr = 0 };
bool ret;
- list.ids[list.nr++] = addr->id;
+ list.ids[list.nr++] = mptcp_endp_get_local_id(msk, addr);
ret = remove_anno_list_by_saddr(msk, addr);
if (ret || force) {
spin_lock_bh(&msk->pm.lock);
+ if (ret) {
+ __set_bit(addr->id, msk->pm.id_avail_bitmap);
+ msk->pm.add_addr_signaled--;
+ }
mptcp_pm_remove_addr(msk, &list);
spin_unlock_bh(&msk->pm.lock);
}
return ret;
}
+static void __mark_subflow_endp_available(struct mptcp_sock *msk, u8 id)
+{
+ /* If it was marked as used, and not ID 0, decrement local_addr_used */
+ if (!__test_and_set_bit(id ? : msk->mpc_endpoint_id, msk->pm.id_avail_bitmap) &&
+ id && !WARN_ON_ONCE(msk->pm.local_addr_used == 0))
+ msk->pm.local_addr_used--;
+}
+
static int mptcp_nl_remove_subflow_and_signal_addr(struct net *net,
const struct mptcp_pm_addr_entry *entry)
{
const struct mptcp_addr_info *addr = &entry->addr;
- struct mptcp_rm_list list = { .nr = 0 };
+ struct mptcp_rm_list list = { .nr = 1 };
long s_slot = 0, s_num = 0;
struct mptcp_sock *msk;
- pr_debug("remove_id=%d", addr->id);
-
- list.ids[list.nr++] = addr->id;
+ pr_debug("remove_id=%d\n", addr->id);
while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
struct sock *sk = (struct sock *)msk;
@@ -1435,8 +1532,22 @@ static int mptcp_nl_remove_subflow_and_signal_addr(struct net *net,
remove_subflow = lookup_subflow_by_saddr(&msk->conn_list, addr);
mptcp_pm_remove_anno_addr(msk, addr, remove_subflow &&
!(entry->flags & MPTCP_PM_ADDR_FLAG_IMPLICIT));
- if (remove_subflow)
- mptcp_pm_remove_subflow(msk, &list);
+
+ list.ids[0] = mptcp_endp_get_local_id(msk, addr);
+ if (remove_subflow) {
+ spin_lock_bh(&msk->pm.lock);
+ mptcp_pm_nl_rm_subflow_received(msk, &list);
+ spin_unlock_bh(&msk->pm.lock);
+ }
+
+ if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) {
+ spin_lock_bh(&msk->pm.lock);
+ __mark_subflow_endp_available(msk, list.ids[0]);
+ spin_unlock_bh(&msk->pm.lock);
+ }
+
+ if (msk->mpc_endpoint_id == entry->addr.id)
+ msk->mpc_endpoint_id = 0;
release_sock(sk);
next:
@@ -1471,6 +1582,7 @@ static int mptcp_nl_remove_id_zero_address(struct net *net,
spin_lock_bh(&msk->pm.lock);
mptcp_pm_remove_addr(msk, &list);
mptcp_pm_nl_rm_subflow_received(msk, &list);
+ __mark_subflow_endp_available(msk, 0);
spin_unlock_bh(&msk->pm.lock);
release_sock(sk);
@@ -1530,25 +1642,36 @@ int mptcp_pm_nl_del_addr_doit(struct sk_buff *skb, struct genl_info *info)
return ret;
}
+/* Called from the userspace PM only */
void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list)
{
struct mptcp_rm_list alist = { .nr = 0 };
struct mptcp_pm_addr_entry *entry;
+ int anno_nr = 0;
list_for_each_entry(entry, rm_list, list) {
- if ((remove_anno_list_by_saddr(msk, &entry->addr) ||
- lookup_subflow_by_saddr(&msk->conn_list, &entry->addr)) &&
- alist.nr < MPTCP_RM_IDS_MAX)
- alist.ids[alist.nr++] = entry->addr.id;
+ if (alist.nr >= MPTCP_RM_IDS_MAX)
+ break;
+
+ /* only delete if either announced or matching a subflow */
+ if (remove_anno_list_by_saddr(msk, &entry->addr))
+ anno_nr++;
+ else if (!lookup_subflow_by_saddr(&msk->conn_list,
+ &entry->addr))
+ continue;
+
+ alist.ids[alist.nr++] = entry->addr.id;
}
if (alist.nr) {
spin_lock_bh(&msk->pm.lock);
+ msk->pm.add_addr_signaled -= anno_nr;
mptcp_pm_remove_addr(msk, &alist);
spin_unlock_bh(&msk->pm.lock);
}
}
+/* Called from the in-kernel PM only */
static void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
struct list_head *rm_list)
{
@@ -1556,22 +1679,26 @@ static void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
struct mptcp_pm_addr_entry *entry;
list_for_each_entry(entry, rm_list, list) {
- if (lookup_subflow_by_saddr(&msk->conn_list, &entry->addr) &&
- slist.nr < MPTCP_RM_IDS_MAX)
- slist.ids[slist.nr++] = entry->addr.id;
+ if (slist.nr < MPTCP_RM_IDS_MAX &&
+ lookup_subflow_by_saddr(&msk->conn_list, &entry->addr))
+ slist.ids[slist.nr++] = mptcp_endp_get_local_id(msk, &entry->addr);
- if (remove_anno_list_by_saddr(msk, &entry->addr) &&
- alist.nr < MPTCP_RM_IDS_MAX)
- alist.ids[alist.nr++] = entry->addr.id;
+ if (alist.nr < MPTCP_RM_IDS_MAX &&
+ remove_anno_list_by_saddr(msk, &entry->addr))
+ alist.ids[alist.nr++] = mptcp_endp_get_local_id(msk, &entry->addr);
}
+ spin_lock_bh(&msk->pm.lock);
if (alist.nr) {
- spin_lock_bh(&msk->pm.lock);
+ msk->pm.add_addr_signaled -= alist.nr;
mptcp_pm_remove_addr(msk, &alist);
- spin_unlock_bh(&msk->pm.lock);
}
if (slist.nr)
- mptcp_pm_remove_subflow(msk, &slist);
+ mptcp_pm_nl_rm_subflow_received(msk, &slist);
+ /* Reset counters: maybe some subflows have been removed before */
+ bitmap_fill(msk->pm.id_avail_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
+ msk->pm.local_addr_used = 0;
+ spin_unlock_bh(&msk->pm.lock);
}
static void mptcp_nl_remove_addrs_list(struct net *net,
@@ -1855,10 +1982,11 @@ static void mptcp_pm_nl_fullmesh(struct mptcp_sock *msk,
{
struct mptcp_rm_list list = { .nr = 0 };
- list.ids[list.nr++] = addr->id;
+ list.ids[list.nr++] = mptcp_endp_get_local_id(msk, addr);
spin_lock_bh(&msk->pm.lock);
mptcp_pm_nl_rm_subflow_received(msk, &list);
+ __mark_subflow_endp_available(msk, list.ids[0]);
mptcp_pm_create_subflow_or_signal_addr(msk);
spin_unlock_bh(&msk->pm.lock);
}
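/* Illustrative sketch only: the names below come straight from the hunks
 * above, but this function is hypothetical and not part of the commit. It
 * shows how mptcp_endp_get_local_id() and __mark_subflow_endp_available()
 * are meant to pair on a removal path: the endpoint backing the initial
 * subflow is announced to the peer as ID 0, so the rm_list carries the
 * translated ID, and the bitmap slot is released afterwards so the
 * in-kernel PM may reuse the endpoint.
 */
static void example_remove_subflow_endp(struct mptcp_sock *msk,
					const struct mptcp_addr_info *addr)
{
	struct mptcp_rm_list list = { .nr = 0 };

	list.ids[list.nr++] = mptcp_endp_get_local_id(msk, addr);

	spin_lock_bh(&msk->pm.lock);
	mptcp_pm_nl_rm_subflow_received(msk, &list);
	__mark_subflow_endp_available(msk, list.ids[0]);
	spin_unlock_bh(&msk->pm.lock);
}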
diff --git a/net/mptcp/pm_userspace.c b/net/mptcp/pm_userspace.c
index f0a4590506c6..8eaa9fbe3e34 100644
--- a/net/mptcp/pm_userspace.c
+++ b/net/mptcp/pm_userspace.c
@@ -165,6 +165,24 @@ int mptcp_userspace_pm_get_local_id(struct mptcp_sock *msk,
return mptcp_userspace_pm_append_new_local_addr(msk, &new_entry, true);
}
+bool mptcp_userspace_pm_is_backup(struct mptcp_sock *msk,
+ struct mptcp_addr_info *skc)
+{
+ struct mptcp_pm_addr_entry *entry;
+ bool backup = false;
+
+ spin_lock_bh(&msk->pm.lock);
+ list_for_each_entry(entry, &msk->pm.userspace_pm_local_addr_list, list) {
+ if (mptcp_addresses_equal(&entry->addr, skc, false)) {
+ backup = !!(entry->flags & MPTCP_PM_ADDR_FLAG_BACKUP);
+ break;
+ }
+ }
+ spin_unlock_bh(&msk->pm.lock);
+
+ return backup;
+}
+
int mptcp_pm_nl_announce_doit(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN];
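/* Illustrative sketch only, not part of the commit: the wrapper declared in
 * protocol.h presumably dispatches roughly like this, picking the userspace
 * or in-kernel lookup added above; the exact body lives in pm.c and may
 * differ.
 */
bool example_pm_is_backup(struct mptcp_sock *msk, struct sock_common *skc)
{
	struct mptcp_addr_info skc_local;

	mptcp_local_address(skc, &skc_local);

	if (mptcp_pm_is_userspace(msk))
		return mptcp_userspace_pm_is_backup(msk, &skc_local);

	return mptcp_pm_nl_is_backup(msk, &skc_local);
}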
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index a26c2c840fd9..37ebcb7640eb 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -139,7 +139,7 @@ static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to,
!skb_try_coalesce(to, from, &fragstolen, &delta))
return false;
- pr_debug("colesced seq %llx into %llx new len %d new end seq %llx",
+ pr_debug("colesced seq %llx into %llx new len %d new end seq %llx\n",
MPTCP_SKB_CB(from)->map_seq, MPTCP_SKB_CB(to)->map_seq,
to->len, MPTCP_SKB_CB(from)->end_seq);
MPTCP_SKB_CB(to)->end_seq = MPTCP_SKB_CB(from)->end_seq;
@@ -217,7 +217,7 @@ static void mptcp_data_queue_ofo(struct mptcp_sock *msk, struct sk_buff *skb)
end_seq = MPTCP_SKB_CB(skb)->end_seq;
max_seq = atomic64_read(&msk->rcv_wnd_sent);
- pr_debug("msk=%p seq=%llx limit=%llx empty=%d", msk, seq, max_seq,
+ pr_debug("msk=%p seq=%llx limit=%llx empty=%d\n", msk, seq, max_seq,
RB_EMPTY_ROOT(&msk->out_of_order_queue));
if (after64(end_seq, max_seq)) {
/* out of window */
@@ -350,8 +350,10 @@ static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
skb_orphan(skb);
/* try to fetch required memory from subflow */
- if (!mptcp_rmem_schedule(sk, ssk, skb->truesize))
+ if (!mptcp_rmem_schedule(sk, ssk, skb->truesize)) {
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RCVPRUNED);
goto drop;
+ }
has_rxtstamp = TCP_SKB_CB(skb)->has_rxtstamp;
@@ -641,7 +643,7 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
}
}
- pr_debug("msk=%p ssk=%p", msk, ssk);
+ pr_debug("msk=%p ssk=%p\n", msk, ssk);
tp = tcp_sk(ssk);
do {
u32 map_remaining, offset;
@@ -722,7 +724,7 @@ static bool __mptcp_ofo_queue(struct mptcp_sock *msk)
u64 end_seq;
p = rb_first(&msk->out_of_order_queue);
- pr_debug("msk=%p empty=%d", msk, RB_EMPTY_ROOT(&msk->out_of_order_queue));
+ pr_debug("msk=%p empty=%d\n", msk, RB_EMPTY_ROOT(&msk->out_of_order_queue));
while (p) {
skb = rb_to_skb(p);
if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq))
@@ -744,7 +746,7 @@ static bool __mptcp_ofo_queue(struct mptcp_sock *msk)
int delta = msk->ack_seq - MPTCP_SKB_CB(skb)->map_seq;
/* skip overlapping data, if any */
- pr_debug("uncoalesced seq=%llx ack seq=%llx delta=%d",
+ pr_debug("uncoalesced seq=%llx ack seq=%llx delta=%d\n",
MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq,
delta);
MPTCP_SKB_CB(skb)->offset += delta;
@@ -844,10 +846,8 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
sk_rbuf = ssk_rbuf;
/* over limit? can't append more skbs to msk, also, no need to wake up */
- if (__mptcp_rmem(sk) > sk_rbuf) {
- MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RCVPRUNED);
+ if (__mptcp_rmem(sk) > sk_rbuf)
return;
- }
/* Wake-up the reader only for in-sequence data */
mptcp_data_lock(sk);
@@ -1240,7 +1240,7 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
size_t copy;
int i;
- pr_debug("msk=%p ssk=%p sending dfrag at seq=%llu len=%u already sent=%u",
+ pr_debug("msk=%p ssk=%p sending dfrag at seq=%llu len=%u already sent=%u\n",
msk, ssk, dfrag->data_seq, dfrag->data_len, info->sent);
if (WARN_ON_ONCE(info->sent > info->limit ||
@@ -1341,7 +1341,7 @@ alloc_skb:
mpext->use_map = 1;
mpext->dsn64 = 1;
- pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d",
+ pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d\n",
mpext->data_seq, mpext->subflow_seq, mpext->data_len,
mpext->dsn64);
@@ -1422,13 +1422,15 @@ struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
}
mptcp_for_each_subflow(msk, subflow) {
+ bool backup = subflow->backup || subflow->request_bkup;
+
trace_mptcp_subflow_get_send(subflow);
ssk = mptcp_subflow_tcp_sock(subflow);
if (!mptcp_subflow_active(subflow))
continue;
tout = max(tout, mptcp_timeout_from_subflow(subflow));
- nr_active += !subflow->backup;
+ nr_active += !backup;
pace = subflow->avg_pacing_rate;
if (unlikely(!pace)) {
/* init pacing rate from socket */
@@ -1439,9 +1441,9 @@ struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
}
linger_time = div_u64((u64)READ_ONCE(ssk->sk_wmem_queued) << 32, pace);
- if (linger_time < send_info[subflow->backup].linger_time) {
- send_info[subflow->backup].ssk = ssk;
- send_info[subflow->backup].linger_time = linger_time;
+ if (linger_time < send_info[backup].linger_time) {
+ send_info[backup].ssk = ssk;
+ send_info[backup].linger_time = linger_time;
}
}
__mptcp_set_timeout(sk, tout);
@@ -1890,7 +1892,7 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
if (!msk->first_pending)
WRITE_ONCE(msk->first_pending, dfrag);
}
- pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d", msk,
+ pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d\n", msk,
dfrag->data_seq, dfrag->data_len, dfrag->already_sent,
!dfrag_collapsed);
@@ -2246,7 +2248,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
}
}
- pr_debug("block timeout %ld", timeo);
+ pr_debug("block timeout %ld\n", timeo);
sk_wait_data(sk, &timeo, NULL);
}
@@ -2262,7 +2264,7 @@ out_err:
}
}
- pr_debug("msk=%p rx queue empty=%d:%d copied=%d",
+ pr_debug("msk=%p rx queue empty=%d:%d copied=%d\n",
msk, skb_queue_empty_lockless(&sk->sk_receive_queue),
skb_queue_empty(&msk->receive_queue), copied);
if (!(flags & MSG_PEEK))
@@ -2324,7 +2326,7 @@ struct sock *mptcp_subflow_get_retrans(struct mptcp_sock *msk)
continue;
}
- if (subflow->backup) {
+ if (subflow->backup || subflow->request_bkup) {
if (!backup)
backup = ssk;
continue;
@@ -2506,6 +2508,12 @@ out:
void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
struct mptcp_subflow_context *subflow)
{
+ /* The first subflow can already be closed and still in the list */
+ if (subflow->close_event_done)
+ return;
+
+ subflow->close_event_done = true;
+
if (sk->sk_state == TCP_ESTABLISHED)
mptcp_event(MPTCP_EVENT_SUB_CLOSED, mptcp_sk(sk), ssk, GFP_KERNEL);
@@ -2531,8 +2539,11 @@ static void __mptcp_close_subflow(struct sock *sk)
mptcp_for_each_subflow_safe(msk, subflow, tmp) {
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+ int ssk_state = inet_sk_state_load(ssk);
- if (inet_sk_state_load(ssk) != TCP_CLOSE)
+ if (ssk_state != TCP_CLOSE &&
+ (ssk_state != TCP_CLOSE_WAIT ||
+ inet_sk_state_load(sk) != TCP_ESTABLISHED))
continue;
/* 'subflow_data_ready' will re-sched once rx queue is empty */
@@ -2712,7 +2723,7 @@ static void mptcp_mp_fail_no_response(struct mptcp_sock *msk)
if (!ssk)
return;
- pr_debug("MP_FAIL doesn't respond, reset the subflow");
+ pr_debug("MP_FAIL doesn't respond, reset the subflow\n");
slow = lock_sock_fast(ssk);
mptcp_subflow_reset(ssk);
@@ -2886,7 +2897,7 @@ void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
break;
default:
if (__mptcp_check_fallback(mptcp_sk(sk))) {
- pr_debug("Fallback");
+ pr_debug("Fallback\n");
ssk->sk_shutdown |= how;
tcp_shutdown(ssk, how);
@@ -2896,7 +2907,7 @@ void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
WRITE_ONCE(mptcp_sk(sk)->snd_una, mptcp_sk(sk)->snd_nxt);
mptcp_schedule_work(sk);
} else {
- pr_debug("Sending DATA_FIN on subflow %p", ssk);
+ pr_debug("Sending DATA_FIN on subflow %p\n", ssk);
tcp_send_ack(ssk);
if (!mptcp_rtx_timer_pending(sk))
mptcp_reset_rtx_timer(sk);
@@ -2962,7 +2973,7 @@ static void mptcp_check_send_data_fin(struct sock *sk)
struct mptcp_subflow_context *subflow;
struct mptcp_sock *msk = mptcp_sk(sk);
- pr_debug("msk=%p snd_data_fin_enable=%d pending=%d snd_nxt=%llu write_seq=%llu",
+ pr_debug("msk=%p snd_data_fin_enable=%d pending=%d snd_nxt=%llu write_seq=%llu\n",
msk, msk->snd_data_fin_enable, !!mptcp_send_head(sk),
msk->snd_nxt, msk->write_seq);
@@ -2986,7 +2997,7 @@ static void __mptcp_wr_shutdown(struct sock *sk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
- pr_debug("msk=%p snd_data_fin_enable=%d shutdown=%x state=%d pending=%d",
+ pr_debug("msk=%p snd_data_fin_enable=%d shutdown=%x state=%d pending=%d\n",
msk, msk->snd_data_fin_enable, sk->sk_shutdown, sk->sk_state,
!!mptcp_send_head(sk));
@@ -3001,7 +3012,7 @@ static void __mptcp_destroy_sock(struct sock *sk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
- pr_debug("msk=%p", msk);
+ pr_debug("msk=%p\n", msk);
might_sleep();
@@ -3109,7 +3120,7 @@ cleanup:
mptcp_set_state(sk, TCP_CLOSE);
sock_hold(sk);
- pr_debug("msk=%p state=%d", sk, sk->sk_state);
+ pr_debug("msk=%p state=%d\n", sk, sk->sk_state);
if (msk->token)
mptcp_event(MPTCP_EVENT_CLOSED, msk, NULL, GFP_KERNEL);
@@ -3541,7 +3552,7 @@ static int mptcp_get_port(struct sock *sk, unsigned short snum)
{
struct mptcp_sock *msk = mptcp_sk(sk);
- pr_debug("msk=%p, ssk=%p", msk, msk->first);
+ pr_debug("msk=%p, ssk=%p\n", msk, msk->first);
if (WARN_ON_ONCE(!msk->first))
return -EINVAL;
@@ -3558,7 +3569,7 @@ void mptcp_finish_connect(struct sock *ssk)
sk = subflow->conn;
msk = mptcp_sk(sk);
- pr_debug("msk=%p, token=%u", sk, subflow->token);
+ pr_debug("msk=%p, token=%u\n", sk, subflow->token);
subflow->map_seq = subflow->iasn;
subflow->map_subflow_seq = 1;
@@ -3587,7 +3598,7 @@ bool mptcp_finish_join(struct sock *ssk)
struct sock *parent = (void *)msk;
bool ret = true;
- pr_debug("msk=%p, subflow=%p", msk, subflow);
+ pr_debug("msk=%p, subflow=%p\n", msk, subflow);
/* mptcp socket already closing? */
if (!mptcp_is_fully_established(parent)) {
@@ -3633,7 +3644,7 @@ err_prohibited:
static void mptcp_shutdown(struct sock *sk, int how)
{
- pr_debug("sk=%p, how=%d", sk, how);
+ pr_debug("sk=%p, how=%d\n", sk, how);
if ((how & SEND_SHUTDOWN) && mptcp_close_state(sk))
__mptcp_wr_shutdown(sk);
@@ -3854,7 +3865,7 @@ static int mptcp_listen(struct socket *sock, int backlog)
struct sock *ssk;
int err;
- pr_debug("msk=%p", msk);
+ pr_debug("msk=%p\n", msk);
lock_sock(sk);
@@ -3893,7 +3904,7 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
struct mptcp_sock *msk = mptcp_sk(sock->sk);
struct sock *ssk, *newsk;
- pr_debug("msk=%p", msk);
+ pr_debug("msk=%p\n", msk);
/* Buggy applications can call accept on socket states other than LISTEN
* but no need to allocate the first subflow just to error out.
@@ -3902,12 +3913,12 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
if (!ssk)
return -EINVAL;
- pr_debug("ssk=%p, listener=%p", ssk, mptcp_subflow_ctx(ssk));
+ pr_debug("ssk=%p, listener=%p\n", ssk, mptcp_subflow_ctx(ssk));
newsk = inet_csk_accept(ssk, arg);
if (!newsk)
return arg->err;
- pr_debug("newsk=%p, subflow is mptcp=%d", newsk, sk_is_mptcp(newsk));
+ pr_debug("newsk=%p, subflow is mptcp=%d\n", newsk, sk_is_mptcp(newsk));
if (sk_is_mptcp(newsk)) {
struct mptcp_subflow_context *subflow;
struct sock *new_mptcp_sock;
@@ -4000,7 +4011,7 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock,
sock_poll_wait(file, sock, wait);
state = inet_sk_state_load(sk);
- pr_debug("msk=%p state=%d flags=%lx", msk, state, msk->flags);
+ pr_debug("msk=%p state=%d flags=%lx\n", msk, state, msk->flags);
if (state == TCP_LISTEN) {
struct sock *ssk = READ_ONCE(msk->first);
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index b11a4e50d52b..3b22313d1b86 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -448,6 +448,7 @@ struct mptcp_subflow_request_sock {
u16 mp_capable : 1,
mp_join : 1,
backup : 1,
+ request_bkup : 1,
csum_reqd : 1,
allow_join_id0 : 1;
u8 local_id;
@@ -523,7 +524,8 @@ struct mptcp_subflow_context {
stale : 1, /* unable to snd/rcv data, do not use for xmit */
valid_csum_seen : 1, /* at least one csum validated */
is_mptfo : 1, /* subflow is doing TFO */
- __unused : 10;
+ close_event_done : 1, /* has done the post-closed part */
+ __unused : 9;
bool data_avail;
bool scheduled;
u32 remote_nonce;
@@ -991,6 +993,8 @@ void mptcp_pm_add_addr_received(const struct sock *ssk,
void mptcp_pm_add_addr_echoed(struct mptcp_sock *msk,
const struct mptcp_addr_info *addr);
void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk);
+bool mptcp_pm_nl_is_init_remote_addr(struct mptcp_sock *msk,
+ const struct mptcp_addr_info *remote);
void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk);
void mptcp_pm_rm_addr_received(struct mptcp_sock *msk,
const struct mptcp_rm_list *rm_list);
@@ -1025,7 +1029,6 @@ int mptcp_pm_announce_addr(struct mptcp_sock *msk,
const struct mptcp_addr_info *addr,
bool echo);
int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list);
-int mptcp_pm_remove_subflow(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list);
void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list);
void mptcp_free_local_addr_list(struct mptcp_sock *msk);
@@ -1108,6 +1111,9 @@ bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc);
int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc);
int mptcp_userspace_pm_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc);
+bool mptcp_pm_is_backup(struct mptcp_sock *msk, struct sock_common *skc);
+bool mptcp_pm_nl_is_backup(struct mptcp_sock *msk, struct mptcp_addr_info *skc);
+bool mptcp_userspace_pm_is_backup(struct mptcp_sock *msk, struct mptcp_addr_info *skc);
int mptcp_pm_dump_addr(struct sk_buff *msg, struct netlink_callback *cb);
int mptcp_pm_nl_dump_addr(struct sk_buff *msg,
struct netlink_callback *cb);
@@ -1129,8 +1135,6 @@ static inline u8 subflow_get_local_id(const struct mptcp_subflow_context *subflo
void __init mptcp_pm_nl_init(void);
void mptcp_pm_nl_work(struct mptcp_sock *msk);
-void mptcp_pm_nl_rm_subflow_received(struct mptcp_sock *msk,
- const struct mptcp_rm_list *rm_list);
unsigned int mptcp_pm_get_add_addr_signal_max(const struct mptcp_sock *msk);
unsigned int mptcp_pm_get_add_addr_accept_max(const struct mptcp_sock *msk);
unsigned int mptcp_pm_get_subflows_max(const struct mptcp_sock *msk);
@@ -1176,7 +1180,7 @@ static inline bool mptcp_check_fallback(const struct sock *sk)
static inline void __mptcp_do_fallback(struct mptcp_sock *msk)
{
if (__mptcp_check_fallback(msk)) {
- pr_debug("TCP fallback already done (msk=%p)", msk);
+ pr_debug("TCP fallback already done (msk=%p)\n", msk);
return;
}
set_bit(MPTCP_FALLBACK_DONE, &msk->flags);
@@ -1212,7 +1216,7 @@ static inline void mptcp_do_fallback(struct sock *ssk)
}
}
-#define pr_fallback(a) pr_debug("%s:fallback to TCP (msk=%p)", __func__, a)
+#define pr_fallback(a) pr_debug("%s:fallback to TCP (msk=%p)\n", __func__, a)
static inline bool mptcp_check_infinite_map(struct sk_buff *skb)
{
diff --git a/net/mptcp/sched.c b/net/mptcp/sched.c
index 4a7fd0508ad2..78ed508ebc1b 100644
--- a/net/mptcp/sched.c
+++ b/net/mptcp/sched.c
@@ -86,7 +86,7 @@ int mptcp_register_scheduler(struct mptcp_sched_ops *sched)
list_add_tail_rcu(&sched->list, &mptcp_sched_list);
spin_unlock(&mptcp_sched_list_lock);
- pr_debug("%s registered", sched->name);
+ pr_debug("%s registered\n", sched->name);
return 0;
}
@@ -118,7 +118,7 @@ int mptcp_init_sched(struct mptcp_sock *msk,
if (msk->sched->init)
msk->sched->init(msk);
- pr_debug("sched=%s", msk->sched->name);
+ pr_debug("sched=%s\n", msk->sched->name);
return 0;
}
diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
index 2026a9a36f80..505445a9598f 100644
--- a/net/mptcp/sockopt.c
+++ b/net/mptcp/sockopt.c
@@ -873,7 +873,7 @@ int mptcp_setsockopt(struct sock *sk, int level, int optname,
struct mptcp_sock *msk = mptcp_sk(sk);
struct sock *ssk;
- pr_debug("msk=%p", msk);
+ pr_debug("msk=%p\n", msk);
if (level == SOL_SOCKET)
return mptcp_setsockopt_sol_socket(msk, optname, optval, optlen);
@@ -1453,7 +1453,7 @@ int mptcp_getsockopt(struct sock *sk, int level, int optname,
struct mptcp_sock *msk = mptcp_sk(sk);
struct sock *ssk;
- pr_debug("msk=%p", msk);
+ pr_debug("msk=%p\n", msk);
/* @@ the meaning of setsockopt() when the socket is connected and
* there are multiple subflows is not yet defined. It is up to the
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 39e2cbdf3801..064ab3235893 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -39,7 +39,7 @@ static void subflow_req_destructor(struct request_sock *req)
{
struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
- pr_debug("subflow_req=%p", subflow_req);
+ pr_debug("subflow_req=%p\n", subflow_req);
if (subflow_req->msk)
sock_put((struct sock *)subflow_req->msk);
@@ -100,6 +100,7 @@ static struct mptcp_sock *subflow_token_join_request(struct request_sock *req)
return NULL;
}
subflow_req->local_id = local_id;
+ subflow_req->request_bkup = mptcp_pm_is_backup(msk, (struct sock_common *)req);
return msk;
}
@@ -145,7 +146,7 @@ static int subflow_check_req(struct request_sock *req,
struct mptcp_options_received mp_opt;
bool opt_mp_capable, opt_mp_join;
- pr_debug("subflow_req=%p, listener=%p", subflow_req, listener);
+ pr_debug("subflow_req=%p, listener=%p\n", subflow_req, listener);
#ifdef CONFIG_TCP_MD5SIG
/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
@@ -168,6 +169,9 @@ static int subflow_check_req(struct request_sock *req,
return 0;
} else if (opt_mp_join) {
SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX);
+
+ if (mp_opt.backup)
+ SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNBACKUPRX);
}
if (opt_mp_capable && listener->request_mptcp) {
@@ -217,7 +221,7 @@ again:
}
if (subflow_use_different_sport(subflow_req->msk, sk_listener)) {
- pr_debug("syn inet_sport=%d %d",
+ pr_debug("syn inet_sport=%d %d\n",
ntohs(inet_sk(sk_listener)->inet_sport),
ntohs(inet_sk((struct sock *)subflow_req->msk)->inet_sport));
if (!mptcp_pm_sport_in_anno_list(subflow_req->msk, sk_listener)) {
@@ -239,7 +243,7 @@ again:
subflow_init_req_cookie_join_save(subflow_req, skb);
}
- pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token,
+ pr_debug("token=%u, remote_nonce=%u msk=%p\n", subflow_req->token,
subflow_req->remote_nonce, subflow_req->msk);
}
@@ -523,7 +527,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
subflow->rel_write_seq = 1;
subflow->conn_finished = 1;
subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
- pr_debug("subflow=%p synack seq=%x", subflow, subflow->ssn_offset);
+ pr_debug("subflow=%p synack seq=%x\n", subflow, subflow->ssn_offset);
mptcp_get_options(skb, &mp_opt);
if (subflow->request_mptcp) {
@@ -555,7 +559,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
subflow->thmac = mp_opt.thmac;
subflow->remote_nonce = mp_opt.nonce;
WRITE_ONCE(subflow->remote_id, mp_opt.join_id);
- pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u backup=%d",
+ pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u backup=%d\n",
subflow, subflow->thmac, subflow->remote_nonce,
subflow->backup);
@@ -577,8 +581,11 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
subflow->mp_join = 1;
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);
+ if (subflow->backup)
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKBACKUPRX);
+
if (subflow_use_different_dport(msk, sk)) {
- pr_debug("synack inet_dport=%d %d",
+ pr_debug("synack inet_dport=%d %d\n",
ntohs(inet_sk(sk)->inet_dport),
ntohs(inet_sk(parent)->inet_dport));
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINPORTSYNACKRX);
@@ -614,6 +621,8 @@ static int subflow_chk_local_id(struct sock *sk)
return err;
subflow_set_local_id(subflow, err);
+ subflow->request_bkup = mptcp_pm_is_backup(msk, (struct sock_common *)sk);
+
return 0;
}
@@ -646,7 +655,7 @@ static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
- pr_debug("subflow=%p", subflow);
+ pr_debug("subflow=%p\n", subflow);
/* Never answer to SYNs sent to broadcast or multicast */
if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
@@ -677,7 +686,7 @@ static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
- pr_debug("subflow=%p", subflow);
+ pr_debug("subflow=%p\n", subflow);
if (skb->protocol == htons(ETH_P_IP))
return subflow_v4_conn_request(sk, skb);
@@ -798,7 +807,7 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
struct mptcp_sock *owner;
struct sock *child;
- pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);
+ pr_debug("listener=%p, req=%p, conn=%p\n", listener, req, listener->conn);
/* After child creation we must look for MPC even when options
* are not parsed
@@ -889,7 +898,7 @@ create_child:
ctx->conn = (struct sock *)owner;
if (subflow_use_different_sport(owner, sk)) {
- pr_debug("ack inet_sport=%d %d",
+ pr_debug("ack inet_sport=%d %d\n",
ntohs(inet_sk(sk)->inet_sport),
ntohs(inet_sk((struct sock *)owner)->inet_sport));
if (!mptcp_pm_sport_in_anno_list(owner, sk)) {
@@ -952,7 +961,7 @@ enum mapping_status {
static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
{
- pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
+ pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d\n",
ssn, subflow->map_subflow_seq, subflow->map_data_len);
}
@@ -1112,7 +1121,7 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
data_len = mpext->data_len;
if (data_len == 0) {
- pr_debug("infinite mapping received");
+ pr_debug("infinite mapping received\n");
MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
subflow->map_data_len = 0;
return MAPPING_INVALID;
@@ -1124,7 +1133,7 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
if (data_len == 1) {
bool updated = mptcp_update_rcv_data_fin(msk, mpext->data_seq,
mpext->dsn64);
- pr_debug("DATA_FIN with no payload seq=%llu", mpext->data_seq);
+ pr_debug("DATA_FIN with no payload seq=%llu\n", mpext->data_seq);
if (subflow->map_valid) {
/* A DATA_FIN might arrive in a DSS
* option before the previous mapping
@@ -1150,7 +1159,7 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
data_fin_seq &= GENMASK_ULL(31, 0);
mptcp_update_rcv_data_fin(msk, data_fin_seq, mpext->dsn64);
- pr_debug("DATA_FIN with mapping seq=%llu dsn64=%d",
+ pr_debug("DATA_FIN with mapping seq=%llu dsn64=%d\n",
data_fin_seq, mpext->dsn64);
/* Adjust for DATA_FIN using 1 byte of sequence space */
@@ -1196,7 +1205,7 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
if (unlikely(subflow->map_csum_reqd != csum_reqd))
return MAPPING_INVALID;
- pr_debug("new map seq=%llu subflow_seq=%u data_len=%u csum=%d:%u",
+ pr_debug("new map seq=%llu subflow_seq=%u data_len=%u csum=%d:%u\n",
subflow->map_seq, subflow->map_subflow_seq,
subflow->map_data_len, subflow->map_csum_reqd,
subflow->map_data_csum);
@@ -1221,14 +1230,22 @@ static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
bool fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
- u32 incr;
+ struct tcp_sock *tp = tcp_sk(ssk);
+ u32 offset, incr, avail_len;
+
+ offset = tp->copied_seq - TCP_SKB_CB(skb)->seq;
+ if (WARN_ON_ONCE(offset > skb->len))
+ goto out;
- incr = limit >= skb->len ? skb->len + fin : limit;
+ avail_len = skb->len - offset;
+ incr = limit >= avail_len ? avail_len + fin : limit;
- pr_debug("discarding=%d len=%d seq=%d", incr, skb->len,
- subflow->map_subflow_seq);
+ pr_debug("discarding=%d len=%d offset=%d seq=%d\n", incr, skb->len,
+ offset, subflow->map_subflow_seq);
MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA);
tcp_sk(ssk)->copied_seq += incr;
+
+out:
if (!before(tcp_sk(ssk)->copied_seq, TCP_SKB_CB(skb)->end_seq))
sk_eat_skb(ssk, skb);
if (mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len)
@@ -1238,12 +1255,16 @@ static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
/* sched mptcp worker to remove the subflow if no more data is pending */
static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
{
- if (likely(ssk->sk_state != TCP_CLOSE))
+ struct sock *sk = (struct sock *)msk;
+
+ if (likely(ssk->sk_state != TCP_CLOSE &&
+ (ssk->sk_state != TCP_CLOSE_WAIT ||
+ inet_sk_state_load(sk) != TCP_ESTABLISHED)))
return;
if (skb_queue_empty(&ssk->sk_receive_queue) &&
!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
- mptcp_schedule_work((struct sock *)msk);
+ mptcp_schedule_work(sk);
}
static bool subflow_can_fallback(struct mptcp_subflow_context *subflow)
@@ -1320,7 +1341,7 @@ static bool subflow_check_data_avail(struct sock *ssk)
old_ack = READ_ONCE(msk->ack_seq);
ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
- pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
+ pr_debug("msk ack_seq=%llx subflow ack_seq=%llx\n", old_ack,
ack_seq);
if (unlikely(before64(ack_seq, old_ack))) {
mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
@@ -1392,7 +1413,7 @@ bool mptcp_subflow_data_available(struct sock *sk)
subflow->map_valid = 0;
WRITE_ONCE(subflow->data_avail, false);
- pr_debug("Done with mapping: seq=%u data_len=%u",
+ pr_debug("Done with mapping: seq=%u data_len=%u\n",
subflow->map_subflow_seq,
subflow->map_data_len);
}
@@ -1502,7 +1523,7 @@ void mptcpv6_handle_mapped(struct sock *sk, bool mapped)
target = mapped ? &subflow_v6m_specific : subflow_default_af_ops(sk);
- pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d",
+ pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d\n",
subflow, sk->sk_family, icsk->icsk_af_ops, target, mapped);
if (likely(icsk->icsk_af_ops == target))
@@ -1595,7 +1616,7 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
goto failed;
mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL);
- pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d", msk,
+ pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d\n", msk,
remote_token, local_id, remote_id);
subflow->remote_token = remote_token;
WRITE_ONCE(subflow->remote_id, remote_id);
@@ -1730,7 +1751,7 @@ int mptcp_subflow_create_socket(struct sock *sk, unsigned short family,
SOCK_INODE(sf)->i_gid = SOCK_INODE(sk->sk_socket)->i_gid;
subflow = mptcp_subflow_ctx(sf->sk);
- pr_debug("subflow=%p", subflow);
+ pr_debug("subflow=%p\n", subflow);
*new_sock = sf;
sock_hold(sk);
@@ -1759,7 +1780,7 @@ static struct mptcp_subflow_context *subflow_create_ctx(struct sock *sk,
INIT_LIST_HEAD(&ctx->node);
INIT_LIST_HEAD(&ctx->delegated_node);
- pr_debug("subflow=%p", ctx);
+ pr_debug("subflow=%p\n", ctx);
ctx->tcp_sock = sk;
WRITE_ONCE(ctx->local_id, -1);
@@ -1910,7 +1931,7 @@ static int subflow_ulp_init(struct sock *sk)
goto out;
}
- pr_debug("subflow=%p, family=%d", ctx, sk->sk_family);
+ pr_debug("subflow=%p, family=%d\n", ctx, sk->sk_family);
tp->is_mptcp = 1;
ctx->icsk_af_ops = icsk->icsk_af_ops;
@@ -2005,6 +2026,7 @@ static void subflow_ulp_clone(const struct request_sock *req,
new_ctx->fully_established = 1;
new_ctx->remote_key_valid = 1;
new_ctx->backup = subflow_req->backup;
+ new_ctx->request_bkup = subflow_req->request_bkup;
WRITE_ONCE(new_ctx->remote_id, subflow_req->remote_id);
new_ctx->token = subflow_req->token;
new_ctx->thmac = subflow_req->thmac;
diff --git a/net/netfilter/nf_flow_table_inet.c b/net/netfilter/nf_flow_table_inet.c
index 88787b45e30d..8b541a080342 100644
--- a/net/netfilter/nf_flow_table_inet.c
+++ b/net/netfilter/nf_flow_table_inet.c
@@ -17,6 +17,9 @@ nf_flow_offload_inet_hook(void *priv, struct sk_buff *skb,
switch (skb->protocol) {
case htons(ETH_P_8021Q):
+ if (!pskb_may_pull(skb, skb_mac_offset(skb) + sizeof(*veth)))
+ return NF_ACCEPT;
+
veth = (struct vlan_ethhdr *)skb_mac_header(skb);
proto = veth->h_vlan_encapsulated_proto;
break;
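/* Illustrative sketch only, not part of the commit: the same "validate
 * before dereference" pattern as the hunk above, pulled out into a
 * hypothetical helper. pskb_may_pull() guarantees the vlan_ethhdr is in the
 * linear area before skb_mac_header() is cast and read.
 */
#include <linux/if_vlan.h>
#include <linux/skbuff.h>

static __be16 example_vlan_encap_proto(struct sk_buff *skb)
{
	const struct vlan_ethhdr *veth;

	if (!pskb_may_pull(skb, skb_mac_offset(skb) + sizeof(*veth)))
		return 0;

	veth = (struct vlan_ethhdr *)skb_mac_header(skb);
	return veth->h_vlan_encapsulated_proto;
}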
diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c
index c2c005234dcd..98edcaa37b38 100644
--- a/net/netfilter/nf_flow_table_ip.c
+++ b/net/netfilter/nf_flow_table_ip.c
@@ -281,6 +281,9 @@ static bool nf_flow_skb_encap_protocol(struct sk_buff *skb, __be16 proto,
switch (skb->protocol) {
case htons(ETH_P_8021Q):
+ if (!pskb_may_pull(skb, skb_mac_offset(skb) + sizeof(*veth)))
+ return false;
+
veth = (struct vlan_ethhdr *)skb_mac_header(skb);
if (veth->h_vlan_encapsulated_proto == proto) {
*offset += VLAN_HLEN;
diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
index ff1a4e36c2b5..e06bc36f49fe 100644
--- a/net/netfilter/nf_flow_table_offload.c
+++ b/net/netfilter/nf_flow_table_offload.c
@@ -841,8 +841,8 @@ static int nf_flow_offload_tuple(struct nf_flowtable *flowtable,
struct list_head *block_cb_list)
{
struct flow_cls_offload cls_flow = {};
+ struct netlink_ext_ack extack = {};
struct flow_block_cb *block_cb;
- struct netlink_ext_ack extack;
__be16 proto = ETH_P_ALL;
int err, i = 0;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 481ee78e77bc..0a2f79346958 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -8020,6 +8020,19 @@ cont:
return skb->len;
}
+static int nf_tables_dumpreset_obj(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct nftables_pernet *nft_net = nft_pernet(sock_net(skb->sk));
+ int ret;
+
+ mutex_lock(&nft_net->commit_mutex);
+ ret = nf_tables_dump_obj(skb, cb);
+ mutex_unlock(&nft_net->commit_mutex);
+
+ return ret;
+}
+
static int nf_tables_dump_obj_start(struct netlink_callback *cb)
{
struct nft_obj_dump_ctx *ctx = (void *)cb->ctx;
@@ -8036,12 +8049,18 @@ static int nf_tables_dump_obj_start(struct netlink_callback *cb)
if (nla[NFTA_OBJ_TYPE])
ctx->type = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE]));
- if (NFNL_MSG_TYPE(cb->nlh->nlmsg_type) == NFT_MSG_GETOBJ_RESET)
- ctx->reset = true;
-
return 0;
}
+static int nf_tables_dumpreset_obj_start(struct netlink_callback *cb)
+{
+ struct nft_obj_dump_ctx *ctx = (void *)cb->ctx;
+
+ ctx->reset = true;
+
+ return nf_tables_dump_obj_start(cb);
+}
+
static int nf_tables_dump_obj_done(struct netlink_callback *cb)
{
struct nft_obj_dump_ctx *ctx = (void *)cb->ctx;
@@ -8052,8 +8071,9 @@ static int nf_tables_dump_obj_done(struct netlink_callback *cb)
}
/* called with rcu_read_lock held */
-static int nf_tables_getobj(struct sk_buff *skb, const struct nfnl_info *info,
- const struct nlattr * const nla[])
+static struct sk_buff *
+nf_tables_getobj_single(u32 portid, const struct nfnl_info *info,
+ const struct nlattr * const nla[], bool reset)
{
struct netlink_ext_ack *extack = info->extack;
u8 genmask = nft_genmask_cur(info->net);
@@ -8062,72 +8082,109 @@ static int nf_tables_getobj(struct sk_buff *skb, const struct nfnl_info *info,
struct net *net = info->net;
struct nft_object *obj;
struct sk_buff *skb2;
- bool reset = false;
u32 objtype;
int err;
- if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
- struct netlink_dump_control c = {
- .start = nf_tables_dump_obj_start,
- .dump = nf_tables_dump_obj,
- .done = nf_tables_dump_obj_done,
- .module = THIS_MODULE,
- .data = (void *)nla,
- };
-
- return nft_netlink_dump_start_rcu(info->sk, skb, info->nlh, &c);
- }
-
if (!nla[NFTA_OBJ_NAME] ||
!nla[NFTA_OBJ_TYPE])
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
table = nft_table_lookup(net, nla[NFTA_OBJ_TABLE], family, genmask, 0);
if (IS_ERR(table)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_OBJ_TABLE]);
- return PTR_ERR(table);
+ return ERR_CAST(table);
}
objtype = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE]));
obj = nft_obj_lookup(net, table, nla[NFTA_OBJ_NAME], objtype, genmask);
if (IS_ERR(obj)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_OBJ_NAME]);
- return PTR_ERR(obj);
+ return ERR_CAST(obj);
}
skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
if (!skb2)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
- if (NFNL_MSG_TYPE(info->nlh->nlmsg_type) == NFT_MSG_GETOBJ_RESET)
- reset = true;
+ err = nf_tables_fill_obj_info(skb2, net, portid,
+ info->nlh->nlmsg_seq, NFT_MSG_NEWOBJ, 0,
+ family, table, obj, reset);
+ if (err < 0) {
+ kfree_skb(skb2);
+ return ERR_PTR(err);
+ }
- if (reset) {
- const struct nftables_pernet *nft_net;
- char *buf;
+ return skb2;
+}
- nft_net = nft_pernet(net);
- buf = kasprintf(GFP_ATOMIC, "%s:%u", table->name, nft_net->base_seq);
+static int nf_tables_getobj(struct sk_buff *skb, const struct nfnl_info *info,
+ const struct nlattr * const nla[])
+{
+ u32 portid = NETLINK_CB(skb).portid;
+ struct sk_buff *skb2;
- audit_log_nfcfg(buf,
- family,
- 1,
- AUDIT_NFT_OP_OBJ_RESET,
- GFP_ATOMIC);
- kfree(buf);
+ if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
+ struct netlink_dump_control c = {
+ .start = nf_tables_dump_obj_start,
+ .dump = nf_tables_dump_obj,
+ .done = nf_tables_dump_obj_done,
+ .module = THIS_MODULE,
+ .data = (void *)nla,
+ };
+
+ return nft_netlink_dump_start_rcu(info->sk, skb, info->nlh, &c);
}
- err = nf_tables_fill_obj_info(skb2, net, NETLINK_CB(skb).portid,
- info->nlh->nlmsg_seq, NFT_MSG_NEWOBJ, 0,
- family, table, obj, reset);
- if (err < 0)
- goto err_fill_obj_info;
+ skb2 = nf_tables_getobj_single(portid, info, nla, false);
+ if (IS_ERR(skb2))
+ return PTR_ERR(skb2);
- return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
+ return nfnetlink_unicast(skb2, info->net, portid);
+}
-err_fill_obj_info:
- kfree_skb(skb2);
- return err;
+static int nf_tables_getobj_reset(struct sk_buff *skb,
+ const struct nfnl_info *info,
+ const struct nlattr * const nla[])
+{
+ struct nftables_pernet *nft_net = nft_pernet(info->net);
+ u32 portid = NETLINK_CB(skb).portid;
+ struct net *net = info->net;
+ struct sk_buff *skb2;
+ char *buf;
+
+ if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
+ struct netlink_dump_control c = {
+ .start = nf_tables_dumpreset_obj_start,
+ .dump = nf_tables_dumpreset_obj,
+ .done = nf_tables_dump_obj_done,
+ .module = THIS_MODULE,
+ .data = (void *)nla,
+ };
+
+ return nft_netlink_dump_start_rcu(info->sk, skb, info->nlh, &c);
+ }
+
+ if (!try_module_get(THIS_MODULE))
+ return -EINVAL;
+ rcu_read_unlock();
+ mutex_lock(&nft_net->commit_mutex);
+ skb2 = nf_tables_getobj_single(portid, info, nla, true);
+ mutex_unlock(&nft_net->commit_mutex);
+ rcu_read_lock();
+ module_put(THIS_MODULE);
+
+ if (IS_ERR(skb2))
+ return PTR_ERR(skb2);
+
+ buf = kasprintf(GFP_ATOMIC, "%.*s:%u",
+ nla_len(nla[NFTA_OBJ_TABLE]),
+ (char *)nla_data(nla[NFTA_OBJ_TABLE]),
+ nft_net->base_seq);
+ audit_log_nfcfg(buf, info->nfmsg->nfgen_family, 1,
+ AUDIT_NFT_OP_OBJ_RESET, GFP_ATOMIC);
+ kfree(buf);
+
+ return nfnetlink_unicast(skb2, net, portid);
}
static void nft_obj_destroy(const struct nft_ctx *ctx, struct nft_object *obj)
@@ -9410,7 +9467,7 @@ static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = {
.policy = nft_obj_policy,
},
[NFT_MSG_GETOBJ_RESET] = {
- .call = nf_tables_getobj,
+ .call = nf_tables_getobj_reset,
.type = NFNL_CB_RCU,
.attr_count = NFTA_OBJ_MAX,
.policy = nft_obj_policy,
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 4abf660c7baf..932b3ddb34f1 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -427,8 +427,10 @@ replay_abort:
nfnl_unlock(subsys_id);
- if (nlh->nlmsg_flags & NLM_F_ACK)
+ if (nlh->nlmsg_flags & NLM_F_ACK) {
+ memset(&extack, 0, sizeof(extack));
nfnl_err_add(&err_list, nlh, 0, &extack);
+ }
while (skb->len >= nlmsg_total_size(0)) {
int msglen, type;
@@ -577,6 +579,7 @@ done:
ss->abort(net, oskb, NFNL_ABORT_NONE);
netlink_ack(oskb, nlmsg_hdr(oskb), err, NULL);
} else if (nlh->nlmsg_flags & NLM_F_ACK) {
+ memset(&extack, 0, sizeof(extack));
nfnl_err_add(&err_list, nlh, 0, &extack);
}
} else {
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 55e28e1da66e..e0716da256bf 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -820,10 +820,41 @@ static bool nf_ct_drop_unconfirmed(const struct nf_queue_entry *entry)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
static const unsigned long flags = IPS_CONFIRMED | IPS_DYING;
- const struct nf_conn *ct = (void *)skb_nfct(entry->skb);
+ struct nf_conn *ct = (void *)skb_nfct(entry->skb);
+ unsigned long status;
+ unsigned int use;
- if (ct && ((ct->status & flags) == IPS_DYING))
+ if (!ct)
+ return false;
+
+ status = READ_ONCE(ct->status);
+ if ((status & flags) == IPS_DYING)
return true;
+
+ if (status & IPS_CONFIRMED)
+ return false;
+
+ /* in some cases skb_clone() can occur after initial conntrack
+ * pickup, but conntrack assumes exclusive skb->_nfct ownership for
+ * unconfirmed entries.
+ *
+ * This happens for br_netfilter and with ip multicast routing.
+ * This can't be solved with serialization here because one clone could
+ * have been queued for local delivery.
+ */
+ use = refcount_read(&ct->ct_general.use);
+ if (likely(use == 1))
+ return false;
+
+ /* Can't decrement further? Exclusive ownership. */
+ if (!refcount_dec_not_one(&ct->ct_general.use))
+ return false;
+
+ skb_set_nfct(entry->skb, 0);
+ /* No nf_ct_put(): we already decremented .use and it cannot
+ * drop down to 0.
+ */
+ return true;
#endif
return false;
}
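/* Illustrative sketch only, not part of the commit: the ownership test used
 * by nf_ct_drop_unconfirmed() above, isolated into a hypothetical helper. A
 * single reference means the queued skb owns the unconfirmed conntrack
 * exclusively; with more references, refcount_dec_not_one() releases this
 * skb's reference without ever reaching zero, so no final nf_ct_put() is
 * needed.
 */
#include <linux/refcount.h>

static bool example_release_shared_ref(refcount_t *use)
{
	if (refcount_read(use) == 1)
		return false;			/* exclusive owner, keep it */

	return refcount_dec_not_one(use);	/* shared: drop our reference */
}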
diff --git a/net/netfilter/nft_counter.c b/net/netfilter/nft_counter.c
index 291ed2026367..eab0dc66bee6 100644
--- a/net/netfilter/nft_counter.c
+++ b/net/netfilter/nft_counter.c
@@ -107,11 +107,16 @@ static void nft_counter_reset(struct nft_counter_percpu_priv *priv,
struct nft_counter *total)
{
struct nft_counter *this_cpu;
+ seqcount_t *myseq;
local_bh_disable();
this_cpu = this_cpu_ptr(priv->counter);
+ myseq = this_cpu_ptr(&nft_counter_seq);
+
+ write_seqcount_begin(myseq);
this_cpu->packets -= total->packets;
this_cpu->bytes -= total->bytes;
+ write_seqcount_end(myseq);
local_bh_enable();
}
@@ -265,7 +270,7 @@ static void nft_counter_offload_stats(struct nft_expr *expr,
struct nft_counter *this_cpu;
seqcount_t *myseq;
- preempt_disable();
+ local_bh_disable();
this_cpu = this_cpu_ptr(priv->counter);
myseq = this_cpu_ptr(&nft_counter_seq);
@@ -273,7 +278,7 @@ static void nft_counter_offload_stats(struct nft_expr *expr,
this_cpu->packets += stats->pkts;
this_cpu->bytes += stats->bytes;
write_seqcount_end(myseq);
- preempt_enable();
+ local_bh_enable();
}
void nft_counter_init_seqcount(void)
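/* Illustrative sketch only, not part of the commit: the read side that the
 * per-CPU seqcount protects. Readers retry while a writer sits between
 * write_seqcount_begin()/end(), which is why the reset and offload-stats
 * paths above must also run inside the write section with BHs disabled.
 * struct example_counter stands in for struct nft_counter.
 */
#include <linux/seqlock.h>
#include <linux/types.h>

struct example_counter {
	u64 bytes;
	u64 packets;
};

static void example_counter_fetch(seqcount_t *seq,
				  const struct example_counter *c,
				  struct example_counter *out)
{
	unsigned int s;

	do {
		s = read_seqcount_begin(seq);
		out->bytes = c->bytes;
		out->packets = c->packets;
	} while (read_seqcount_retry(seq, s));
}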
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 99d72543abd3..78d9961fcd44 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -2706,7 +2706,7 @@ static struct pernet_operations ovs_net_ops = {
};
static const char * const ovs_drop_reasons[] = {
-#define S(x) (#x),
+#define S(x) [(x) & ~SKB_DROP_REASON_SUBSYS_MASK] = (#x),
OVS_DROP_REASONS(S)
#undef S
};
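/* Illustrative sketch only, not part of the commit, with hypothetical names:
 * why the index is masked above. Subsystem drop reasons such as OVS_DROP_*
 * carry the subsystem identifier in the upper bits of the enum value, so an
 * unmasked designated initializer would create a huge, sparse array; masking
 * with ~SKB_DROP_REASON_SUBSYS_MASK turns the value back into a small,
 * zero-based table index.
 */
#define EX_SUBSYS_SHIFT	16
#define EX_SUBSYS_MASK	(0xffffU << EX_SUBSYS_SHIFT)

enum example_drop_reason {
	EX_DROP_FIRST = 1U << EX_SUBSYS_SHIFT,	/* raw value 0x10000 */
	EX_DROP_SECOND,				/* raw value 0x10001 */
};

static const char * const example_drop_str[] = {
	[EX_DROP_FIRST & ~EX_SUBSYS_MASK] = "EX_DROP_FIRST",	/* index 0 */
	[EX_DROP_SECOND & ~EX_SUBSYS_MASK] = "EX_DROP_SECOND",	/* index 1 */
};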
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index 113b907da0f7..3ba8e7e739b5 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -44,6 +44,8 @@ static DEFINE_MUTEX(zones_mutex);
struct zones_ht_key {
struct net *net;
u16 zone;
+ /* Note : pad[] must be the last field. */
+ u8 pad[];
};
struct tcf_ct_flow_table {
@@ -60,7 +62,7 @@ struct tcf_ct_flow_table {
static const struct rhashtable_params zones_params = {
.head_offset = offsetof(struct tcf_ct_flow_table, node),
.key_offset = offsetof(struct tcf_ct_flow_table, key),
- .key_len = sizeof_field(struct tcf_ct_flow_table, key),
+ .key_len = offsetof(struct zones_ht_key, pad),
.automatic_shrinking = true,
};
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 9602dafe32e6..d2f49db70523 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -786,12 +786,15 @@ skip_hash:
* queue, accept the collision, update the host tags.
*/
q->way_collisions++;
- if (q->flows[outer_hash + k].set == CAKE_SET_BULK) {
- q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--;
- q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--;
- }
allocate_src = cake_dsrc(flow_mode);
allocate_dst = cake_ddst(flow_mode);
+
+ if (q->flows[outer_hash + k].set == CAKE_SET_BULK) {
+ if (allocate_src)
+ q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--;
+ if (allocate_dst)
+ q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--;
+ }
found:
/* reserve queue for future packets in same flow */
reduced_hash = outer_hash + k;
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 238974725679..19a49af5a9e5 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -663,7 +663,9 @@ begin:
pband = &q->band_flows[q->band_nr];
pband->credit = min(pband->credit + pband->quantum,
pband->quantum);
- goto begin;
+ if (pband->credit > 0)
+ goto begin;
+ retry = 0;
}
if (q->time_next_delayed_flow != ~0ULL)
qdisc_watchdog_schedule_range_ns(&q->watchdog,
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index edc72962ae63..39382ee1e331 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -446,12 +446,10 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct netem_sched_data *q = qdisc_priv(sch);
/* We don't fill cb now as skb_unshare() may invalidate it */
struct netem_skb_cb *cb;
- struct sk_buff *skb2;
+ struct sk_buff *skb2 = NULL;
struct sk_buff *segs = NULL;
unsigned int prev_len = qdisc_pkt_len(skb);
int count = 1;
- int rc = NET_XMIT_SUCCESS;
- int rc_drop = NET_XMIT_DROP;
/* Do not fool qdisc_drop_all() */
skb->prev = NULL;
@@ -480,19 +478,11 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
skb_orphan_partial(skb);
/*
- * If we need to duplicate packet, then re-insert at top of the
- * qdisc tree, since parent queuer expects that only one
- * skb will be queued.
+ * If we need to duplicate the packet, then clone it before the
+ * original is modified.
*/
- if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
- struct Qdisc *rootq = qdisc_root_bh(sch);
- u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
-
- q->duplicate = 0;
- rootq->enqueue(skb2, rootq, to_free);
- q->duplicate = dupsave;
- rc_drop = NET_XMIT_SUCCESS;
- }
+ if (count > 1)
+ skb2 = skb_clone(skb, GFP_ATOMIC);
/*
* Randomized packet corruption.
@@ -504,7 +494,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
if (skb_is_gso(skb)) {
skb = netem_segment(skb, sch, to_free);
if (!skb)
- return rc_drop;
+ goto finish_segs;
+
segs = skb->next;
skb_mark_not_on_list(skb);
qdisc_skb_cb(skb)->pkt_len = skb->len;
@@ -530,7 +521,24 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
/* re-link segs, so that qdisc_drop_all() frees them all */
skb->next = segs;
qdisc_drop_all(skb, sch, to_free);
- return rc_drop;
+ if (skb2)
+ __qdisc_drop(skb2, to_free);
+ return NET_XMIT_DROP;
+ }
+
+ /*
+ * If doing duplication then re-insert at top of the
+ * qdisc tree, since parent queuer expects that only one
+ * skb will be queued.
+ */
+ if (skb2) {
+ struct Qdisc *rootq = qdisc_root_bh(sch);
+ u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
+
+ q->duplicate = 0;
+ rootq->enqueue(skb2, rootq, to_free);
+ q->duplicate = dupsave;
+ skb2 = NULL;
}
qdisc_qstats_backlog_inc(sch, skb);
@@ -601,9 +609,12 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
}
finish_segs:
+ if (skb2)
+ __qdisc_drop(skb2, to_free);
+
if (segs) {
unsigned int len, last_len;
- int nb;
+ int rc, nb;
len = skb ? skb->len : 0;
nb = skb ? 1 : 0;
@@ -731,11 +742,10 @@ deliver:
err = qdisc_enqueue(skb, q->qdisc, &to_free);
kfree_skb_list(to_free);
- if (err != NET_XMIT_SUCCESS &&
- net_xmit_drop_count(err)) {
- qdisc_qstats_drop(sch);
- qdisc_tree_reduce_backlog(sch, 1,
- pkt_len);
+ if (err != NET_XMIT_SUCCESS) {
+ if (net_xmit_drop_count(err))
+ qdisc_qstats_drop(sch);
+ qdisc_tree_reduce_backlog(sch, 1, pkt_len);
}
goto tfifo_dequeue;
}
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 17fcaa9b0df9..a8a254a5008e 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -735,15 +735,19 @@ static int __sctp_hash_endpoint(struct sctp_endpoint *ep)
struct sock *sk = ep->base.sk;
struct net *net = sock_net(sk);
struct sctp_hashbucket *head;
+ int err = 0;
ep->hashent = sctp_ep_hashfn(net, ep->base.bind_addr.port);
head = &sctp_ep_hashtable[ep->hashent];
+ write_lock(&head->lock);
if (sk->sk_reuseport) {
bool any = sctp_is_ep_boundall(sk);
struct sctp_endpoint *ep2;
struct list_head *list;
- int cnt = 0, err = 1;
+ int cnt = 0;
+
+ err = 1;
list_for_each(list, &ep->base.bind_addr.address_list)
cnt++;
@@ -761,24 +765,24 @@ static int __sctp_hash_endpoint(struct sctp_endpoint *ep)
if (!err) {
err = reuseport_add_sock(sk, sk2, any);
if (err)
- return err;
+ goto out;
break;
} else if (err < 0) {
- return err;
+ goto out;
}
}
if (err) {
err = reuseport_alloc(sk, any);
if (err)
- return err;
+ goto out;
}
}
- write_lock(&head->lock);
hlist_add_head(&ep->node, &head->chain);
+out:
write_unlock(&head->lock);
- return 0;
+ return err;
}
/* Add an endpoint to the hash. Local BH-safe. */
@@ -803,10 +807,9 @@ static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
head = &sctp_ep_hashtable[ep->hashent];
+ write_lock(&head->lock);
if (rcu_access_pointer(sk->sk_reuseport_cb))
reuseport_detach_sock(sk);
-
- write_lock(&head->lock);
hlist_del_init(&ep->node);
write_unlock(&head->lock);
}
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 5adf0c0a6c1a..7d315a18612b 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -2260,12 +2260,6 @@ enum sctp_disposition sctp_sf_do_5_2_4_dupcook(
}
}
- /* Update socket peer label if first association. */
- if (security_sctp_assoc_request(new_asoc, chunk->head_skb ?: chunk->skb)) {
- sctp_association_free(new_asoc);
- return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
- }
-
/* Set temp so that it won't be added into hashtable */
new_asoc->temp = 1;
@@ -2274,6 +2268,22 @@ enum sctp_disposition sctp_sf_do_5_2_4_dupcook(
*/
action = sctp_tietags_compare(new_asoc, asoc);
+ /* In cases C and E the association doesn't enter the ESTABLISHED
+ * state, so there is no need to call security_sctp_assoc_request().
+ */
+ switch (action) {
+ case 'A': /* Association restart. */
+ case 'B': /* Collision case B. */
+ case 'D': /* Collision case D. */
+ /* Update socket peer label if first association. */
+ if (security_sctp_assoc_request((struct sctp_association *)asoc,
+ chunk->head_skb ?: chunk->skb)) {
+ sctp_association_free(new_asoc);
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ }
+ break;
+ }
+
switch (action) {
case 'A': /* Association restart. */
retval = sctp_sf_do_dupcook_a(net, ep, asoc, chunk, commands,
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 73a875573e7a..8e3093938cd2 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -3319,10 +3319,8 @@ int smc_create_clcsk(struct net *net, struct sock *sk, int family)
rc = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP,
&smc->clcsock);
- if (rc) {
- sk_common_release(sk);
+ if (rc)
return rc;
- }
/* smc_clcsock_release() does not wait smc->clcsock->sk's
* destruction; its sk_state might not be TCP_CLOSE after
@@ -3368,6 +3366,9 @@ static int __smc_create(struct net *net, struct socket *sock, int protocol,
smc->clcsock = clcsock;
else
rc = smc_create_clcsk(net, sk, family);
+
+ if (rc)
+ sk_common_release(sk);
out:
return rc;
}
diff --git a/net/smc/smc.h b/net/smc/smc.h
index 34b781e463c4..ad77d6b6b8d3 100644
--- a/net/smc/smc.h
+++ b/net/smc/smc.h
@@ -284,6 +284,9 @@ struct smc_connection {
struct smc_sock { /* smc sock container */
struct sock sk;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct ipv6_pinfo *pinet6;
+#endif
struct socket *clcsock; /* internal tcp socket */
void (*clcsk_state_change)(struct sock *sk);
/* original stat_change fct. */
diff --git a/net/smc/smc_inet.c b/net/smc/smc_inet.c
index bece346dd8e9..a5b2041600f9 100644
--- a/net/smc/smc_inet.c
+++ b/net/smc/smc_inet.c
@@ -60,6 +60,11 @@ static struct inet_protosw smc_inet_protosw = {
};
#if IS_ENABLED(CONFIG_IPV6)
+struct smc6_sock {
+ struct smc_sock smc;
+ struct ipv6_pinfo inet6;
+};
+
static struct proto smc_inet6_prot = {
.name = "INET6_SMC",
.owner = THIS_MODULE,
@@ -67,9 +72,10 @@ static struct proto smc_inet6_prot = {
.hash = smc_hash_sk,
.unhash = smc_unhash_sk,
.release_cb = smc_release_cb,
- .obj_size = sizeof(struct smc_sock),
+ .obj_size = sizeof(struct smc6_sock),
.h.smc_hash = &smc_v6_hashinfo,
.slab_flags = SLAB_TYPESAFE_BY_RCU,
+ .ipv6_pinfo_offset = offsetof(struct smc6_sock, inet6),
};
static const struct proto_ops smc_inet6_stream_ops = {
diff --git a/net/smc/smc_stats.h b/net/smc/smc_stats.h
index 9d32058db2b5..e19177ce4092 100644
--- a/net/smc/smc_stats.h
+++ b/net/smc/smc_stats.h
@@ -19,7 +19,7 @@
#include "smc_clc.h"
-#define SMC_MAX_FBACK_RSN_CNT 30
+#define SMC_MAX_FBACK_RSN_CNT 36
enum {
SMC_BUF_8K,
diff --git a/net/socket.c b/net/socket.c
index fcbdd5bc47ac..0a2bd22ec105 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2362,7 +2362,7 @@ INDIRECT_CALLABLE_DECLARE(bool tcp_bpf_bypass_getsockopt(int level,
int do_sock_getsockopt(struct socket *sock, bool compat, int level,
int optname, sockptr_t optval, sockptr_t optlen)
{
- int max_optlen __maybe_unused;
+ int max_optlen __maybe_unused = 0;
const struct proto_ops *ops;
int err;
@@ -2371,7 +2371,7 @@ int do_sock_getsockopt(struct socket *sock, bool compat, int level,
return err;
if (!compat)
- max_optlen = BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen);
+ copy_from_sockptr(&max_optlen, optlen, sizeof(int));
ops = READ_ONCE(sock->ops);
if (level == SOL_SOCKET) {
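
After this hunk max_optlen starts at 0 and is filled through copy_from_sockptr() for both native and compat callers, instead of being left uninitialized on the compat path. A minimal sketch of reading a length through a sockptr-style wrapper rather than dereferencing it directly; the types here are hypothetical userspace stand-ins, not the kernel API:

	#include <stdio.h>
	#include <string.h>

	/* Userspace stand-in for sockptr_t. */
	struct myptr { void *p; };

	static int copy_from_myptr(void *dst, struct myptr src, size_t len)
	{
		if (!src.p)
			return -1;
		memcpy(dst, src.p, len);  /* the kernel would use copy_from_user() */
		return 0;
	}

	int main(void)
	{
		int user_optlen = 128;
		struct myptr optlen = { .p = &user_optlen };
		int max_optlen = 0;	/* sane default if the copy fails */

		if (copy_from_myptr(&max_optlen, optlen, sizeof(int)))
			return 1;
		printf("max_optlen=%d\n", max_optlen);
		return 0;
	}
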
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index e03f14024e47..88a59cfa5583 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -161,7 +161,7 @@ param_get_pool_mode(char *buf, const struct kernel_param *kp)
str[len] = '\n';
str[len + 1] = '\0';
- return sysfs_emit(buf, str);
+ return sysfs_emit(buf, "%s", str);
}
module_param_call(pool_mode, param_set_pool_mode, param_get_pool_mode,
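
The svc.c hunk passes the assembled string as an argument to a fixed "%s" format instead of using it as the format itself, so a '%' inside the pool-mode string can never be parsed as a conversion. A minimal illustration of the same idea with plain printf; the string value is made up:

	#include <stdio.h>

	int main(void)
	{
		const char str[] = "pool-mode-100%\n";	/* value containing a '%' */

		/* printf(str) would treat "%\n" as a conversion specifier;
		 * the safe form pins the format and passes the data separately.
		 */
		printf("%s", str);
		return 0;
	}
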
diff --git a/net/sunrpc/xprtrdma/ib_client.c b/net/sunrpc/xprtrdma/ib_client.c
index a938c19c3490..8507cd4d8921 100644
--- a/net/sunrpc/xprtrdma/ib_client.c
+++ b/net/sunrpc/xprtrdma/ib_client.c
@@ -62,10 +62,11 @@ int rpcrdma_rn_register(struct ib_device *device,
if (!rd || test_bit(RPCRDMA_RD_F_REMOVING, &rd->rd_flags))
return -ENETUNREACH;
- kref_get(&rd->rd_kref);
if (xa_alloc(&rd->rd_xa, &rn->rn_index, rn, xa_limit_32b, GFP_KERNEL) < 0)
return -ENOMEM;
+ kref_get(&rd->rd_kref);
rn->rn_done = done;
+ trace_rpcrdma_client_register(device, rn);
return 0;
}
@@ -91,6 +92,7 @@ void rpcrdma_rn_unregister(struct ib_device *device,
if (!rd)
return;
+ trace_rpcrdma_client_unregister(device, rn);
xa_erase(&rd->rd_xa, rn->rn_index);
kref_put(&rd->rd_kref, rpcrdma_rn_release);
}
@@ -111,7 +113,7 @@ static int rpcrdma_add_one(struct ib_device *device)
return -ENOMEM;
kref_init(&rd->rd_kref);
- xa_init_flags(&rd->rd_xa, XA_FLAGS_ALLOC1);
+ xa_init_flags(&rd->rd_xa, XA_FLAGS_ALLOC);
rd->rd_device = device;
init_completion(&rd->rd_done);
ib_set_client_data(device, &rpcrdma_ib_client, rd);
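
The reordering above takes the kref only once xa_alloc() has succeeded, so a failed allocation no longer leaks a reference, and XA_FLAGS_ALLOC additionally allows index 0 to be handed out. A minimal sketch of "acquire the reference after the last fallible step", with hypothetical names and not part of the patch:

	#include <stdio.h>

	struct dev { int refs; };

	static int fallible_insert(int fail)
	{
		return fail ? -1 : 0;
	}

	static int do_register(struct dev *d, int fail)
	{
		/* Run every step that can fail first ... */
		if (fallible_insert(fail))
			return -1;	/* nothing to undo */

		/* ... and only then take the reference that must be balanced. */
		d->refs++;
		return 0;
	}

	int main(void)
	{
		struct dev d = { .refs = 1 };

		do_register(&d, 1);		/* failure path: refcount untouched */
		do_register(&d, 0);		/* success path: refcount bumped */
		printf("refs=%d\n", d.refs);	/* prints 2 */
		return 0;
	}
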
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 4b040285aa78..0ff9b2dd86ba 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -1270,25 +1270,28 @@ out:
return err;
}
+int __vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
+ size_t len, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct vsock_sock *vsk = vsock_sk(sk);
+
+ return vsk->transport->dgram_dequeue(vsk, msg, len, flags);
+}
+
int vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
size_t len, int flags)
{
#ifdef CONFIG_BPF_SYSCALL
+ struct sock *sk = sock->sk;
const struct proto *prot;
-#endif
- struct vsock_sock *vsk;
- struct sock *sk;
- sk = sock->sk;
- vsk = vsock_sk(sk);
-
-#ifdef CONFIG_BPF_SYSCALL
prot = READ_ONCE(sk->sk_prot);
if (prot != &vsock_proto)
return prot->recvmsg(sk, msg, len, flags, NULL);
#endif
- return vsk->transport->dgram_dequeue(vsk, msg, len, flags);
+ return __vsock_dgram_recvmsg(sock, msg, len, flags);
}
EXPORT_SYMBOL_GPL(vsock_dgram_recvmsg);
@@ -2174,15 +2177,12 @@ out:
}
int
-vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
- int flags)
+__vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ int flags)
{
struct sock *sk;
struct vsock_sock *vsk;
const struct vsock_transport *transport;
-#ifdef CONFIG_BPF_SYSCALL
- const struct proto *prot;
-#endif
int err;
sk = sock->sk;
@@ -2233,14 +2233,6 @@ vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
goto out;
}
-#ifdef CONFIG_BPF_SYSCALL
- prot = READ_ONCE(sk->sk_prot);
- if (prot != &vsock_proto) {
- release_sock(sk);
- return prot->recvmsg(sk, msg, len, flags, NULL);
- }
-#endif
-
if (sk->sk_type == SOCK_STREAM)
err = __vsock_stream_recvmsg(sk, msg, len, flags);
else
@@ -2250,6 +2242,22 @@ out:
release_sock(sk);
return err;
}
+
+int
+vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ int flags)
+{
+#ifdef CONFIG_BPF_SYSCALL
+ struct sock *sk = sock->sk;
+ const struct proto *prot;
+
+ prot = READ_ONCE(sk->sk_prot);
+ if (prot != &vsock_proto)
+ return prot->recvmsg(sk, msg, len, flags, NULL);
+#endif
+
+ return __vsock_connectible_recvmsg(sock, msg, len, flags);
+}
EXPORT_SYMBOL_GPL(vsock_connectible_recvmsg);
static int vsock_set_rcvlowat(struct sock *sk, int val)
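
The recvmsg paths are now split into a public wrapper, which checks whether sk_prot has been replaced (for example by the BPF sockmap proto) and defers to it, and an inner __vsock_*_recvmsg() that always goes to the transport. vsock_bpf.c calls the inner variants, so a sockmap-installed recvmsg cannot bounce back into itself. A minimal sketch of that wrapper/inner split, with hypothetical names and not part of the patch:

	#include <stdio.h>

	struct sock;
	typedef int (*recvmsg_fn)(struct sock *sk);

	struct sock { recvmsg_fn prot_recvmsg; };

	static int default_recvmsg(struct sock *sk);

	/* Inner helper: always the "real" receive path. */
	static int __do_recvmsg(struct sock *sk)
	{
		(void)sk;
		printf("transport dequeue\n");
		return 0;
	}

	/* Public entry: defer to an overriding proto if one was installed. */
	static int do_recvmsg(struct sock *sk)
	{
		if (sk->prot_recvmsg != default_recvmsg)
			return sk->prot_recvmsg(sk);
		return __do_recvmsg(sk);
	}

	static int default_recvmsg(struct sock *sk)
	{
		return __do_recvmsg(sk);
	}

	/* An override (think sockmap) calls the inner helper directly,
	 * so it never re-enters do_recvmsg() and loops on itself.
	 */
	static int override_recvmsg(struct sock *sk)
	{
		printf("override, then ");
		return __do_recvmsg(sk);
	}

	int main(void)
	{
		struct sock s = { .prot_recvmsg = override_recvmsg };

		return do_recvmsg(&s);
	}
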
diff --git a/net/vmw_vsock/vsock_bpf.c b/net/vmw_vsock/vsock_bpf.c
index a3c97546ab84..c42c5cc18f32 100644
--- a/net/vmw_vsock/vsock_bpf.c
+++ b/net/vmw_vsock/vsock_bpf.c
@@ -64,9 +64,9 @@ static int __vsock_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int
int err;
if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)
- err = vsock_connectible_recvmsg(sock, msg, len, flags);
+ err = __vsock_connectible_recvmsg(sock, msg, len, flags);
else if (sk->sk_type == SOCK_DGRAM)
- err = vsock_dgram_recvmsg(sock, msg, len, flags);
+ err = __vsock_dgram_recvmsg(sock, msg, len, flags);
else
err = -EPROTOTYPE;
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index d99319d82205..64eeed82d43d 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -3178,8 +3178,7 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
struct ieee80211_mgmt *mgmt, size_t len,
gfp_t gfp)
{
- size_t min_hdr_len = offsetof(struct ieee80211_mgmt,
- u.probe_resp.variable);
+ size_t min_hdr_len;
struct ieee80211_ext *ext = NULL;
enum cfg80211_bss_frame_type ftype;
u16 beacon_interval;
@@ -3202,10 +3201,16 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
if (ieee80211_is_s1g_beacon(mgmt->frame_control)) {
ext = (void *) mgmt;
- min_hdr_len = offsetof(struct ieee80211_ext, u.s1g_beacon);
if (ieee80211_is_s1g_short_beacon(mgmt->frame_control))
min_hdr_len = offsetof(struct ieee80211_ext,
u.s1g_short_beacon.variable);
+ else
+ min_hdr_len = offsetof(struct ieee80211_ext,
+ u.s1g_beacon.variable);
+ } else {
+ /* same for beacons */
+ min_hdr_len = offsetof(struct ieee80211_mgmt,
+ u.probe_resp.variable);
}
if (WARN_ON(len < min_hdr_len))
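
min_hdr_len is now picked per frame type before the length check, with offsetof() marking where each header's variable part begins. A minimal sketch of validating a buffer against a per-variant fixed-header size; the frame layouts are hypothetical, not the 802.11 structures:

	#include <stddef.h>
	#include <stdio.h>

	struct short_hdr { unsigned char type; unsigned char variable[]; };
	struct long_hdr  { unsigned char type; unsigned int ts; unsigned char variable[]; };

	static int check_frame(int is_long, size_t len)
	{
		size_t min_hdr_len;

		if (is_long)
			min_hdr_len = offsetof(struct long_hdr, variable);
		else
			min_hdr_len = offsetof(struct short_hdr, variable);

		if (len < min_hdr_len)
			return -1;	/* too short to hold the fixed header */
		return 0;
	}

	int main(void)
	{
		printf("%d %d\n", check_frame(1, 2), check_frame(0, 2));
		return 0;
	}
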
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index e419aa8c4a5a..d9d7bf8bb5c1 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -1045,6 +1045,7 @@ void cfg80211_connect_done(struct net_device *dev,
cfg80211_hold_bss(
bss_from_pub(params->links[link].bss));
ev->cr.links[link].bss = params->links[link].bss;
+ ev->cr.links[link].status = params->links[link].status;
if (params->links[link].addr) {
ev->cr.links[link].addr = next;