Diffstat (limited to 'net')
-rw-r--r--  net/9p/protocol.c | 22
-rw-r--r--  net/Kconfig | 4
-rw-r--r--  net/batman-adv/unicast.c | 15
-rw-r--r--  net/batman-adv/vis.c | 14
-rw-r--r--  net/bluetooth/hci_conn.c | 16
-rw-r--r--  net/bluetooth/hci_core.c | 4
-rw-r--r--  net/bluetooth/hci_event.c | 9
-rw-r--r--  net/bluetooth/l2cap.c | 85
-rw-r--r--  net/bluetooth/rfcomm/core.c | 3
-rw-r--r--  net/bridge/br_fdb.c | 4
-rw-r--r--  net/bridge/br_input.c | 2
-rw-r--r--  net/bridge/br_multicast.c | 19
-rw-r--r--  net/bridge/br_private.h | 3
-rw-r--r--  net/caif/chnl_net.c | 4
-rw-r--r--  net/ceph/ceph_hash.c | 3
-rw-r--r--  net/ceph/messenger.c | 46
-rw-r--r--  net/ceph/osdmap.c | 4
-rw-r--r--  net/core/dev.c | 46
-rw-r--r--  net/core/ethtool.c | 2
-rw-r--r--  net/core/rtnetlink.c | 6
-rw-r--r--  net/core/skbuff.c | 9
-rw-r--r--  net/core/sock.c | 6
-rw-r--r--  net/dcb/dcbnl.c | 22
-rw-r--r--  net/dccp/Kconfig | 4
-rw-r--r--  net/decnet/dn_dev.c | 2
-rw-r--r--  net/dsa/dsa.c | 2
-rw-r--r--  net/econet/af_econet.c | 4
-rw-r--r--  net/ipv4/Kconfig | 4
-rw-r--r--  net/ipv4/af_inet.c | 16
-rw-r--r--  net/ipv4/arp.c | 11
-rw-r--r--  net/ipv4/devinet.c | 30
-rw-r--r--  net/ipv4/inetpeer.c | 2
-rw-r--r--  net/ipv4/ip_gre.c | 1
-rw-r--r--  net/ipv4/ipmr.c | 76
-rw-r--r--  net/ipv4/raw.c | 19
-rw-r--r--  net/ipv4/route.c | 7
-rw-r--r--  net/ipv4/tcp_input.c | 2
-rw-r--r--  net/ipv4/tcp_ipv4.c | 1
-rw-r--r--  net/ipv4/tcp_output.c | 2
-rw-r--r--  net/ipv6/addrconf.c | 81
-rw-r--r--  net/ipv6/af_inet6.c | 2
-rw-r--r--  net/ipv6/ip6mr.c | 75
-rw-r--r--  net/ipv6/raw.c | 19
-rw-r--r--  net/ipv6/route.c | 20
-rw-r--r--  net/ipv6/sysctl_net_ipv6.c | 9
-rw-r--r--  net/ipv6/xfrm6_policy.c | 6
-rw-r--r--  net/mac80211/Kconfig | 6
-rw-r--r--  net/mac80211/cfg.c | 2
-rw-r--r--  net/mac80211/ieee80211_i.h | 2
-rw-r--r--  net/mac80211/status.c | 7
-rw-r--r--  net/mac80211/tx.c | 5
-rw-r--r--  net/mac80211/util.c | 2
-rw-r--r--  net/rfkill/Kconfig | 4
-rw-r--r--  net/rxrpc/af_rxrpc.c | 2
-rw-r--r--  net/sched/Kconfig | 2
-rw-r--r--  net/sched/sch_cbq.c | 3
-rw-r--r--  net/sched/sch_drr.c | 2
-rw-r--r--  net/sched/sch_dsmark.c | 2
-rw-r--r--  net/sched/sch_fifo.c | 5
-rw-r--r--  net/sched/sch_hfsc.c | 2
-rw-r--r--  net/sched/sch_htb.c | 12
-rw-r--r--  net/sched/sch_multiq.c | 2
-rw-r--r--  net/sched/sch_netem.c | 3
-rw-r--r--  net/sched/sch_prio.c | 2
-rw-r--r--  net/sched/sch_red.c | 11
-rw-r--r--  net/sched/sch_sfq.c | 5
-rw-r--r--  net/sched/sch_tbf.c | 2
-rw-r--r--  net/sched/sch_teql.c | 3
-rw-r--r--  net/socket.c | 30
-rw-r--r--  net/sunrpc/auth.c | 28
-rw-r--r--  net/sunrpc/auth_gss/auth_gss.c | 44
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_crypto.c | 2
-rw-r--r--  net/sunrpc/auth_gss/svcauth_gss.c | 2
-rw-r--r--  net/sunrpc/bc_svc.c | 2
-rw-r--r--  net/sunrpc/cache.c | 54
-rw-r--r--  net/sunrpc/clnt.c | 21
-rw-r--r--  net/sunrpc/rpc_pipe.c | 2
-rw-r--r--  net/sunrpc/rpcb_clnt.c | 147
-rw-r--r--  net/sunrpc/svc.c | 39
-rw-r--r--  net/sunrpc/svc_xprt.c | 97
-rw-r--r--  net/sunrpc/svcauth.c | 1
-rw-r--r--  net/sunrpc/svcauth_unix.c | 17
-rw-r--r--  net/sunrpc/svcsock.c | 122
-rw-r--r--  net/sunrpc/xdr.c | 155
-rw-r--r--  net/sunrpc/xprt.c | 5
-rw-r--r--  net/sunrpc/xprtsock.c | 34
-rw-r--r--  net/wireless/Kconfig | 2
-rw-r--r--  net/x25/x25_facilities.c | 28
-rw-r--r--  net/x25/x25_in.c | 14
-rw-r--r--  net/x25/x25_link.c | 5
-rw-r--r--  net/xfrm/xfrm_policy.c | 7
91 files changed, 1062 insertions, 625 deletions
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index 798beac7f100..1e308f210928 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -178,27 +178,24 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
break;
case 's':{
char **sptr = va_arg(ap, char **);
- int16_t len;
- int size;
+ uint16_t len;
errcode = p9pdu_readf(pdu, proto_version,
"w", &len);
if (errcode)
break;
- size = max_t(int16_t, len, 0);
-
- *sptr = kmalloc(size + 1, GFP_KERNEL);
+ *sptr = kmalloc(len + 1, GFP_KERNEL);
if (*sptr == NULL) {
errcode = -EFAULT;
break;
}
- if (pdu_read(pdu, *sptr, size)) {
+ if (pdu_read(pdu, *sptr, len)) {
errcode = -EFAULT;
kfree(*sptr);
*sptr = NULL;
} else
- (*sptr)[size] = 0;
+ (*sptr)[len] = 0;
}
break;
case 'Q':{
@@ -234,14 +231,14 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
}
break;
case 'D':{
- int32_t *count = va_arg(ap, int32_t *);
+ uint32_t *count = va_arg(ap, uint32_t *);
void **data = va_arg(ap, void **);
errcode =
p9pdu_readf(pdu, proto_version, "d", count);
if (!errcode) {
*count =
- min_t(int32_t, *count,
+ min_t(uint32_t, *count,
pdu->size - pdu->offset);
*data = &pdu->sdata[pdu->offset];
}
@@ -404,9 +401,10 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
break;
case 's':{
const char *sptr = va_arg(ap, const char *);
- int16_t len = 0;
+ uint16_t len = 0;
if (sptr)
- len = min_t(int16_t, strlen(sptr), USHRT_MAX);
+ len = min_t(uint16_t, strlen(sptr),
+ USHRT_MAX);
errcode = p9pdu_writef(pdu, proto_version,
"w", len);
@@ -438,7 +436,7 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
stbuf->n_gid, stbuf->n_muid);
} break;
case 'D':{
- int32_t count = va_arg(ap, int32_t);
+ uint32_t count = va_arg(ap, uint32_t);
const void *data = va_arg(ap, const void *);
errcode = p9pdu_writef(pdu, proto_version, "d",
diff --git a/net/Kconfig b/net/Kconfig
index ad0aafe903f8..72840626284b 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -253,7 +253,9 @@ config NET_TCPPROBE
what was just said, you don't need it: say N.
Documentation on how to use TCP connection probing can be found
- at http://linux-net.osdl.org/index.php/TcpProbe
+ at:
+
+ http://www.linuxfoundation.org/collaborate/workgroups/networking/tcpprobe
To compile this code as a module, choose M here: the
module will be called tcp_probe.
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index ee41fef04b21..d1a611322549 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -50,12 +50,12 @@ static struct sk_buff *frag_merge_packet(struct list_head *head,
skb = tfp->skb;
}
+ if (skb_linearize(skb) < 0 || skb_linearize(tmp_skb) < 0)
+ goto err;
+
skb_pull(tmp_skb, sizeof(struct unicast_frag_packet));
- if (pskb_expand_head(skb, 0, tmp_skb->len, GFP_ATOMIC) < 0) {
- /* free buffered skb, skb will be freed later */
- kfree_skb(tfp->skb);
- return NULL;
- }
+ if (pskb_expand_head(skb, 0, tmp_skb->len, GFP_ATOMIC) < 0)
+ goto err;
/* move free entry to end */
tfp->skb = NULL;
@@ -70,6 +70,11 @@ static struct sk_buff *frag_merge_packet(struct list_head *head,
unicast_packet->packet_type = BAT_UNICAST;
return skb;
+
+err:
+ /* free buffered skb, skb will be freed later */
+ kfree_skb(tfp->skb);
+ return NULL;
}
static void frag_create_entry(struct list_head *head, struct sk_buff *skb)
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
index cd4c4231fa48..de1022cacaf7 100644
--- a/net/batman-adv/vis.c
+++ b/net/batman-adv/vis.c
@@ -64,6 +64,7 @@ static void free_info(struct kref *ref)
spin_unlock_bh(&bat_priv->vis_list_lock);
kfree_skb(info->skb_packet);
+ kfree(info);
}
/* Compare two vis packets, used by the hashing algorithm */
@@ -268,10 +269,10 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
buff_pos += sprintf(buff + buff_pos, "%pM,",
entry->addr);
- for (i = 0; i < packet->entries; i++)
+ for (j = 0; j < packet->entries; j++)
buff_pos += vis_data_read_entry(
buff + buff_pos,
- &entries[i],
+ &entries[j],
entry->addr,
entry->primary);
@@ -444,7 +445,7 @@ static struct vis_info *add_packet(struct bat_priv *bat_priv,
info);
if (hash_added < 0) {
/* did not work (for some reason) */
- kref_put(&old_info->refcount, free_info);
+ kref_put(&info->refcount, free_info);
info = NULL;
}
@@ -815,7 +816,7 @@ static void send_vis_packets(struct work_struct *work)
container_of(work, struct delayed_work, work);
struct bat_priv *bat_priv =
container_of(delayed_work, struct bat_priv, vis_work);
- struct vis_info *info, *temp;
+ struct vis_info *info;
spin_lock_bh(&bat_priv->vis_hash_lock);
purge_vis_packets(bat_priv);
@@ -825,8 +826,9 @@ static void send_vis_packets(struct work_struct *work)
send_list_add(bat_priv, bat_priv->my_vis_info);
}
- list_for_each_entry_safe(info, temp, &bat_priv->vis_send_list,
- send_list) {
+ while (!list_empty(&bat_priv->vis_send_list)) {
+ info = list_first_entry(&bat_priv->vis_send_list,
+ typeof(*info), send_list);
kref_get(&info->refcount);
spin_unlock_bh(&bat_priv->vis_hash_lock);
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 6b90a4191734..99cd8d9d891b 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -379,14 +379,10 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
hci_conn_hold(acl);
if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
- acl->sec_level = sec_level;
+ acl->sec_level = BT_SECURITY_LOW;
+ acl->pending_sec_level = sec_level;
acl->auth_type = auth_type;
hci_acl_connect(acl);
- } else {
- if (acl->sec_level < sec_level)
- acl->sec_level = sec_level;
- if (acl->auth_type < auth_type)
- acl->auth_type = auth_type;
}
if (type == ACL_LINK)
@@ -442,11 +438,17 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
BT_DBG("conn %p", conn);
+ if (conn->pending_sec_level > sec_level)
+ sec_level = conn->pending_sec_level;
+
if (sec_level > conn->sec_level)
- conn->sec_level = sec_level;
+ conn->pending_sec_level = sec_level;
else if (conn->link_mode & HCI_LM_AUTH)
return 1;
+ /* Make sure we preserve an existing MITM requirement*/
+ auth_type |= (conn->auth_type & 0x01);
+
conn->auth_type = auth_type;
if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 8b602d881fd7..9c4541bc488a 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1011,6 +1011,10 @@ int hci_unregister_dev(struct hci_dev *hdev)
destroy_workqueue(hdev->workqueue);
+ hci_dev_lock_bh(hdev);
+ hci_blacklist_clear(hdev);
+ hci_dev_unlock_bh(hdev);
+
__hci_dev_put(hdev);
return 0;
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 38100170d380..a290854fdaa6 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -692,13 +692,13 @@ static int hci_outgoing_auth_needed(struct hci_dev *hdev,
if (conn->state != BT_CONFIG || !conn->out)
return 0;
- if (conn->sec_level == BT_SECURITY_SDP)
+ if (conn->pending_sec_level == BT_SECURITY_SDP)
return 0;
/* Only request authentication for SSP connections or non-SSP
* devices with sec_level HIGH */
if (!(hdev->ssp_mode > 0 && conn->ssp_mode > 0) &&
- conn->sec_level != BT_SECURITY_HIGH)
+ conn->pending_sec_level != BT_SECURITY_HIGH)
return 0;
return 1;
@@ -1095,9 +1095,10 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
if (conn) {
- if (!ev->status)
+ if (!ev->status) {
conn->link_mode |= HCI_LM_AUTH;
- else
+ conn->sec_level = conn->pending_sec_level;
+ } else
conn->sec_level = BT_SECURITY_LOW;
clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index c791fcda7b2d..675614e38e14 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -305,33 +305,44 @@ static void l2cap_chan_del(struct sock *sk, int err)
}
}
-/* Service level security */
-static inline int l2cap_check_security(struct sock *sk)
+static inline u8 l2cap_get_auth_type(struct sock *sk)
{
- struct l2cap_conn *conn = l2cap_pi(sk)->conn;
- __u8 auth_type;
+ if (sk->sk_type == SOCK_RAW) {
+ switch (l2cap_pi(sk)->sec_level) {
+ case BT_SECURITY_HIGH:
+ return HCI_AT_DEDICATED_BONDING_MITM;
+ case BT_SECURITY_MEDIUM:
+ return HCI_AT_DEDICATED_BONDING;
+ default:
+ return HCI_AT_NO_BONDING;
+ }
+ } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
+ if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
+ l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
- if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
- auth_type = HCI_AT_NO_BONDING_MITM;
+ return HCI_AT_NO_BONDING_MITM;
else
- auth_type = HCI_AT_NO_BONDING;
-
- if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
- l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
+ return HCI_AT_NO_BONDING;
} else {
switch (l2cap_pi(sk)->sec_level) {
case BT_SECURITY_HIGH:
- auth_type = HCI_AT_GENERAL_BONDING_MITM;
- break;
+ return HCI_AT_GENERAL_BONDING_MITM;
case BT_SECURITY_MEDIUM:
- auth_type = HCI_AT_GENERAL_BONDING;
- break;
+ return HCI_AT_GENERAL_BONDING;
default:
- auth_type = HCI_AT_NO_BONDING;
- break;
+ return HCI_AT_NO_BONDING;
}
}
+}
+
+/* Service level security */
+static inline int l2cap_check_security(struct sock *sk)
+{
+ struct l2cap_conn *conn = l2cap_pi(sk)->conn;
+ __u8 auth_type;
+
+ auth_type = l2cap_get_auth_type(sk);
return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
auth_type);
@@ -848,6 +859,7 @@ static void __l2cap_sock_close(struct sock *sk, int reason)
result = L2CAP_CR_SEC_BLOCK;
else
result = L2CAP_CR_BAD_PSM;
+ sk->sk_state = BT_DISCONN;
rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
@@ -1068,39 +1080,7 @@ static int l2cap_do_connect(struct sock *sk)
err = -ENOMEM;
- if (sk->sk_type == SOCK_RAW) {
- switch (l2cap_pi(sk)->sec_level) {
- case BT_SECURITY_HIGH:
- auth_type = HCI_AT_DEDICATED_BONDING_MITM;
- break;
- case BT_SECURITY_MEDIUM:
- auth_type = HCI_AT_DEDICATED_BONDING;
- break;
- default:
- auth_type = HCI_AT_NO_BONDING;
- break;
- }
- } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
- if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
- auth_type = HCI_AT_NO_BONDING_MITM;
- else
- auth_type = HCI_AT_NO_BONDING;
-
- if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
- l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
- } else {
- switch (l2cap_pi(sk)->sec_level) {
- case BT_SECURITY_HIGH:
- auth_type = HCI_AT_GENERAL_BONDING_MITM;
- break;
- case BT_SECURITY_MEDIUM:
- auth_type = HCI_AT_GENERAL_BONDING;
- break;
- default:
- auth_type = HCI_AT_NO_BONDING;
- break;
- }
- }
+ auth_type = l2cap_get_auth_type(sk);
hcon = hci_connect(hdev, ACL_LINK, dst,
l2cap_pi(sk)->sec_level, auth_type);
@@ -1127,7 +1107,8 @@ static int l2cap_do_connect(struct sock *sk)
if (sk->sk_type != SOCK_SEQPACKET &&
sk->sk_type != SOCK_STREAM) {
l2cap_sock_clear_timer(sk);
- sk->sk_state = BT_CONNECTED;
+ if (l2cap_check_security(sk))
+ sk->sk_state = BT_CONNECTED;
} else
l2cap_do_start(sk);
}
@@ -1893,8 +1874,8 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
if (pi->mode == L2CAP_MODE_STREAMING) {
l2cap_streaming_send(sk);
} else {
- if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY &&
- pi->conn_state && L2CAP_CONN_WAIT_F) {
+ if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
+ (pi->conn_state & L2CAP_CONN_WAIT_F)) {
err = len;
break;
}
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index ff8aaa736650..6b83776534fb 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -1164,7 +1164,8 @@ static int rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci)
* initiator rfcomm_process_rx already calls
* rfcomm_session_put() */
if (s->sock->sk->sk_state != BT_CLOSED)
- rfcomm_session_put(s);
+ if (list_empty(&s->dlcs))
+ rfcomm_session_put(s);
break;
}
}
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 2872393b2939..88485cc74dc3 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -328,12 +328,12 @@ static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
fdb = kmem_cache_alloc(br_fdb_cache, GFP_ATOMIC);
if (fdb) {
memcpy(fdb->addr.addr, addr, ETH_ALEN);
- hlist_add_head_rcu(&fdb->hlist, head);
-
fdb->dst = source;
fdb->is_local = is_local;
fdb->is_static = is_local;
fdb->ageing_timer = jiffies;
+
+ hlist_add_head_rcu(&fdb->hlist, head);
}
return fdb;
}
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 6f6d8e1b776f..88e4aa9cb1f9 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -80,7 +80,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
if (is_multicast_ether_addr(dest)) {
mdst = br_mdb_get(br, skb);
if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) {
- if ((mdst && !hlist_unhashed(&mdst->mglist)) ||
+ if ((mdst && mdst->mglist) ||
br_multicast_is_router(br))
skb2 = skb;
br_multicast_forward(mdst, skb, skb2);
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index f701a21acb34..09d5c0987925 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -232,8 +232,7 @@ static void br_multicast_group_expired(unsigned long data)
if (!netif_running(br->dev) || timer_pending(&mp->timer))
goto out;
- if (!hlist_unhashed(&mp->mglist))
- hlist_del_init(&mp->mglist);
+ mp->mglist = false;
if (mp->ports)
goto out;
@@ -276,7 +275,7 @@ static void br_multicast_del_pg(struct net_bridge *br,
del_timer(&p->query_timer);
call_rcu_bh(&p->rcu, br_multicast_free_pg);
- if (!mp->ports && hlist_unhashed(&mp->mglist) &&
+ if (!mp->ports && !mp->mglist &&
netif_running(br->dev))
mod_timer(&mp->timer, jiffies);
@@ -528,7 +527,7 @@ static void br_multicast_group_query_expired(unsigned long data)
struct net_bridge *br = mp->br;
spin_lock(&br->multicast_lock);
- if (!netif_running(br->dev) || hlist_unhashed(&mp->mglist) ||
+ if (!netif_running(br->dev) || !mp->mglist ||
mp->queries_sent >= br->multicast_last_member_count)
goto out;
@@ -719,7 +718,7 @@ static int br_multicast_add_group(struct net_bridge *br,
goto err;
if (!port) {
- hlist_add_head(&mp->mglist, &br->mglist);
+ mp->mglist = true;
mod_timer(&mp->timer, now + br->multicast_membership_interval);
goto out;
}
@@ -1165,7 +1164,7 @@ static int br_ip4_multicast_query(struct net_bridge *br,
max_delay *= br->multicast_last_member_count;
- if (!hlist_unhashed(&mp->mglist) &&
+ if (mp->mglist &&
(timer_pending(&mp->timer) ?
time_after(mp->timer.expires, now + max_delay) :
try_to_del_timer_sync(&mp->timer) >= 0))
@@ -1177,7 +1176,7 @@ static int br_ip4_multicast_query(struct net_bridge *br,
if (timer_pending(&p->timer) ?
time_after(p->timer.expires, now + max_delay) :
try_to_del_timer_sync(&p->timer) >= 0)
- mod_timer(&mp->timer, now + max_delay);
+ mod_timer(&p->timer, now + max_delay);
}
out:
@@ -1236,7 +1235,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
goto out;
max_delay *= br->multicast_last_member_count;
- if (!hlist_unhashed(&mp->mglist) &&
+ if (mp->mglist &&
(timer_pending(&mp->timer) ?
time_after(mp->timer.expires, now + max_delay) :
try_to_del_timer_sync(&mp->timer) >= 0))
@@ -1248,7 +1247,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
if (timer_pending(&p->timer) ?
time_after(p->timer.expires, now + max_delay) :
try_to_del_timer_sync(&p->timer) >= 0)
- mod_timer(&mp->timer, now + max_delay);
+ mod_timer(&p->timer, now + max_delay);
}
out:
@@ -1283,7 +1282,7 @@ static void br_multicast_leave_group(struct net_bridge *br,
br->multicast_last_member_interval;
if (!port) {
- if (!hlist_unhashed(&mp->mglist) &&
+ if (mp->mglist &&
(timer_pending(&mp->timer) ?
time_after(mp->timer.expires, time) :
try_to_del_timer_sync(&mp->timer) >= 0)) {
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 84aac7734bfc..4e1b620b6be6 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -84,13 +84,13 @@ struct net_bridge_port_group {
struct net_bridge_mdb_entry
{
struct hlist_node hlist[2];
- struct hlist_node mglist;
struct net_bridge *br;
struct net_bridge_port_group __rcu *ports;
struct rcu_head rcu;
struct timer_list timer;
struct timer_list query_timer;
struct br_ip addr;
+ bool mglist;
u32 queries_sent;
};
@@ -238,7 +238,6 @@ struct net_bridge
spinlock_t multicast_lock;
struct net_bridge_mdb_htable __rcu *mdb;
struct hlist_head router_list;
- struct hlist_head mglist;
struct timer_list multicast_router_timer;
struct timer_list multicast_querier_timer;
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index fa9dab372b68..6008d6dc18a0 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -394,9 +394,7 @@ static void ipcaif_net_setup(struct net_device *dev)
priv->conn_req.sockaddr.u.dgm.connection_id = -1;
priv->flowenabled = false;
- ASSERT_RTNL();
init_waitqueue_head(&priv->netmgmt_wq);
- list_add(&priv->list_field, &chnl_net_list);
}
@@ -453,6 +451,8 @@ static int ipcaif_newlink(struct net *src_net, struct net_device *dev,
ret = register_netdevice(dev);
if (ret)
pr_warn("device rtml registration failed\n");
+ else
+ list_add(&caifdev->list_field, &chnl_net_list);
return ret;
}
diff --git a/net/ceph/ceph_hash.c b/net/ceph/ceph_hash.c
index 815ef8826796..0a1b53bce76d 100644
--- a/net/ceph/ceph_hash.c
+++ b/net/ceph/ceph_hash.c
@@ -1,5 +1,6 @@
#include <linux/ceph/types.h>
+#include <linux/module.h>
/*
* Robert Jenkin's hash function.
@@ -104,6 +105,7 @@ unsigned ceph_str_hash(int type, const char *s, unsigned len)
return -1;
}
}
+EXPORT_SYMBOL(ceph_str_hash);
const char *ceph_str_hash_name(int type)
{
@@ -116,3 +118,4 @@ const char *ceph_str_hash_name(int type)
return "unknown";
}
}
+EXPORT_SYMBOL(ceph_str_hash_name);
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index b6ff4a1519ab..dff633d62e5b 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -96,7 +96,7 @@ struct workqueue_struct *ceph_msgr_wq;
int ceph_msgr_init(void)
{
- ceph_msgr_wq = create_workqueue("ceph-msgr");
+ ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_NON_REENTRANT, 0);
if (!ceph_msgr_wq) {
pr_err("msgr_init failed to create workqueue\n");
return -ENOMEM;
@@ -1920,20 +1920,6 @@ bad_tag:
/*
* Atomically queue work on a connection. Bump @con reference to
* avoid races with connection teardown.
- *
- * There is some trickery going on with QUEUED and BUSY because we
- * only want a _single_ thread operating on each connection at any
- * point in time, but we want to use all available CPUs.
- *
- * The worker thread only proceeds if it can atomically set BUSY. It
- * clears QUEUED and does it's thing. When it thinks it's done, it
- * clears BUSY, then rechecks QUEUED.. if it's set again, it loops
- * (tries again to set BUSY).
- *
- * To queue work, we first set QUEUED, _then_ if BUSY isn't set, we
- * try to queue work. If that fails (work is already queued, or BUSY)
- * we give up (work also already being done or is queued) but leave QUEUED
- * set so that the worker thread will loop if necessary.
*/
static void queue_con(struct ceph_connection *con)
{
@@ -1948,11 +1934,7 @@ static void queue_con(struct ceph_connection *con)
return;
}
- set_bit(QUEUED, &con->state);
- if (test_bit(BUSY, &con->state)) {
- dout("queue_con %p - already BUSY\n", con);
- con->ops->put(con);
- } else if (!queue_work(ceph_msgr_wq, &con->work.work)) {
+ if (!queue_delayed_work(ceph_msgr_wq, &con->work, 0)) {
dout("queue_con %p - already queued\n", con);
con->ops->put(con);
} else {
@@ -1967,15 +1949,6 @@ static void con_work(struct work_struct *work)
{
struct ceph_connection *con = container_of(work, struct ceph_connection,
work.work);
- int backoff = 0;
-
-more:
- if (test_and_set_bit(BUSY, &con->state) != 0) {
- dout("con_work %p BUSY already set\n", con);
- goto out;
- }
- dout("con_work %p start, clearing QUEUED\n", con);
- clear_bit(QUEUED, &con->state);
mutex_lock(&con->mutex);
@@ -1994,28 +1967,13 @@ more:
try_read(con) < 0 ||
try_write(con) < 0) {
mutex_unlock(&con->mutex);
- backoff = 1;
ceph_fault(con); /* error/fault path */
goto done_unlocked;
}
done:
mutex_unlock(&con->mutex);
-
done_unlocked:
- clear_bit(BUSY, &con->state);
- dout("con->state=%lu\n", con->state);
- if (test_bit(QUEUED, &con->state)) {
- if (!backoff || test_bit(OPENING, &con->state)) {
- dout("con_work %p QUEUED reset, looping\n", con);
- goto more;
- }
- dout("con_work %p QUEUED reset, but just faulted\n", con);
- clear_bit(QUEUED, &con->state);
- }
- dout("con_work %p done\n", con);
-
-out:
con->ops->put(con);
}
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index d73f3f6efa36..71603ac3dff5 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -605,8 +605,10 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
goto bad;
}
err = __decode_pool(p, end, pi);
- if (err < 0)
+ if (err < 0) {
+ kfree(pi);
goto bad;
+ }
__insert_pg_pool(&map->pg_pools, pi);
}
diff --git a/net/core/dev.c b/net/core/dev.c
index 8393ec408cd4..8ae6631abcc2 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -749,7 +749,8 @@ EXPORT_SYMBOL(dev_get_by_index);
* @ha: hardware address
*
* Search for an interface by MAC address. Returns NULL if the device
- * is not found or a pointer to the device. The caller must hold RCU
+ * is not found or a pointer to the device.
+ * The caller must hold RCU or RTNL.
* The returned device has not had its ref count increased
* and the caller must therefore be careful about locking
*
@@ -1279,10 +1280,13 @@ static int __dev_close_many(struct list_head *head)
static int __dev_close(struct net_device *dev)
{
+ int retval;
LIST_HEAD(single);
list_add(&dev->unreg_list, &single);
- return __dev_close_many(&single);
+ retval = __dev_close_many(&single);
+ list_del(&single);
+ return retval;
}
int dev_close_many(struct list_head *head)
@@ -1324,7 +1328,7 @@ int dev_close(struct net_device *dev)
list_add(&dev->unreg_list, &single);
dev_close_many(&single);
-
+ list_del(&single);
return 0;
}
EXPORT_SYMBOL(dev_close);
@@ -2562,7 +2566,8 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
map = rcu_dereference(rxqueue->rps_map);
if (map) {
- if (map->len == 1) {
+ if (map->len == 1 &&
+ !rcu_dereference_raw(rxqueue->rps_flow_table)) {
tcpu = map->cpus[0];
if (cpu_online(tcpu))
cpu = tcpu;
@@ -3423,6 +3428,8 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
__skb_pull(skb, skb_headlen(skb));
skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
skb->vlan_tci = 0;
+ skb->dev = napi->dev;
+ skb->skb_iif = 0;
napi->skb = skb;
}
@@ -5059,6 +5066,7 @@ static void rollback_registered(struct net_device *dev)
list_add(&dev->unreg_list, &single);
rollback_registered_many(&single);
+ list_del(&single);
}
unsigned long netdev_fix_features(unsigned long features, const char *name)
@@ -5656,30 +5664,35 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
dev_net_set(dev, &init_net);
+ dev->gso_max_size = GSO_MAX_SIZE;
+
+ INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list);
+ dev->ethtool_ntuple_list.count = 0;
+ INIT_LIST_HEAD(&dev->napi_list);
+ INIT_LIST_HEAD(&dev->unreg_list);
+ INIT_LIST_HEAD(&dev->link_watch_list);
+ dev->priv_flags = IFF_XMIT_DST_RELEASE;
+ setup(dev);
+
dev->num_tx_queues = txqs;
dev->real_num_tx_queues = txqs;
if (netif_alloc_netdev_queues(dev))
- goto free_pcpu;
+ goto free_all;
#ifdef CONFIG_RPS
dev->num_rx_queues = rxqs;
dev->real_num_rx_queues = rxqs;
if (netif_alloc_rx_queues(dev))
- goto free_pcpu;
+ goto free_all;
#endif
- dev->gso_max_size = GSO_MAX_SIZE;
-
- INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list);
- dev->ethtool_ntuple_list.count = 0;
- INIT_LIST_HEAD(&dev->napi_list);
- INIT_LIST_HEAD(&dev->unreg_list);
- INIT_LIST_HEAD(&dev->link_watch_list);
- dev->priv_flags = IFF_XMIT_DST_RELEASE;
- setup(dev);
strcpy(dev->name, name);
return dev;
+free_all:
+ free_netdev(dev);
+ return NULL;
+
free_pcpu:
free_percpu(dev->pcpu_refcnt);
kfree(dev->_tx);
@@ -6189,7 +6202,7 @@ static void __net_exit default_device_exit(struct net *net)
static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
/* At exit all network devices most be removed from a network
- * namespace. Do this in the reverse order of registeration.
+ * namespace. Do this in the reverse order of registration.
* Do this across as many network namespaces as possible to
* improve batching efficiency.
*/
@@ -6207,6 +6220,7 @@ static void __net_exit default_device_exit_batch(struct list_head *net_list)
}
}
unregister_netdevice_many(&dev_kill_list);
+ list_del(&dev_kill_list);
rtnl_unlock();
}
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 17741782a345..ff2302910b5e 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -817,7 +817,7 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
if (regs.len > reglen)
regs.len = reglen;
- regbuf = vmalloc(reglen);
+ regbuf = vzalloc(reglen);
if (!regbuf)
return -ENOMEM;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 750db57f3bb3..2d65c6bb24c1 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1121,8 +1121,7 @@ static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
return -EOPNOTSUPP;
if (af_ops->validate_link_af) {
- err = af_ops->validate_link_af(dev,
- tb[IFLA_AF_SPEC]);
+ err = af_ops->validate_link_af(dev, af);
if (err < 0)
return err;
}
@@ -1672,6 +1671,9 @@ replay:
snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
dest_net = rtnl_link_get_net(net, tb);
+ if (IS_ERR(dest_net))
+ return PTR_ERR(dest_net);
+
dev = rtnl_create_link(net, dest_net, ifname, ops, tb);
if (IS_ERR(dev))
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index d31bb36ae0dc..d883dcc78b6b 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -210,6 +210,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
shinfo = skb_shinfo(skb);
memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
atomic_set(&shinfo->dataref, 1);
+ kmemcheck_annotate_variable(shinfo->destructor_arg);
if (fclone) {
struct sk_buff *child = skb + 1;
@@ -2744,8 +2745,12 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
merge:
if (offset > headlen) {
- skbinfo->frags[0].page_offset += offset - headlen;
- skbinfo->frags[0].size -= offset - headlen;
+ unsigned int eat = offset - headlen;
+
+ skbinfo->frags[0].page_offset += eat;
+ skbinfo->frags[0].size -= eat;
+ skb->data_len -= eat;
+ skb->len -= eat;
offset = headlen;
}
diff --git a/net/core/sock.c b/net/core/sock.c
index a658aeb6d554..7dfed792434d 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -157,7 +157,7 @@ static const char *const af_family_key_strings[AF_MAX+1] = {
"sk_lock-27" , "sk_lock-28" , "sk_lock-AF_CAN" ,
"sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" ,
"sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" ,
- "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" ,
+ "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG" ,
"sk_lock-AF_MAX"
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
@@ -173,7 +173,7 @@ static const char *const af_family_slock_key_strings[AF_MAX+1] = {
"slock-27" , "slock-28" , "slock-AF_CAN" ,
"slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" ,
"slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" ,
- "slock-AF_IEEE802154", "slock-AF_CAIF" ,
+ "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG" ,
"slock-AF_MAX"
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
@@ -189,7 +189,7 @@ static const char *const af_family_clock_key_strings[AF_MAX+1] = {
"clock-27" , "clock-28" , "clock-AF_CAN" ,
"clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" ,
"clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" ,
- "clock-AF_IEEE802154", "clock-AF_CAIF" ,
+ "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG" ,
"clock-AF_MAX"
};
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index d900ab99814a..d5074a567289 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -583,7 +583,7 @@ static int dcbnl_getapp(struct net_device *netdev, struct nlattr **tb,
u8 up, idtype;
int ret = -EINVAL;
- if (!tb[DCB_ATTR_APP] || !netdev->dcbnl_ops->getapp)
+ if (!tb[DCB_ATTR_APP])
goto out;
ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
@@ -604,7 +604,16 @@ static int dcbnl_getapp(struct net_device *netdev, struct nlattr **tb,
goto out;
id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
- up = netdev->dcbnl_ops->getapp(netdev, idtype, id);
+
+ if (netdev->dcbnl_ops->getapp) {
+ up = netdev->dcbnl_ops->getapp(netdev, idtype, id);
+ } else {
+ struct dcb_app app = {
+ .selector = idtype,
+ .protocol = id,
+ };
+ up = dcb_getapp(netdev, &app);
+ }
/* send this back */
dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
@@ -617,6 +626,9 @@ static int dcbnl_getapp(struct net_device *netdev, struct nlattr **tb,
dcb->cmd = DCB_CMD_GAPP;
app_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_APP);
+ if (!app_nest)
+ goto out_cancel;
+
ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_IDTYPE, idtype);
if (ret)
goto out_cancel;
@@ -1604,6 +1616,10 @@ EXPORT_SYMBOL(dcb_getapp);
u8 dcb_setapp(struct net_device *dev, struct dcb_app *new)
{
struct dcb_app_type *itr;
+ struct dcb_app_type event;
+
+ memcpy(&event.name, dev->name, sizeof(event.name));
+ memcpy(&event.app, new, sizeof(event.app));
spin_lock(&dcb_lock);
/* Search for existing match and replace */
@@ -1635,7 +1651,7 @@ u8 dcb_setapp(struct net_device *dev, struct dcb_app *new)
}
out:
spin_unlock(&dcb_lock);
- call_dcbevent_notifiers(DCB_APP_EVENT, new);
+ call_dcbevent_notifiers(DCB_APP_EVENT, &event);
return 0;
}
EXPORT_SYMBOL(dcb_setapp);
diff --git a/net/dccp/Kconfig b/net/dccp/Kconfig
index ad6dffd9070e..b75968a04017 100644
--- a/net/dccp/Kconfig
+++ b/net/dccp/Kconfig
@@ -49,7 +49,9 @@ config NET_DCCPPROBE
what was just said, you don't need it: say N.
Documentation on how to use DCCP connection probing can be found
- at http://linux-net.osdl.org/index.php/DccpProbe
+ at:
+
+ http://www.linuxfoundation.org/collaborate/workgroups/networking/dccpprobe
To compile this code as a module, choose M here: the
module will be called dccp_probe.
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index 0ba15633c418..0dcaa903e00e 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -1130,7 +1130,7 @@ static struct dn_dev *dn_dev_create(struct net_device *dev, int *err)
/*
* This processes a device up event. We only start up
* the loopback device & ethernet devices with correct
- * MAC addreses automatically. Others must be started
+ * MAC addresses automatically. Others must be started
* specifically.
*
* FIXME: How should we configure the loopback address ? If we could dispense
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 0c877a74e1f4..3fb14b7c13cf 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -428,7 +428,7 @@ static void __exit dsa_cleanup_module(void)
}
module_exit(dsa_cleanup_module);
-MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>")
+MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
MODULE_DESCRIPTION("Driver for Distributed Switch Architecture switch chips");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:dsa");
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index 15dcc1a586b4..0c2826337919 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -265,13 +265,13 @@ static void ec_tx_done(struct sk_buff *skb, int result)
static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
- struct sock *sk = sock->sk;
struct sockaddr_ec *saddr=(struct sockaddr_ec *)msg->msg_name;
struct net_device *dev;
struct ec_addr addr;
int err;
unsigned char port, cb;
#if defined(CONFIG_ECONET_AUNUDP) || defined(CONFIG_ECONET_NATIVE)
+ struct sock *sk = sock->sk;
struct sk_buff *skb;
struct ec_cb *eb;
#endif
@@ -488,10 +488,10 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
error_free_buf:
vfree(userbuf);
+error:
#else
err = -EPROTOTYPE;
#endif
- error:
mutex_unlock(&econet_mutex);
return err;
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 9e95d7fb6d5a..a5a1050595d1 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -432,7 +432,9 @@ config INET_DIAG
---help---
Support for INET (TCP, DCCP, etc) socket monitoring interface used by
native Linux tools such as ss. ss is included in iproute2, currently
- downloadable at <http://linux-net.osdl.org/index.php/Iproute2>.
+ downloadable at:
+
+ http://www.linuxfoundation.org/collaborate/workgroups/networking/iproute2
If unsure, say Y.
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index f2b61107df6c..45b89d7bda5a 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -880,6 +880,19 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
}
EXPORT_SYMBOL(inet_ioctl);
+#ifdef CONFIG_COMPAT
+int inet_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+{
+ struct sock *sk = sock->sk;
+ int err = -ENOIOCTLCMD;
+
+ if (sk->sk_prot->compat_ioctl)
+ err = sk->sk_prot->compat_ioctl(sk, cmd, arg);
+
+ return err;
+}
+#endif
+
const struct proto_ops inet_stream_ops = {
.family = PF_INET,
.owner = THIS_MODULE,
@@ -903,6 +916,7 @@ const struct proto_ops inet_stream_ops = {
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_sock_common_setsockopt,
.compat_getsockopt = compat_sock_common_getsockopt,
+ .compat_ioctl = inet_compat_ioctl,
#endif
};
EXPORT_SYMBOL(inet_stream_ops);
@@ -929,6 +943,7 @@ const struct proto_ops inet_dgram_ops = {
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_sock_common_setsockopt,
.compat_getsockopt = compat_sock_common_getsockopt,
+ .compat_ioctl = inet_compat_ioctl,
#endif
};
EXPORT_SYMBOL(inet_dgram_ops);
@@ -959,6 +974,7 @@ static const struct proto_ops inet_sockraw_ops = {
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_sock_common_setsockopt,
.compat_getsockopt = compat_sock_common_getsockopt,
+ .compat_ioctl = inet_compat_ioctl,
#endif
};
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 04c8b69fd426..7927589813b5 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1017,14 +1017,13 @@ static int arp_req_set_proxy(struct net *net, struct net_device *dev, int on)
IPV4_DEVCONF_ALL(net, PROXY_ARP) = on;
return 0;
}
- if (__in_dev_get_rcu(dev)) {
- IN_DEV_CONF_SET(__in_dev_get_rcu(dev), PROXY_ARP, on);
+ if (__in_dev_get_rtnl(dev)) {
+ IN_DEV_CONF_SET(__in_dev_get_rtnl(dev), PROXY_ARP, on);
return 0;
}
return -ENXIO;
}
-/* must be called with rcu_read_lock() */
static int arp_req_set_public(struct net *net, struct arpreq *r,
struct net_device *dev)
{
@@ -1233,10 +1232,10 @@ int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg)
if (!(r.arp_flags & ATF_NETMASK))
((struct sockaddr_in *)&r.arp_netmask)->sin_addr.s_addr =
htonl(0xFFFFFFFFUL);
- rcu_read_lock();
+ rtnl_lock();
if (r.arp_dev[0]) {
err = -ENODEV;
- dev = dev_get_by_name_rcu(net, r.arp_dev);
+ dev = __dev_get_by_name(net, r.arp_dev);
if (dev == NULL)
goto out;
@@ -1263,7 +1262,7 @@ int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg)
break;
}
out:
- rcu_read_unlock();
+ rtnl_unlock();
if (cmd == SIOCGARP && !err && copy_to_user(arg, &r, sizeof(r)))
err = -EFAULT;
return err;
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 748cb5b337bd..df4616fce929 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1030,6 +1030,21 @@ static inline bool inetdev_valid_mtu(unsigned mtu)
return mtu >= 68;
}
+static void inetdev_send_gratuitous_arp(struct net_device *dev,
+ struct in_device *in_dev)
+
+{
+ struct in_ifaddr *ifa = in_dev->ifa_list;
+
+ if (!ifa)
+ return;
+
+ arp_send(ARPOP_REQUEST, ETH_P_ARP,
+ ifa->ifa_address, dev,
+ ifa->ifa_address, NULL,
+ dev->dev_addr, NULL);
+}
+
/* Called only under RTNL semaphore */
static int inetdev_event(struct notifier_block *this, unsigned long event,
@@ -1082,18 +1097,13 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
}
ip_mc_up(in_dev);
/* fall through */
- case NETDEV_NOTIFY_PEERS:
case NETDEV_CHANGEADDR:
+ if (!IN_DEV_ARP_NOTIFY(in_dev))
+ break;
+ /* fall through */
+ case NETDEV_NOTIFY_PEERS:
/* Send gratuitous ARP to notify of link change */
- if (IN_DEV_ARP_NOTIFY(in_dev)) {
- struct in_ifaddr *ifa = in_dev->ifa_list;
-
- if (ifa)
- arp_send(ARPOP_REQUEST, ETH_P_ARP,
- ifa->ifa_address, dev,
- ifa->ifa_address, NULL,
- dev->dev_addr, NULL);
- }
+ inetdev_send_gratuitous_arp(dev, in_dev);
break;
case NETDEV_DOWN:
ip_mc_down(in_dev);
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index d9bc85751c74..a96e65674ac3 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -475,7 +475,7 @@ static int cleanup_once(unsigned long ttl)
struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
{
struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
- struct inet_peer_base *base = family_to_base(AF_INET);
+ struct inet_peer_base *base = family_to_base(daddr->family);
struct inet_peer *p;
/* Look up for the address quickly, lockless.
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index eb68a0e34e49..6613edfac28c 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -775,6 +775,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
.fl4_dst = dst,
.fl4_src = tiph->saddr,
.fl4_tos = RT_TOS(tos),
+ .proto = IPPROTO_GRE,
.fl_gre_key = tunnel->parms.o_key
};
if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 3f3a9afd73e0..8b65a12654e7 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -60,6 +60,7 @@
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
+#include <linux/compat.h>
#include <net/ipip.h>
#include <net/checksum.h>
#include <net/netlink.h>
@@ -1434,6 +1435,81 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
}
}
+#ifdef CONFIG_COMPAT
+struct compat_sioc_sg_req {
+ struct in_addr src;
+ struct in_addr grp;
+ compat_ulong_t pktcnt;
+ compat_ulong_t bytecnt;
+ compat_ulong_t wrong_if;
+};
+
+struct compat_sioc_vif_req {
+ vifi_t vifi; /* Which iface */
+ compat_ulong_t icount;
+ compat_ulong_t ocount;
+ compat_ulong_t ibytes;
+ compat_ulong_t obytes;
+};
+
+int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
+{
+ struct compat_sioc_sg_req sr;
+ struct compat_sioc_vif_req vr;
+ struct vif_device *vif;
+ struct mfc_cache *c;
+ struct net *net = sock_net(sk);
+ struct mr_table *mrt;
+
+ mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
+ if (mrt == NULL)
+ return -ENOENT;
+
+ switch (cmd) {
+ case SIOCGETVIFCNT:
+ if (copy_from_user(&vr, arg, sizeof(vr)))
+ return -EFAULT;
+ if (vr.vifi >= mrt->maxvif)
+ return -EINVAL;
+ read_lock(&mrt_lock);
+ vif = &mrt->vif_table[vr.vifi];
+ if (VIF_EXISTS(mrt, vr.vifi)) {
+ vr.icount = vif->pkt_in;
+ vr.ocount = vif->pkt_out;
+ vr.ibytes = vif->bytes_in;
+ vr.obytes = vif->bytes_out;
+ read_unlock(&mrt_lock);
+
+ if (copy_to_user(arg, &vr, sizeof(vr)))
+ return -EFAULT;
+ return 0;
+ }
+ read_unlock(&mrt_lock);
+ return -EADDRNOTAVAIL;
+ case SIOCGETSGCNT:
+ if (copy_from_user(&sr, arg, sizeof(sr)))
+ return -EFAULT;
+
+ rcu_read_lock();
+ c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
+ if (c) {
+ sr.pktcnt = c->mfc_un.res.pkt;
+ sr.bytecnt = c->mfc_un.res.bytes;
+ sr.wrong_if = c->mfc_un.res.wrong_if;
+ rcu_read_unlock();
+
+ if (copy_to_user(arg, &sr, sizeof(sr)))
+ return -EFAULT;
+ return 0;
+ }
+ rcu_read_unlock();
+ return -EADDRNOTAVAIL;
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+#endif
+
static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index a3d5ab786e81..6390ba299b3d 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -76,6 +76,7 @@
#include <linux/seq_file.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
+#include <linux/compat.h>
static struct raw_hashinfo raw_v4_hashinfo = {
.lock = __RW_LOCK_UNLOCKED(raw_v4_hashinfo.lock),
@@ -838,6 +839,23 @@ static int raw_ioctl(struct sock *sk, int cmd, unsigned long arg)
}
}
+#ifdef CONFIG_COMPAT
+static int compat_raw_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case SIOCOUTQ:
+ case SIOCINQ:
+ return -ENOIOCTLCMD;
+ default:
+#ifdef CONFIG_IP_MROUTE
+ return ipmr_compat_ioctl(sk, cmd, compat_ptr(arg));
+#else
+ return -ENOIOCTLCMD;
+#endif
+ }
+}
+#endif
+
struct proto raw_prot = {
.name = "RAW",
.owner = THIS_MODULE,
@@ -860,6 +878,7 @@ struct proto raw_prot = {
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_raw_setsockopt,
.compat_getsockopt = compat_raw_getsockopt,
+ .compat_ioctl = compat_raw_ioctl,
#endif
};
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 351dc4e85242..6ed6603c2f6d 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2707,6 +2707,11 @@ static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 coo
return NULL;
}
+static unsigned int ipv4_blackhole_default_mtu(const struct dst_entry *dst)
+{
+ return 0;
+}
+
static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
{
}
@@ -2716,6 +2721,8 @@ static struct dst_ops ipv4_dst_blackhole_ops = {
.protocol = cpu_to_be16(ETH_P_IP),
.destroy = ipv4_dst_destroy,
.check = ipv4_blackhole_dst_check,
+ .default_mtu = ipv4_blackhole_default_mtu,
+ .default_advmss = ipv4_default_advmss,
.update_pmtu = ipv4_rt_blackhole_update_pmtu,
};
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 2549b29b062d..eb7f82ebf4a3 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4399,7 +4399,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
if (!skb_copy_datagram_iovec(skb, 0, tp->ucopy.iov, chunk)) {
tp->ucopy.len -= chunk;
tp->copied_seq += chunk;
- eaten = (chunk == skb->len && !th->fin);
+ eaten = (chunk == skb->len);
tcp_rcv_space_adjust(sk);
}
local_bh_disable();
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 856f68466d49..02f583b3744a 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1994,7 +1994,6 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
}
req = req->dl_next;
}
- st->offset = 0;
if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
break;
get_req:
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index dc7c096ddfef..406f320336e6 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1350,7 +1350,7 @@ static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp,
return 0;
}
-/* Intialize TSO state of a skb.
+/* Initialize TSO state of a skb.
* This must be invoked the first time we consider transmitting
* SKB onto the wire.
*/
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 24a1cf110d80..fd6782e3a038 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2661,14 +2661,12 @@ static int addrconf_ifdown(struct net_device *dev, int how)
struct net *net = dev_net(dev);
struct inet6_dev *idev;
struct inet6_ifaddr *ifa;
- LIST_HEAD(keep_list);
- int state;
+ int state, i;
ASSERT_RTNL();
- /* Flush routes if device is being removed or it is not loopback */
- if (how || !(dev->flags & IFF_LOOPBACK))
- rt6_ifdown(net, dev);
+ rt6_ifdown(net, dev);
+ neigh_ifdown(&nd_tbl, dev);
idev = __in6_dev_get(dev);
if (idev == NULL)
@@ -2689,6 +2687,23 @@ static int addrconf_ifdown(struct net_device *dev, int how)
}
+ /* Step 2: clear hash table */
+ for (i = 0; i < IN6_ADDR_HSIZE; i++) {
+ struct hlist_head *h = &inet6_addr_lst[i];
+ struct hlist_node *n;
+
+ spin_lock_bh(&addrconf_hash_lock);
+ restart:
+ hlist_for_each_entry_rcu(ifa, n, h, addr_lst) {
+ if (ifa->idev == idev) {
+ hlist_del_init_rcu(&ifa->addr_lst);
+ addrconf_del_timer(ifa);
+ goto restart;
+ }
+ }
+ spin_unlock_bh(&addrconf_hash_lock);
+ }
+
write_lock_bh(&idev->lock);
/* Step 2: clear flags for stateless addrconf */
@@ -2722,52 +2737,23 @@ static int addrconf_ifdown(struct net_device *dev, int how)
struct inet6_ifaddr, if_list);
addrconf_del_timer(ifa);
- /* If just doing link down, and address is permanent
- and not link-local, then retain it. */
- if (!how &&
- (ifa->flags&IFA_F_PERMANENT) &&
- !(ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)) {
- list_move_tail(&ifa->if_list, &keep_list);
-
- /* If not doing DAD on this address, just keep it. */
- if ((dev->flags&(IFF_NOARP|IFF_LOOPBACK)) ||
- idev->cnf.accept_dad <= 0 ||
- (ifa->flags & IFA_F_NODAD))
- continue;
+ list_del(&ifa->if_list);
- /* If it was tentative already, no need to notify */
- if (ifa->flags & IFA_F_TENTATIVE)
- continue;
+ write_unlock_bh(&idev->lock);
- /* Flag it for later restoration when link comes up */
- ifa->flags |= IFA_F_TENTATIVE;
- ifa->state = INET6_IFADDR_STATE_DAD;
- } else {
- list_del(&ifa->if_list);
-
- /* clear hash table */
- spin_lock_bh(&addrconf_hash_lock);
- hlist_del_init_rcu(&ifa->addr_lst);
- spin_unlock_bh(&addrconf_hash_lock);
-
- write_unlock_bh(&idev->lock);
- spin_lock_bh(&ifa->state_lock);
- state = ifa->state;
- ifa->state = INET6_IFADDR_STATE_DEAD;
- spin_unlock_bh(&ifa->state_lock);
-
- if (state != INET6_IFADDR_STATE_DEAD) {
- __ipv6_ifa_notify(RTM_DELADDR, ifa);
- atomic_notifier_call_chain(&inet6addr_chain,
- NETDEV_DOWN, ifa);
- }
+ spin_lock_bh(&ifa->state_lock);
+ state = ifa->state;
+ ifa->state = INET6_IFADDR_STATE_DEAD;
+ spin_unlock_bh(&ifa->state_lock);
- in6_ifa_put(ifa);
- write_lock_bh(&idev->lock);
+ if (state != INET6_IFADDR_STATE_DEAD) {
+ __ipv6_ifa_notify(RTM_DELADDR, ifa);
+ atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifa);
}
- }
+ in6_ifa_put(ifa);
- list_splice(&keep_list, &idev->addr_list);
+ write_lock_bh(&idev->lock);
+ }
write_unlock_bh(&idev->lock);
@@ -4156,8 +4142,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
addrconf_leave_solict(ifp->idev, &ifp->addr);
dst_hold(&ifp->rt->dst);
- if (ifp->state == INET6_IFADDR_STATE_DEAD &&
- ip6_del_rt(ifp->rt))
+ if (ip6_del_rt(ifp->rt))
dst_free(&ifp->rt->dst);
break;
}
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 059a3de647db..978e80e2c4a8 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -300,7 +300,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
goto out;
}
- /* Reproduce AF_INET checks to make the bindings consitant */
+ /* Reproduce AF_INET checks to make the bindings consistent */
v4addr = addr->sin6_addr.s6_addr32[3];
chk_addr_ret = inet_addr_type(net, v4addr);
if (!sysctl_ip_nonlocal_bind &&
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 9fab274019c0..0e1d53bcf1e0 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -34,6 +34,7 @@
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/slab.h>
+#include <linux/compat.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
@@ -1804,6 +1805,80 @@ int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
}
}
+#ifdef CONFIG_COMPAT
+struct compat_sioc_sg_req6 {
+ struct sockaddr_in6 src;
+ struct sockaddr_in6 grp;
+ compat_ulong_t pktcnt;
+ compat_ulong_t bytecnt;
+ compat_ulong_t wrong_if;
+};
+
+struct compat_sioc_mif_req6 {
+ mifi_t mifi;
+ compat_ulong_t icount;
+ compat_ulong_t ocount;
+ compat_ulong_t ibytes;
+ compat_ulong_t obytes;
+};
+
+int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
+{
+ struct compat_sioc_sg_req6 sr;
+ struct compat_sioc_mif_req6 vr;
+ struct mif_device *vif;
+ struct mfc6_cache *c;
+ struct net *net = sock_net(sk);
+ struct mr6_table *mrt;
+
+ mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
+ if (mrt == NULL)
+ return -ENOENT;
+
+ switch (cmd) {
+ case SIOCGETMIFCNT_IN6:
+ if (copy_from_user(&vr, arg, sizeof(vr)))
+ return -EFAULT;
+ if (vr.mifi >= mrt->maxvif)
+ return -EINVAL;
+ read_lock(&mrt_lock);
+ vif = &mrt->vif6_table[vr.mifi];
+ if (MIF_EXISTS(mrt, vr.mifi)) {
+ vr.icount = vif->pkt_in;
+ vr.ocount = vif->pkt_out;
+ vr.ibytes = vif->bytes_in;
+ vr.obytes = vif->bytes_out;
+ read_unlock(&mrt_lock);
+
+ if (copy_to_user(arg, &vr, sizeof(vr)))
+ return -EFAULT;
+ return 0;
+ }
+ read_unlock(&mrt_lock);
+ return -EADDRNOTAVAIL;
+ case SIOCGETSGCNT_IN6:
+ if (copy_from_user(&sr, arg, sizeof(sr)))
+ return -EFAULT;
+
+ read_lock(&mrt_lock);
+ c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
+ if (c) {
+ sr.pktcnt = c->mfc_un.res.pkt;
+ sr.bytecnt = c->mfc_un.res.bytes;
+ sr.wrong_if = c->mfc_un.res.wrong_if;
+ read_unlock(&mrt_lock);
+
+ if (copy_to_user(arg, &sr, sizeof(sr)))
+ return -EFAULT;
+ return 0;
+ }
+ read_unlock(&mrt_lock);
+ return -EADDRNOTAVAIL;
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+#endif
static inline int ip6mr_forward2_finish(struct sk_buff *skb)
{
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 86c39526ba5e..c5b0915d106b 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -31,6 +31,7 @@
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
#include <linux/skbuff.h>
+#include <linux/compat.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
@@ -1157,6 +1158,23 @@ static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg)
}
}
+#ifdef CONFIG_COMPAT
+static int compat_rawv6_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case SIOCOUTQ:
+ case SIOCINQ:
+ return -ENOIOCTLCMD;
+ default:
+#ifdef CONFIG_IPV6_MROUTE
+ return ip6mr_compat_ioctl(sk, cmd, compat_ptr(arg));
+#else
+ return -ENOIOCTLCMD;
+#endif
+ }
+}
+#endif
+
static void rawv6_close(struct sock *sk, long timeout)
{
if (inet_sk(sk)->inet_num == IPPROTO_RAW)
@@ -1215,6 +1233,7 @@ struct proto rawv6_prot = {
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_rawv6_setsockopt,
.compat_getsockopt = compat_rawv6_getsockopt,
+ .compat_ioctl = compat_rawv6_ioctl,
#endif
};
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 373bd0416f69..a998db6e7895 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -72,8 +72,6 @@
#define RT6_TRACE(x...) do { ; } while (0)
#endif
-#define CLONE_OFFLINK_ROUTE 0
-
static struct rt6_info * ip6_rt_copy(struct rt6_info *ort);
static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int ip6_default_advmss(const struct dst_entry *dst);
@@ -115,6 +113,11 @@ static struct dst_ops ip6_dst_ops_template = {
.local_out = __ip6_local_out,
};
+static unsigned int ip6_blackhole_default_mtu(const struct dst_entry *dst)
+{
+ return 0;
+}
+
static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
{
}
@@ -124,6 +127,8 @@ static struct dst_ops ip6_dst_blackhole_ops = {
.protocol = cpu_to_be16(ETH_P_IPV6),
.destroy = ip6_dst_destroy,
.check = ip6_dst_check,
+ .default_mtu = ip6_blackhole_default_mtu,
+ .default_advmss = ip6_default_advmss,
.update_pmtu = ip6_rt_blackhole_update_pmtu,
};
@@ -196,7 +201,6 @@ static void ip6_dst_destroy(struct dst_entry *dst)
in6_dev_put(idev);
}
if (peer) {
- BUG_ON(!(rt->rt6i_flags & RTF_CACHE));
rt->rt6i_peer = NULL;
inet_putpeer(peer);
}
@@ -206,9 +210,6 @@ void rt6_bind_peer(struct rt6_info *rt, int create)
{
struct inet_peer *peer;
- if (WARN_ON(!(rt->rt6i_flags & RTF_CACHE)))
- return;
-
peer = inet_getpeer_v6(&rt->rt6i_dst.addr, create);
if (peer && cmpxchg(&rt->rt6i_peer, NULL, peer) != NULL)
inet_putpeer(peer);
@@ -738,13 +739,8 @@ restart:
if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP))
nrt = rt6_alloc_cow(rt, &fl->fl6_dst, &fl->fl6_src);
- else {
-#if CLONE_OFFLINK_ROUTE
+ else
nrt = rt6_alloc_clone(rt, &fl->fl6_dst);
-#else
- goto out2;
-#endif
- }
dst_release(&rt->dst);
rt = nrt ? : net->ipv6.ip6_null_entry;
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index fa1d8f4e0051..7cb65ef79f9c 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -15,6 +15,8 @@
#include <net/addrconf.h>
#include <net/inet_frag.h>
+static struct ctl_table empty[1];
+
static ctl_table ipv6_table_template[] = {
{
.procname = "route",
@@ -35,6 +37,12 @@ static ctl_table ipv6_table_template[] = {
.mode = 0644,
.proc_handler = proc_dointvec
},
+ {
+ .procname = "neigh",
+ .maxlen = 0,
+ .mode = 0555,
+ .child = empty,
+ },
{ }
};
@@ -152,7 +160,6 @@ static struct ctl_table_header *ip6_base;
int ipv6_static_sysctl_register(void)
{
- static struct ctl_table empty[1];
ip6_base = register_sysctl_paths(net_ipv6_ctl_path, empty);
if (ip6_base == NULL)
return -ENOMEM;
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 7e74023ea6e4..da87428681cc 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -98,6 +98,10 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
if (!xdst->u.rt6.rt6i_idev)
return -ENODEV;
+ xdst->u.rt6.rt6i_peer = rt->rt6i_peer;
+ if (rt->rt6i_peer)
+ atomic_inc(&rt->rt6i_peer->refcnt);
+
/* Sheit... I remember I did this right. Apparently,
* it was magically lost, so this code needs audit */
xdst->u.rt6.rt6i_flags = rt->rt6i_flags & (RTF_ANYCAST |
@@ -216,6 +220,8 @@ static void xfrm6_dst_destroy(struct dst_entry *dst)
if (likely(xdst->u.rt6.rt6i_idev))
in6_dev_put(xdst->u.rt6.rt6i_idev);
+ if (likely(xdst->u.rt6.rt6i_peer))
+ inet_putpeer(xdst->u.rt6.rt6i_peer);
xfrm_dst_destroy(xdst);
}
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 9109262abd24..c766056d0488 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -20,7 +20,7 @@ config MAC80211_HAS_RC
def_bool n
config MAC80211_RC_PID
- bool "PID controller based rate control algorithm" if EMBEDDED
+ bool "PID controller based rate control algorithm" if EXPERT
select MAC80211_HAS_RC
---help---
This option enables a TX rate control algorithm for
@@ -28,14 +28,14 @@ config MAC80211_RC_PID
rate.
config MAC80211_RC_MINSTREL
- bool "Minstrel" if EMBEDDED
+ bool "Minstrel" if EXPERT
select MAC80211_HAS_RC
default y
---help---
This option enables the 'minstrel' TX rate control algorithm
config MAC80211_RC_MINSTREL_HT
- bool "Minstrel 802.11n support" if EMBEDDED
+ bool "Minstrel 802.11n support" if EXPERT
depends on MAC80211_RC_MINSTREL
default y
---help---
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 4bc8a9250cfd..9cd73b11506e 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1822,6 +1822,7 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
*cookie ^= 2;
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_TX_OFFCHAN;
local->hw_roc_skb = skb;
+ local->hw_roc_skb_for_status = skb;
mutex_unlock(&local->mtx);
return 0;
@@ -1875,6 +1876,7 @@ static int ieee80211_mgmt_tx_cancel_wait(struct wiphy *wiphy,
if (ret == 0) {
kfree_skb(local->hw_roc_skb);
local->hw_roc_skb = NULL;
+ local->hw_roc_skb_for_status = NULL;
}
mutex_unlock(&local->mtx);
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index c47d7c0e48a4..533fd32f49ff 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -953,7 +953,7 @@ struct ieee80211_local {
struct ieee80211_channel *hw_roc_channel;
struct net_device *hw_roc_dev;
- struct sk_buff *hw_roc_skb;
+ struct sk_buff *hw_roc_skb, *hw_roc_skb_for_status;
struct work_struct hw_roc_start, hw_roc_done;
enum nl80211_channel_type hw_roc_channel_type;
unsigned int hw_roc_duration;
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 38a797217a91..071ac95c4aa0 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -323,6 +323,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) {
struct ieee80211_work *wk;
+ u64 cookie = (unsigned long)skb;
rcu_read_lock();
list_for_each_entry_rcu(wk, &local->work_list, list) {
@@ -334,8 +335,12 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
break;
}
rcu_read_unlock();
+ if (local->hw_roc_skb_for_status == skb) {
+ cookie = local->hw_roc_cookie ^ 2;
+ local->hw_roc_skb_for_status = NULL;
+ }
cfg80211_mgmt_tx_status(
- skb->dev, (unsigned long) skb, skb->data, skb->len,
+ skb->dev, cookie, skb->data, skb->len,
!!(info->flags & IEEE80211_TX_STAT_ACK), GFP_ATOMIC);
}
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 5950e3abead9..b0beaa58246b 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1547,7 +1547,7 @@ static int ieee80211_skb_resize(struct ieee80211_local *local,
skb_orphan(skb);
}
- if (skb_header_cloned(skb))
+ if (skb_cloned(skb))
I802_DEBUG_INC(local->tx_expand_skb_head_cloned);
else if (head_need || tail_need)
I802_DEBUG_INC(local->tx_expand_skb_head);
@@ -2230,6 +2230,9 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
sdata = vif_to_sdata(vif);
+ if (!ieee80211_sdata_running(sdata))
+ goto out;
+
if (tim_offset)
*tim_offset = 0;
if (tim_length)
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index cf68700abffa..d036597aabbe 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1210,7 +1210,9 @@ int ieee80211_reconfig(struct ieee80211_local *local)
switch (sdata->vif.type) {
case NL80211_IFTYPE_STATION:
changed |= BSS_CHANGED_ASSOC;
+ mutex_lock(&sdata->u.mgd.mtx);
ieee80211_bss_info_change_notify(sdata, changed);
+ mutex_unlock(&sdata->u.mgd.mtx);
break;
case NL80211_IFTYPE_ADHOC:
changed |= BSS_CHANGED_IBSS;
diff --git a/net/rfkill/Kconfig b/net/rfkill/Kconfig
index eaf765876458..7fce6dfd2180 100644
--- a/net/rfkill/Kconfig
+++ b/net/rfkill/Kconfig
@@ -18,7 +18,7 @@ config RFKILL_LEDS
default y
config RFKILL_INPUT
- bool "RF switch input support" if EMBEDDED
+ bool "RF switch input support" if EXPERT
depends on RFKILL
depends on INPUT = y || RFKILL = INPUT
- default y if !EMBEDDED
+ default y if !EXPERT
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 0b9bb2085ce4..74c064c0dfdd 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -808,7 +808,7 @@ static int __init af_rxrpc_init(void)
goto error_call_jar;
}
- rxrpc_workqueue = create_workqueue("krxrpcd");
+ rxrpc_workqueue = alloc_workqueue("krxrpcd", 0, 1);
if (!rxrpc_workqueue) {
printk(KERN_NOTICE "RxRPC: Failed to allocate work queue\n");
goto error_work_queue;
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index a36270a994d7..f04d4a484d53 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -24,7 +24,7 @@ menuconfig NET_SCHED
To administer these schedulers, you'll need the user-level utilities
from the package iproute2+tc at <ftp://ftp.tux.org/pub/net/ip-routing/>.
That package also contains some documentation; for more, check out
- <http://linux-net.osdl.org/index.php/Iproute2>.
+ <http://www.linuxfoundation.org/collaborate/workgroups/networking/iproute2>.
This Quality of Service (QoS) support will enable you to use
Differentiated Services (diffserv) and Resource Reservation Protocol
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index c80d1c210c5d..5f63ec58942c 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -390,7 +390,6 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
ret = qdisc_enqueue(skb, cl->q);
if (ret == NET_XMIT_SUCCESS) {
sch->q.qlen++;
- qdisc_bstats_update(sch, skb);
cbq_mark_toplevel(q, cl);
if (!cl->next_alive)
cbq_activate_class(cl);
@@ -649,7 +648,6 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
ret = qdisc_enqueue(skb, cl->q);
if (ret == NET_XMIT_SUCCESS) {
sch->q.qlen++;
- qdisc_bstats_update(sch, skb);
if (!cl->next_alive)
cbq_activate_class(cl);
return 0;
@@ -971,6 +969,7 @@ cbq_dequeue(struct Qdisc *sch)
skb = cbq_dequeue_1(sch);
if (skb) {
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
sch->flags &= ~TCQ_F_THROTTLED;
return skb;
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index de55e642eafc..6b7fe4a84f13 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -376,7 +376,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
}
bstats_update(&cl->bstats, skb);
- qdisc_bstats_update(sch, skb);
sch->q.qlen++;
return err;
@@ -403,6 +402,7 @@ static struct sk_buff *drr_dequeue(struct Qdisc *sch)
skb = qdisc_dequeue_peeked(cl->qdisc);
if (cl->qdisc->q.qlen == 0)
list_del(&cl->alist);
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
return skb;
}
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 60f4bdd4408e..0f7bf3fdfea5 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -260,7 +260,6 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return err;
}
- qdisc_bstats_update(sch, skb);
sch->q.qlen++;
return NET_XMIT_SUCCESS;
@@ -283,6 +282,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
if (skb == NULL)
return NULL;
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
index = skb->tc_index & (p->indices - 1);
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index aa4d6337e43c..d468b479aa93 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -46,17 +46,14 @@ static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
- struct sk_buff *skb_head;
struct fifo_sched_data *q = qdisc_priv(sch);
if (likely(skb_queue_len(&sch->q) < q->limit))
return qdisc_enqueue_tail(skb, sch);
/* queue full, remove one skb to fulfill the limit */
- skb_head = qdisc_dequeue_head(sch);
+ __qdisc_queue_drop_head(sch, &sch->q);
sch->qstats.drops++;
- kfree_skb(skb_head);
-
qdisc_enqueue_tail(skb, sch);
return NET_XMIT_CN;
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 2e45791d4f6c..14a799de1c35 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1600,7 +1600,6 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
set_active(cl, qdisc_pkt_len(skb));
bstats_update(&cl->bstats, skb);
- qdisc_bstats_update(sch, skb);
sch->q.qlen++;
return NET_XMIT_SUCCESS;
@@ -1666,6 +1665,7 @@ hfsc_dequeue(struct Qdisc *sch)
}
sch->flags &= ~TCQ_F_THROTTLED;
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
return skb;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 984c1b0c6836..fc12fe6f5597 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -574,7 +574,6 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
}
sch->q.qlen++;
- qdisc_bstats_update(sch, skb);
return NET_XMIT_SUCCESS;
}
@@ -842,7 +841,7 @@ next:
static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
- struct sk_buff *skb = NULL;
+ struct sk_buff *skb;
struct htb_sched *q = qdisc_priv(sch);
int level;
psched_time_t next_event;
@@ -851,6 +850,8 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
/* try to dequeue direct packets as high prio (!) to minimize cpu work */
skb = __skb_dequeue(&q->direct_queue);
if (skb != NULL) {
+ok:
+ qdisc_bstats_update(sch, skb);
sch->flags &= ~TCQ_F_THROTTLED;
sch->q.qlen--;
return skb;
@@ -884,11 +885,8 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
int prio = ffz(m);
m |= 1 << prio;
skb = htb_dequeue_tree(q, prio, level);
- if (likely(skb != NULL)) {
- sch->q.qlen--;
- sch->flags &= ~TCQ_F_THROTTLED;
- goto fin;
- }
+ if (likely(skb != NULL))
+ goto ok;
}
}
sch->qstats.overlimits++;
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 21f13da24763..436a2e75b322 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -83,7 +83,6 @@ multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
ret = qdisc_enqueue(skb, qdisc);
if (ret == NET_XMIT_SUCCESS) {
- qdisc_bstats_update(sch, skb);
sch->q.qlen++;
return NET_XMIT_SUCCESS;
}
@@ -112,6 +111,7 @@ static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
qdisc = q->queues[q->curband];
skb = qdisc->dequeue(qdisc);
if (skb) {
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
return skb;
}
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 1c4bce863479..6a3006b38dc5 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -240,7 +240,6 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (likely(ret == NET_XMIT_SUCCESS)) {
sch->q.qlen++;
- qdisc_bstats_update(sch, skb);
} else if (net_xmit_drop_count(ret)) {
sch->qstats.drops++;
}
@@ -289,6 +288,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
skb->tstamp.tv64 = 0;
#endif
pr_debug("netem_dequeue: return skb=%p\n", skb);
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
return skb;
}
@@ -476,7 +476,6 @@ static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
__skb_queue_after(list, skb, nskb);
sch->qstats.backlog += qdisc_pkt_len(nskb);
- qdisc_bstats_update(sch, nskb);
return NET_XMIT_SUCCESS;
}
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 966158d49dd1..fbd710d619bf 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -84,7 +84,6 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
ret = qdisc_enqueue(skb, qdisc);
if (ret == NET_XMIT_SUCCESS) {
- qdisc_bstats_update(sch, skb);
sch->q.qlen++;
return NET_XMIT_SUCCESS;
}
@@ -116,6 +115,7 @@ static struct sk_buff *prio_dequeue(struct Qdisc* sch)
struct Qdisc *qdisc = q->queues[prio];
struct sk_buff *skb = qdisc->dequeue(qdisc);
if (skb) {
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
return skb;
}
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index a6009c5a2c97..9f98dbd32d4c 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -94,7 +94,6 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
ret = qdisc_enqueue(skb, child);
if (likely(ret == NET_XMIT_SUCCESS)) {
- qdisc_bstats_update(sch, skb);
sch->q.qlen++;
} else if (net_xmit_drop_count(ret)) {
q->stats.pdrop++;
@@ -114,11 +113,13 @@ static struct sk_buff * red_dequeue(struct Qdisc* sch)
struct Qdisc *child = q->qdisc;
skb = child->dequeue(child);
- if (skb)
+ if (skb) {
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
- else if (!red_is_idling(&q->parms))
- red_start_of_idle_period(&q->parms);
-
+ } else {
+ if (!red_is_idling(&q->parms))
+ red_start_of_idle_period(&q->parms);
+ }
return skb;
}
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 239ec53a634d..edea8cefec6c 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -402,10 +402,8 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
q->tail = slot;
slot->allot = q->scaled_quantum;
}
- if (++sch->q.qlen <= q->limit) {
- qdisc_bstats_update(sch, skb);
+ if (++sch->q.qlen <= q->limit)
return NET_XMIT_SUCCESS;
- }
sfq_drop(sch);
return NET_XMIT_CN;
@@ -445,6 +443,7 @@ next_slot:
}
skb = slot_dequeue_head(slot);
sfq_dec(q, a);
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
sch->qstats.backlog -= qdisc_pkt_len(skb);
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 77565e721811..e93165820c3f 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -134,7 +134,6 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
}
sch->q.qlen++;
- qdisc_bstats_update(sch, skb);
return NET_XMIT_SUCCESS;
}
@@ -187,6 +186,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
q->ptokens = ptoks;
sch->q.qlen--;
sch->flags &= ~TCQ_F_THROTTLED;
+ qdisc_bstats_update(sch, skb);
return skb;
}
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 84ce48eadff4..d84e7329660f 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -87,7 +87,6 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
if (q->q.qlen < dev->tx_queue_len) {
__skb_queue_tail(&q->q, skb);
- qdisc_bstats_update(sch, skb);
return NET_XMIT_SUCCESS;
}
@@ -111,6 +110,8 @@ teql_dequeue(struct Qdisc* sch)
dat->m->slaves = sch;
netif_wake_queue(m);
}
+ } else {
+ qdisc_bstats_update(sch, skb);
}
sch->q.qlen = dat->q.qlen + dat_queue->qdisc->q.qlen;
return skb;
diff --git a/net/socket.c b/net/socket.c
index ccc576a6a508..ac2219f90d5d 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -306,20 +306,6 @@ static const struct super_operations sockfs_ops = {
.statfs = simple_statfs,
};
-static struct dentry *sockfs_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
-{
- return mount_pseudo(fs_type, "socket:", &sockfs_ops, SOCKFS_MAGIC);
-}
-
-static struct vfsmount *sock_mnt __read_mostly;
-
-static struct file_system_type sock_fs_type = {
- .name = "sockfs",
- .mount = sockfs_mount,
- .kill_sb = kill_anon_super,
-};
-
/*
* sockfs_dname() is called from d_path().
*/
@@ -333,6 +319,21 @@ static const struct dentry_operations sockfs_dentry_operations = {
.d_dname = sockfs_dname,
};
+static struct dentry *sockfs_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
+{
+ return mount_pseudo(fs_type, "socket:", &sockfs_ops,
+ &sockfs_dentry_operations, SOCKFS_MAGIC);
+}
+
+static struct vfsmount *sock_mnt __read_mostly;
+
+static struct file_system_type sock_fs_type = {
+ .name = "sockfs",
+ .mount = sockfs_mount,
+ .kill_sb = kill_anon_super,
+};
+
/*
* Obtains the first available file descriptor and sets it up for use.
*
@@ -368,7 +369,6 @@ static int sock_alloc_file(struct socket *sock, struct file **f, int flags)
}
path.mnt = mntget(sock_mnt);
- d_set_d_op(path.dentry, &sockfs_dentry_operations);
d_instantiate(path.dentry, SOCK_INODE(sock));
SOCK_INODE(sock)->i_fop = &socket_file_ops;
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index afe67849269f..67e31276682a 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -563,8 +563,17 @@ rpcauth_checkverf(struct rpc_task *task, __be32 *p)
return cred->cr_ops->crvalidate(task, p);
}
+static void rpcauth_wrap_req_encode(kxdreproc_t encode, struct rpc_rqst *rqstp,
+ __be32 *data, void *obj)
+{
+ struct xdr_stream xdr;
+
+ xdr_init_encode(&xdr, &rqstp->rq_snd_buf, data);
+ encode(rqstp, &xdr, obj);
+}
+
int
-rpcauth_wrap_req(struct rpc_task *task, kxdrproc_t encode, void *rqstp,
+rpcauth_wrap_req(struct rpc_task *task, kxdreproc_t encode, void *rqstp,
__be32 *data, void *obj)
{
struct rpc_cred *cred = task->tk_rqstp->rq_cred;
@@ -574,11 +583,22 @@ rpcauth_wrap_req(struct rpc_task *task, kxdrproc_t encode, void *rqstp,
if (cred->cr_ops->crwrap_req)
return cred->cr_ops->crwrap_req(task, encode, rqstp, data, obj);
/* By default, we encode the arguments normally. */
- return encode(rqstp, data, obj);
+ rpcauth_wrap_req_encode(encode, rqstp, data, obj);
+ return 0;
+}
+
+static int
+rpcauth_unwrap_req_decode(kxdrdproc_t decode, struct rpc_rqst *rqstp,
+ __be32 *data, void *obj)
+{
+ struct xdr_stream xdr;
+
+ xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, data);
+ return decode(rqstp, &xdr, obj);
}
int
-rpcauth_unwrap_resp(struct rpc_task *task, kxdrproc_t decode, void *rqstp,
+rpcauth_unwrap_resp(struct rpc_task *task, kxdrdproc_t decode, void *rqstp,
__be32 *data, void *obj)
{
struct rpc_cred *cred = task->tk_rqstp->rq_cred;
@@ -589,7 +609,7 @@ rpcauth_unwrap_resp(struct rpc_task *task, kxdrproc_t decode, void *rqstp,
return cred->cr_ops->crunwrap_resp(task, decode, rqstp,
data, obj);
/* By default, we decode the arguments normally. */
- return decode(rqstp, data, obj);
+ return rpcauth_unwrap_req_decode(decode, rqstp, data, obj);
}
int
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 3835ce35e224..45dbf1521b9a 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1231,9 +1231,19 @@ out_bad:
return NULL;
}
+static void gss_wrap_req_encode(kxdreproc_t encode, struct rpc_rqst *rqstp,
+ __be32 *p, void *obj)
+{
+ struct xdr_stream xdr;
+
+ xdr_init_encode(&xdr, &rqstp->rq_snd_buf, p);
+ encode(rqstp, &xdr, obj);
+}
+
static inline int
gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
- kxdrproc_t encode, struct rpc_rqst *rqstp, __be32 *p, void *obj)
+ kxdreproc_t encode, struct rpc_rqst *rqstp,
+ __be32 *p, void *obj)
{
struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
struct xdr_buf integ_buf;
@@ -1249,9 +1259,7 @@ gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
*p++ = htonl(rqstp->rq_seqno);
- status = encode(rqstp, p, obj);
- if (status)
- return status;
+ gss_wrap_req_encode(encode, rqstp, p, obj);
if (xdr_buf_subsegment(snd_buf, &integ_buf,
offset, snd_buf->len - offset))
@@ -1325,7 +1333,8 @@ out:
static inline int
gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
- kxdrproc_t encode, struct rpc_rqst *rqstp, __be32 *p, void *obj)
+ kxdreproc_t encode, struct rpc_rqst *rqstp,
+ __be32 *p, void *obj)
{
struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
u32 offset;
@@ -1342,9 +1351,7 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
*p++ = htonl(rqstp->rq_seqno);
- status = encode(rqstp, p, obj);
- if (status)
- return status;
+ gss_wrap_req_encode(encode, rqstp, p, obj);
status = alloc_enc_pages(rqstp);
if (status)
@@ -1394,7 +1401,7 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
static int
gss_wrap_req(struct rpc_task *task,
- kxdrproc_t encode, void *rqstp, __be32 *p, void *obj)
+ kxdreproc_t encode, void *rqstp, __be32 *p, void *obj)
{
struct rpc_cred *cred = task->tk_rqstp->rq_cred;
struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
@@ -1407,12 +1414,14 @@ gss_wrap_req(struct rpc_task *task,
/* The spec seems a little ambiguous here, but I think that not
* wrapping context destruction requests makes the most sense.
*/
- status = encode(rqstp, p, obj);
+ gss_wrap_req_encode(encode, rqstp, p, obj);
+ status = 0;
goto out;
}
switch (gss_cred->gc_service) {
case RPC_GSS_SVC_NONE:
- status = encode(rqstp, p, obj);
+ gss_wrap_req_encode(encode, rqstp, p, obj);
+ status = 0;
break;
case RPC_GSS_SVC_INTEGRITY:
status = gss_wrap_req_integ(cred, ctx, encode,
@@ -1494,10 +1503,19 @@ gss_unwrap_resp_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
return 0;
}
+static int
+gss_unwrap_req_decode(kxdrdproc_t decode, struct rpc_rqst *rqstp,
+ __be32 *p, void *obj)
+{
+ struct xdr_stream xdr;
+
+ xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+ return decode(rqstp, &xdr, obj);
+}
static int
gss_unwrap_resp(struct rpc_task *task,
- kxdrproc_t decode, void *rqstp, __be32 *p, void *obj)
+ kxdrdproc_t decode, void *rqstp, __be32 *p, void *obj)
{
struct rpc_cred *cred = task->tk_rqstp->rq_cred;
struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
@@ -1528,7 +1546,7 @@ gss_unwrap_resp(struct rpc_task *task,
cred->cr_auth->au_rslack = cred->cr_auth->au_verfsize + (p - savedp)
+ (savedlen - head->iov_len);
out_decode:
- status = decode(rqstp, p, obj);
+ status = gss_unwrap_req_decode(decode, rqstp, p, obj);
out:
gss_put_ctx(ctx);
dprintk("RPC: %5u gss_unwrap_resp returning %d\n", task->tk_pid,
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index 75ee993ea057..9576f35ab701 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -137,7 +137,7 @@ arcfour_hmac_md5_usage_to_salt(unsigned int usage, u8 salt[4])
ms_usage = 13;
break;
default:
- return EINVAL;;
+ return -EINVAL;
}
salt[0] = (ms_usage >> 0) & 0xff;
salt[1] = (ms_usage >> 8) & 0xff;
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index dec2a6fc7c12..bcdae78fdfc6 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -67,7 +67,6 @@ static int netobj_equal(struct xdr_netobj *a, struct xdr_netobj *b)
#define RSI_HASHBITS 6
#define RSI_HASHMAX (1<<RSI_HASHBITS)
-#define RSI_HASHMASK (RSI_HASHMAX-1)
struct rsi {
struct cache_head h;
@@ -319,7 +318,6 @@ static struct rsi *rsi_update(struct rsi *new, struct rsi *old)
#define RSC_HASHBITS 10
#define RSC_HASHMAX (1<<RSC_HASHBITS)
-#define RSC_HASHMASK (RSC_HASHMAX-1)
#define GSS_SEQ_WIN 128
diff --git a/net/sunrpc/bc_svc.c b/net/sunrpc/bc_svc.c
index 7dcfe0cc3500..1dd1a6890007 100644
--- a/net/sunrpc/bc_svc.c
+++ b/net/sunrpc/bc_svc.c
@@ -59,8 +59,8 @@ int bc_send(struct rpc_rqst *req)
ret = task->tk_status;
rpc_put_task(task);
}
- return ret;
dprintk("RPC: bc_send ret= %d\n", ret);
+ return ret;
}
#endif /* CONFIG_NFS_V4_1 */
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index e433e7580e27..72ad836e4fe0 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -37,7 +37,7 @@
#define RPCDBG_FACILITY RPCDBG_CACHE
-static void cache_defer_req(struct cache_req *req, struct cache_head *item);
+static bool cache_defer_req(struct cache_req *req, struct cache_head *item);
static void cache_revisit_request(struct cache_head *item);
static void cache_init(struct cache_head *h)
@@ -128,6 +128,7 @@ static void cache_fresh_locked(struct cache_head *head, time_t expiry)
{
head->expiry_time = expiry;
head->last_refresh = seconds_since_boot();
+ smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */
set_bit(CACHE_VALID, &head->flags);
}
@@ -208,11 +209,36 @@ static inline int cache_is_valid(struct cache_detail *detail, struct cache_head
/* entry is valid */
if (test_bit(CACHE_NEGATIVE, &h->flags))
return -ENOENT;
- else
+ else {
+ /*
+ * In combination with write barrier in
+ * sunrpc_cache_update, ensures that anyone
+ * using the cache entry after this sees the
+ * updated contents:
+ */
+ smp_rmb();
return 0;
+ }
}
}
+static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h)
+{
+ int rv;
+
+ write_lock(&detail->hash_lock);
+ rv = cache_is_valid(detail, h);
+ if (rv != -EAGAIN) {
+ write_unlock(&detail->hash_lock);
+ return rv;
+ }
+ set_bit(CACHE_NEGATIVE, &h->flags);
+ cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY);
+ write_unlock(&detail->hash_lock);
+ cache_fresh_unlocked(h, detail);
+ return -ENOENT;
+}
+
/*
* This is the generic cache management routine for all
* the authentication caches.
@@ -251,14 +277,8 @@ int cache_check(struct cache_detail *detail,
case -EINVAL:
clear_bit(CACHE_PENDING, &h->flags);
cache_revisit_request(h);
- if (rv == -EAGAIN) {
- set_bit(CACHE_NEGATIVE, &h->flags);
- cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY);
- cache_fresh_unlocked(h, detail);
- rv = -ENOENT;
- }
+ rv = try_to_negate_entry(detail, h);
break;
-
case -EAGAIN:
clear_bit(CACHE_PENDING, &h->flags);
cache_revisit_request(h);
@@ -268,9 +288,11 @@ int cache_check(struct cache_detail *detail,
}
if (rv == -EAGAIN) {
- cache_defer_req(rqstp, h);
- if (!test_bit(CACHE_PENDING, &h->flags)) {
- /* Request is not deferred */
+ if (!cache_defer_req(rqstp, h)) {
+ /*
+ * Request was not deferred; handle it as best
+ * we can ourselves:
+ */
rv = cache_is_valid(detail, h);
if (rv == -EAGAIN)
rv = -ETIMEDOUT;
@@ -618,18 +640,19 @@ static void cache_limit_defers(void)
discard->revisit(discard, 1);
}
-static void cache_defer_req(struct cache_req *req, struct cache_head *item)
+/* Return true if and only if a deferred request is queued. */
+static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
{
struct cache_deferred_req *dreq;
if (req->thread_wait) {
cache_wait_req(req, item);
if (!test_bit(CACHE_PENDING, &item->flags))
- return;
+ return false;
}
dreq = req->defer(req);
if (dreq == NULL)
- return;
+ return false;
setup_deferral(dreq, item, 1);
if (!test_bit(CACHE_PENDING, &item->flags))
/* Bit could have been cleared before we managed to
@@ -638,6 +661,7 @@ static void cache_defer_req(struct cache_req *req, struct cache_head *item)
cache_revisit_request(item);
cache_limit_defers();
+ return true;
}
static void cache_revisit_request(struct cache_head *item)
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 92ce94f5146b..57d344cf2256 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1095,7 +1095,7 @@ static void
rpc_xdr_encode(struct rpc_task *task)
{
struct rpc_rqst *req = task->tk_rqstp;
- kxdrproc_t encode;
+ kxdreproc_t encode;
__be32 *p;
dprint_status(task);
@@ -1535,7 +1535,7 @@ call_decode(struct rpc_task *task)
{
struct rpc_clnt *clnt = task->tk_client;
struct rpc_rqst *req = task->tk_rqstp;
- kxdrproc_t decode = task->tk_msg.rpc_proc->p_decode;
+ kxdrdproc_t decode = task->tk_msg.rpc_proc->p_decode;
__be32 *p;
dprintk("RPC: %5u call_decode (status %d)\n",
@@ -1776,12 +1776,11 @@ out_overflow:
goto out_garbage;
}
-static int rpcproc_encode_null(void *rqstp, __be32 *data, void *obj)
+static void rpcproc_encode_null(void *rqstp, struct xdr_stream *xdr, void *obj)
{
- return 0;
}
-static int rpcproc_decode_null(void *rqstp, __be32 *data, void *obj)
+static int rpcproc_decode_null(void *rqstp, struct xdr_stream *xdr, void *obj)
{
return 0;
}
@@ -1830,23 +1829,15 @@ static void rpc_show_task(const struct rpc_clnt *clnt,
const struct rpc_task *task)
{
const char *rpc_waitq = "none";
- char *p, action[KSYM_SYMBOL_LEN];
if (RPC_IS_QUEUED(task))
rpc_waitq = rpc_qname(task->tk_waitqueue);
- /* map tk_action pointer to a function name; then trim off
- * the "+0x0 [sunrpc]" */
- sprint_symbol(action, (unsigned long)task->tk_action);
- p = strchr(action, '+');
- if (p)
- *p = '\0';
-
- printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%s q:%s\n",
+ printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%ps q:%s\n",
task->tk_pid, task->tk_flags, task->tk_status,
clnt, task->tk_rqstp, task->tk_timeout, task->tk_ops,
clnt->cl_protname, clnt->cl_vers, rpc_proc_name(task),
- action, rpc_waitq);
+ task->tk_action, rpc_waitq);
}
void rpc_show_tasks(void)
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 09f01f41e55a..72bc53683965 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -474,7 +474,7 @@ static int __rpc_create_common(struct inode *dir, struct dentry *dentry,
{
struct inode *inode;
- BUG_ON(!d_unhashed(dentry));
+ d_drop(dentry);
inode = rpc_get_inode(dir->i_sb, mode);
if (!inode)
goto out_err;
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index fa6d7ca2c851..c652e4cc9fe9 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -57,10 +57,6 @@ enum {
RPCBPROC_GETSTAT,
};
-#define RPCB_HIGHPROC_2 RPCBPROC_CALLIT
-#define RPCB_HIGHPROC_3 RPCBPROC_TADDR2UADDR
-#define RPCB_HIGHPROC_4 RPCBPROC_GETSTAT
-
/*
* r_owner
*
@@ -693,46 +689,37 @@ static void rpcb_getport_done(struct rpc_task *child, void *data)
* XDR functions for rpcbind
*/
-static int rpcb_enc_mapping(struct rpc_rqst *req, __be32 *p,
- const struct rpcbind_args *rpcb)
+static void rpcb_enc_mapping(struct rpc_rqst *req, struct xdr_stream *xdr,
+ const struct rpcbind_args *rpcb)
{
struct rpc_task *task = req->rq_task;
- struct xdr_stream xdr;
+ __be32 *p;
dprintk("RPC: %5u encoding PMAP_%s call (%u, %u, %d, %u)\n",
task->tk_pid, task->tk_msg.rpc_proc->p_name,
rpcb->r_prog, rpcb->r_vers, rpcb->r_prot, rpcb->r_port);
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-
- p = xdr_reserve_space(&xdr, sizeof(__be32) * RPCB_mappingargs_sz);
- if (unlikely(p == NULL))
- return -EIO;
-
- *p++ = htonl(rpcb->r_prog);
- *p++ = htonl(rpcb->r_vers);
- *p++ = htonl(rpcb->r_prot);
- *p = htonl(rpcb->r_port);
-
- return 0;
+ p = xdr_reserve_space(xdr, RPCB_mappingargs_sz << 2);
+ *p++ = cpu_to_be32(rpcb->r_prog);
+ *p++ = cpu_to_be32(rpcb->r_vers);
+ *p++ = cpu_to_be32(rpcb->r_prot);
+ *p = cpu_to_be32(rpcb->r_port);
}
-static int rpcb_dec_getport(struct rpc_rqst *req, __be32 *p,
+static int rpcb_dec_getport(struct rpc_rqst *req, struct xdr_stream *xdr,
struct rpcbind_args *rpcb)
{
struct rpc_task *task = req->rq_task;
- struct xdr_stream xdr;
unsigned long port;
-
- xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
+ __be32 *p;
rpcb->r_port = 0;
- p = xdr_inline_decode(&xdr, sizeof(__be32));
+ p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
return -EIO;
- port = ntohl(*p);
+ port = be32_to_cpup(p);
dprintk("RPC: %5u PMAP_%s result: %lu\n", task->tk_pid,
task->tk_msg.rpc_proc->p_name, port);
if (unlikely(port > USHRT_MAX))
@@ -742,20 +729,18 @@ static int rpcb_dec_getport(struct rpc_rqst *req, __be32 *p,
return 0;
}
-static int rpcb_dec_set(struct rpc_rqst *req, __be32 *p,
+static int rpcb_dec_set(struct rpc_rqst *req, struct xdr_stream *xdr,
unsigned int *boolp)
{
struct rpc_task *task = req->rq_task;
- struct xdr_stream xdr;
-
- xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
+ __be32 *p;
- p = xdr_inline_decode(&xdr, sizeof(__be32));
+ p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
return -EIO;
*boolp = 0;
- if (*p)
+ if (*p != xdr_zero)
*boolp = 1;
dprintk("RPC: %5u RPCB_%s call %s\n",
@@ -764,73 +749,53 @@ static int rpcb_dec_set(struct rpc_rqst *req, __be32 *p,
return 0;
}
-static int encode_rpcb_string(struct xdr_stream *xdr, const char *string,
- const u32 maxstrlen)
+static void encode_rpcb_string(struct xdr_stream *xdr, const char *string,
+ const u32 maxstrlen)
{
- u32 len;
__be32 *p;
+ u32 len;
- if (unlikely(string == NULL))
- return -EIO;
len = strlen(string);
- if (unlikely(len > maxstrlen))
- return -EIO;
-
- p = xdr_reserve_space(xdr, sizeof(__be32) + len);
- if (unlikely(p == NULL))
- return -EIO;
+ BUG_ON(len > maxstrlen);
+ p = xdr_reserve_space(xdr, 4 + len);
xdr_encode_opaque(p, string, len);
-
- return 0;
}
-static int rpcb_enc_getaddr(struct rpc_rqst *req, __be32 *p,
- const struct rpcbind_args *rpcb)
+static void rpcb_enc_getaddr(struct rpc_rqst *req, struct xdr_stream *xdr,
+ const struct rpcbind_args *rpcb)
{
struct rpc_task *task = req->rq_task;
- struct xdr_stream xdr;
+ __be32 *p;
dprintk("RPC: %5u encoding RPCB_%s call (%u, %u, '%s', '%s')\n",
task->tk_pid, task->tk_msg.rpc_proc->p_name,
rpcb->r_prog, rpcb->r_vers,
rpcb->r_netid, rpcb->r_addr);
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-
- p = xdr_reserve_space(&xdr,
- sizeof(__be32) * (RPCB_program_sz + RPCB_version_sz));
- if (unlikely(p == NULL))
- return -EIO;
- *p++ = htonl(rpcb->r_prog);
- *p = htonl(rpcb->r_vers);
-
- if (encode_rpcb_string(&xdr, rpcb->r_netid, RPCBIND_MAXNETIDLEN))
- return -EIO;
- if (encode_rpcb_string(&xdr, rpcb->r_addr, RPCBIND_MAXUADDRLEN))
- return -EIO;
- if (encode_rpcb_string(&xdr, rpcb->r_owner, RPCB_MAXOWNERLEN))
- return -EIO;
+ p = xdr_reserve_space(xdr, (RPCB_program_sz + RPCB_version_sz) << 2);
+ *p++ = cpu_to_be32(rpcb->r_prog);
+ *p = cpu_to_be32(rpcb->r_vers);
- return 0;
+ encode_rpcb_string(xdr, rpcb->r_netid, RPCBIND_MAXNETIDLEN);
+ encode_rpcb_string(xdr, rpcb->r_addr, RPCBIND_MAXUADDRLEN);
+ encode_rpcb_string(xdr, rpcb->r_owner, RPCB_MAXOWNERLEN);
}
-static int rpcb_dec_getaddr(struct rpc_rqst *req, __be32 *p,
+static int rpcb_dec_getaddr(struct rpc_rqst *req, struct xdr_stream *xdr,
struct rpcbind_args *rpcb)
{
struct sockaddr_storage address;
struct sockaddr *sap = (struct sockaddr *)&address;
struct rpc_task *task = req->rq_task;
- struct xdr_stream xdr;
+ __be32 *p;
u32 len;
rpcb->r_port = 0;
- xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
-
- p = xdr_inline_decode(&xdr, sizeof(__be32));
+ p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
goto out_fail;
- len = ntohl(*p);
+ len = be32_to_cpup(p);
/*
* If the returned universal address is a null string,
@@ -845,7 +810,7 @@ static int rpcb_dec_getaddr(struct rpc_rqst *req, __be32 *p,
if (unlikely(len > RPCBIND_MAXUADDRLEN))
goto out_fail;
- p = xdr_inline_decode(&xdr, len);
+ p = xdr_inline_decode(xdr, len);
if (unlikely(p == NULL))
goto out_fail;
dprintk("RPC: %5u RPCB_%s reply: %s\n", task->tk_pid,
@@ -871,8 +836,8 @@ out_fail:
static struct rpc_procinfo rpcb_procedures2[] = {
[RPCBPROC_SET] = {
.p_proc = RPCBPROC_SET,
- .p_encode = (kxdrproc_t)rpcb_enc_mapping,
- .p_decode = (kxdrproc_t)rpcb_dec_set,
+ .p_encode = (kxdreproc_t)rpcb_enc_mapping,
+ .p_decode = (kxdrdproc_t)rpcb_dec_set,
.p_arglen = RPCB_mappingargs_sz,
.p_replen = RPCB_setres_sz,
.p_statidx = RPCBPROC_SET,
@@ -881,8 +846,8 @@ static struct rpc_procinfo rpcb_procedures2[] = {
},
[RPCBPROC_UNSET] = {
.p_proc = RPCBPROC_UNSET,
- .p_encode = (kxdrproc_t)rpcb_enc_mapping,
- .p_decode = (kxdrproc_t)rpcb_dec_set,
+ .p_encode = (kxdreproc_t)rpcb_enc_mapping,
+ .p_decode = (kxdrdproc_t)rpcb_dec_set,
.p_arglen = RPCB_mappingargs_sz,
.p_replen = RPCB_setres_sz,
.p_statidx = RPCBPROC_UNSET,
@@ -891,8 +856,8 @@ static struct rpc_procinfo rpcb_procedures2[] = {
},
[RPCBPROC_GETPORT] = {
.p_proc = RPCBPROC_GETPORT,
- .p_encode = (kxdrproc_t)rpcb_enc_mapping,
- .p_decode = (kxdrproc_t)rpcb_dec_getport,
+ .p_encode = (kxdreproc_t)rpcb_enc_mapping,
+ .p_decode = (kxdrdproc_t)rpcb_dec_getport,
.p_arglen = RPCB_mappingargs_sz,
.p_replen = RPCB_getportres_sz,
.p_statidx = RPCBPROC_GETPORT,
@@ -904,8 +869,8 @@ static struct rpc_procinfo rpcb_procedures2[] = {
static struct rpc_procinfo rpcb_procedures3[] = {
[RPCBPROC_SET] = {
.p_proc = RPCBPROC_SET,
- .p_encode = (kxdrproc_t)rpcb_enc_getaddr,
- .p_decode = (kxdrproc_t)rpcb_dec_set,
+ .p_encode = (kxdreproc_t)rpcb_enc_getaddr,
+ .p_decode = (kxdrdproc_t)rpcb_dec_set,
.p_arglen = RPCB_getaddrargs_sz,
.p_replen = RPCB_setres_sz,
.p_statidx = RPCBPROC_SET,
@@ -914,8 +879,8 @@ static struct rpc_procinfo rpcb_procedures3[] = {
},
[RPCBPROC_UNSET] = {
.p_proc = RPCBPROC_UNSET,
- .p_encode = (kxdrproc_t)rpcb_enc_getaddr,
- .p_decode = (kxdrproc_t)rpcb_dec_set,
+ .p_encode = (kxdreproc_t)rpcb_enc_getaddr,
+ .p_decode = (kxdrdproc_t)rpcb_dec_set,
.p_arglen = RPCB_getaddrargs_sz,
.p_replen = RPCB_setres_sz,
.p_statidx = RPCBPROC_UNSET,
@@ -924,8 +889,8 @@ static struct rpc_procinfo rpcb_procedures3[] = {
},
[RPCBPROC_GETADDR] = {
.p_proc = RPCBPROC_GETADDR,
- .p_encode = (kxdrproc_t)rpcb_enc_getaddr,
- .p_decode = (kxdrproc_t)rpcb_dec_getaddr,
+ .p_encode = (kxdreproc_t)rpcb_enc_getaddr,
+ .p_decode = (kxdrdproc_t)rpcb_dec_getaddr,
.p_arglen = RPCB_getaddrargs_sz,
.p_replen = RPCB_getaddrres_sz,
.p_statidx = RPCBPROC_GETADDR,
@@ -937,8 +902,8 @@ static struct rpc_procinfo rpcb_procedures3[] = {
static struct rpc_procinfo rpcb_procedures4[] = {
[RPCBPROC_SET] = {
.p_proc = RPCBPROC_SET,
- .p_encode = (kxdrproc_t)rpcb_enc_getaddr,
- .p_decode = (kxdrproc_t)rpcb_dec_set,
+ .p_encode = (kxdreproc_t)rpcb_enc_getaddr,
+ .p_decode = (kxdrdproc_t)rpcb_dec_set,
.p_arglen = RPCB_getaddrargs_sz,
.p_replen = RPCB_setres_sz,
.p_statidx = RPCBPROC_SET,
@@ -947,8 +912,8 @@ static struct rpc_procinfo rpcb_procedures4[] = {
},
[RPCBPROC_UNSET] = {
.p_proc = RPCBPROC_UNSET,
- .p_encode = (kxdrproc_t)rpcb_enc_getaddr,
- .p_decode = (kxdrproc_t)rpcb_dec_set,
+ .p_encode = (kxdreproc_t)rpcb_enc_getaddr,
+ .p_decode = (kxdrdproc_t)rpcb_dec_set,
.p_arglen = RPCB_getaddrargs_sz,
.p_replen = RPCB_setres_sz,
.p_statidx = RPCBPROC_UNSET,
@@ -957,8 +922,8 @@ static struct rpc_procinfo rpcb_procedures4[] = {
},
[RPCBPROC_GETADDR] = {
.p_proc = RPCBPROC_GETADDR,
- .p_encode = (kxdrproc_t)rpcb_enc_getaddr,
- .p_decode = (kxdrproc_t)rpcb_dec_getaddr,
+ .p_encode = (kxdreproc_t)rpcb_enc_getaddr,
+ .p_decode = (kxdrdproc_t)rpcb_dec_getaddr,
.p_arglen = RPCB_getaddrargs_sz,
.p_replen = RPCB_getaddrres_sz,
.p_statidx = RPCBPROC_GETADDR,
@@ -993,19 +958,19 @@ static struct rpcb_info rpcb_next_version6[] = {
static struct rpc_version rpcb_version2 = {
.number = RPCBVERS_2,
- .nrprocs = RPCB_HIGHPROC_2,
+ .nrprocs = ARRAY_SIZE(rpcb_procedures2),
.procs = rpcb_procedures2
};
static struct rpc_version rpcb_version3 = {
.number = RPCBVERS_3,
- .nrprocs = RPCB_HIGHPROC_3,
+ .nrprocs = ARRAY_SIZE(rpcb_procedures3),
.procs = rpcb_procedures3
};
static struct rpc_version rpcb_version4 = {
.number = RPCBVERS_4,
- .nrprocs = RPCB_HIGHPROC_4,
+ .nrprocs = ARRAY_SIZE(rpcb_procedures4),
.procs = rpcb_procedures4
};
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 6359c42c4941..08e05a8ce025 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -488,10 +488,6 @@ svc_destroy(struct svc_serv *serv)
if (svc_serv_is_pooled(serv))
svc_pool_map_put();
-#if defined(CONFIG_NFS_V4_1)
- svc_sock_destroy(serv->bc_xprt);
-#endif /* CONFIG_NFS_V4_1 */
-
svc_unregister(serv);
kfree(serv->sv_pools);
kfree(serv);
@@ -1005,6 +1001,7 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
rqstp->rq_splice_ok = 1;
/* Will be turned off only when NFSv4 Sessions are used */
rqstp->rq_usedeferral = 1;
+ rqstp->rq_dropme = false;
/* Setup reply header */
rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp);
@@ -1106,7 +1103,7 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
*statp = procp->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);
/* Encode reply */
- if (*statp == rpc_drop_reply) {
+ if (rqstp->rq_dropme) {
if (procp->pc_release)
procp->pc_release(rqstp, NULL, rqstp->rq_resp);
goto dropit;
@@ -1147,7 +1144,6 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
dropit:
svc_authorise(rqstp); /* doesn't hurt to call this twice */
dprintk("svc: svc_process dropit\n");
- svc_drop(rqstp);
return 0;
err_short_len:
@@ -1218,7 +1214,6 @@ svc_process(struct svc_rqst *rqstp)
struct kvec *resv = &rqstp->rq_res.head[0];
struct svc_serv *serv = rqstp->rq_server;
u32 dir;
- int error;
/*
* Setup response xdr_buf.
@@ -1246,11 +1241,13 @@ svc_process(struct svc_rqst *rqstp)
return 0;
}
- error = svc_process_common(rqstp, argv, resv);
- if (error <= 0)
- return error;
-
- return svc_send(rqstp);
+ /* Returns 1 for send, 0 for drop */
+ if (svc_process_common(rqstp, argv, resv))
+ return svc_send(rqstp);
+ else {
+ svc_drop(rqstp);
+ return 0;
+ }
}
#if defined(CONFIG_NFS_V4_1)
@@ -1264,10 +1261,9 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
{
struct kvec *argv = &rqstp->rq_arg.head[0];
struct kvec *resv = &rqstp->rq_res.head[0];
- int error;
/* Build the svc_rqst used by the common processing routine */
- rqstp->rq_xprt = serv->bc_xprt;
+ rqstp->rq_xprt = serv->sv_bc_xprt;
rqstp->rq_xid = req->rq_xid;
rqstp->rq_prot = req->rq_xprt->prot;
rqstp->rq_server = serv;
@@ -1292,12 +1288,15 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
svc_getu32(argv); /* XID */
svc_getnl(argv); /* CALLDIR */
- error = svc_process_common(rqstp, argv, resv);
- if (error <= 0)
- return error;
-
- memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf));
- return bc_send(req);
+ /* Returns 1 for send, 0 for drop */
+ if (svc_process_common(rqstp, argv, resv)) {
+ memcpy(&req->rq_snd_buf, &rqstp->rq_res,
+ sizeof(req->rq_snd_buf));
+ return bc_send(req);
+ } else {
+ /* Nothing to do to drop request */
+ return 0;
+ }
}
EXPORT_SYMBOL(bc_svc_process);
#endif /* CONFIG_NFS_V4_1 */
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 3f2c5559ca1a..ab86b7927f84 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -13,6 +13,7 @@
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svcsock.h>
+#include <linux/sunrpc/xprt.h>
#define RPCDBG_FACILITY RPCDBG_SVCXPRT
@@ -128,6 +129,9 @@ static void svc_xprt_free(struct kref *kref)
if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags))
svcauth_unix_info_release(xprt);
put_net(xprt->xpt_net);
+ /* See comment on corresponding get in xs_setup_bc_tcp(): */
+ if (xprt->xpt_bc_xprt)
+ xprt_put(xprt->xpt_bc_xprt);
xprt->xpt_ops->xpo_free(xprt);
module_put(owner);
}
@@ -303,6 +307,15 @@ static void svc_thread_dequeue(struct svc_pool *pool, struct svc_rqst *rqstp)
list_del(&rqstp->rq_list);
}
+static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt)
+{
+ if (xprt->xpt_flags & ((1<<XPT_CONN)|(1<<XPT_CLOSE)))
+ return true;
+ if (xprt->xpt_flags & ((1<<XPT_DATA)|(1<<XPT_DEFERRED)))
+ return xprt->xpt_ops->xpo_has_wspace(xprt);
+ return false;
+}
+
/*
* Queue up a transport with data pending. If there are idle nfsd
* processes, wake 'em up.
@@ -315,8 +328,7 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
struct svc_rqst *rqstp;
int cpu;
- if (!(xprt->xpt_flags &
- ((1<<XPT_CONN)|(1<<XPT_DATA)|(1<<XPT_CLOSE)|(1<<XPT_DEFERRED))))
+ if (!svc_xprt_has_something_to_do(xprt))
return;
cpu = get_cpu();
@@ -343,28 +355,7 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
dprintk("svc: transport %p busy, not enqueued\n", xprt);
goto out_unlock;
}
- BUG_ON(xprt->xpt_pool != NULL);
- xprt->xpt_pool = pool;
-
- /* Handle pending connection */
- if (test_bit(XPT_CONN, &xprt->xpt_flags))
- goto process;
-
- /* Handle close in-progress */
- if (test_bit(XPT_CLOSE, &xprt->xpt_flags))
- goto process;
-
- /* Check if we have space to reply to a request */
- if (!xprt->xpt_ops->xpo_has_wspace(xprt)) {
- /* Don't enqueue while not enough space for reply */
- dprintk("svc: no write space, transport %p not enqueued\n",
- xprt);
- xprt->xpt_pool = NULL;
- clear_bit(XPT_BUSY, &xprt->xpt_flags);
- goto out_unlock;
- }
- process:
if (!list_empty(&pool->sp_threads)) {
rqstp = list_entry(pool->sp_threads.next,
struct svc_rqst,
@@ -381,13 +372,11 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
rqstp->rq_reserved = serv->sv_max_mesg;
atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
pool->sp_stats.threads_woken++;
- BUG_ON(xprt->xpt_pool != pool);
wake_up(&rqstp->rq_wait);
} else {
dprintk("svc: transport %p put into queue\n", xprt);
list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
pool->sp_stats.sockets_queued++;
- BUG_ON(xprt->xpt_pool != pool);
}
out_unlock:
@@ -426,7 +415,6 @@ static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool)
void svc_xprt_received(struct svc_xprt *xprt)
{
BUG_ON(!test_bit(XPT_BUSY, &xprt->xpt_flags));
- xprt->xpt_pool = NULL;
/* As soon as we clear busy, the xprt could be closed and
* 'put', so we need a reference to call svc_xprt_enqueue with:
*/
@@ -722,7 +710,10 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
dprintk("svc_recv: found XPT_CLOSE\n");
svc_delete_xprt(xprt);
- } else if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
+ /* Leave XPT_BUSY set on the dead xprt: */
+ goto out;
+ }
+ if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
struct svc_xprt *newxpt;
newxpt = xprt->xpt_ops->xpo_accept(xprt);
if (newxpt) {
@@ -747,28 +738,23 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
spin_unlock_bh(&serv->sv_lock);
svc_xprt_received(newxpt);
}
- svc_xprt_received(xprt);
- } else {
+ } else if (xprt->xpt_ops->xpo_has_wspace(xprt)) {
dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
rqstp, pool->sp_id, xprt,
atomic_read(&xprt->xpt_ref.refcount));
rqstp->rq_deferred = svc_deferred_dequeue(xprt);
- if (rqstp->rq_deferred) {
- svc_xprt_received(xprt);
+ if (rqstp->rq_deferred)
len = svc_deferred_recv(rqstp);
- } else {
+ else
len = xprt->xpt_ops->xpo_recvfrom(rqstp);
- svc_xprt_received(xprt);
- }
dprintk("svc: got len=%d\n", len);
}
+ svc_xprt_received(xprt);
/* No data, incomplete (TCP) read, or accept() */
- if (len == 0 || len == -EAGAIN) {
- rqstp->rq_res.len = 0;
- svc_xprt_release(rqstp);
- return -EAGAIN;
- }
+ if (len == 0 || len == -EAGAIN)
+ goto out;
+
clear_bit(XPT_OLD, &xprt->xpt_flags);
rqstp->rq_secure = svc_port_is_privileged(svc_addr(rqstp));
@@ -777,6 +763,10 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
if (serv->sv_stats)
serv->sv_stats->netcnt++;
return len;
+out:
+ rqstp->rq_res.len = 0;
+ svc_xprt_release(rqstp);
+ return -EAGAIN;
}
EXPORT_SYMBOL_GPL(svc_recv);
@@ -935,7 +925,12 @@ void svc_close_xprt(struct svc_xprt *xprt)
if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
/* someone else will have to effect the close */
return;
-
+ /*
+ * We expect svc_close_xprt() to work even when no threads are
+ * running (e.g., while configuring the server before starting
+ * any threads), so if the transport isn't busy, we delete
+ * it ourself:
+ */
svc_delete_xprt(xprt);
}
EXPORT_SYMBOL_GPL(svc_close_xprt);
@@ -945,16 +940,16 @@ void svc_close_all(struct list_head *xprt_list)
struct svc_xprt *xprt;
struct svc_xprt *tmp;
+ /*
+ * The server is shutting down, and no more threads are running.
+ * svc_xprt_enqueue() might still be running, but at worst it
+ * will re-add the xprt to sp_sockets, which will soon get
+ * freed. So we don't bother with any more locking, and don't
+ * leave the close to the (nonexistent) server threads:
+ */
list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) {
set_bit(XPT_CLOSE, &xprt->xpt_flags);
- if (test_bit(XPT_BUSY, &xprt->xpt_flags)) {
- /* Waiting to be processed, but no threads left,
- * So just remove it from the waiting list
- */
- list_del_init(&xprt->xpt_ready);
- clear_bit(XPT_BUSY, &xprt->xpt_flags);
- }
- svc_close_xprt(xprt);
+ svc_delete_xprt(xprt);
}
}
@@ -1028,6 +1023,7 @@ static struct cache_deferred_req *svc_defer(struct cache_req *req)
}
svc_xprt_get(rqstp->rq_xprt);
dr->xprt = rqstp->rq_xprt;
+ rqstp->rq_dropme = true;
dr->handle.revisit = svc_revisit;
return &dr->handle;
@@ -1065,14 +1061,13 @@ static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt)
if (!test_bit(XPT_DEFERRED, &xprt->xpt_flags))
return NULL;
spin_lock(&xprt->xpt_lock);
- clear_bit(XPT_DEFERRED, &xprt->xpt_flags);
if (!list_empty(&xprt->xpt_deferred)) {
dr = list_entry(xprt->xpt_deferred.next,
struct svc_deferred_req,
handle.recent);
list_del_init(&dr->handle.recent);
- set_bit(XPT_DEFERRED, &xprt->xpt_flags);
- }
+ } else
+ clear_bit(XPT_DEFERRED, &xprt->xpt_flags);
spin_unlock(&xprt->xpt_lock);
return dr;
}
diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c
index 4e9393c24687..7963569fc04f 100644
--- a/net/sunrpc/svcauth.c
+++ b/net/sunrpc/svcauth.c
@@ -118,7 +118,6 @@ EXPORT_SYMBOL_GPL(svc_auth_unregister);
#define DN_HASHBITS 6
#define DN_HASHMAX (1<<DN_HASHBITS)
-#define DN_HASHMASK (DN_HASHMAX-1)
static struct hlist_head auth_domain_table[DN_HASHMAX];
static spinlock_t auth_domain_lock =
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 560677d187f1..30916b06c12b 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -30,7 +30,9 @@
struct unix_domain {
struct auth_domain h;
+#ifdef CONFIG_NFSD_DEPRECATED
int addr_changes;
+#endif /* CONFIG_NFSD_DEPRECATED */
/* other stuff later */
};
@@ -64,7 +66,9 @@ struct auth_domain *unix_domain_find(char *name)
return NULL;
}
new->h.flavour = &svcauth_unix;
+#ifdef CONFIG_NFSD_DEPRECATED
new->addr_changes = 0;
+#endif /* CONFIG_NFSD_DEPRECATED */
rv = auth_domain_lookup(name, &new->h);
}
}
@@ -85,14 +89,15 @@ static void svcauth_unix_domain_release(struct auth_domain *dom)
*/
#define IP_HASHBITS 8
#define IP_HASHMAX (1<<IP_HASHBITS)
-#define IP_HASHMASK (IP_HASHMAX-1)
struct ip_map {
struct cache_head h;
char m_class[8]; /* e.g. "nfsd" */
struct in6_addr m_addr;
struct unix_domain *m_client;
+#ifdef CONFIG_NFSD_DEPRECATED
int m_add_change;
+#endif /* CONFIG_NFSD_DEPRECATED */
};
static void ip_map_put(struct kref *kref)
@@ -146,7 +151,9 @@ static void update(struct cache_head *cnew, struct cache_head *citem)
kref_get(&item->m_client->h.ref);
new->m_client = item->m_client;
+#ifdef CONFIG_NFSD_DEPRECATED
new->m_add_change = item->m_add_change;
+#endif /* CONFIG_NFSD_DEPRECATED */
}
static struct cache_head *ip_map_alloc(void)
{
@@ -331,6 +338,7 @@ static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm,
ip.h.flags = 0;
if (!udom)
set_bit(CACHE_NEGATIVE, &ip.h.flags);
+#ifdef CONFIG_NFSD_DEPRECATED
else {
ip.m_add_change = udom->addr_changes;
/* if this is from the legacy set_client system call,
@@ -339,6 +347,7 @@ static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm,
if (expiry == NEVER)
ip.m_add_change++;
}
+#endif /* CONFIG_NFSD_DEPRECATED */
ip.h.expiry_time = expiry;
ch = sunrpc_cache_update(cd, &ip.h, &ipm->h,
hash_str(ipm->m_class, IP_HASHBITS) ^
@@ -358,6 +367,7 @@ static inline int ip_map_update(struct net *net, struct ip_map *ipm,
return __ip_map_update(sn->ip_map_cache, ipm, udom, expiry);
}
+#ifdef CONFIG_NFSD_DEPRECATED
int auth_unix_add_addr(struct net *net, struct in6_addr *addr, struct auth_domain *dom)
{
struct unix_domain *udom;
@@ -402,8 +412,7 @@ struct auth_domain *auth_unix_lookup(struct net *net, struct in6_addr *addr)
return NULL;
if ((ipm->m_client->addr_changes - ipm->m_add_change) >0) {
- if (test_and_set_bit(CACHE_NEGATIVE, &ipm->h.flags) == 0)
- auth_domain_put(&ipm->m_client->h);
+ sunrpc_invalidate(&ipm->h, sn->ip_map_cache);
rv = NULL;
} else {
rv = &ipm->m_client->h;
@@ -413,6 +422,7 @@ struct auth_domain *auth_unix_lookup(struct net *net, struct in6_addr *addr)
return rv;
}
EXPORT_SYMBOL_GPL(auth_unix_lookup);
+#endif /* CONFIG_NFSD_DEPRECATED */
void svcauth_unix_purge(void)
{
@@ -497,7 +507,6 @@ svcauth_unix_info_release(struct svc_xprt *xpt)
*/
#define GID_HASHBITS 8
#define GID_HASHMAX (1<<GID_HASHBITS)
-#define GID_HASHMASK (GID_HASHMAX - 1)
struct unix_gid {
struct cache_head h;
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 07919e16be3e..d802e941d365 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -66,6 +66,13 @@ static void svc_sock_free(struct svc_xprt *);
static struct svc_xprt *svc_create_socket(struct svc_serv *, int,
struct net *, struct sockaddr *,
int, int);
+#if defined(CONFIG_NFS_V4_1)
+static struct svc_xprt *svc_bc_create_socket(struct svc_serv *, int,
+ struct net *, struct sockaddr *,
+ int, int);
+static void svc_bc_sock_free(struct svc_xprt *xprt);
+#endif /* CONFIG_NFS_V4_1 */
+
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key svc_key[2];
static struct lock_class_key svc_slock_key[2];
@@ -324,19 +331,21 @@ int svc_sock_names(struct svc_serv *serv, char *buf, const size_t buflen,
len = onelen;
break;
}
- if (toclose && strcmp(toclose, buf + len) == 0)
+ if (toclose && strcmp(toclose, buf + len) == 0) {
closesk = svsk;
- else
+ svc_xprt_get(&closesk->sk_xprt);
+ } else
len += onelen;
}
spin_unlock_bh(&serv->sv_lock);
- if (closesk)
+ if (closesk) {
/* Should unregister with portmap, but you cannot
* unregister just one protocol...
*/
svc_close_xprt(&closesk->sk_xprt);
- else if (toclose)
+ svc_xprt_put(&closesk->sk_xprt);
+ } else if (toclose)
return -ENOENT;
return len;
}
@@ -985,15 +994,17 @@ static int svc_process_calldir(struct svc_sock *svsk, struct svc_rqst *rqstp,
vec[0] = rqstp->rq_arg.head[0];
} else {
/* REPLY */
- if (svsk->sk_bc_xprt)
- req = xprt_lookup_rqst(svsk->sk_bc_xprt, xid);
+ struct rpc_xprt *bc_xprt = svsk->sk_xprt.xpt_bc_xprt;
+
+ if (bc_xprt)
+ req = xprt_lookup_rqst(bc_xprt, xid);
if (!req) {
printk(KERN_NOTICE
"%s: Got unrecognized reply: "
- "calldir 0x%x sk_bc_xprt %p xid %08x\n",
+ "calldir 0x%x xpt_bc_xprt %p xid %08x\n",
__func__, ntohl(calldir),
- svsk->sk_bc_xprt, xid);
+ bc_xprt, xid);
vec[0] = rqstp->rq_arg.head[0];
goto out;
}
@@ -1184,6 +1195,57 @@ static struct svc_xprt *svc_tcp_create(struct svc_serv *serv,
return svc_create_socket(serv, IPPROTO_TCP, net, sa, salen, flags);
}
+#if defined(CONFIG_NFS_V4_1)
+static struct svc_xprt *svc_bc_create_socket(struct svc_serv *, int,
+ struct net *, struct sockaddr *,
+ int, int);
+static void svc_bc_sock_free(struct svc_xprt *xprt);
+
+static struct svc_xprt *svc_bc_tcp_create(struct svc_serv *serv,
+ struct net *net,
+ struct sockaddr *sa, int salen,
+ int flags)
+{
+ return svc_bc_create_socket(serv, IPPROTO_TCP, net, sa, salen, flags);
+}
+
+static void svc_bc_tcp_sock_detach(struct svc_xprt *xprt)
+{
+}
+
+static struct svc_xprt_ops svc_tcp_bc_ops = {
+ .xpo_create = svc_bc_tcp_create,
+ .xpo_detach = svc_bc_tcp_sock_detach,
+ .xpo_free = svc_bc_sock_free,
+ .xpo_prep_reply_hdr = svc_tcp_prep_reply_hdr,
+};
+
+static struct svc_xprt_class svc_tcp_bc_class = {
+ .xcl_name = "tcp-bc",
+ .xcl_owner = THIS_MODULE,
+ .xcl_ops = &svc_tcp_bc_ops,
+ .xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
+};
+
+static void svc_init_bc_xprt_sock(void)
+{
+ svc_reg_xprt_class(&svc_tcp_bc_class);
+}
+
+static void svc_cleanup_bc_xprt_sock(void)
+{
+ svc_unreg_xprt_class(&svc_tcp_bc_class);
+}
+#else /* CONFIG_NFS_V4_1 */
+static void svc_init_bc_xprt_sock(void)
+{
+}
+
+static void svc_cleanup_bc_xprt_sock(void)
+{
+}
+#endif /* CONFIG_NFS_V4_1 */
+
static struct svc_xprt_ops svc_tcp_ops = {
.xpo_create = svc_tcp_create,
.xpo_recvfrom = svc_tcp_recvfrom,
@@ -1207,12 +1269,14 @@ void svc_init_xprt_sock(void)
{
svc_reg_xprt_class(&svc_tcp_class);
svc_reg_xprt_class(&svc_udp_class);
+ svc_init_bc_xprt_sock();
}
void svc_cleanup_xprt_sock(void)
{
svc_unreg_xprt_class(&svc_tcp_class);
svc_unreg_xprt_class(&svc_udp_class);
+ svc_cleanup_bc_xprt_sock();
}
static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
@@ -1509,41 +1573,43 @@ static void svc_sock_free(struct svc_xprt *xprt)
kfree(svsk);
}
+#if defined(CONFIG_NFS_V4_1)
/*
- * Create a svc_xprt.
- *
- * For internal use only (e.g. nfsv4.1 backchannel).
- * Callers should typically use the xpo_create() method.
+ * Create a back channel svc_xprt which shares the fore channel socket.
*/
-struct svc_xprt *svc_sock_create(struct svc_serv *serv, int prot)
+static struct svc_xprt *svc_bc_create_socket(struct svc_serv *serv,
+ int protocol,
+ struct net *net,
+ struct sockaddr *sin, int len,
+ int flags)
{
struct svc_sock *svsk;
- struct svc_xprt *xprt = NULL;
+ struct svc_xprt *xprt;
+
+ if (protocol != IPPROTO_TCP) {
+ printk(KERN_WARNING "svc: only TCP sockets"
+ " supported on shared back channel\n");
+ return ERR_PTR(-EINVAL);
+ }
- dprintk("svc: %s\n", __func__);
svsk = kzalloc(sizeof(*svsk), GFP_KERNEL);
if (!svsk)
- goto out;
+ return ERR_PTR(-ENOMEM);
xprt = &svsk->sk_xprt;
- if (prot == IPPROTO_TCP)
- svc_xprt_init(&svc_tcp_class, xprt, serv);
- else if (prot == IPPROTO_UDP)
- svc_xprt_init(&svc_udp_class, xprt, serv);
- else
- BUG();
-out:
- dprintk("svc: %s return %p\n", __func__, xprt);
+ svc_xprt_init(&svc_tcp_bc_class, xprt, serv);
+
+ serv->sv_bc_xprt = xprt;
+
return xprt;
}
-EXPORT_SYMBOL_GPL(svc_sock_create);
/*
- * Destroy a svc_sock.
+ * Free a back channel svc_sock.
*/
-void svc_sock_destroy(struct svc_xprt *xprt)
+static void svc_bc_sock_free(struct svc_xprt *xprt)
{
if (xprt)
kfree(container_of(xprt, struct svc_sock, sk_xprt));
}
-EXPORT_SYMBOL_GPL(svc_sock_destroy);
+#endif /* CONFIG_NFS_V4_1 */
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index cd9e841e7492..679cd674b81d 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -552,6 +552,74 @@ void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int b
}
EXPORT_SYMBOL_GPL(xdr_write_pages);
+static void xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
+ __be32 *p, unsigned int len)
+{
+ if (len > iov->iov_len)
+ len = iov->iov_len;
+ if (p == NULL)
+ p = (__be32*)iov->iov_base;
+ xdr->p = p;
+ xdr->end = (__be32*)(iov->iov_base + len);
+ xdr->iov = iov;
+ xdr->page_ptr = NULL;
+}
+
+static int xdr_set_page_base(struct xdr_stream *xdr,
+ unsigned int base, unsigned int len)
+{
+ unsigned int pgnr;
+ unsigned int maxlen;
+ unsigned int pgoff;
+ unsigned int pgend;
+ void *kaddr;
+
+ maxlen = xdr->buf->page_len;
+ if (base >= maxlen)
+ return -EINVAL;
+ maxlen -= base;
+ if (len > maxlen)
+ len = maxlen;
+
+ base += xdr->buf->page_base;
+
+ pgnr = base >> PAGE_SHIFT;
+ xdr->page_ptr = &xdr->buf->pages[pgnr];
+ kaddr = page_address(*xdr->page_ptr);
+
+ pgoff = base & ~PAGE_MASK;
+ xdr->p = (__be32*)(kaddr + pgoff);
+
+ pgend = pgoff + len;
+ if (pgend > PAGE_SIZE)
+ pgend = PAGE_SIZE;
+ xdr->end = (__be32*)(kaddr + pgend);
+ xdr->iov = NULL;
+ return 0;
+}
+
+static void xdr_set_next_page(struct xdr_stream *xdr)
+{
+ unsigned int newbase;
+
+ newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT;
+ newbase -= xdr->buf->page_base;
+
+ if (xdr_set_page_base(xdr, newbase, PAGE_SIZE) < 0)
+ xdr_set_iov(xdr, xdr->buf->tail, NULL, xdr->buf->len);
+}
+
+static bool xdr_set_next_buffer(struct xdr_stream *xdr)
+{
+ if (xdr->page_ptr != NULL)
+ xdr_set_next_page(xdr);
+ else if (xdr->iov == xdr->buf->head) {
+ if (xdr_set_page_base(xdr, 0, PAGE_SIZE) < 0)
+ xdr_set_iov(xdr, xdr->buf->tail, NULL, xdr->buf->len);
+ }
+ return xdr->p != xdr->end;
+}
+
/**
* xdr_init_decode - Initialize an xdr_stream for decoding data.
* @xdr: pointer to xdr_stream struct
@@ -560,41 +628,67 @@ EXPORT_SYMBOL_GPL(xdr_write_pages);
*/
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
- struct kvec *iov = buf->head;
- unsigned int len = iov->iov_len;
-
- if (len > buf->len)
- len = buf->len;
xdr->buf = buf;
- xdr->iov = iov;
- xdr->p = p;
- xdr->end = (__be32 *)((char *)iov->iov_base + len);
+ xdr->scratch.iov_base = NULL;
+ xdr->scratch.iov_len = 0;
+ if (buf->head[0].iov_len != 0)
+ xdr_set_iov(xdr, buf->head, p, buf->len);
+ else if (buf->page_len != 0)
+ xdr_set_page_base(xdr, 0, buf->len);
}
EXPORT_SYMBOL_GPL(xdr_init_decode);
-/**
- * xdr_inline_peek - Allow read-ahead in the XDR data stream
- * @xdr: pointer to xdr_stream struct
- * @nbytes: number of bytes of data to decode
- *
- * Check if the input buffer is long enough to enable us to decode
- * 'nbytes' more bytes of data starting at the current position.
- * If so return the current pointer without updating the current
- * pointer position.
- */
-__be32 * xdr_inline_peek(struct xdr_stream *xdr, size_t nbytes)
+static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
__be32 *p = xdr->p;
__be32 *q = p + XDR_QUADLEN(nbytes);
if (unlikely(q > xdr->end || q < p))
return NULL;
+ xdr->p = q;
return p;
}
-EXPORT_SYMBOL_GPL(xdr_inline_peek);
/**
- * xdr_inline_decode - Retrieve non-page XDR data to decode
+ * xdr_set_scratch_buffer - Attach a scratch buffer for decoding data.
+ * @xdr: pointer to xdr_stream struct
+ * @buf: pointer to an empty buffer
+ * @buflen: size of 'buf'
+ *
+ * The scratch buffer is used when decoding from an array of pages.
+ * If an xdr_inline_decode() call spans across page boundaries, then
+ * we copy the data into the scratch buffer in order to allow linear
+ * access.
+ */
+void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen)
+{
+ xdr->scratch.iov_base = buf;
+ xdr->scratch.iov_len = buflen;
+}
+EXPORT_SYMBOL_GPL(xdr_set_scratch_buffer);
+
+static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
+{
+ __be32 *p;
+ void *cpdest = xdr->scratch.iov_base;
+ size_t cplen = (char *)xdr->end - (char *)xdr->p;
+
+ if (nbytes > xdr->scratch.iov_len)
+ return NULL;
+ memcpy(cpdest, xdr->p, cplen);
+ cpdest += cplen;
+ nbytes -= cplen;
+ if (!xdr_set_next_buffer(xdr))
+ return NULL;
+ p = __xdr_inline_decode(xdr, nbytes);
+ if (p == NULL)
+ return NULL;
+ memcpy(cpdest, p, nbytes);
+ return xdr->scratch.iov_base;
+}
+
+/**
+ * xdr_inline_decode - Retrieve XDR data to decode
* @xdr: pointer to xdr_stream struct
* @nbytes: number of bytes of data to decode
*
@@ -605,13 +699,16 @@ EXPORT_SYMBOL_GPL(xdr_inline_peek);
*/
__be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
- __be32 *p = xdr->p;
- __be32 *q = p + XDR_QUADLEN(nbytes);
+ __be32 *p;
- if (unlikely(q > xdr->end || q < p))
+ if (nbytes == 0)
+ return xdr->p;
+ if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
return NULL;
- xdr->p = q;
- return p;
+ p = __xdr_inline_decode(xdr, nbytes);
+ if (p != NULL)
+ return p;
+ return xdr_copy_to_scratch(xdr, nbytes);
}
EXPORT_SYMBOL_GPL(xdr_inline_decode);
@@ -671,16 +768,12 @@ EXPORT_SYMBOL_GPL(xdr_read_pages);
*/
void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
{
- char * kaddr = page_address(xdr->buf->pages[0]);
xdr_read_pages(xdr, len);
/*
* Position current pointer at beginning of tail, and
* set remaining message length.
*/
- if (len > PAGE_CACHE_SIZE - xdr->buf->page_base)
- len = PAGE_CACHE_SIZE - xdr->buf->page_base;
- xdr->p = (__be32 *)(kaddr + xdr->buf->page_base);
- xdr->end = (__be32 *)((char *)xdr->p + len);
+ xdr_set_page_base(xdr, 0, len);
}
EXPORT_SYMBOL_GPL(xdr_enter_page);
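
For reference, the core of xdr_set_page_base() above is splitting a byte offset into a page index plus an in-page offset, and clamping the decode window at the page boundary; reads that still straddle a page fall back to the scratch-buffer copy in xdr_copy_to_scratch(). A standalone sketch of that arithmetic, with PAGE_SIZE hard-coded to 4K purely for illustration:

    #include <stdio.h>

    #define PAGE_SHIFT      12
    #define PAGE_SIZE       (1UL << PAGE_SHIFT)
    #define PAGE_MASK       (~(PAGE_SIZE - 1))

    int main(void)
    {
            unsigned long base = 9000;      /* byte offset into the page array */
            unsigned long len  = 2000;      /* requested decode window */

            unsigned long pgnr  = base >> PAGE_SHIFT;       /* which page */
            unsigned long pgoff = base & ~PAGE_MASK;        /* offset inside it */
            unsigned long pgend = pgoff + len;

            if (pgend > PAGE_SIZE)          /* window may not cross the page */
                    pgend = PAGE_SIZE;

            printf("page %lu, start %lu, end %lu (window %lu bytes)\n",
                   pgnr, pgoff, pgend, pgend - pgoff);
            return 0;
    }
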
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 4c8f18aff7c3..856274d7e85c 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -965,6 +965,7 @@ struct rpc_xprt *xprt_alloc(struct net *net, int size, int max_req)
xprt = kzalloc(size, GFP_KERNEL);
if (xprt == NULL)
goto out;
+ kref_init(&xprt->kref);
xprt->max_reqs = max_req;
xprt->slot = kcalloc(max_req, sizeof(struct rpc_rqst), GFP_KERNEL);
@@ -1101,8 +1102,10 @@ found:
-PTR_ERR(xprt));
return xprt;
}
+ if (test_and_set_bit(XPRT_INITIALIZED, &xprt->state))
+ /* ->setup returned a pre-initialized xprt: */
+ return xprt;
- kref_init(&xprt->kref);
spin_lock_init(&xprt->transport_lock);
spin_lock_init(&xprt->reserve_lock);
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 96549df836ee..c431f5a57960 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2359,6 +2359,15 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
struct svc_sock *bc_sock;
struct rpc_xprt *ret;
+ if (args->bc_xprt->xpt_bc_xprt) {
+ /*
+ * This server connection already has a backchannel
+ * transport; we can't create a new one, as we wouldn't be
+ * able to match replies based on xid any more. So,
+ * reuse the already-existing one:
+ */
+ return args->bc_xprt->xpt_bc_xprt;
+ }
xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries);
if (IS_ERR(xprt))
return xprt;
@@ -2375,16 +2384,6 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
xprt->reestablish_timeout = 0;
xprt->idle_timeout = 0;
- /*
- * The backchannel uses the same socket connection as the
- * forechannel
- */
- xprt->bc_xprt = args->bc_xprt;
- bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt);
- bc_sock->sk_bc_xprt = xprt;
- transport->sock = bc_sock->sk_sock;
- transport->inet = bc_sock->sk_sk;
-
xprt->ops = &bc_tcp_ops;
switch (addr->sa_family) {
@@ -2407,6 +2406,20 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
xprt->address_strings[RPC_DISPLAY_PROTO]);
/*
+ * Once we've associated a backchannel xprt with a connection,
+ * we want to keep it around as long as the connection
+ * lasts, in case we need to start using it for a backchannel
+ * again; this reference won't be dropped until bc_xprt is
+ * destroyed.
+ */
+ xprt_get(xprt);
+ args->bc_xprt->xpt_bc_xprt = xprt;
+ xprt->bc_xprt = args->bc_xprt;
+ bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt);
+ transport->sock = bc_sock->sk_sock;
+ transport->inet = bc_sock->sk_sk;
+
+ /*
* Since we don't want connections for the backchannel, we set
* the xprt status to connected
*/
@@ -2415,6 +2428,7 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
if (try_module_get(THIS_MODULE))
return xprt;
+ xprt_put(xprt);
ret = ERR_PTR(-EINVAL);
out_err:
xprt_free(xprt);
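
Aside on the xs_setup_bc_tcp() changes above: the new xprt_get() accounts for the reference published through xpt_bc_xprt, and the xprt_put() added before out_err undoes it on failure. A small standalone sketch of that get/put discipline (struct obj, obj_get/obj_put and 'registered' are invented stand-ins, not SUNRPC APIs):

    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
            int refcount;
    };

    static struct obj *registered;          /* stands in for xpt_bc_xprt */

    static void obj_get(struct obj *o)
    {
            o->refcount++;
    }

    static void obj_put(struct obj *o)
    {
            if (--o->refcount == 0)
                    free(o);
    }

    static struct obj *obj_setup(int fail_late)
    {
            struct obj *o = calloc(1, sizeof(*o));

            if (!o)
                    return NULL;
            o->refcount = 1;                /* the caller's reference */

            obj_get(o);                     /* extra reference held via 'registered' */
            registered = o;

            if (fail_late) {
                    registered = NULL;
                    obj_put(o);             /* undo the publish reference */
                    obj_put(o);             /* drop the caller's reference too */
                    return NULL;
            }
            return o;
    }

    int main(void)
    {
            struct obj *o = obj_setup(0);

            if (!o)
                    return 1;
            printf("refcount after setup: %d\n", o->refcount);      /* prints 2 */

            obj_put(o);                     /* caller is done with it */
            obj_put(registered);            /* later teardown of the registration */
            registered = NULL;
            return 0;
    }
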
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index d0ee29063e5d..1f1ef70f34f2 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -95,7 +95,7 @@ config CFG80211_DEBUGFS
If unsure, say N.
config CFG80211_INTERNAL_REGDB
- bool "use statically compiled regulatory rules database" if EMBEDDED
+ bool "use statically compiled regulatory rules database" if EXPERT
default n
depends on CFG80211
---help---
diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
index 55187c8f6420..406207515b5e 100644
--- a/net/x25/x25_facilities.c
+++ b/net/x25/x25_facilities.c
@@ -27,9 +27,19 @@
#include <net/sock.h>
#include <net/x25.h>
-/*
- * Parse a set of facilities into the facilities structures. Unrecognised
- * facilities are written to the debug log file.
+/**
+ * x25_parse_facilities - Parse facilities from skb into the facilities structs
+ *
+ * @skb: sk_buff to parse
+ * @facilities: Regular facilities, updated as facilities are found
+ * @dte_facs: ITU DTE facilities, updated as DTE facilities are found
+ * @vc_fac_mask: mask is updated with all facilities found
+ *
+ * Return codes:
+ * -1 - Parsing error, caller should drop call and clean up
+ * 0 - Parse OK, this skb has no facilities
+ * >0 - Parse OK, returns the length of the facilities header
+ *
*/
int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
struct x25_dte_facilities *dte_facs, unsigned long *vc_fac_mask)
@@ -62,7 +72,7 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
switch (*p & X25_FAC_CLASS_MASK) {
case X25_FAC_CLASS_A:
if (len < 2)
- return 0;
+ return -1;
switch (*p) {
case X25_FAC_REVERSE:
if((p[1] & 0x81) == 0x81) {
@@ -107,7 +117,7 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
break;
case X25_FAC_CLASS_B:
if (len < 3)
- return 0;
+ return -1;
switch (*p) {
case X25_FAC_PACKET_SIZE:
facilities->pacsize_in = p[1];
@@ -130,7 +140,7 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
break;
case X25_FAC_CLASS_C:
if (len < 4)
- return 0;
+ return -1;
printk(KERN_DEBUG "X.25: unknown facility %02X, "
"values %02X, %02X, %02X\n",
p[0], p[1], p[2], p[3]);
@@ -139,18 +149,18 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
break;
case X25_FAC_CLASS_D:
if (len < p[1] + 2)
- return 0;
+ return -1;
switch (*p) {
case X25_FAC_CALLING_AE:
if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
- return 0;
+ return -1;
dte_facs->calling_len = p[2];
memcpy(dte_facs->calling_ae, &p[3], p[1] - 1);
*vc_fac_mask |= X25_MASK_CALLING_AE;
break;
case X25_FAC_CALLED_AE:
if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
- return 0;
+ return -1;
dte_facs->called_len = p[2];
memcpy(dte_facs->called_ae, &p[3], p[1] - 1);
*vc_fac_mask |= X25_MASK_CALLED_AE;
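
The x25_in.c hunk below updates the in-kernel caller for the new -1 / 0 / >0 convention; purely as a standalone illustration, a hypothetical caller could look like this (parse_facilities() here is a stand-in, not the real x25_parse_facilities()):

    #include <stdio.h>

    /* Stand-in parser: -1 on error, 0 if no facilities, >0 bytes consumed. */
    static int parse_facilities(const unsigned char *buf, int buflen)
    {
            if (buflen < 1)
                    return -1;              /* malformed: caller must clear the call */
            if (buf[0] == 0)
                    return 0;               /* no facilities present */
            if (1 + buf[0] > buflen)
                    return -1;              /* truncated facilities field */
            return 1 + buf[0];              /* length byte + facilities consumed */
    }

    int main(void)
    {
            unsigned char frame[] = { 2, 0x42, 0x07 };      /* 2 facility bytes */
            int len = parse_facilities(frame, sizeof(frame));

            if (len < 0)
                    printf("parse error: clear the call\n");
            else if (len == 0)
                    printf("no facilities\n");
            else
                    printf("facilities header consumed %d bytes\n", len);
            return 0;
    }
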
diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c
index f729f022be69..15de65f04719 100644
--- a/net/x25/x25_in.c
+++ b/net/x25/x25_in.c
@@ -91,10 +91,10 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
{
struct x25_address source_addr, dest_addr;
int len;
+ struct x25_sock *x25 = x25_sk(sk);
switch (frametype) {
case X25_CALL_ACCEPTED: {
- struct x25_sock *x25 = x25_sk(sk);
x25_stop_timer(sk);
x25->condition = 0x00;
@@ -113,14 +113,16 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
&dest_addr);
if (len > 0)
skb_pull(skb, len);
+ else if (len < 0)
+ goto out_clear;
len = x25_parse_facilities(skb, &x25->facilities,
&x25->dte_facilities,
&x25->vc_facil_mask);
if (len > 0)
skb_pull(skb, len);
- else
- return -1;
+ else if (len < 0)
+ goto out_clear;
/*
* Copy any Call User Data.
*/
@@ -144,6 +146,12 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
}
return 0;
+
+out_clear:
+ x25_write_internal(sk, X25_CLEAR_REQUEST);
+ x25->state = X25_STATE_2;
+ x25_start_t23timer(sk);
+ return 0;
}
/*
diff --git a/net/x25/x25_link.c b/net/x25/x25_link.c
index 4cbc942f762a..21306928d47f 100644
--- a/net/x25/x25_link.c
+++ b/net/x25/x25_link.c
@@ -396,9 +396,12 @@ void __exit x25_link_free(void)
write_lock_bh(&x25_neigh_list_lock);
list_for_each_safe(entry, tmp, &x25_neigh_list) {
+ struct net_device *dev;
+
nb = list_entry(entry, struct x25_neigh, node);
+ dev = nb->dev;
__x25_remove_neigh(nb);
- dev_put(nb->dev);
+ dev_put(dev);
}
write_unlock_bh(&x25_neigh_list_lock);
}
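
Aside on the x25_link_free() hunk above: nb->dev is copied into a local before __x25_remove_neigh() runs, so the following dev_put() no longer has to reach back into an object that may already have been released. A minimal standalone illustration of that save-before-release pattern (struct neigh and release_neigh() are made up for the example):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct neigh {
            char dev_name[16];
    };

    static void release_neigh(struct neigh *nb)
    {
            free(nb);               /* after this, nb must not be touched */
    }

    int main(void)
    {
            struct neigh *nb = malloc(sizeof(*nb));
            char dev_name[16];

            if (!nb)
                    return 1;
            strcpy(nb->dev_name, "eth0");

            /* Save what we still need *before* the object can go away... */
            strcpy(dev_name, nb->dev_name);
            release_neigh(nb);

            /* ...then use the saved copy, not nb->dev_name. */
            printf("released neighbour on %s\n", dev_name);
            return 0;
    }
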
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 8b3ef404c794..6459588befc3 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1340,10 +1340,13 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
default:
BUG();
}
- xdst = dst_alloc(dst_ops) ?: ERR_PTR(-ENOBUFS);
+ xdst = dst_alloc(dst_ops);
xfrm_policy_put_afinfo(afinfo);
- xdst->flo.ops = &xfrm_bundle_fc_ops;
+ if (likely(xdst))
+ xdst->flo.ops = &xfrm_bundle_fc_ops;
+ else
+ xdst = ERR_PTR(-ENOBUFS);
return xdst;
}
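
Aside on the xfrm_alloc_dst() hunk above: the old '?:' form could hand an ERR_PTR-encoded value to the unconditional xdst->flo.ops assignment, while the fix only dereferences the result when dst_alloc() actually returned an object. A standalone sketch of that check-before-dereference pattern, with ERR_PTR/IS_ERR re-created in userspace for illustration only:

    #include <stdio.h>
    #include <stdlib.h>
    #include <errno.h>

    #define ERR_PTR(err)    ((void *)(long)(err))
    #define IS_ERR(ptr)     ((unsigned long)(ptr) >= (unsigned long)-4095)

    struct bundle {
            int flags;
    };

    static struct bundle *alloc_bundle(int simulate_failure)
    {
            return simulate_failure ? NULL : calloc(1, sizeof(struct bundle));
    }

    static struct bundle *make_bundle(int simulate_failure)
    {
            struct bundle *b = alloc_bundle(simulate_failure);

            /* Only touch the object on success; otherwise hand back an error. */
            if (b)
                    b->flags = 1;
            else
                    b = ERR_PTR(-ENOBUFS);
            return b;
    }

    int main(void)
    {
            struct bundle *b = make_bundle(1);

            if (IS_ERR(b)) {
                    printf("allocation failed, no dereference attempted\n");
            } else {
                    printf("flags = %d\n", b->flags);
                    free(b);
            }
            return 0;
    }
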