Diffstat (limited to 'net/bluetooth')
-rw-r--r-- | net/bluetooth/6lowpan.c     | 284 |
-rw-r--r-- | net/bluetooth/hci_conn.c    |  14 |
-rw-r--r-- | net/bluetooth/hci_core.c    |  16 |
-rw-r--r-- | net/bluetooth/hci_event.c   |  50 |
-rw-r--r-- | net/bluetooth/hci_sock.c    |   2 |
-rw-r--r-- | net/bluetooth/l2cap_core.c  |   7 |
-rw-r--r-- | net/bluetooth/mgmt.c        |  83 |
-rw-r--r-- | net/bluetooth/rfcomm/core.c |   6 |
-rw-r--r-- | net/bluetooth/smp.c         |  34 |
9 files changed, 300 insertions, 196 deletions
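
The bulk of this diff converts the peer and device lists in net/bluetooth/6lowpan.c from a reader/writer lock (DEFINE_RWLOCK) to RCU: writers take a plain spinlock, readers walk the lists under rcu_read_lock() with list_for_each_entry_rcu(), and removed entries are freed only after a grace period via call_rcu(). The following is a minimal, self-contained sketch of that locking pattern, not code from the patch; the my_peer/my_peers/my_lock names are illustrative only.

/* Sketch of the RCU-protected list pattern used by the 6lowpan.c changes. */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_peer {
	struct list_head list;
	struct rcu_head rcu;
	int id;
};

static LIST_HEAD(my_peers);
static DEFINE_SPINLOCK(my_lock);	/* serializes writers only */

static void my_peer_free(struct rcu_head *head)
{
	kfree(container_of(head, struct my_peer, rcu));
}

static void my_peer_add(struct my_peer *peer)
{
	spin_lock(&my_lock);
	list_add_rcu(&peer->list, &my_peers);
	spin_unlock(&my_lock);
}

static void my_peer_del(struct my_peer *peer)
{
	spin_lock(&my_lock);
	list_del_rcu(&peer->list);
	spin_unlock(&my_lock);
	/* Defer the kfree() until all current readers are done. */
	call_rcu(&peer->rcu, my_peer_free);
}

/* Lock-free reader: returns true if a peer with this id is on the list. */
static bool my_peer_exists(int id)
{
	struct my_peer *peer;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(peer, &my_peers, list) {
		if (peer->id == id) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}

In the patch the same roles are played by devices_lock (now a spinlock), peer_add()/peer_del() with list_add_rcu()/list_del_rcu(), peer_free() as the call_rcu() callback, and the various __peer_lookup_*() helpers as the rcu_read_lock() readers.
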
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c index c2e0d14433df..eef298d17452 100644 --- a/net/bluetooth/6lowpan.c +++ b/net/bluetooth/6lowpan.c @@ -53,7 +53,7 @@ struct skb_cb { * The list contains struct lowpan_dev elements. */ static LIST_HEAD(bt_6lowpan_devices); -static DEFINE_RWLOCK(devices_lock); +static DEFINE_SPINLOCK(devices_lock); /* If psm is set to 0 (default value), then 6lowpan is disabled. * Other values are used to indicate a Protocol Service Multiplexer @@ -67,6 +67,7 @@ static struct l2cap_chan *listen_chan; struct lowpan_peer { struct list_head list; + struct rcu_head rcu; struct l2cap_chan *chan; /* peer addresses in various formats */ @@ -86,6 +87,13 @@ struct lowpan_dev { struct delayed_work notify_peers; }; +static inline void peer_free(struct rcu_head *head) +{ + struct lowpan_peer *e = container_of(head, struct lowpan_peer, rcu); + + kfree(e); +} + static inline struct lowpan_dev *lowpan_dev(const struct net_device *netdev) { return netdev_priv(netdev); @@ -93,13 +101,14 @@ static inline struct lowpan_dev *lowpan_dev(const struct net_device *netdev) static inline void peer_add(struct lowpan_dev *dev, struct lowpan_peer *peer) { - list_add(&peer->list, &dev->peers); + list_add_rcu(&peer->list, &dev->peers); atomic_inc(&dev->peer_count); } static inline bool peer_del(struct lowpan_dev *dev, struct lowpan_peer *peer) { - list_del(&peer->list); + list_del_rcu(&peer->list); + call_rcu(&peer->rcu, peer_free); module_put(THIS_MODULE); @@ -114,31 +123,37 @@ static inline bool peer_del(struct lowpan_dev *dev, struct lowpan_peer *peer) static inline struct lowpan_peer *peer_lookup_ba(struct lowpan_dev *dev, bdaddr_t *ba, __u8 type) { - struct lowpan_peer *peer, *tmp; + struct lowpan_peer *peer; BT_DBG("peers %d addr %pMR type %d", atomic_read(&dev->peer_count), ba, type); - list_for_each_entry_safe(peer, tmp, &dev->peers, list) { + rcu_read_lock(); + + list_for_each_entry_rcu(peer, &dev->peers, list) { BT_DBG("dst addr %pMR dst type %d", &peer->chan->dst, peer->chan->dst_type); if (bacmp(&peer->chan->dst, ba)) continue; - if (type == peer->chan->dst_type) + if (type == peer->chan->dst_type) { + rcu_read_unlock(); return peer; + } } + rcu_read_unlock(); + return NULL; } -static inline struct lowpan_peer *peer_lookup_chan(struct lowpan_dev *dev, - struct l2cap_chan *chan) +static inline struct lowpan_peer *__peer_lookup_chan(struct lowpan_dev *dev, + struct l2cap_chan *chan) { - struct lowpan_peer *peer, *tmp; + struct lowpan_peer *peer; - list_for_each_entry_safe(peer, tmp, &dev->peers, list) { + list_for_each_entry_rcu(peer, &dev->peers, list) { if (peer->chan == chan) return peer; } @@ -146,12 +161,12 @@ static inline struct lowpan_peer *peer_lookup_chan(struct lowpan_dev *dev, return NULL; } -static inline struct lowpan_peer *peer_lookup_conn(struct lowpan_dev *dev, - struct l2cap_conn *conn) +static inline struct lowpan_peer *__peer_lookup_conn(struct lowpan_dev *dev, + struct l2cap_conn *conn) { - struct lowpan_peer *peer, *tmp; + struct lowpan_peer *peer; - list_for_each_entry_safe(peer, tmp, &dev->peers, list) { + list_for_each_entry_rcu(peer, &dev->peers, list) { if (peer->chan->conn == conn) return peer; } @@ -163,7 +178,7 @@ static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_dev *dev, struct in6_addr *daddr, struct sk_buff *skb) { - struct lowpan_peer *peer, *tmp; + struct lowpan_peer *peer; struct in6_addr *nexthop; struct rt6_info *rt = (struct rt6_info *)skb_dst(skb); int count = atomic_read(&dev->peer_count); @@ -174,9 +189,13 @@ 
static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_dev *dev, * send the packet. If only one peer exists, then we can send the * packet right away. */ - if (count == 1) - return list_first_entry(&dev->peers, struct lowpan_peer, - list); + if (count == 1) { + rcu_read_lock(); + peer = list_first_or_null_rcu(&dev->peers, struct lowpan_peer, + list); + rcu_read_unlock(); + return peer; + } if (!rt) { nexthop = &lowpan_cb(skb)->gw; @@ -195,53 +214,57 @@ static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_dev *dev, BT_DBG("gw %pI6c", nexthop); - list_for_each_entry_safe(peer, tmp, &dev->peers, list) { + rcu_read_lock(); + + list_for_each_entry_rcu(peer, &dev->peers, list) { BT_DBG("dst addr %pMR dst type %d ip %pI6c", &peer->chan->dst, peer->chan->dst_type, &peer->peer_addr); - if (!ipv6_addr_cmp(&peer->peer_addr, nexthop)) + if (!ipv6_addr_cmp(&peer->peer_addr, nexthop)) { + rcu_read_unlock(); return peer; + } } + rcu_read_unlock(); + return NULL; } static struct lowpan_peer *lookup_peer(struct l2cap_conn *conn) { - struct lowpan_dev *entry, *tmp; + struct lowpan_dev *entry; struct lowpan_peer *peer = NULL; - unsigned long flags; - read_lock_irqsave(&devices_lock, flags); + rcu_read_lock(); - list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) { - peer = peer_lookup_conn(entry, conn); + list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) { + peer = __peer_lookup_conn(entry, conn); if (peer) break; } - read_unlock_irqrestore(&devices_lock, flags); + rcu_read_unlock(); return peer; } static struct lowpan_dev *lookup_dev(struct l2cap_conn *conn) { - struct lowpan_dev *entry, *tmp; + struct lowpan_dev *entry; struct lowpan_dev *dev = NULL; - unsigned long flags; - read_lock_irqsave(&devices_lock, flags); + rcu_read_lock(); - list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) { + list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) { if (conn->hcon->hdev == entry->hdev) { dev = entry; break; } } - read_unlock_irqrestore(&devices_lock, flags); + rcu_read_unlock(); return dev; } @@ -249,35 +272,27 @@ static struct lowpan_dev *lookup_dev(struct l2cap_conn *conn) static int give_skb_to_upper(struct sk_buff *skb, struct net_device *dev) { struct sk_buff *skb_cp; - int ret; skb_cp = skb_copy(skb, GFP_ATOMIC); if (!skb_cp) - return -ENOMEM; - - ret = netif_rx(skb_cp); - if (ret < 0) { - BT_DBG("receive skb %d", ret); return NET_RX_DROP; - } - return ret; + return netif_rx(skb_cp); } -static int process_data(struct sk_buff *skb, struct net_device *netdev, - struct l2cap_chan *chan) +static int iphc_decompress(struct sk_buff *skb, struct net_device *netdev, + struct l2cap_chan *chan) { const u8 *saddr, *daddr; u8 iphc0, iphc1; struct lowpan_dev *dev; struct lowpan_peer *peer; - unsigned long flags; dev = lowpan_dev(netdev); - read_lock_irqsave(&devices_lock, flags); - peer = peer_lookup_chan(dev, chan); - read_unlock_irqrestore(&devices_lock, flags); + rcu_read_lock(); + peer = __peer_lookup_chan(dev, chan); + rcu_read_unlock(); if (!peer) goto drop; @@ -294,10 +309,11 @@ static int process_data(struct sk_buff *skb, struct net_device *netdev, if (lowpan_fetch_skb_u8(skb, &iphc1)) goto drop; - return lowpan_process_data(skb, netdev, - saddr, IEEE802154_ADDR_LONG, EUI64_ADDR_LEN, - daddr, IEEE802154_ADDR_LONG, EUI64_ADDR_LEN, - iphc0, iphc1, give_skb_to_upper); + return lowpan_header_decompress(skb, netdev, + saddr, IEEE802154_ADDR_LONG, + EUI64_ADDR_LEN, daddr, + IEEE802154_ADDR_LONG, EUI64_ADDR_LEN, + iphc0, iphc1); drop: kfree_skb(skb); @@ -316,6 
+332,10 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev, if (dev->type != ARPHRD_6LOWPAN) goto drop; + skb = skb_share_check(skb, GFP_ATOMIC); + if (!skb) + goto drop; + /* check that it's our buffer */ if (skb->data[0] == LOWPAN_DISPATCH_IPV6) { /* Copy the packet so that the IPv6 header is @@ -340,8 +360,8 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev, dev->stats.rx_bytes += skb->len; dev->stats.rx_packets++; - kfree_skb(local_skb); - kfree_skb(skb); + consume_skb(local_skb); + consume_skb(skb); } else { switch (skb->data[0] & 0xe0) { case LOWPAN_DISPATCH_IPHC: /* ipv6 datagram */ @@ -349,14 +369,25 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev, if (!local_skb) goto drop; - ret = process_data(local_skb, dev, chan); - if (ret != NET_RX_SUCCESS) + ret = iphc_decompress(local_skb, dev, chan); + if (ret < 0) goto drop; + local_skb->protocol = htons(ETH_P_IPV6); + local_skb->pkt_type = PACKET_HOST; + local_skb->dev = dev; + + if (give_skb_to_upper(local_skb, dev) + != NET_RX_SUCCESS) { + kfree_skb(local_skb); + goto drop; + } + dev->stats.rx_bytes += skb->len; dev->stats.rx_packets++; - kfree_skb(skb); + consume_skb(local_skb); + consume_skb(skb); break; default: break; @@ -443,7 +474,6 @@ static int setup_header(struct sk_buff *skb, struct net_device *netdev, if (ipv6_addr_is_multicast(&ipv6_daddr)) { lowpan_cb(skb)->chan = NULL; } else { - unsigned long flags; u8 addr_type; /* Get destination BT device from skb. @@ -454,19 +484,14 @@ static int setup_header(struct sk_buff *skb, struct net_device *netdev, BT_DBG("dest addr %pMR type %d IP %pI6c", &addr, addr_type, &ipv6_daddr); - read_lock_irqsave(&devices_lock, flags); peer = peer_lookup_ba(dev, &addr, addr_type); - read_unlock_irqrestore(&devices_lock, flags); - if (!peer) { /* The packet might be sent to 6lowpan interface * because of routing (either via default route * or user set route) so get peer according to * the destination address. */ - read_lock_irqsave(&devices_lock, flags); peer = peer_lookup_dst(dev, &ipv6_daddr, skb); - read_unlock_irqrestore(&devices_lock, flags); if (!peer) { BT_DBG("no such peer %pMR found", &addr); return -ENOENT; @@ -549,14 +574,13 @@ static int send_pkt(struct l2cap_chan *chan, struct sk_buff *skb, static int send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev) { struct sk_buff *local_skb; - struct lowpan_dev *entry, *tmp; - unsigned long flags; + struct lowpan_dev *entry; int err = 0; - read_lock_irqsave(&devices_lock, flags); + rcu_read_lock(); - list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) { - struct lowpan_peer *pentry, *ptmp; + list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) { + struct lowpan_peer *pentry; struct lowpan_dev *dev; if (entry->netdev != netdev) @@ -564,7 +588,7 @@ static int send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev) dev = lowpan_dev(entry->netdev); - list_for_each_entry_safe(pentry, ptmp, &dev->peers, list) { + list_for_each_entry_rcu(pentry, &dev->peers, list) { int ret; local_skb = skb_clone(skb, GFP_ATOMIC); @@ -581,7 +605,7 @@ static int send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev) } } - read_unlock_irqrestore(&devices_lock, flags); + rcu_read_unlock(); return err; } @@ -638,7 +662,26 @@ static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev) return err < 0 ? 
NET_XMIT_DROP : err; } +static struct lock_class_key bt_tx_busylock; +static struct lock_class_key bt_netdev_xmit_lock_key; + +static void bt_set_lockdep_class_one(struct net_device *dev, + struct netdev_queue *txq, + void *_unused) +{ + lockdep_set_class(&txq->_xmit_lock, &bt_netdev_xmit_lock_key); +} + +static int bt_dev_init(struct net_device *dev) +{ + netdev_for_each_tx_queue(dev, bt_set_lockdep_class_one, NULL); + dev->qdisc_tx_busylock = &bt_tx_busylock; + + return 0; +} + static const struct net_device_ops netdev_ops = { + .ndo_init = bt_dev_init, .ndo_start_xmit = bt_xmit, }; @@ -783,7 +826,6 @@ static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan, struct lowpan_dev *dev) { struct lowpan_peer *peer; - unsigned long flags; peer = kzalloc(sizeof(*peer), GFP_ATOMIC); if (!peer) @@ -806,10 +848,10 @@ static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan, */ set_ip_addr_bits(chan->dst_type, (u8 *)&peer->peer_addr.s6_addr + 8); - write_lock_irqsave(&devices_lock, flags); + spin_lock(&devices_lock); INIT_LIST_HEAD(&peer->list); peer_add(dev, peer); - write_unlock_irqrestore(&devices_lock, flags); + spin_unlock(&devices_lock); /* Notifying peers about us needs to be done without locks held */ INIT_DELAYED_WORK(&dev->notify_peers, do_notify_peers); @@ -822,7 +864,6 @@ static int setup_netdev(struct l2cap_chan *chan, struct lowpan_dev **dev) { struct net_device *netdev; int err = 0; - unsigned long flags; netdev = alloc_netdev(sizeof(struct lowpan_dev), IFACE_NAME_TEMPLATE, NET_NAME_UNKNOWN, netdev_setup); @@ -852,10 +893,10 @@ static int setup_netdev(struct l2cap_chan *chan, struct lowpan_dev **dev) (*dev)->hdev = chan->conn->hcon->hdev; INIT_LIST_HEAD(&(*dev)->peers); - write_lock_irqsave(&devices_lock, flags); + spin_lock(&devices_lock); INIT_LIST_HEAD(&(*dev)->list); - list_add(&(*dev)->list, &bt_6lowpan_devices); - write_unlock_irqrestore(&devices_lock, flags); + list_add_rcu(&(*dev)->list, &bt_6lowpan_devices); + spin_unlock(&devices_lock); return 0; @@ -909,11 +950,10 @@ static void delete_netdev(struct work_struct *work) static void chan_close_cb(struct l2cap_chan *chan) { - struct lowpan_dev *entry, *tmp; + struct lowpan_dev *entry; struct lowpan_dev *dev = NULL; struct lowpan_peer *peer; int err = -ENOENT; - unsigned long flags; bool last = false, removed = true; BT_DBG("chan %p conn %p", chan, chan->conn); @@ -928,11 +968,11 @@ static void chan_close_cb(struct l2cap_chan *chan) removed = false; } - write_lock_irqsave(&devices_lock, flags); + spin_lock(&devices_lock); - list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) { + list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) { dev = lowpan_dev(entry->netdev); - peer = peer_lookup_chan(dev, chan); + peer = __peer_lookup_chan(dev, chan); if (peer) { last = peer_del(dev, peer); err = 0; @@ -943,13 +983,12 @@ static void chan_close_cb(struct l2cap_chan *chan) atomic_read(&chan->kref.refcount)); l2cap_chan_put(chan); - kfree(peer); break; } } if (!err && last && dev && !atomic_read(&dev->peer_count)) { - write_unlock_irqrestore(&devices_lock, flags); + spin_unlock(&devices_lock); cancel_delayed_work_sync(&dev->notify_peers); @@ -960,7 +999,7 @@ static void chan_close_cb(struct l2cap_chan *chan) schedule_work(&entry->delete_netdev); } } else { - write_unlock_irqrestore(&devices_lock, flags); + spin_unlock(&devices_lock); } return; @@ -1152,10 +1191,9 @@ static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type, static void disconnect_all_peers(void) { - struct lowpan_dev *entry, *tmp_dev; 
+ struct lowpan_dev *entry; struct lowpan_peer *peer, *tmp_peer, *new_peer; struct list_head peers; - unsigned long flags; INIT_LIST_HEAD(&peers); @@ -1164,10 +1202,10 @@ static void disconnect_all_peers(void) * with the same list at the same time. */ - read_lock_irqsave(&devices_lock, flags); + rcu_read_lock(); - list_for_each_entry_safe(entry, tmp_dev, &bt_6lowpan_devices, list) { - list_for_each_entry_safe(peer, tmp_peer, &entry->peers, list) { + list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) { + list_for_each_entry_rcu(peer, &entry->peers, list) { new_peer = kmalloc(sizeof(*new_peer), GFP_ATOMIC); if (!new_peer) break; @@ -1179,26 +1217,36 @@ static void disconnect_all_peers(void) } } - read_unlock_irqrestore(&devices_lock, flags); + rcu_read_unlock(); + spin_lock(&devices_lock); list_for_each_entry_safe(peer, tmp_peer, &peers, list) { l2cap_chan_close(peer->chan, ENOENT); - kfree(peer); + + list_del_rcu(&peer->list); + call_rcu(&peer->rcu, peer_free); + + module_put(THIS_MODULE); } + spin_unlock(&devices_lock); } -static int lowpan_psm_set(void *data, u64 val) -{ +struct set_psm { + struct work_struct work; u16 psm; +}; + +static void do_psm_set(struct work_struct *work) +{ + struct set_psm *set_psm = container_of(work, struct set_psm, work); - psm = val; - if (psm == 0 || psm_6lowpan != psm) + if (set_psm->psm == 0 || psm_6lowpan != set_psm->psm) /* Disconnect existing connections if 6lowpan is * disabled (psm = 0), or if psm changes. */ disconnect_all_peers(); - psm_6lowpan = psm; + psm_6lowpan = set_psm->psm; if (listen_chan) { l2cap_chan_close(listen_chan, 0); @@ -1207,6 +1255,22 @@ static int lowpan_psm_set(void *data, u64 val) listen_chan = bt_6lowpan_listen(); + kfree(set_psm); +} + +static int lowpan_psm_set(void *data, u64 val) +{ + struct set_psm *set_psm; + + set_psm = kzalloc(sizeof(*set_psm), GFP_KERNEL); + if (!set_psm) + return -ENOMEM; + + set_psm->psm = val; + INIT_WORK(&set_psm->work, do_psm_set); + + schedule_work(&set_psm->work); + return 0; } @@ -1288,19 +1352,18 @@ static ssize_t lowpan_control_write(struct file *fp, static int lowpan_control_show(struct seq_file *f, void *ptr) { - struct lowpan_dev *entry, *tmp_dev; - struct lowpan_peer *peer, *tmp_peer; - unsigned long flags; + struct lowpan_dev *entry; + struct lowpan_peer *peer; - read_lock_irqsave(&devices_lock, flags); + spin_lock(&devices_lock); - list_for_each_entry_safe(entry, tmp_dev, &bt_6lowpan_devices, list) { - list_for_each_entry_safe(peer, tmp_peer, &entry->peers, list) + list_for_each_entry(entry, &bt_6lowpan_devices, list) { + list_for_each_entry(peer, &entry->peers, list) seq_printf(f, "%pMR (type %u)\n", &peer->chan->dst, peer->chan->dst_type); } - read_unlock_irqrestore(&devices_lock, flags); + spin_unlock(&devices_lock); return 0; } @@ -1322,7 +1385,6 @@ static void disconnect_devices(void) { struct lowpan_dev *entry, *tmp, *new_dev; struct list_head devices; - unsigned long flags; INIT_LIST_HEAD(&devices); @@ -1331,9 +1393,9 @@ static void disconnect_devices(void) * devices list. 
*/ - read_lock_irqsave(&devices_lock, flags); + rcu_read_lock(); - list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) { + list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) { new_dev = kmalloc(sizeof(*new_dev), GFP_ATOMIC); if (!new_dev) break; @@ -1341,10 +1403,10 @@ static void disconnect_devices(void) new_dev->netdev = entry->netdev; INIT_LIST_HEAD(&new_dev->list); - list_add(&new_dev->list, &devices); + list_add_rcu(&new_dev->list, &devices); } - read_unlock_irqrestore(&devices_lock, flags); + rcu_read_unlock(); list_for_each_entry_safe(entry, tmp, &devices, list) { ifdown(entry->netdev); @@ -1359,17 +1421,15 @@ static int device_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct net_device *netdev = netdev_notifier_info_to_dev(ptr); - struct lowpan_dev *entry, *tmp; - unsigned long flags; + struct lowpan_dev *entry; if (netdev->type != ARPHRD_6LOWPAN) return NOTIFY_DONE; switch (event) { case NETDEV_UNREGISTER: - write_lock_irqsave(&devices_lock, flags); - list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, - list) { + spin_lock(&devices_lock); + list_for_each_entry(entry, &bt_6lowpan_devices, list) { if (entry->netdev == netdev) { BT_DBG("Unregistered netdev %s %p", netdev->name, netdev); @@ -1378,7 +1438,7 @@ static int device_event(struct notifier_block *unused, break; } } - write_unlock_irqrestore(&devices_lock, flags); + spin_unlock(&devices_lock); break; } diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index b9517bd17190..96887ae8375b 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -141,10 +141,11 @@ int hci_disconnect(struct hci_conn *conn, __u8 reason) */ if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER) { struct hci_dev *hdev = conn->hdev; - struct hci_cp_read_clock_offset cp; + struct hci_cp_read_clock_offset clkoff_cp; - cp.handle = cpu_to_le16(conn->handle); - hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(cp), &cp); + clkoff_cp.handle = cpu_to_le16(conn->handle); + hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp), + &clkoff_cp); } conn->state = BT_DISCONN; @@ -415,7 +416,7 @@ static void le_conn_timeout(struct work_struct *work) * happen with broken hardware or if low duty cycle was used * (which doesn't have a timeout of its own). */ - if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) { + if (conn->role == HCI_ROLE_SLAVE) { u8 enable = 0x00; hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable); @@ -517,7 +518,7 @@ int hci_conn_del(struct hci_conn *conn) /* Unacked frames */ hdev->acl_cnt += conn->sent; } else if (conn->type == LE_LINK) { - cancel_delayed_work_sync(&conn->le_conn_timeout); + cancel_delayed_work(&conn->le_conn_timeout); if (hdev->le_pkts) hdev->le_cnt += conn->sent; @@ -544,6 +545,9 @@ int hci_conn_del(struct hci_conn *conn) hci_conn_del_sysfs(conn); + if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags)) + hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type); + hci_dev_put(hdev); hci_conn_put(conn); diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index cb05d7f16a34..91995f8ab0a0 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -4477,7 +4477,7 @@ int hci_req_run(struct hci_request *req, hci_req_complete_t complete) BT_DBG("length %u", skb_queue_len(&req->cmd_q)); - /* If an error occured during request building, remove all HCI + /* If an error occurred during request building, remove all HCI * commands queued on the HCI request queue. 
*/ if (req->err) { @@ -4546,7 +4546,7 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, return -ENOMEM; } - /* Stand-alone HCI commands must be flaged as + /* Stand-alone HCI commands must be flagged as * single-command requests. */ bt_cb(skb)->req.start = true; @@ -4566,7 +4566,7 @@ void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen, BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen); - /* If an error occured during request building, there is no point in + /* If an error occurred during request building, there is no point in * queueing the HCI command. We can simply return. */ if (req->err) @@ -4661,8 +4661,12 @@ static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue, skb_shinfo(skb)->frag_list = NULL; - /* Queue all fragments atomically */ - spin_lock(&queue->lock); + /* Queue all fragments atomically. We need to use spin_lock_bh + * here because of 6LoWPAN links, as there this function is + * called from softirq and using normal spin lock could cause + * deadlocks. + */ + spin_lock_bh(&queue->lock); __skb_queue_tail(queue, skb); @@ -4679,7 +4683,7 @@ static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue, __skb_queue_tail(queue, skb); } while (list); - spin_unlock(&queue->lock); + spin_unlock_bh(&queue->lock); } } diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 8b0a2a6de419..aa152140c3e2 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -205,6 +205,8 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb) hdev->le_scan_type = LE_SCAN_PASSIVE; hdev->ssp_debug_mode = 0; + + hci_bdaddr_list_clear(&hdev->le_white_list); } static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb) @@ -1045,7 +1047,7 @@ static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_lock(hdev); - /* If we're doing connection initation as peripheral. Set a + /* If we're doing connection initiation as peripheral. Set a * timeout in case something goes wrong. 
*/ if (*sent) { @@ -1577,8 +1579,7 @@ static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn, struct inquiry_entry *e; if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) - mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name, - name_len, conn->dev_class); + mgmt_device_connected(hdev, conn, 0, name, name_len); if (discov->state == DISCOVERY_STOPPED) return; @@ -2536,9 +2537,7 @@ static void hci_remote_features_evt(struct hci_dev *hdev, cp.pscan_rep_mode = 0x02; hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) - mgmt_device_connected(hdev, &conn->dst, conn->type, - conn->dst_type, 0, NULL, 0, - conn->dev_class); + mgmt_device_connected(hdev, conn, 0, NULL, 0); if (!hci_outgoing_auth_needed(hdev, conn)) { conn->state = BT_CONNECTED; @@ -3434,9 +3433,7 @@ static void hci_remote_ext_features_evt(struct hci_dev *hdev, cp.pscan_rep_mode = 0x02; hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) - mgmt_device_connected(hdev, &conn->dst, conn->type, - conn->dst_type, 0, NULL, 0, - conn->dev_class); + mgmt_device_connected(hdev, conn, 0, NULL, 0); if (!hci_outgoing_auth_needed(hdev, conn)) { conn->state = BT_CONNECTED; @@ -4214,8 +4211,7 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) } if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) - mgmt_device_connected(hdev, &conn->dst, conn->type, - conn->dst_type, 0, NULL, 0, NULL); + mgmt_device_connected(hdev, conn, 0, NULL, 0); conn->sec_level = BT_SECURITY_LOW; conn->handle = __le16_to_cpu(ev->handle); @@ -4269,25 +4265,26 @@ static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, } /* This function requires the caller holds hdev->lock */ -static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr, - u8 addr_type, u8 adv_type) +static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev, + bdaddr_t *addr, + u8 addr_type, u8 adv_type) { struct hci_conn *conn; struct hci_conn_params *params; /* If the event is not connectable don't proceed further */ if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND) - return; + return NULL; /* Ignore if the device is blocked */ if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type)) - return; + return NULL; /* Most controller will fail if we try to create new connections * while we have an existing one in slave role. */ if (hdev->conn_hash.le_num_slave > 0) - return; + return NULL; /* If we're not connectable only connect devices that we have in * our pend_le_conns list. @@ -4295,7 +4292,7 @@ static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr, params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr, addr_type); if (!params) - return; + return NULL; switch (params->auto_connect) { case HCI_AUTO_CONN_DIRECT: @@ -4304,7 +4301,7 @@ static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr, * incoming connections from slave devices. 
*/ if (adv_type != LE_ADV_DIRECT_IND) - return; + return NULL; break; case HCI_AUTO_CONN_ALWAYS: /* Devices advertising with ADV_IND or ADV_DIRECT_IND @@ -4315,7 +4312,7 @@ static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr, */ break; default: - return; + return NULL; } conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW, @@ -4328,7 +4325,7 @@ static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr, * count consistent once the connection is established. */ params->conn = hci_conn_get(conn); - return; + return conn; } switch (PTR_ERR(conn)) { @@ -4341,7 +4338,10 @@ static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr, break; default: BT_DBG("Failed to connect: err %ld", PTR_ERR(conn)); + return NULL; } + + return NULL; } static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, @@ -4349,6 +4349,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, { struct discovery_state *d = &hdev->discovery; struct smp_irk *irk; + struct hci_conn *conn; bool match; u32 flags; @@ -4360,7 +4361,14 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, } /* Check if we have been requested to connect to this device */ - check_pending_le_conn(hdev, bdaddr, bdaddr_type, type); + conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type); + if (conn && type == LE_ADV_IND) { + /* Store report for later inclusion by + * mgmt_device_connected + */ + memcpy(conn->le_adv_data, data, len); + conn->le_adv_data_len = len; + } /* Passive scanning shouldn't trigger any device found events, * except for devices marked as CONN_REPORT for which we do send diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index 29e1ec7189bd..5e2cd2535978 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c @@ -987,7 +987,7 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock, skb_queue_tail(&hdev->raw_q, skb); queue_work(hdev->workqueue, &hdev->tx_work); } else { - /* Stand-alone HCI commands must be flaged as + /* Stand-alone HCI commands must be flagged as * single-command requests. */ bt_cb(skb)->req.start = true; diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index b6f9777e057d..fc15174c612c 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -3873,9 +3873,7 @@ static int l2cap_connect_req(struct l2cap_conn *conn, hci_dev_lock(hdev); if (test_bit(HCI_MGMT, &hdev->dev_flags) && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags)) - mgmt_device_connected(hdev, &hcon->dst, hcon->type, - hcon->dst_type, 0, NULL, 0, - hcon->dev_class); + mgmt_device_connected(hdev, hcon, 0, NULL, 0); hci_dev_unlock(hdev); l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0); @@ -4084,7 +4082,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, chan->num_conf_req++; } - /* Got Conf Rsp PENDING from remote side and asume we sent + /* Got Conf Rsp PENDING from remote side and assume we sent Conf Rsp PENDING in the code above */ if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) && test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) { @@ -5494,6 +5492,7 @@ static inline int l2cap_le_credits(struct l2cap_conn *conn, if (credits > max_credits) { BT_ERR("LE credits overflow"); l2cap_send_disconn_req(chan, ECONNRESET); + l2cap_chan_unlock(chan); /* Return 0 so that we don't trigger an unnecessary * command reject packet. 
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index efb71b022ab6..9c4daf715cf8 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -2725,10 +2725,40 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data, } if (cp->addr.type == BDADDR_BREDR) { + /* If disconnection is requested, then look up the + * connection. If the remote device is connected, it + * will be later used to terminate the link. + * + * Setting it to NULL explicitly will cause no + * termination of the link. + */ + if (cp->disconnect) + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, + &cp->addr.bdaddr); + else + conn = NULL; + err = hci_remove_link_key(hdev, &cp->addr.bdaddr); } else { u8 addr_type; + conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, + &cp->addr.bdaddr); + if (conn) { + /* Defer clearing up the connection parameters + * until closing to give a chance of keeping + * them if a repairing happens. + */ + set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags); + + /* If disconnection is not requested, then + * clear the connection variable so that the + * link is not terminated. + */ + if (!cp->disconnect) + conn = NULL; + } + if (cp->addr.type == BDADDR_LE_PUBLIC) addr_type = ADDR_LE_DEV_PUBLIC; else @@ -2736,8 +2766,6 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data, hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type); - hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type); - err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type); } @@ -2747,17 +2775,9 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data, goto unlock; } - if (cp->disconnect) { - if (cp->addr.type == BDADDR_BREDR) - conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, - &cp->addr.bdaddr); - else - conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, - &cp->addr.bdaddr); - } else { - conn = NULL; - } - + /* If the connection variable is set, then termination of the + * link is requested. + */ if (!conn) { err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0, &rp, sizeof(rp)); @@ -3062,6 +3082,11 @@ static void pairing_complete(struct pending_cmd *cmd, u8 status) hci_conn_put(conn); mgmt_pending_remove(cmd); + + /* The device is paired so there is no need to remove + * its connection parameters anymore. + */ + clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags); } void mgmt_smp_complete(struct hci_conn *conn, bool complete) @@ -6171,26 +6196,36 @@ static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data, return eir_len; } -void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, - u8 addr_type, u32 flags, u8 *name, u8 name_len, - u8 *dev_class) +void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn, + u32 flags, u8 *name, u8 name_len) { char buf[512]; struct mgmt_ev_device_connected *ev = (void *) buf; u16 eir_len = 0; - bacpy(&ev->addr.bdaddr, bdaddr); - ev->addr.type = link_to_bdaddr(link_type, addr_type); + bacpy(&ev->addr.bdaddr, &conn->dst); + ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type); ev->flags = __cpu_to_le32(flags); - if (name_len > 0) - eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, - name, name_len); + /* We must ensure that the EIR Data fields are ordered and + * unique. Keep it simple for now and avoid the problem by not + * adding any BR/EDR data to the LE adv. 
+ */ + if (conn->le_adv_data_len > 0) { + memcpy(&ev->eir[eir_len], + conn->le_adv_data, conn->le_adv_data_len); + eir_len = conn->le_adv_data_len; + } else { + if (name_len > 0) + eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, + name, name_len); - if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0) - eir_len = eir_append_data(ev->eir, eir_len, - EIR_CLASS_OF_DEV, dev_class, 3); + if (memcmp(conn->dev_class, "\0\0\0", 3) != 0) + eir_len = eir_append_data(ev->eir, eir_len, + EIR_CLASS_OF_DEV, + conn->dev_class, 3); + } ev->eir_len = cpu_to_le16(eir_len); diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c index af73bc3acb40..bce9c3d39324 100644 --- a/net/bluetooth/rfcomm/core.c +++ b/net/bluetooth/rfcomm/core.c @@ -78,8 +78,8 @@ static struct rfcomm_session *rfcomm_session_del(struct rfcomm_session *s); #define __get_type(b) ((b & 0xef)) #define __test_ea(b) ((b & 0x01)) -#define __test_cr(b) ((b & 0x02)) -#define __test_pf(b) ((b & 0x10)) +#define __test_cr(b) (!!(b & 0x02)) +#define __test_pf(b) (!!(b & 0x10)) #define __addr(cr, dlci) (((dlci & 0x3f) << 2) | (cr << 1) | 0x01) #define __ctrl(type, pf) (((type & 0xef) | (pf << 4))) @@ -904,7 +904,7 @@ static int rfcomm_send_nsc(struct rfcomm_session *s, int cr, u8 type) hdr->len = __len8(sizeof(*mcc) + 1); mcc = (void *) ptr; ptr += sizeof(*mcc); - mcc->type = __mcc_type(cr, RFCOMM_NSC); + mcc->type = __mcc_type(0, RFCOMM_NSC); mcc->len = __len8(1); /* Type that we didn't like */ diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index f09b6b65cf6b..3ebf65b50881 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c @@ -191,16 +191,13 @@ int smp_generate_rpa(struct hci_dev *hdev, u8 irk[16], bdaddr_t *rpa) return 0; } -static int smp_c1(struct smp_chan *smp, u8 k[16], u8 r[16], u8 preq[7], - u8 pres[7], u8 _iat, bdaddr_t *ia, u8 _rat, bdaddr_t *ra, - u8 res[16]) +static int smp_c1(struct crypto_blkcipher *tfm_aes, u8 k[16], u8 r[16], + u8 preq[7], u8 pres[7], u8 _iat, bdaddr_t *ia, u8 _rat, + bdaddr_t *ra, u8 res[16]) { - struct hci_dev *hdev = smp->conn->hcon->hdev; u8 p1[16], p2[16]; int err; - BT_DBG("%s", hdev->name); - memset(p1, 0, 16); /* p1 = pres || preq || _rat || _iat */ @@ -218,7 +215,7 @@ static int smp_c1(struct smp_chan *smp, u8 k[16], u8 r[16], u8 preq[7], u128_xor((u128 *) res, (u128 *) r, (u128 *) p1); /* res = e(k, res) */ - err = smp_e(smp->tfm_aes, k, res); + err = smp_e(tfm_aes, k, res); if (err) { BT_ERR("Encrypt data error"); return err; @@ -228,26 +225,23 @@ static int smp_c1(struct smp_chan *smp, u8 k[16], u8 r[16], u8 preq[7], u128_xor((u128 *) res, (u128 *) res, (u128 *) p2); /* res = e(k, res) */ - err = smp_e(smp->tfm_aes, k, res); + err = smp_e(tfm_aes, k, res); if (err) BT_ERR("Encrypt data error"); return err; } -static int smp_s1(struct smp_chan *smp, u8 k[16], u8 r1[16], u8 r2[16], - u8 _r[16]) +static int smp_s1(struct crypto_blkcipher *tfm_aes, u8 k[16], u8 r1[16], + u8 r2[16], u8 _r[16]) { - struct hci_dev *hdev = smp->conn->hcon->hdev; int err; - BT_DBG("%s", hdev->name); - /* Just least significant octets from r1 and r2 are considered */ memcpy(_r, r2, 8); memcpy(_r + 8, r1, 8); - err = smp_e(smp->tfm_aes, k, _r); + err = smp_e(tfm_aes, k, _r); if (err) BT_ERR("Encrypt data error"); @@ -547,7 +541,7 @@ static u8 smp_confirm(struct smp_chan *smp) BT_DBG("conn %p", conn); - ret = smp_c1(smp, smp->tk, smp->prnd, smp->preq, smp->prsp, + ret = smp_c1(smp->tfm_aes, smp->tk, smp->prnd, smp->preq, smp->prsp, conn->hcon->init_addr_type, &conn->hcon->init_addr, 
conn->hcon->resp_addr_type, &conn->hcon->resp_addr, cp.confirm_val); @@ -578,7 +572,7 @@ static u8 smp_random(struct smp_chan *smp) BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave"); - ret = smp_c1(smp, smp->tk, smp->rrnd, smp->preq, smp->prsp, + ret = smp_c1(smp->tfm_aes, smp->tk, smp->rrnd, smp->preq, smp->prsp, hcon->init_addr_type, &hcon->init_addr, hcon->resp_addr_type, &hcon->resp_addr, confirm); if (ret) @@ -594,7 +588,7 @@ static u8 smp_random(struct smp_chan *smp) __le64 rand = 0; __le16 ediv = 0; - smp_s1(smp, smp->tk, smp->rrnd, smp->prnd, stk); + smp_s1(smp->tfm_aes, smp->tk, smp->rrnd, smp->prnd, stk); memset(stk + smp->enc_key_size, 0, SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size); @@ -613,7 +607,7 @@ static u8 smp_random(struct smp_chan *smp) smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd), smp->prnd); - smp_s1(smp, smp->tk, smp->prnd, smp->rrnd, stk); + smp_s1(smp->tfm_aes, smp->tk, smp->prnd, smp->rrnd, stk); memset(stk + smp->enc_key_size, 0, SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size); @@ -970,7 +964,7 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb) if (sec_level > conn->hcon->pending_sec_level) conn->hcon->pending_sec_level = sec_level; - /* If we need MITM check that it can be acheived */ + /* If we need MITM check that it can be achieved */ if (conn->hcon->pending_sec_level >= BT_SECURITY_HIGH) { u8 method; @@ -1028,7 +1022,7 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb) auth = rsp->auth_req & AUTH_REQ_MASK; - /* If we need MITM check that it can be acheived */ + /* If we need MITM check that it can be achieved */ if (conn->hcon->pending_sec_level >= BT_SECURITY_HIGH) { u8 method; |
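
One other pattern worth noting from the 6lowpan.c part of this diff: lowpan_psm_set() no longer applies the new PSM value directly in the debugfs write path. It allocates a small request carrying the value plus a work_struct and lets do_psm_set() run later from the system workqueue, where disconnecting peers and re-creating the listening channel can safely sleep. Below is a minimal sketch of that deferral pattern under assumed names (my_set_val, my_do_set, my_val_set); it mirrors the shape of the patch, not its exact code.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct my_set_val {
	struct work_struct work;
	u16 val;
};

static void my_do_set(struct work_struct *work)
{
	struct my_set_val *s = container_of(work, struct my_set_val, work);

	/* Potentially sleeping work runs here, in process context. */
	pr_info("applying value %u\n", s->val);

	kfree(s);	/* the worker owns and frees the request */
}

/* Same shape as a DEFINE_SIMPLE_ATTRIBUTE "set" callback. */
static int my_val_set(void *data, u64 val)
{
	struct my_set_val *s;

	s = kzalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	s->val = val;
	INIT_WORK(&s->work, my_do_set);
	schedule_work(&s->work);	/* return immediately; work runs later */

	return 0;
}
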