Diffstat (limited to 'net')
-rw-r--r--   net/bluetooth/Kconfig          |    1
-rw-r--r--   net/bluetooth/Makefile         |    1
-rw-r--r--   net/bluetooth/af_bluetooth.c   |    4
-rw-r--r--   net/bluetooth/eir.c            |   62
-rw-r--r--   net/bluetooth/eir.h            |    1
-rw-r--r--   net/bluetooth/hci_conn.c       |  900
-rw-r--r--   net/bluetooth/hci_core.c       |  569
-rw-r--r--   net/bluetooth/hci_event.c      |  529
-rw-r--r--   net/bluetooth/hci_request.c    |  429
-rw-r--r--   net/bluetooth/hci_request.h    |   16
-rw-r--r--   net/bluetooth/hci_sock.c       |   11
-rw-r--r--   net/bluetooth/hci_sync.c       |  628
-rw-r--r--   net/bluetooth/iso.c            | 1824
-rw-r--r--   net/bluetooth/l2cap_core.c     |    1
-rw-r--r--   net/bluetooth/lib.c            |   71
-rw-r--r--   net/bluetooth/mgmt.c           |  338
-rw-r--r--   net/bluetooth/msft.c           |  269
-rw-r--r--   net/bluetooth/msft.h           |    6
18 files changed, 4548 insertions, 1112 deletions
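The series below registers a new Bluetooth socket protocol, BTPROTO_ISO, backed by net/bluetooth/iso.c. Before the individual hunks, here is a minimal userspace sketch (not part of the patch) of how such a socket could be opened and bound. BTPROTO_ISO's value follows from the af_bluetooth.c hunk (the ISO entry is appended after BTPROTO_AVDTP and BT_MAX_PROTO becomes BTPROTO_LAST + 1); the sockaddr layout and the BDADDR_LE_PUBLIC value are assumptions declared locally for illustration only, not taken from a UAPI header.

/*
 * Illustrative sketch only: open and bind a BTPROTO_ISO socket.
 * The struct layout and constants below are assumptions for the sake
 * of a self-contained example, not the patch's authoritative UAPI.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>

#define BTPROTO_ISO      8   /* BTPROTO_AVDTP (7) + 1, per the af_bluetooth.c hunk */
#define BDADDR_LE_PUBLIC 1   /* assumed LE public address type */

typedef struct { uint8_t b[6]; } bdaddr_t;   /* 48-bit BD_ADDR */

struct sockaddr_iso {                        /* assumed layout */
	sa_family_t iso_family;
	bdaddr_t    iso_bdaddr;
	uint8_t     iso_bdaddr_type;
};

int main(void)
{
	struct sockaddr_iso addr;
	int sk;

	/* ISO channels carry whole SDUs, so a seqpacket socket is used here. */
	sk = socket(AF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_ISO);
	if (sk < 0) {
		perror("socket(BTPROTO_ISO)");
		return 1;
	}

	memset(&addr, 0, sizeof(addr));
	addr.iso_family = AF_BLUETOOTH;
	addr.iso_bdaddr_type = BDADDR_LE_PUBLIC;
	/* iso_bdaddr left all zeroes (BDADDR_ANY) for the local endpoint */

	if (bind(sk, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		perror("bind");

	close(sk);
	return 0;
}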
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig index e0ab4cd7afc3..ae3bdc6dfc92 100644 --- a/net/bluetooth/Kconfig +++ b/net/bluetooth/Kconfig @@ -29,6 +29,7 @@ menuconfig BT SCO audio links L2CAP (Logical Link Control and Adaptation Protocol) SMP (Security Manager Protocol) on LE (Low Energy) links + ISO isochronous links HCI Device drivers (Interface to the hardware) RFCOMM Module (RFCOMM Protocol) BNEP Module (Bluetooth Network Encapsulation Protocol) diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile index a52bba8500e1..0e7b7db42750 100644 --- a/net/bluetooth/Makefile +++ b/net/bluetooth/Makefile @@ -18,6 +18,7 @@ bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \ eir.o hci_sync.o bluetooth-$(CONFIG_BT_BREDR) += sco.o +bluetooth-$(CONFIG_BT_LE) += iso.o bluetooth-$(CONFIG_BT_HS) += a2mp.o amp.o bluetooth-$(CONFIG_BT_LEDS) += leds.o bluetooth-$(CONFIG_BT_MSFTEXT) += msft.o diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index b506409bb498..dc65974f5adb 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c @@ -38,7 +38,7 @@ #include "selftest.h" /* Bluetooth sockets */ -#define BT_MAX_PROTO 8 +#define BT_MAX_PROTO (BTPROTO_LAST + 1) static const struct net_proto_family *bt_proto[BT_MAX_PROTO]; static DEFINE_RWLOCK(bt_proto_lock); @@ -52,6 +52,7 @@ static const char *const bt_key_strings[BT_MAX_PROTO] = { "sk_lock-AF_BLUETOOTH-BTPROTO_CMTP", "sk_lock-AF_BLUETOOTH-BTPROTO_HIDP", "sk_lock-AF_BLUETOOTH-BTPROTO_AVDTP", + "sk_lock-AF_BLUETOOTH-BTPROTO_ISO", }; static struct lock_class_key bt_slock_key[BT_MAX_PROTO]; @@ -64,6 +65,7 @@ static const char *const bt_slock_key_strings[BT_MAX_PROTO] = { "slock-AF_BLUETOOTH-BTPROTO_CMTP", "slock-AF_BLUETOOTH-BTPROTO_HIDP", "slock-AF_BLUETOOTH-BTPROTO_AVDTP", + "slock-AF_BLUETOOTH-BTPROTO_ISO", }; void bt_sock_reclassify_lock(struct sock *sk, int proto) diff --git a/net/bluetooth/eir.c b/net/bluetooth/eir.c index 7d77fb00c2bf..8a85f6cdfbc1 100644 --- a/net/bluetooth/eir.c +++ b/net/bluetooth/eir.c @@ -13,6 +13,20 @@ #define PNP_INFO_SVCLASS_ID 0x1200 +static u8 eir_append_name(u8 *eir, u16 eir_len, u8 type, u8 *data, u8 data_len) +{ + u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1]; + + /* If data is already NULL terminated just pass it directly */ + if (data[data_len - 1] == '\0') + return eir_append_data(eir, eir_len, type, data, data_len); + + memcpy(name, data, HCI_MAX_SHORT_NAME_LENGTH); + name[HCI_MAX_SHORT_NAME_LENGTH] = '\0'; + + return eir_append_data(eir, eir_len, type, name, sizeof(name)); +} + u8 eir_append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len) { size_t short_len; @@ -23,29 +37,26 @@ u8 eir_append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len) return ad_len; /* use complete name if present and fits */ - complete_len = strlen(hdev->dev_name); + complete_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name)); if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH) - return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE, + return eir_append_name(ptr, ad_len, EIR_NAME_COMPLETE, hdev->dev_name, complete_len + 1); /* use short name if present */ - short_len = strlen(hdev->short_name); + short_len = strnlen(hdev->short_name, sizeof(hdev->short_name)); if (short_len) - return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, - hdev->short_name, short_len + 1); + return eir_append_name(ptr, ad_len, EIR_NAME_SHORT, + hdev->short_name, + short_len == HCI_MAX_SHORT_NAME_LENGTH ? 
+ short_len : short_len + 1); /* use shortened full name if present, we already know that name * is longer then HCI_MAX_SHORT_NAME_LENGTH */ - if (complete_len) { - u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1]; - - memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH); - name[HCI_MAX_SHORT_NAME_LENGTH] = '\0'; - - return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name, - sizeof(name)); - } + if (complete_len) + return eir_append_name(ptr, ad_len, EIR_NAME_SHORT, + hdev->dev_name, + HCI_MAX_SHORT_NAME_LENGTH); return ad_len; } @@ -181,7 +192,7 @@ void eir_create(struct hci_dev *hdev, u8 *data) u8 *ptr = data; size_t name_len; - name_len = strlen(hdev->dev_name); + name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name)); if (name_len > 0) { /* EIR Data type */ @@ -225,6 +236,27 @@ void eir_create(struct hci_dev *hdev, u8 *data) ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data)); } +u8 eir_create_per_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr) +{ + struct adv_info *adv = NULL; + u8 ad_len = 0; + + /* Return 0 when the current instance identifier is invalid. */ + if (instance) { + adv = hci_find_adv_instance(hdev, instance); + if (!adv) + return 0; + } + + if (adv) { + memcpy(ptr, adv->per_adv_data, adv->per_adv_data_len); + ad_len += adv->per_adv_data_len; + ptr += adv->per_adv_data_len; + } + + return ad_len; +} + u8 eir_create_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr) { struct adv_info *adv = NULL; diff --git a/net/bluetooth/eir.h b/net/bluetooth/eir.h index 62f2374078f2..0df19f2f4af9 100644 --- a/net/bluetooth/eir.h +++ b/net/bluetooth/eir.h @@ -11,6 +11,7 @@ void eir_create(struct hci_dev *hdev, u8 *data); u8 eir_create_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr); u8 eir_create_scan_rsp(struct hci_dev *hdev, u8 instance, u8 *ptr); +u8 eir_create_per_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr); u8 eir_append_local_name(struct hci_dev *hdev, u8 *eir, u8 ad_len); u8 eir_append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len); diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index ac06c9724c7f..f54864e19866 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -30,10 +30,13 @@ #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/l2cap.h> +#include <net/bluetooth/iso.h> +#include <net/bluetooth/mgmt.h> #include "hci_request.h" #include "smp.h" #include "a2mp.h" +#include "eir.h" struct sco_param { u16 pkt_type; @@ -118,10 +121,16 @@ static void hci_conn_cleanup(struct hci_conn *conn) if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags)) hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type); + if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags)) + hci_remove_link_key(hdev, &conn->dst); + hci_chan_list_flush(conn); hci_conn_hash_del(hdev, conn); + if (conn->cleanup) + conn->cleanup(conn); + if (conn->type == SCO_LINK || conn->type == ESCO_LINK) { switch (conn->setting & SCO_AIRMODE_MASK) { case SCO_AIRMODE_CVSD: @@ -678,6 +687,199 @@ static void le_conn_timeout(struct work_struct *work) hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM); } +struct iso_list_data { + union { + u8 cig; + u8 big; + }; + union { + u8 cis; + u8 bis; + u16 sync_handle; + }; + int count; + struct { + struct hci_cp_le_set_cig_params cp; + struct hci_cis_params cis[0x11]; + } pdu; +}; + +static void bis_list(struct hci_conn *conn, void *data) +{ + struct iso_list_data *d = data; + + /* Skip if not broadcast/ANY address */ + if (bacmp(&conn->dst, BDADDR_ANY)) + return; + 
+ if (d->big != conn->iso_qos.big || d->bis == BT_ISO_QOS_BIS_UNSET || + d->bis != conn->iso_qos.bis) + return; + + d->count++; +} + +static void find_bis(struct hci_conn *conn, void *data) +{ + struct iso_list_data *d = data; + + /* Ignore unicast */ + if (bacmp(&conn->dst, BDADDR_ANY)) + return; + + d->count++; +} + +static int terminate_big_sync(struct hci_dev *hdev, void *data) +{ + struct iso_list_data *d = data; + + bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", d->big, d->bis); + + hci_remove_ext_adv_instance_sync(hdev, d->bis, NULL); + + /* Check if ISO connection is a BIS and terminate BIG if there are + * no other connections using it. + */ + hci_conn_hash_list_state(hdev, find_bis, ISO_LINK, BT_CONNECTED, d); + if (d->count) + return 0; + + return hci_le_terminate_big_sync(hdev, d->big, + HCI_ERROR_LOCAL_HOST_TERM); +} + +static void terminate_big_destroy(struct hci_dev *hdev, void *data, int err) +{ + kfree(data); +} + +static int hci_le_terminate_big(struct hci_dev *hdev, u8 big, u8 bis) +{ + struct iso_list_data *d; + + bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", big, bis); + + d = kmalloc(sizeof(*d), GFP_KERNEL); + if (!d) + return -ENOMEM; + + memset(d, 0, sizeof(*d)); + d->big = big; + d->bis = bis; + + return hci_cmd_sync_queue(hdev, terminate_big_sync, d, + terminate_big_destroy); +} + +static int big_terminate_sync(struct hci_dev *hdev, void *data) +{ + struct iso_list_data *d = data; + + bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", d->big, + d->sync_handle); + + /* Check if ISO connection is a BIS and terminate BIG if there are + * no other connections using it. + */ + hci_conn_hash_list_state(hdev, find_bis, ISO_LINK, BT_CONNECTED, d); + if (d->count) + return 0; + + hci_le_big_terminate_sync(hdev, d->big); + + return hci_le_pa_terminate_sync(hdev, d->sync_handle); +} + +static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, u16 sync_handle) +{ + struct iso_list_data *d; + + bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", big, sync_handle); + + d = kmalloc(sizeof(*d), GFP_KERNEL); + if (!d) + return -ENOMEM; + + memset(d, 0, sizeof(*d)); + d->big = big; + d->sync_handle = sync_handle; + + return hci_cmd_sync_queue(hdev, big_terminate_sync, d, + terminate_big_destroy); +} + +/* Cleanup BIS connection + * + * Detects if there any BIS left connected in a BIG + * broadcaster: Remove advertising instance and terminate BIG. + * broadcaster receiver: Teminate BIG sync and terminate PA sync. + */ +static void bis_cleanup(struct hci_conn *conn) +{ + struct hci_dev *hdev = conn->hdev; + + bt_dev_dbg(hdev, "conn %p", conn); + + if (conn->role == HCI_ROLE_MASTER) { + if (!test_and_clear_bit(HCI_CONN_PER_ADV, &conn->flags)) + return; + + hci_le_terminate_big(hdev, conn->iso_qos.big, + conn->iso_qos.bis); + } else { + hci_le_big_terminate(hdev, conn->iso_qos.big, + conn->sync_handle); + } +} + +static int remove_cig_sync(struct hci_dev *hdev, void *data) +{ + u8 handle = PTR_ERR(data); + + return hci_le_remove_cig_sync(hdev, handle); +} + +static int hci_le_remove_cig(struct hci_dev *hdev, u8 handle) +{ + bt_dev_dbg(hdev, "handle 0x%2.2x", handle); + + return hci_cmd_sync_queue(hdev, remove_cig_sync, ERR_PTR(handle), NULL); +} + +static void find_cis(struct hci_conn *conn, void *data) +{ + struct iso_list_data *d = data; + + /* Ignore broadcast */ + if (!bacmp(&conn->dst, BDADDR_ANY)) + return; + + d->count++; +} + +/* Cleanup CIS connection: + * + * Detects if there any CIS left connected in a CIG and remove it. 
+ */ +static void cis_cleanup(struct hci_conn *conn) +{ + struct hci_dev *hdev = conn->hdev; + struct iso_list_data d; + + memset(&d, 0, sizeof(d)); + d.cig = conn->iso_qos.cig; + + /* Check if ISO connection is a CIS and remove CIG if there are + * no other connections using it. + */ + hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECTED, &d); + if (d.count) + return; + + hci_le_remove_cig(hdev, conn->iso_qos.cig); +} + struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst, u8 role) { @@ -722,6 +924,17 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst, /* conn->src should reflect the local identity address */ hci_copy_identity_address(hdev, &conn->src, &conn->src_type); break; + case ISO_LINK: + /* conn->src should reflect the local identity address */ + hci_copy_identity_address(hdev, &conn->src, &conn->src_type); + + /* set proper cleanup function */ + if (!bacmp(dst, BDADDR_ANY)) + conn->cleanup = bis_cleanup; + else if (conn->role == HCI_ROLE_MASTER) + conn->cleanup = cis_cleanup; + + break; case SCO_LINK: if (lmp_esco_capable(hdev)) conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) | @@ -947,7 +1160,7 @@ static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err) if (conn != hci_lookup_le_connect(hdev)) goto done; - hci_conn_failed(conn, err); + hci_conn_failed(conn, bt_status(err)); done: hci_dev_unlock(hdev); @@ -1093,6 +1306,108 @@ static int hci_explicit_conn_params_set(struct hci_dev *hdev, return 0; } +static int qos_set_big(struct hci_dev *hdev, struct bt_iso_qos *qos) +{ + struct iso_list_data data; + + /* Allocate a BIG if not set */ + if (qos->big == BT_ISO_QOS_BIG_UNSET) { + for (data.big = 0x00; data.big < 0xef; data.big++) { + data.count = 0; + data.bis = 0xff; + + hci_conn_hash_list_state(hdev, bis_list, ISO_LINK, + BT_BOUND, &data); + if (!data.count) + break; + } + + if (data.big == 0xef) + return -EADDRNOTAVAIL; + + /* Update BIG */ + qos->big = data.big; + } + + return 0; +} + +static int qos_set_bis(struct hci_dev *hdev, struct bt_iso_qos *qos) +{ + struct iso_list_data data; + + /* Allocate BIS if not set */ + if (qos->bis == BT_ISO_QOS_BIS_UNSET) { + /* Find an unused adv set to advertise BIS, skip instance 0x00 + * since it is reserved as general purpose set. 
+ */ + for (data.bis = 0x01; data.bis < hdev->le_num_of_adv_sets; + data.bis++) { + data.count = 0; + + hci_conn_hash_list_state(hdev, bis_list, ISO_LINK, + BT_BOUND, &data); + if (!data.count) + break; + } + + if (data.bis == hdev->le_num_of_adv_sets) + return -EADDRNOTAVAIL; + + /* Update BIS */ + qos->bis = data.bis; + } + + return 0; +} + +/* This function requires the caller holds hdev->lock */ +static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst, + struct bt_iso_qos *qos) +{ + struct hci_conn *conn; + struct iso_list_data data; + int err; + + /* Let's make sure that le is enabled.*/ + if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { + if (lmp_le_capable(hdev)) + return ERR_PTR(-ECONNREFUSED); + return ERR_PTR(-EOPNOTSUPP); + } + + err = qos_set_big(hdev, qos); + if (err) + return ERR_PTR(err); + + err = qos_set_bis(hdev, qos); + if (err) + return ERR_PTR(err); + + data.big = qos->big; + data.bis = qos->bis; + data.count = 0; + + /* Check if there is already a matching BIG/BIS */ + hci_conn_hash_list_state(hdev, bis_list, ISO_LINK, BT_BOUND, &data); + if (data.count) + return ERR_PTR(-EADDRINUSE); + + conn = hci_conn_hash_lookup_bis(hdev, dst, qos->big, qos->bis); + if (conn) + return ERR_PTR(-EADDRINUSE); + + conn = hci_conn_add(hdev, ISO_LINK, dst, HCI_ROLE_MASTER); + if (!conn) + return ERR_PTR(-ENOMEM); + + set_bit(HCI_CONN_PER_ADV, &conn->flags); + conn->state = BT_CONNECT; + + hci_conn_hold(conn); + return conn; +} + /* This function requires the caller holds hdev->lock */ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst, u8 dst_type, u8 sec_level, @@ -1229,6 +1544,589 @@ struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst, return sco; } +static void cis_add(struct iso_list_data *d, struct bt_iso_qos *qos) +{ + struct hci_cis_params *cis = &d->pdu.cis[d->pdu.cp.num_cis]; + + cis->cis_id = qos->cis; + cis->c_sdu = cpu_to_le16(qos->out.sdu); + cis->p_sdu = cpu_to_le16(qos->in.sdu); + cis->c_phy = qos->out.phy; + cis->p_phy = qos->in.phy; + cis->c_rtn = qos->out.rtn; + cis->p_rtn = qos->in.rtn; + + d->pdu.cp.num_cis++; +} + +static void cis_list(struct hci_conn *conn, void *data) +{ + struct iso_list_data *d = data; + + /* Skip if broadcast/ANY address */ + if (!bacmp(&conn->dst, BDADDR_ANY)) + return; + + if (d->cig != conn->iso_qos.cig || d->cis == BT_ISO_QOS_CIS_UNSET || + d->cis != conn->iso_qos.cis) + return; + + d->count++; + + if (d->pdu.cp.cig_id == BT_ISO_QOS_CIG_UNSET || + d->count >= ARRAY_SIZE(d->pdu.cis)) + return; + + cis_add(d, &conn->iso_qos); +} + +static int hci_le_create_big(struct hci_conn *conn, struct bt_iso_qos *qos) +{ + struct hci_dev *hdev = conn->hdev; + struct hci_cp_le_create_big cp; + + memset(&cp, 0, sizeof(cp)); + + cp.handle = qos->big; + cp.adv_handle = qos->bis; + cp.num_bis = 0x01; + hci_cpu_to_le24(qos->out.interval, cp.bis.sdu_interval); + cp.bis.sdu = cpu_to_le16(qos->out.sdu); + cp.bis.latency = cpu_to_le16(qos->out.latency); + cp.bis.rtn = qos->out.rtn; + cp.bis.phy = qos->out.phy; + cp.bis.packing = qos->packing; + cp.bis.framing = qos->framing; + cp.bis.encryption = 0x00; + memset(&cp.bis.bcode, 0, sizeof(cp.bis.bcode)); + + return hci_send_cmd(hdev, HCI_OP_LE_CREATE_BIG, sizeof(cp), &cp); +} + +static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos) +{ + struct hci_dev *hdev = conn->hdev; + struct iso_list_data data; + + memset(&data, 0, sizeof(data)); + + /* Allocate a CIG if not set */ + if (qos->cig == BT_ISO_QOS_CIG_UNSET) { + for (data.cig 
= 0x00; data.cig < 0xff; data.cig++) { + data.count = 0; + data.cis = 0xff; + + hci_conn_hash_list_state(hdev, cis_list, ISO_LINK, + BT_BOUND, &data); + if (data.count) + continue; + + hci_conn_hash_list_state(hdev, cis_list, ISO_LINK, + BT_CONNECTED, &data); + if (!data.count) + break; + } + + if (data.cig == 0xff) + return false; + + /* Update CIG */ + qos->cig = data.cig; + } + + data.pdu.cp.cig_id = qos->cig; + hci_cpu_to_le24(qos->out.interval, data.pdu.cp.c_interval); + hci_cpu_to_le24(qos->in.interval, data.pdu.cp.p_interval); + data.pdu.cp.sca = qos->sca; + data.pdu.cp.packing = qos->packing; + data.pdu.cp.framing = qos->framing; + data.pdu.cp.c_latency = cpu_to_le16(qos->out.latency); + data.pdu.cp.p_latency = cpu_to_le16(qos->in.latency); + + if (qos->cis != BT_ISO_QOS_CIS_UNSET) { + data.count = 0; + data.cig = qos->cig; + data.cis = qos->cis; + + hci_conn_hash_list_state(hdev, cis_list, ISO_LINK, BT_BOUND, + &data); + if (data.count) + return false; + + cis_add(&data, qos); + } + + /* Reprogram all CIS(s) with the same CIG */ + for (data.cig = qos->cig, data.cis = 0x00; data.cis < 0x11; + data.cis++) { + data.count = 0; + + hci_conn_hash_list_state(hdev, cis_list, ISO_LINK, BT_BOUND, + &data); + if (data.count) + continue; + + /* Allocate a CIS if not set */ + if (qos->cis == BT_ISO_QOS_CIS_UNSET) { + /* Update CIS */ + qos->cis = data.cis; + cis_add(&data, qos); + } + } + + if (qos->cis == BT_ISO_QOS_CIS_UNSET || !data.pdu.cp.num_cis) + return false; + + if (hci_send_cmd(hdev, HCI_OP_LE_SET_CIG_PARAMS, + sizeof(data.pdu.cp) + + (data.pdu.cp.num_cis * sizeof(*data.pdu.cis)), + &data.pdu) < 0) + return false; + + return true; +} + +struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst, + __u8 dst_type, struct bt_iso_qos *qos) +{ + struct hci_conn *cis; + + cis = hci_conn_hash_lookup_cis(hdev, dst, dst_type); + if (!cis) { + cis = hci_conn_add(hdev, ISO_LINK, dst, HCI_ROLE_MASTER); + if (!cis) + return ERR_PTR(-ENOMEM); + cis->cleanup = cis_cleanup; + } + + if (cis->state == BT_CONNECTED) + return cis; + + /* Check if CIS has been set and the settings matches */ + if (cis->state == BT_BOUND && + !memcmp(&cis->iso_qos, qos, sizeof(*qos))) + return cis; + + /* Update LINK PHYs according to QoS preference */ + cis->le_tx_phy = qos->out.phy; + cis->le_rx_phy = qos->in.phy; + + /* If output interval is not set use the input interval as it cannot be + * 0x000000. + */ + if (!qos->out.interval) + qos->out.interval = qos->in.interval; + + /* If input interval is not set use the output interval as it cannot be + * 0x000000. + */ + if (!qos->in.interval) + qos->in.interval = qos->out.interval; + + /* If output latency is not set use the input latency as it cannot be + * 0x0000. + */ + if (!qos->out.latency) + qos->out.latency = qos->in.latency; + + /* If input latency is not set use the output latency as it cannot be + * 0x0000. 
+ */ + if (!qos->in.latency) + qos->in.latency = qos->out.latency; + + /* Mirror PHYs that are disabled as SDU will be set to 0 */ + if (!qos->in.phy) + qos->in.phy = qos->out.phy; + + if (!qos->out.phy) + qos->out.phy = qos->in.phy; + + if (!hci_le_set_cig_params(cis, qos)) { + hci_conn_drop(cis); + return ERR_PTR(-EINVAL); + } + + cis->iso_qos = *qos; + cis->state = BT_BOUND; + + return cis; +} + +bool hci_iso_setup_path(struct hci_conn *conn) +{ + struct hci_dev *hdev = conn->hdev; + struct hci_cp_le_setup_iso_path cmd; + + memset(&cmd, 0, sizeof(cmd)); + + if (conn->iso_qos.out.sdu) { + cmd.handle = cpu_to_le16(conn->handle); + cmd.direction = 0x00; /* Input (Host to Controller) */ + cmd.path = 0x00; /* HCI path if enabled */ + cmd.codec = 0x03; /* Transparent Data */ + + if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd), + &cmd) < 0) + return false; + } + + if (conn->iso_qos.in.sdu) { + cmd.handle = cpu_to_le16(conn->handle); + cmd.direction = 0x01; /* Output (Controller to Host) */ + cmd.path = 0x00; /* HCI path if enabled */ + cmd.codec = 0x03; /* Transparent Data */ + + if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd), + &cmd) < 0) + return false; + } + + return true; +} + +static int hci_create_cis_sync(struct hci_dev *hdev, void *data) +{ + struct { + struct hci_cp_le_create_cis cp; + struct hci_cis cis[0x1f]; + } cmd; + struct hci_conn *conn = data; + u8 cig; + + memset(&cmd, 0, sizeof(cmd)); + cmd.cis[0].acl_handle = cpu_to_le16(conn->link->handle); + cmd.cis[0].cis_handle = cpu_to_le16(conn->handle); + cmd.cp.num_cis++; + cig = conn->iso_qos.cig; + + hci_dev_lock(hdev); + + rcu_read_lock(); + + list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) { + struct hci_cis *cis = &cmd.cis[cmd.cp.num_cis]; + + if (conn == data || conn->type != ISO_LINK || + conn->state == BT_CONNECTED || conn->iso_qos.cig != cig) + continue; + + /* Check if all CIS(s) belonging to a CIG are ready */ + if (conn->link->state != BT_CONNECTED || + conn->state != BT_CONNECT) { + cmd.cp.num_cis = 0; + break; + } + + /* Group all CIS with state BT_CONNECT since the spec don't + * allow to send them individually: + * + * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E + * page 2566: + * + * If the Host issues this command before all the + * HCI_LE_CIS_Established events from the previous use of the + * command have been generated, the Controller shall return the + * error code Command Disallowed (0x0C). 
+ */ + cis->acl_handle = cpu_to_le16(conn->link->handle); + cis->cis_handle = cpu_to_le16(conn->handle); + cmd.cp.num_cis++; + } + + rcu_read_unlock(); + + hci_dev_unlock(hdev); + + if (!cmd.cp.num_cis) + return 0; + + return hci_send_cmd(hdev, HCI_OP_LE_CREATE_CIS, sizeof(cmd.cp) + + sizeof(cmd.cis[0]) * cmd.cp.num_cis, &cmd); +} + +int hci_le_create_cis(struct hci_conn *conn) +{ + struct hci_conn *cis; + struct hci_dev *hdev = conn->hdev; + int err; + + switch (conn->type) { + case LE_LINK: + if (!conn->link || conn->state != BT_CONNECTED) + return -EINVAL; + cis = conn->link; + break; + case ISO_LINK: + cis = conn; + break; + default: + return -EINVAL; + } + + if (cis->state == BT_CONNECT) + return 0; + + /* Queue Create CIS */ + err = hci_cmd_sync_queue(hdev, hci_create_cis_sync, cis, NULL); + if (err) + return err; + + cis->state = BT_CONNECT; + + return 0; +} + +static void hci_iso_qos_setup(struct hci_dev *hdev, struct hci_conn *conn, + struct bt_iso_io_qos *qos, __u8 phy) +{ + /* Only set MTU if PHY is enabled */ + if (!qos->sdu && qos->phy) { + if (hdev->iso_mtu > 0) + qos->sdu = hdev->iso_mtu; + else if (hdev->le_mtu > 0) + qos->sdu = hdev->le_mtu; + else + qos->sdu = hdev->acl_mtu; + } + + /* Use the same PHY as ACL if set to any */ + if (qos->phy == BT_ISO_PHY_ANY) + qos->phy = phy; + + /* Use LE ACL connection interval if not set */ + if (!qos->interval) + /* ACL interval unit in 1.25 ms to us */ + qos->interval = conn->le_conn_interval * 1250; + + /* Use LE ACL connection latency if not set */ + if (!qos->latency) + qos->latency = conn->le_conn_latency; +} + +static struct hci_conn *hci_bind_bis(struct hci_conn *conn, + struct bt_iso_qos *qos) +{ + /* Update LINK PHYs according to QoS preference */ + conn->le_tx_phy = qos->out.phy; + conn->le_tx_phy = qos->out.phy; + conn->iso_qos = *qos; + conn->state = BT_BOUND; + + return conn; +} + +static int create_big_sync(struct hci_dev *hdev, void *data) +{ + struct hci_conn *conn = data; + struct bt_iso_qos *qos = &conn->iso_qos; + u16 interval, sync_interval = 0; + u32 flags = 0; + int err; + + if (qos->out.phy == 0x02) + flags |= MGMT_ADV_FLAG_SEC_2M; + + /* Align intervals */ + interval = qos->out.interval / 1250; + + if (qos->bis) + sync_interval = qos->sync_interval * 1600; + + err = hci_start_per_adv_sync(hdev, qos->bis, conn->le_per_adv_data_len, + conn->le_per_adv_data, flags, interval, + interval, sync_interval); + if (err) + return err; + + return hci_le_create_big(conn, &conn->iso_qos); +} + +static void create_pa_complete(struct hci_dev *hdev, void *data, int err) +{ + struct hci_cp_le_pa_create_sync *cp = data; + + bt_dev_dbg(hdev, ""); + + if (err) + bt_dev_err(hdev, "Unable to create PA: %d", err); + + kfree(cp); +} + +static int create_pa_sync(struct hci_dev *hdev, void *data) +{ + struct hci_cp_le_pa_create_sync *cp = data; + int err; + + err = __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC, + sizeof(*cp), cp, HCI_CMD_TIMEOUT); + if (err) { + hci_dev_clear_flag(hdev, HCI_PA_SYNC); + return err; + } + + return hci_update_passive_scan_sync(hdev); +} + +int hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type, + __u8 sid) +{ + struct hci_cp_le_pa_create_sync *cp; + + if (hci_dev_test_and_set_flag(hdev, HCI_PA_SYNC)) + return -EBUSY; + + cp = kmalloc(sizeof(*cp), GFP_KERNEL); + if (!cp) { + hci_dev_clear_flag(hdev, HCI_PA_SYNC); + return -ENOMEM; + } + + /* Convert from ISO socket address type to HCI address type */ + if (dst_type == BDADDR_LE_PUBLIC) + dst_type = ADDR_LE_DEV_PUBLIC; + else + 
dst_type = ADDR_LE_DEV_RANDOM; + + memset(cp, 0, sizeof(*cp)); + cp->sid = sid; + cp->addr_type = dst_type; + bacpy(&cp->addr, dst); + + /* Queue start pa_create_sync and scan */ + return hci_cmd_sync_queue(hdev, create_pa_sync, cp, create_pa_complete); +} + +int hci_le_big_create_sync(struct hci_dev *hdev, struct bt_iso_qos *qos, + __u16 sync_handle, __u8 num_bis, __u8 bis[]) +{ + struct _packed { + struct hci_cp_le_big_create_sync cp; + __u8 bis[0x11]; + } pdu; + int err; + + if (num_bis > sizeof(pdu.bis)) + return -EINVAL; + + err = qos_set_big(hdev, qos); + if (err) + return err; + + memset(&pdu, 0, sizeof(pdu)); + pdu.cp.handle = qos->big; + pdu.cp.sync_handle = cpu_to_le16(sync_handle); + pdu.cp.num_bis = num_bis; + memcpy(pdu.bis, bis, num_bis); + + return hci_send_cmd(hdev, HCI_OP_LE_BIG_CREATE_SYNC, + sizeof(pdu.cp) + num_bis, &pdu); +} + +static void create_big_complete(struct hci_dev *hdev, void *data, int err) +{ + struct hci_conn *conn = data; + + bt_dev_dbg(hdev, "conn %p", conn); + + if (err) { + bt_dev_err(hdev, "Unable to create BIG: %d", err); + hci_connect_cfm(conn, err); + hci_conn_del(conn); + } +} + +struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst, + __u8 dst_type, struct bt_iso_qos *qos, + __u8 base_len, __u8 *base) +{ + struct hci_conn *conn; + int err; + + /* We need hci_conn object using the BDADDR_ANY as dst */ + conn = hci_add_bis(hdev, dst, qos); + if (IS_ERR(conn)) + return conn; + + conn = hci_bind_bis(conn, qos); + if (!conn) { + hci_conn_drop(conn); + return ERR_PTR(-ENOMEM); + } + + /* Add Basic Announcement into Peridic Adv Data if BASE is set */ + if (base_len && base) { + base_len = eir_append_service_data(conn->le_per_adv_data, 0, + 0x1851, base, base_len); + conn->le_per_adv_data_len = base_len; + } + + /* Queue start periodic advertising and create BIG */ + err = hci_cmd_sync_queue(hdev, create_big_sync, conn, + create_big_complete); + if (err < 0) { + hci_conn_drop(conn); + return ERR_PTR(err); + } + + hci_iso_qos_setup(hdev, conn, &qos->out, + conn->le_tx_phy ? conn->le_tx_phy : + hdev->le_tx_def_phys); + + return conn; +} + +struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst, + __u8 dst_type, struct bt_iso_qos *qos) +{ + struct hci_conn *le; + struct hci_conn *cis; + + /* Convert from ISO socket address type to HCI address type */ + if (dst_type == BDADDR_LE_PUBLIC) + dst_type = ADDR_LE_DEV_PUBLIC; + else + dst_type = ADDR_LE_DEV_RANDOM; + + if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) + le = hci_connect_le(hdev, dst, dst_type, false, + BT_SECURITY_LOW, + HCI_LE_CONN_TIMEOUT, + HCI_ROLE_SLAVE); + else + le = hci_connect_le_scan(hdev, dst, dst_type, + BT_SECURITY_LOW, + HCI_LE_CONN_TIMEOUT, + CONN_REASON_ISO_CONNECT); + if (IS_ERR(le)) + return le; + + hci_iso_qos_setup(hdev, le, &qos->out, + le->le_tx_phy ? le->le_tx_phy : hdev->le_tx_def_phys); + hci_iso_qos_setup(hdev, le, &qos->in, + le->le_rx_phy ? le->le_rx_phy : hdev->le_rx_def_phys); + + cis = hci_bind_cis(hdev, dst, dst_type, qos); + if (IS_ERR(cis)) { + hci_conn_drop(le); + return cis; + } + + le->link = cis; + cis->link = le; + + hci_conn_hold(cis); + + /* If LE is already connected and CIS handle is already set proceed to + * Create CIS immediately. 
+ */ + if (le->state == BT_CONNECTED && cis->handle != HCI_CONN_HANDLE_UNSET) + hci_le_create_cis(le); + + return cis; +} + /* Check link security requirement */ int hci_conn_check_link_mode(struct hci_conn *conn) { diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index a0f99baafd35..b3a5a3cc9372 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -29,6 +29,7 @@ #include <linux/rfkill.h> #include <linux/debugfs.h> #include <linux/crypto.h> +#include <linux/kcov.h> #include <linux/property.h> #include <linux/suspend.h> #include <linux/wait.h> @@ -594,6 +595,11 @@ static int hci_dev_do_reset(struct hci_dev *hdev) skb_queue_purge(&hdev->rx_q); skb_queue_purge(&hdev->cmd_q); + /* Cancel these to avoid queueing non-chained pending work */ + hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE); + cancel_delayed_work(&hdev->cmd_timer); + cancel_delayed_work(&hdev->ncmd_timer); + /* Avoid potential lockdep warnings from the *_flush() calls by * ensuring the workqueue is empty up front. */ @@ -607,8 +613,13 @@ static int hci_dev_do_reset(struct hci_dev *hdev) if (hdev->flush) hdev->flush(hdev); + hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE); + atomic_set(&hdev->cmd_cnt, 1); - hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0; + hdev->acl_cnt = 0; + hdev->sco_cnt = 0; + hdev->le_cnt = 0; + hdev->iso_cnt = 0; ret = hci_reset_sync(hdev); @@ -1691,63 +1702,77 @@ static void adv_instance_rpa_expired(struct work_struct *work) } /* This function requires the caller holds hdev->lock */ -int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags, - u16 adv_data_len, u8 *adv_data, - u16 scan_rsp_len, u8 *scan_rsp_data, - u16 timeout, u16 duration, s8 tx_power, - u32 min_interval, u32 max_interval) +struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance, + u32 flags, u16 adv_data_len, u8 *adv_data, + u16 scan_rsp_len, u8 *scan_rsp_data, + u16 timeout, u16 duration, s8 tx_power, + u32 min_interval, u32 max_interval) { - struct adv_info *adv_instance; + struct adv_info *adv; - adv_instance = hci_find_adv_instance(hdev, instance); - if (adv_instance) { - memset(adv_instance->adv_data, 0, - sizeof(adv_instance->adv_data)); - memset(adv_instance->scan_rsp_data, 0, - sizeof(adv_instance->scan_rsp_data)); + adv = hci_find_adv_instance(hdev, instance); + if (adv) { + memset(adv->adv_data, 0, sizeof(adv->adv_data)); + memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data)); + memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data)); } else { if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets || instance < 1 || instance > hdev->le_num_of_adv_sets) - return -EOVERFLOW; + return ERR_PTR(-EOVERFLOW); - adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL); - if (!adv_instance) - return -ENOMEM; + adv = kzalloc(sizeof(*adv), GFP_KERNEL); + if (!adv) + return ERR_PTR(-ENOMEM); - adv_instance->pending = true; - adv_instance->instance = instance; - list_add(&adv_instance->list, &hdev->adv_instances); + adv->pending = true; + adv->instance = instance; + list_add(&adv->list, &hdev->adv_instances); hdev->adv_instance_cnt++; } - adv_instance->flags = flags; - adv_instance->adv_data_len = adv_data_len; - adv_instance->scan_rsp_len = scan_rsp_len; - adv_instance->min_interval = min_interval; - adv_instance->max_interval = max_interval; - adv_instance->tx_power = tx_power; + adv->flags = flags; + adv->min_interval = min_interval; + adv->max_interval = max_interval; + adv->tx_power = tx_power; - if (adv_data_len) - memcpy(adv_instance->adv_data, adv_data, 
adv_data_len); + hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data, + scan_rsp_len, scan_rsp_data); - if (scan_rsp_len) - memcpy(adv_instance->scan_rsp_data, - scan_rsp_data, scan_rsp_len); - - adv_instance->timeout = timeout; - adv_instance->remaining_time = timeout; + adv->timeout = timeout; + adv->remaining_time = timeout; if (duration == 0) - adv_instance->duration = hdev->def_multi_adv_rotation_duration; + adv->duration = hdev->def_multi_adv_rotation_duration; else - adv_instance->duration = duration; + adv->duration = duration; - INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb, - adv_instance_rpa_expired); + INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired); BT_DBG("%s for %dMR", hdev->name, instance); - return 0; + return adv; +} + +/* This function requires the caller holds hdev->lock */ +struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance, + u32 flags, u8 data_len, u8 *data, + u32 min_interval, u32 max_interval) +{ + struct adv_info *adv; + + adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL, + 0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE, + min_interval, max_interval); + if (IS_ERR(adv)) + return adv; + + adv->periodic = true; + adv->per_adv_data_len = data_len; + + if (data) + memcpy(adv->per_adv_data, data, data_len); + + return adv; } /* This function requires the caller holds hdev->lock */ @@ -1755,29 +1780,33 @@ int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance, u16 adv_data_len, u8 *adv_data, u16 scan_rsp_len, u8 *scan_rsp_data) { - struct adv_info *adv_instance; + struct adv_info *adv; - adv_instance = hci_find_adv_instance(hdev, instance); + adv = hci_find_adv_instance(hdev, instance); /* If advertisement doesn't exist, we can't modify its data */ - if (!adv_instance) + if (!adv) return -ENOENT; - if (adv_data_len) { - memset(adv_instance->adv_data, 0, - sizeof(adv_instance->adv_data)); - memcpy(adv_instance->adv_data, adv_data, adv_data_len); - adv_instance->adv_data_len = adv_data_len; + if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) { + memset(adv->adv_data, 0, sizeof(adv->adv_data)); + memcpy(adv->adv_data, adv_data, adv_data_len); + adv->adv_data_len = adv_data_len; + adv->adv_data_changed = true; } - if (scan_rsp_len) { - memset(adv_instance->scan_rsp_data, 0, - sizeof(adv_instance->scan_rsp_data)); - memcpy(adv_instance->scan_rsp_data, - scan_rsp_data, scan_rsp_len); - adv_instance->scan_rsp_len = scan_rsp_len; + if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) { + memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data)); + memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len); + adv->scan_rsp_len = scan_rsp_len; + adv->scan_rsp_changed = true; } + /* Mark as changed if there are flags which would affect it */ + if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) || + adv->flags & MGMT_ADV_FLAG_LOCAL_NAME) + adv->scan_rsp_changed = true; + return 0; } @@ -1874,151 +1903,120 @@ void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor) kfree(monitor); } -int hci_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status) -{ - return mgmt_add_adv_patterns_monitor_complete(hdev, status); -} - -int hci_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status) -{ - return mgmt_remove_adv_monitor_complete(hdev, status); -} - /* Assigns handle to a monitor, and if offloading is supported and power is on, * also attempts to forward the request to the controller. 
- * Returns true if request is forwarded (result is pending), false otherwise. - * This function requires the caller holds hdev->lock. + * This function requires the caller holds hci_req_sync_lock. */ -bool hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor, - int *err) +int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor) { int min, max, handle; + int status = 0; - *err = 0; + if (!monitor) + return -EINVAL; - if (!monitor) { - *err = -EINVAL; - return false; - } + hci_dev_lock(hdev); min = HCI_MIN_ADV_MONITOR_HANDLE; max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES; handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max, GFP_KERNEL); - if (handle < 0) { - *err = handle; - return false; - } + + hci_dev_unlock(hdev); + + if (handle < 0) + return handle; monitor->handle = handle; if (!hdev_is_powered(hdev)) - return false; + return status; switch (hci_get_adv_monitor_offload_ext(hdev)) { case HCI_ADV_MONITOR_EXT_NONE: - hci_update_passive_scan(hdev); - bt_dev_dbg(hdev, "%s add monitor status %d", hdev->name, *err); + bt_dev_dbg(hdev, "%s add monitor %d status %d", hdev->name, + monitor->handle, status); /* Message was not forwarded to controller - not an error */ - return false; + break; + case HCI_ADV_MONITOR_EXT_MSFT: - *err = msft_add_monitor_pattern(hdev, monitor); - bt_dev_dbg(hdev, "%s add monitor msft status %d", hdev->name, - *err); + status = msft_add_monitor_pattern(hdev, monitor); + bt_dev_dbg(hdev, "%s add monitor %d msft status %d", hdev->name, + monitor->handle, status); break; } - return (*err == 0); + return status; } /* Attempts to tell the controller and free the monitor. If somehow the * controller doesn't have a corresponding handle, remove anyway. - * Returns true if request is forwarded (result is pending), false otherwise. - * This function requires the caller holds hdev->lock. + * This function requires the caller holds hci_req_sync_lock. */ -static bool hci_remove_adv_monitor(struct hci_dev *hdev, - struct adv_monitor *monitor, - u16 handle, int *err) +static int hci_remove_adv_monitor(struct hci_dev *hdev, + struct adv_monitor *monitor) { - *err = 0; + int status = 0; switch (hci_get_adv_monitor_offload_ext(hdev)) { case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */ + bt_dev_dbg(hdev, "%s remove monitor %d status %d", hdev->name, + monitor->handle, status); goto free_monitor; + case HCI_ADV_MONITOR_EXT_MSFT: - *err = msft_remove_monitor(hdev, monitor, handle); + status = msft_remove_monitor(hdev, monitor); + bt_dev_dbg(hdev, "%s remove monitor %d msft status %d", + hdev->name, monitor->handle, status); break; } /* In case no matching handle registered, just free the monitor */ - if (*err == -ENOENT) + if (status == -ENOENT) goto free_monitor; - return (*err == 0); + return status; free_monitor: - if (*err == -ENOENT) + if (status == -ENOENT) bt_dev_warn(hdev, "Removing monitor with no matching handle %d", monitor->handle); hci_free_adv_monitor(hdev, monitor); - *err = 0; - return false; + return status; } -/* Returns true if request is forwarded (result is pending), false otherwise. - * This function requires the caller holds hdev->lock. 
- */ -bool hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle, int *err) +/* This function requires the caller holds hci_req_sync_lock */ +int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle) { struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle); - bool pending; - - if (!monitor) { - *err = -EINVAL; - return false; - } - - pending = hci_remove_adv_monitor(hdev, monitor, handle, err); - if (!*err && !pending) - hci_update_passive_scan(hdev); - bt_dev_dbg(hdev, "%s remove monitor handle %d, status %d, %spending", - hdev->name, handle, *err, pending ? "" : "not "); + if (!monitor) + return -EINVAL; - return pending; + return hci_remove_adv_monitor(hdev, monitor); } -/* Returns true if request is forwarded (result is pending), false otherwise. - * This function requires the caller holds hdev->lock. - */ -bool hci_remove_all_adv_monitor(struct hci_dev *hdev, int *err) +/* This function requires the caller holds hci_req_sync_lock */ +int hci_remove_all_adv_monitor(struct hci_dev *hdev) { struct adv_monitor *monitor; int idr_next_id = 0; - bool pending = false; - bool update = false; - - *err = 0; + int status = 0; - while (!*err && !pending) { + while (1) { monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id); if (!monitor) break; - pending = hci_remove_adv_monitor(hdev, monitor, 0, err); + status = hci_remove_adv_monitor(hdev, monitor); + if (status) + return status; - if (!*err && !pending) - update = true; + idr_next_id++; } - if (update) - hci_update_passive_scan(hdev); - - bt_dev_dbg(hdev, "%s remove all monitors status %d, %spending", - hdev->name, *err, pending ? "" : "not "); - - return pending; + return status; } /* This function requires the caller holds hdev->lock */ @@ -2640,12 +2638,9 @@ int hci_register_dev(struct hci_dev *hdev) hci_sock_dev_event(hdev, HCI_DEV_REG); hci_dev_hold(hdev); - if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) { - hdev->suspend_notifier.notifier_call = hci_suspend_notifier; - error = register_pm_notifier(&hdev->suspend_notifier); - if (error) - goto err_wqueue; - } + error = hci_register_suspend_notifier(hdev); + if (error) + goto err_wqueue; queue_work(hdev->req_workqueue, &hdev->power_on); @@ -2680,8 +2675,7 @@ void hci_unregister_dev(struct hci_dev *hdev) hci_cmd_sync_clear(hdev); - if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) - unregister_pm_notifier(&hdev->suspend_notifier); + hci_unregister_suspend_notifier(hdev); msft_unregister(hdev); @@ -2741,10 +2735,33 @@ void hci_release_dev(struct hci_dev *hdev) ida_simple_remove(&hci_index_ida, hdev->id); kfree_skb(hdev->sent_cmd); + kfree_skb(hdev->recv_event); kfree(hdev); } EXPORT_SYMBOL(hci_release_dev); +int hci_register_suspend_notifier(struct hci_dev *hdev) +{ + int ret = 0; + + if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) { + hdev->suspend_notifier.notifier_call = hci_suspend_notifier; + ret = register_pm_notifier(&hdev->suspend_notifier); + } + + return ret; +} + +int hci_unregister_suspend_notifier(struct hci_dev *hdev) +{ + int ret = 0; + + if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) + ret = unregister_pm_notifier(&hdev->suspend_notifier); + + return ret; +} + /* Suspend HCI device */ int hci_suspend_dev(struct hci_dev *hdev) { @@ -3025,6 +3042,37 @@ void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode) return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE; } +/* Get data from last received event */ +void *hci_recv_event_data(struct hci_dev *hdev, __u8 event) +{ + struct 
hci_event_hdr *hdr; + int offset; + + if (!hdev->recv_event) + return NULL; + + hdr = (void *)hdev->recv_event->data; + offset = sizeof(*hdr); + + if (hdr->evt != event) { + /* In case of LE metaevent check the subevent match */ + if (hdr->evt == HCI_EV_LE_META) { + struct hci_ev_le_meta *ev; + + ev = (void *)hdev->recv_event->data + offset; + offset += sizeof(*ev); + if (ev->subevent == event) + goto found; + } + return NULL; + } + +found: + bt_dev_dbg(hdev, "event 0x%2.2x", event); + + return hdev->recv_event->data + offset; +} + /* Send ACL data */ static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags) { @@ -3132,9 +3180,117 @@ void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb) queue_work(hdev->workqueue, &hdev->tx_work); } +/* Send ISO data */ +static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags) +{ + struct hci_iso_hdr *hdr; + int len = skb->len; + + skb_push(skb, HCI_ISO_HDR_SIZE); + skb_reset_transport_header(skb); + hdr = (struct hci_iso_hdr *)skb_transport_header(skb); + hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags)); + hdr->dlen = cpu_to_le16(len); +} + +static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue, + struct sk_buff *skb) +{ + struct hci_dev *hdev = conn->hdev; + struct sk_buff *list; + __u16 flags; + + skb->len = skb_headlen(skb); + skb->data_len = 0; + + hci_skb_pkt_type(skb) = HCI_ISODATA_PKT; + + list = skb_shinfo(skb)->frag_list; + + flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00); + hci_add_iso_hdr(skb, conn->handle, flags); + + if (!list) { + /* Non fragmented */ + BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len); + + skb_queue_tail(queue, skb); + } else { + /* Fragmented */ + BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); + + skb_shinfo(skb)->frag_list = NULL; + + __skb_queue_tail(queue, skb); + + do { + skb = list; list = list->next; + + hci_skb_pkt_type(skb) = HCI_ISODATA_PKT; + flags = hci_iso_flags_pack(list ? ISO_CONT : ISO_END, + 0x00); + hci_add_iso_hdr(skb, conn->handle, flags); + + BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); + + __skb_queue_tail(queue, skb); + } while (list); + } +} + +void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb) +{ + struct hci_dev *hdev = conn->hdev; + + BT_DBG("%s len %d", hdev->name, skb->len); + + hci_queue_iso(conn, &conn->data_q, skb); + + queue_work(hdev->workqueue, &hdev->tx_work); +} + /* ---- HCI TX task (outgoing data) ---- */ /* HCI Connection scheduler */ +static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote) +{ + struct hci_dev *hdev; + int cnt, q; + + if (!conn) { + *quote = 0; + return; + } + + hdev = conn->hdev; + + switch (conn->type) { + case ACL_LINK: + cnt = hdev->acl_cnt; + break; + case AMP_LINK: + cnt = hdev->block_cnt; + break; + case SCO_LINK: + case ESCO_LINK: + cnt = hdev->sco_cnt; + break; + case LE_LINK: + cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt; + break; + case ISO_LINK: + cnt = hdev->iso_mtu ? hdev->iso_cnt : + hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt; + break; + default: + cnt = 0; + bt_dev_err(hdev, "unknown link type %d", conn->type); + } + + q = cnt / num; + *quote = q ? 
q : 1; +} + static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote) { @@ -3167,29 +3323,7 @@ static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, rcu_read_unlock(); - if (conn) { - int cnt, q; - - switch (conn->type) { - case ACL_LINK: - cnt = hdev->acl_cnt; - break; - case SCO_LINK: - case ESCO_LINK: - cnt = hdev->sco_cnt; - break; - case LE_LINK: - cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt; - break; - default: - cnt = 0; - bt_dev_err(hdev, "unknown link type %d", conn->type); - } - - q = cnt / num; - *quote = q ? q : 1; - } else - *quote = 0; + hci_quote_sent(conn, num, quote); BT_DBG("conn %p quote %d", conn, *quote); return conn; @@ -3223,7 +3357,7 @@ static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type, struct hci_chan *chan = NULL; unsigned int num = 0, min = ~0, cur_prio = 0; struct hci_conn *conn; - int cnt, q, conn_num = 0; + int conn_num = 0; BT_DBG("%s", hdev->name); @@ -3273,27 +3407,8 @@ static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type, if (!chan) return NULL; - switch (chan->conn->type) { - case ACL_LINK: - cnt = hdev->acl_cnt; - break; - case AMP_LINK: - cnt = hdev->block_cnt; - break; - case SCO_LINK: - case ESCO_LINK: - cnt = hdev->sco_cnt; - break; - case LE_LINK: - cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt; - break; - default: - cnt = 0; - bt_dev_err(hdev, "unknown link type %d", chan->conn->type); - } + hci_quote_sent(chan->conn, num, quote); - q = cnt / num; - *quote = q ? q : 1; BT_DBG("chan %p quote %d", chan, *quote); return chan; } @@ -3582,18 +3697,46 @@ static void hci_sched_le(struct hci_dev *hdev) hci_prio_recalculate(hdev, LE_LINK); } +/* Schedule CIS */ +static void hci_sched_iso(struct hci_dev *hdev) +{ + struct hci_conn *conn; + struct sk_buff *skb; + int quote, *cnt; + + BT_DBG("%s", hdev->name); + + if (!hci_conn_num(hdev, ISO_LINK)) + return; + + cnt = hdev->iso_pkts ? &hdev->iso_cnt : + hdev->le_pkts ? 
&hdev->le_cnt : &hdev->acl_cnt; + while (*cnt && (conn = hci_low_sent(hdev, ISO_LINK, "e))) { + while (quote-- && (skb = skb_dequeue(&conn->data_q))) { + BT_DBG("skb %p len %d", skb, skb->len); + hci_send_frame(hdev, skb); + + conn->sent++; + if (conn->sent == ~0) + conn->sent = 0; + (*cnt)--; + } + } +} + static void hci_tx_work(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work); struct sk_buff *skb; - BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt, - hdev->sco_cnt, hdev->le_cnt); + BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt, + hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt); if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { /* Schedule queues and send stuff to HCI driver */ hci_sched_sco(hdev); hci_sched_esco(hdev); + hci_sched_iso(hdev); hci_sched_acl(hdev); hci_sched_le(hdev); } @@ -3676,6 +3819,43 @@ static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb) kfree_skb(skb); } +static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_iso_hdr *hdr; + struct hci_conn *conn; + __u16 handle, flags; + + hdr = skb_pull_data(skb, sizeof(*hdr)); + if (!hdr) { + bt_dev_err(hdev, "ISO packet too small"); + goto drop; + } + + handle = __le16_to_cpu(hdr->handle); + flags = hci_flags(handle); + handle = hci_handle(handle); + + bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len, + handle, flags); + + hci_dev_lock(hdev); + conn = hci_conn_hash_lookup_handle(hdev, handle); + hci_dev_unlock(hdev); + + if (!conn) { + bt_dev_err(hdev, "ISO packet for unknown connection handle %d", + handle); + goto drop; + } + + /* Send to upper protocol */ + iso_recv(conn, skb, flags); + return; + +drop: + kfree_skb(skb); +} + static bool hci_req_is_complete(struct hci_dev *hdev) { struct sk_buff *skb; @@ -3781,7 +3961,14 @@ static void hci_rx_work(struct work_struct *work) BT_DBG("%s", hdev->name); - while ((skb = skb_dequeue(&hdev->rx_q))) { + /* The kcov_remote functions used for collecting packet parsing + * coverage information from this background thread and associate + * the coverage with the syscall's thread which originally injected + * the packet. This helps fuzzing the kernel. 
+ */ + for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) { + kcov_remote_start_common(skb_get_kcov_handle(skb)); + /* Send copy to monitor */ hci_send_to_monitor(hdev, skb); @@ -3830,6 +4017,11 @@ static void hci_rx_work(struct work_struct *work) hci_scodata_packet(hdev, skb); break; + case HCI_ISODATA_PKT: + BT_DBG("%s ISO data packet", hdev->name); + hci_isodata_packet(hdev, skb); + break; + default: kfree_skb(skb); break; @@ -3864,7 +4056,8 @@ static void hci_cmd_work(struct work_struct *work) if (res < 0) __hci_cmd_sync_cancel(hdev, -res); - if (test_bit(HCI_RESET, &hdev->flags)) + if (test_bit(HCI_RESET, &hdev->flags) || + hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE)) cancel_delayed_work(&hdev->cmd_timer); else schedule_delayed_work(&hdev->cmd_timer, diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index af17dfb20e01..ea33dd0cd478 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -2741,7 +2741,7 @@ static void hci_cs_disconnect(struct hci_dev *hdev, u8 status) mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags); if (conn->type == ACL_LINK) { - if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags)) + if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags)) hci_remove_link_key(hdev, &conn->dst); } @@ -3173,7 +3173,7 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data, hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES, sizeof(cp), &cp); - hci_req_update_scan(hdev); + hci_update_scan(hdev); } /* Set packet type for incoming connection */ @@ -3368,10 +3368,10 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data, reason, mgmt_connected); if (conn->type == ACL_LINK) { - if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags)) + if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags)) hci_remove_link_key(hdev, &conn->dst); - hci_req_update_scan(hdev); + hci_update_scan(hdev); } params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); @@ -3768,12 +3768,182 @@ static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd) cancel_delayed_work(&hdev->ncmd_timer); atomic_set(&hdev->cmd_cnt, 1); } else { - schedule_delayed_work(&hdev->ncmd_timer, - HCI_NCMD_TIMEOUT); + if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE)) + schedule_delayed_work(&hdev->ncmd_timer, + HCI_NCMD_TIMEOUT); } } } +static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_le_read_buffer_size_v2 *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + hdev->le_mtu = __le16_to_cpu(rp->acl_mtu); + hdev->le_pkts = rp->acl_max_pkt; + hdev->iso_mtu = __le16_to_cpu(rp->iso_mtu); + hdev->iso_pkts = rp->iso_max_pkt; + + hdev->le_cnt = hdev->le_pkts; + hdev->iso_cnt = hdev->iso_pkts; + + BT_DBG("%s acl mtu %d:%d iso mtu %d:%d", hdev->name, hdev->acl_mtu, + hdev->acl_pkts, hdev->iso_mtu, hdev->iso_pkts); + + return rp->status; +} + +static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_le_set_cig_params *rp = data; + struct hci_conn *conn; + int i = 0; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + hci_dev_lock(hdev); + + if (rp->status) { + while ((conn = hci_conn_hash_lookup_cig(hdev, rp->cig_id))) { + conn->state = BT_CLOSED; + hci_connect_cfm(conn, rp->status); + hci_conn_del(conn); + } + goto unlock; + } + + rcu_read_lock(); + + list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) { + if (conn->type != ISO_LINK || conn->iso_qos.cig 
!= rp->cig_id || + conn->state == BT_CONNECTED) + continue; + + conn->handle = __le16_to_cpu(rp->handle[i++]); + + bt_dev_dbg(hdev, "%p handle 0x%4.4x link %p", conn, + conn->handle, conn->link); + + /* Create CIS if LE is already connected */ + if (conn->link && conn->link->state == BT_CONNECTED) + hci_le_create_cis(conn->link); + + if (i == rp->num_handles) + break; + } + + rcu_read_unlock(); + +unlock: + hci_dev_unlock(hdev); + + return rp->status; +} + +static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_le_setup_iso_path *rp = data; + struct hci_cp_le_setup_iso_path *cp; + struct hci_conn *conn; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SETUP_ISO_PATH); + if (!cp) + return rp->status; + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); + if (!conn) + goto unlock; + + if (rp->status) { + hci_connect_cfm(conn, rp->status); + hci_conn_del(conn); + goto unlock; + } + + switch (cp->direction) { + /* Input (Host to Controller) */ + case 0x00: + /* Only confirm connection if output only */ + if (conn->iso_qos.out.sdu && !conn->iso_qos.in.sdu) + hci_connect_cfm(conn, rp->status); + break; + /* Output (Controller to Host) */ + case 0x01: + /* Confirm connection since conn->iso_qos is always configured + * last. + */ + hci_connect_cfm(conn, rp->status); + break; + } + +unlock: + hci_dev_unlock(hdev); + return rp->status; +} + +static void hci_cs_le_create_big(struct hci_dev *hdev, u8 status) +{ + bt_dev_dbg(hdev, "status 0x%2.2x", status); +} + +static u8 hci_cc_set_per_adv_param(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_status *rp = data; + struct hci_cp_le_set_per_adv_params *cp; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS); + if (!cp) + return rp->status; + + /* TODO: set the conn state */ + return rp->status; +} + +static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_status *rp = data; + __u8 *sent; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE); + if (!sent) + return rp->status; + + hci_dev_lock(hdev); + + if (*sent) + hci_dev_set_flag(hdev, HCI_LE_PER_ADV); + else + hci_dev_clear_flag(hdev, HCI_LE_PER_ADV); + + hci_dev_unlock(hdev); + + return rp->status; +} + #define HCI_CC_VL(_op, _func, _min, _max) \ { \ .op = _op, \ @@ -3947,9 +4117,18 @@ static const struct hci_cc { hci_cc_le_set_adv_set_random_addr), HCI_CC_STATUS(HCI_OP_LE_REMOVE_ADV_SET, hci_cc_le_remove_adv_set), HCI_CC_STATUS(HCI_OP_LE_CLEAR_ADV_SETS, hci_cc_le_clear_adv_sets), + HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_PARAMS, hci_cc_set_per_adv_param), + HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_ENABLE, + hci_cc_le_set_per_adv_enable), HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, hci_cc_le_read_transmit_power, sizeof(struct hci_rp_le_read_transmit_power)), - HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode) + HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode), + HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE_V2, hci_cc_le_read_buffer_size_v2, + sizeof(struct hci_rp_le_read_buffer_size_v2)), + HCI_CC_VL(HCI_OP_LE_SET_CIG_PARAMS, hci_cc_le_set_cig_params, + sizeof(struct hci_rp_le_set_cig_params), HCI_MAX_EVENT_SIZE), + HCI_CC(HCI_OP_LE_SETUP_ISO_PATH, 
hci_cc_le_setup_iso_path, + sizeof(struct hci_rp_le_setup_iso_path)), }; static u8 hci_cc_func(struct hci_dev *hdev, const struct hci_cc *cc, @@ -4012,6 +4191,40 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data, queue_work(hdev->workqueue, &hdev->cmd_work); } +static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status) +{ + struct hci_cp_le_create_cis *cp; + int i; + + bt_dev_dbg(hdev, "status 0x%2.2x", status); + + if (!status) + return; + + cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CIS); + if (!cp) + return; + + hci_dev_lock(hdev); + + /* Remove connection if command failed */ + for (i = 0; cp->num_cis; cp->num_cis--, i++) { + struct hci_conn *conn; + u16 handle; + + handle = __le16_to_cpu(cp->cis[i].cis_handle); + + conn = hci_conn_hash_lookup_handle(hdev, handle); + if (conn) { + conn->state = BT_CLOSED; + hci_connect_cfm(conn, status); + hci_conn_del(conn); + } + } + + hci_dev_unlock(hdev); +} + #define HCI_CS(_op, _func) \ { \ .op = _op, \ @@ -4041,7 +4254,9 @@ static const struct hci_cs { HCI_CS(HCI_OP_LE_CREATE_CONN, hci_cs_le_create_conn), HCI_CS(HCI_OP_LE_READ_REMOTE_FEATURES, hci_cs_le_read_remote_features), HCI_CS(HCI_OP_LE_START_ENC, hci_cs_le_start_enc), - HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn) + HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn), + HCI_CS(HCI_OP_LE_CREATE_CIS, hci_cs_le_create_cis), + HCI_CS(HCI_OP_LE_CREATE_BIG, hci_cs_le_create_big), }; static void hci_cmd_status_evt(struct hci_dev *hdev, void *data, @@ -4177,6 +4392,22 @@ static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data, hdev->sco_cnt = hdev->sco_pkts; break; + case ISO_LINK: + if (hdev->iso_pkts) { + hdev->iso_cnt += count; + if (hdev->iso_cnt > hdev->iso_pkts) + hdev->iso_cnt = hdev->iso_pkts; + } else if (hdev->le_pkts) { + hdev->le_cnt += count; + if (hdev->le_cnt > hdev->le_pkts) + hdev->le_cnt = hdev->le_pkts; + } else { + hdev->acl_cnt += count; + if (hdev->acl_cnt > hdev->acl_pkts) + hdev->acl_cnt = hdev->acl_pkts; + } + break; + default: bt_dev_err(hdev, "unknown type %d conn %p", conn->type, conn); @@ -6249,6 +6480,39 @@ static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data, hci_dev_unlock(hdev); } +static int hci_le_pa_term_sync(struct hci_dev *hdev, __le16 handle) +{ + struct hci_cp_le_pa_term_sync cp; + + memset(&cp, 0, sizeof(cp)); + cp.handle = handle; + + return hci_send_cmd(hdev, HCI_OP_LE_PA_TERM_SYNC, sizeof(cp), &cp); +} + +static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_le_pa_sync_established *ev = data; + int mask = hdev->link_mode; + __u8 flags = 0; + + bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); + + if (ev->status) + return; + + hci_dev_lock(hdev); + + hci_dev_clear_flag(hdev, HCI_PA_SYNC); + + mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ISO_LINK, &flags); + if (!(mask & HCI_LM_ACCEPT)) + hci_le_pa_term_sync(hdev, ev->handle); + + hci_dev_unlock(hdev); +} + static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { @@ -6479,6 +6743,226 @@ unlock: hci_dev_unlock(hdev); } +static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_evt_le_cis_established *ev = data; + struct hci_conn *conn; + u16 handle = __le16_to_cpu(ev->handle); + + bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, handle); + if (!conn) { + bt_dev_err(hdev, + "Unable to find 
connection with handle 0x%4.4x", + handle); + goto unlock; + } + + if (conn->role == HCI_ROLE_SLAVE) { + __le32 interval; + + memset(&interval, 0, sizeof(interval)); + + memcpy(&interval, ev->c_latency, sizeof(ev->c_latency)); + conn->iso_qos.in.interval = le32_to_cpu(interval); + memcpy(&interval, ev->p_latency, sizeof(ev->p_latency)); + conn->iso_qos.out.interval = le32_to_cpu(interval); + conn->iso_qos.in.latency = le16_to_cpu(ev->interval); + conn->iso_qos.out.latency = le16_to_cpu(ev->interval); + conn->iso_qos.in.sdu = le16_to_cpu(ev->c_mtu); + conn->iso_qos.out.sdu = le16_to_cpu(ev->p_mtu); + conn->iso_qos.in.phy = ev->c_phy; + conn->iso_qos.out.phy = ev->p_phy; + } + + if (!ev->status) { + conn->state = BT_CONNECTED; + hci_debugfs_create_conn(conn); + hci_conn_add_sysfs(conn); + hci_iso_setup_path(conn); + goto unlock; + } + + hci_connect_cfm(conn, ev->status); + hci_conn_del(conn); + +unlock: + hci_dev_unlock(hdev); +} + +static void hci_le_reject_cis(struct hci_dev *hdev, __le16 handle) +{ + struct hci_cp_le_reject_cis cp; + + memset(&cp, 0, sizeof(cp)); + cp.handle = handle; + cp.reason = HCI_ERROR_REJ_BAD_ADDR; + hci_send_cmd(hdev, HCI_OP_LE_REJECT_CIS, sizeof(cp), &cp); +} + +static void hci_le_accept_cis(struct hci_dev *hdev, __le16 handle) +{ + struct hci_cp_le_accept_cis cp; + + memset(&cp, 0, sizeof(cp)); + cp.handle = handle; + hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp); +} + +static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_evt_le_cis_req *ev = data; + u16 acl_handle, cis_handle; + struct hci_conn *acl, *cis; + int mask; + __u8 flags = 0; + + acl_handle = __le16_to_cpu(ev->acl_handle); + cis_handle = __le16_to_cpu(ev->cis_handle); + + bt_dev_dbg(hdev, "acl 0x%4.4x handle 0x%4.4x cig 0x%2.2x cis 0x%2.2x", + acl_handle, cis_handle, ev->cig_id, ev->cis_id); + + hci_dev_lock(hdev); + + acl = hci_conn_hash_lookup_handle(hdev, acl_handle); + if (!acl) + goto unlock; + + mask = hci_proto_connect_ind(hdev, &acl->dst, ISO_LINK, &flags); + if (!(mask & HCI_LM_ACCEPT)) { + hci_le_reject_cis(hdev, ev->cis_handle); + goto unlock; + } + + cis = hci_conn_hash_lookup_handle(hdev, cis_handle); + if (!cis) { + cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE); + if (!cis) { + hci_le_reject_cis(hdev, ev->cis_handle); + goto unlock; + } + cis->handle = cis_handle; + } + + cis->iso_qos.cig = ev->cig_id; + cis->iso_qos.cis = ev->cis_id; + + if (!(flags & HCI_PROTO_DEFER)) { + hci_le_accept_cis(hdev, ev->cis_handle); + } else { + cis->state = BT_CONNECT2; + hci_connect_cfm(cis, 0); + } + +unlock: + hci_dev_unlock(hdev); +} + +static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_evt_le_create_big_complete *ev = data; + struct hci_conn *conn; + + BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); + + if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_CREATE_BIG_COMPLETE, + flex_array_size(ev, bis_handle, ev->num_bis))) + return; + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_big(hdev, ev->handle); + if (!conn) + goto unlock; + + if (ev->num_bis) + conn->handle = __le16_to_cpu(ev->bis_handle[0]); + + if (!ev->status) { + conn->state = BT_CONNECTED; + hci_debugfs_create_conn(conn); + hci_conn_add_sysfs(conn); + hci_iso_setup_path(conn); + goto unlock; + } + + hci_connect_cfm(conn, ev->status); + hci_conn_del(conn); + +unlock: + hci_dev_unlock(hdev); +} + +static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data, + struct sk_buff 
*skb) +{ + struct hci_evt_le_big_sync_estabilished *ev = data; + struct hci_conn *bis; + int i; + + bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); + + if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABILISHED, + flex_array_size(ev, bis, ev->num_bis))) + return; + + if (ev->status) + return; + + hci_dev_lock(hdev); + + for (i = 0; i < ev->num_bis; i++) { + u16 handle = le16_to_cpu(ev->bis[i]); + __le32 interval; + + bis = hci_conn_hash_lookup_handle(hdev, handle); + if (!bis) { + bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY, + HCI_ROLE_SLAVE); + if (!bis) + continue; + bis->handle = handle; + } + + bis->iso_qos.big = ev->handle; + memset(&interval, 0, sizeof(interval)); + memcpy(&interval, ev->latency, sizeof(ev->latency)); + bis->iso_qos.in.interval = le32_to_cpu(interval); + /* Convert ISO Interval (1.25 ms slots) to latency (ms) */ + bis->iso_qos.in.latency = le16_to_cpu(ev->interval) * 125 / 100; + bis->iso_qos.in.sdu = le16_to_cpu(ev->max_pdu); + + hci_connect_cfm(bis, ev->status); + } + + hci_dev_unlock(hdev); +} + +static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_evt_le_big_info_adv_report *ev = data; + int mask = hdev->link_mode; + __u8 flags = 0; + + bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle)); + + hci_dev_lock(hdev); + + mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags); + if (!(mask & HCI_LM_ACCEPT)) + hci_le_pa_term_sync(hdev, ev->sync_handle); + + hci_dev_unlock(hdev); +} + #define HCI_LE_EV_VL(_op, _func, _min_len, _max_len) \ [_op] = { \ .func = _func, \ @@ -6539,9 +7023,34 @@ static const struct hci_le_ev { HCI_LE_EV_VL(HCI_EV_LE_EXT_ADV_REPORT, hci_le_ext_adv_report_evt, sizeof(struct hci_ev_le_ext_adv_report), HCI_MAX_EVENT_SIZE), + /* [0x0e = HCI_EV_LE_PA_SYNC_ESTABLISHED] */ + HCI_LE_EV(HCI_EV_LE_PA_SYNC_ESTABLISHED, + hci_le_pa_sync_estabilished_evt, + sizeof(struct hci_ev_le_pa_sync_established)), /* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */ HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt, sizeof(struct hci_evt_le_ext_adv_set_term)), + /* [0x19 = HCI_EVT_LE_CIS_ESTABLISHED] */ + HCI_LE_EV(HCI_EVT_LE_CIS_ESTABLISHED, hci_le_cis_estabilished_evt, + sizeof(struct hci_evt_le_cis_established)), + /* [0x1a = HCI_EVT_LE_CIS_REQ] */ + HCI_LE_EV(HCI_EVT_LE_CIS_REQ, hci_le_cis_req_evt, + sizeof(struct hci_evt_le_cis_req)), + /* [0x1b = HCI_EVT_LE_CREATE_BIG_COMPLETE] */ + HCI_LE_EV_VL(HCI_EVT_LE_CREATE_BIG_COMPLETE, + hci_le_create_big_complete_evt, + sizeof(struct hci_evt_le_create_big_complete), + HCI_MAX_EVENT_SIZE), + /* [0x1d = HCI_EV_LE_BIG_SYNC_ESTABILISHED] */ + HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABILISHED, + hci_le_big_sync_established_evt, + sizeof(struct hci_evt_le_big_sync_estabilished), + HCI_MAX_EVENT_SIZE), + /* [0x22 = HCI_EVT_LE_BIG_INFO_ADV_REPORT] */ + HCI_LE_EV_VL(HCI_EVT_LE_BIG_INFO_ADV_REPORT, + hci_le_big_info_adv_report_evt, + sizeof(struct hci_evt_le_big_info_adv_report), + HCI_MAX_EVENT_SIZE), }; static void hci_le_meta_evt(struct hci_dev *hdev, void *data, @@ -6580,7 +7089,6 @@ static void hci_le_meta_evt(struct hci_dev *hdev, void *data, if (skb->len > subev->max_len) bt_dev_warn(hdev, "unexpected subevent 0x%2.2x length: %u > %u", ev->subevent, skb->len, subev->max_len); - data = hci_le_ev_skb_pull(hdev, skb, ev->subevent, subev->min_len); if (!data) return; @@ -6935,6 +7443,9 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) goto done; } + kfree_skb(hdev->recv_event); + hdev->recv_event = 
skb_clone(skb, GFP_KERNEL); + event = hdr->evt; if (!event) { bt_dev_warn(hdev, "Received unexpected HCI Event 0x%2.2x", diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index 38ecaf9264ee..e64d558e5d69 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -827,7 +827,6 @@ void __hci_req_disable_advertising(struct hci_request *req) { if (ext_adv_capable(req->hdev)) { __hci_req_disable_ext_adv_instance(req, 0x00); - } else { u8 enable = 0x00; @@ -1338,15 +1337,15 @@ int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance) bdaddr_t random_addr; u8 own_addr_type; int err; - struct adv_info *adv_instance; - bool secondary_adv; + struct adv_info *adv; + bool secondary_adv, require_privacy; if (instance > 0) { - adv_instance = hci_find_adv_instance(hdev, instance); - if (!adv_instance) + adv = hci_find_adv_instance(hdev, instance); + if (!adv) return -EINVAL; } else { - adv_instance = NULL; + adv = NULL; } flags = hci_adv_instance_flags(hdev, instance); @@ -1364,18 +1363,24 @@ int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance) * advertising is used. In that case it is fine to use a * non-resolvable private address. */ - err = hci_get_random_address(hdev, !connectable, - adv_use_rpa(hdev, flags), adv_instance, + require_privacy = !connectable; + + /* Don't require privacy for periodic adv? */ + if (adv && adv->periodic) + require_privacy = false; + + err = hci_get_random_address(hdev, require_privacy, + adv_use_rpa(hdev, flags), adv, &own_addr_type, &random_addr); if (err < 0) return err; memset(&cp, 0, sizeof(cp)); - if (adv_instance) { - hci_cpu_to_le24(adv_instance->min_interval, cp.min_interval); - hci_cpu_to_le24(adv_instance->max_interval, cp.max_interval); - cp.tx_power = adv_instance->tx_power; + if (adv) { + hci_cpu_to_le24(adv->min_interval, cp.min_interval); + hci_cpu_to_le24(adv->max_interval, cp.max_interval); + cp.tx_power = adv->tx_power; } else { hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval); hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval); @@ -1396,7 +1401,8 @@ int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance) else cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND); } else { - if (secondary_adv) + /* Secondary and periodic cannot use legacy PDUs */ + if (secondary_adv || (adv && adv->periodic)) cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND); else cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND); @@ -1426,8 +1432,8 @@ int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance) struct hci_cp_le_set_adv_set_rand_addr cp; /* Check if random address need to be updated */ - if (adv_instance) { - if (!bacmp(&random_addr, &adv_instance->random_addr)) + if (adv) { + if (!bacmp(&random_addr, &adv->random_addr)) return 0; } else { if (!bacmp(&random_addr, &hdev->random_addr)) @@ -1835,21 +1841,6 @@ void __hci_req_update_scan(struct hci_request *req) hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); } -static int update_scan(struct hci_request *req, unsigned long opt) -{ - hci_dev_lock(req->hdev); - __hci_req_update_scan(req); - hci_dev_unlock(req->hdev); - return 0; -} - -static void scan_update_work(struct work_struct *work) -{ - struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update); - - hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL); -} - static u8 get_service_classes(struct hci_dev *hdev) { struct bt_uuid *uuid; @@ -1890,69 +1881,6 @@ void __hci_req_update_class(struct hci_request 
*req) hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod); } -static void write_iac(struct hci_request *req) -{ - struct hci_dev *hdev = req->hdev; - struct hci_cp_write_current_iac_lap cp; - - if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) - return; - - if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) { - /* Limited discoverable mode */ - cp.num_iac = min_t(u8, hdev->num_iac, 2); - cp.iac_lap[0] = 0x00; /* LIAC */ - cp.iac_lap[1] = 0x8b; - cp.iac_lap[2] = 0x9e; - cp.iac_lap[3] = 0x33; /* GIAC */ - cp.iac_lap[4] = 0x8b; - cp.iac_lap[5] = 0x9e; - } else { - /* General discoverable mode */ - cp.num_iac = 1; - cp.iac_lap[0] = 0x33; /* GIAC */ - cp.iac_lap[1] = 0x8b; - cp.iac_lap[2] = 0x9e; - } - - hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP, - (cp.num_iac * 3) + 1, &cp); -} - -static int discoverable_update(struct hci_request *req, unsigned long opt) -{ - struct hci_dev *hdev = req->hdev; - - hci_dev_lock(hdev); - - if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { - write_iac(req); - __hci_req_update_scan(req); - __hci_req_update_class(req); - } - - /* Advertising instances don't use the global discoverable setting, so - * only update AD if advertising was enabled using Set Advertising. - */ - if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) { - __hci_req_update_adv_data(req, 0x00); - - /* Discoverable mode affects the local advertising - * address in limited privacy mode. - */ - if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) { - if (ext_adv_capable(hdev)) - __hci_req_start_ext_adv(req, 0x00); - else - __hci_req_enable_advertising(req); - } - } - - hci_dev_unlock(hdev); - - return 0; -} - void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn, u8 reason) { @@ -2227,146 +2155,6 @@ unlock: hci_dev_unlock(hdev); } -static int active_scan(struct hci_request *req, unsigned long opt) -{ - uint16_t interval = opt; - struct hci_dev *hdev = req->hdev; - u8 own_addr_type; - /* Accept list is not used for discovery */ - u8 filter_policy = 0x00; - /* Default is to enable duplicates filter */ - u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE; - /* Discovery doesn't require controller address resolution */ - bool addr_resolv = false; - int err; - - bt_dev_dbg(hdev, ""); - - /* If controller is scanning, it means the background scanning is - * running. Thus, we should temporarily stop it in order to set the - * discovery scanning parameters. - */ - if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) { - hci_req_add_le_scan_disable(req, false); - cancel_interleave_scan(hdev); - } - - /* All active scans will be done with either a resolvable private - * address (when privacy feature has been enabled) or non-resolvable - * private address. - */ - err = hci_update_random_address(req, true, scan_use_rpa(hdev), - &own_addr_type); - if (err < 0) - own_addr_type = ADDR_LE_DEV_PUBLIC; - - hci_dev_lock(hdev); - if (hci_is_adv_monitoring(hdev)) { - /* Duplicate filter should be disabled when some advertisement - * monitor is activated, otherwise AdvMon can only receive one - * advertisement for one peer(*) during active scanning, and - * might report loss to these peers. - * - * Note that different controllers have different meanings of - * |duplicate|. Some of them consider packets with the same - * address as duplicate, and others consider packets with the - * same address and the same RSSI as duplicate. Although in the - * latter case we don't need to disable duplicate filter, but - * it is common to have active scanning for a short period of - * time, the power impact should be neglectable. 
- */ - filter_dup = LE_SCAN_FILTER_DUP_DISABLE; - } - hci_dev_unlock(hdev); - - hci_req_start_scan(req, LE_SCAN_ACTIVE, interval, - hdev->le_scan_window_discovery, own_addr_type, - filter_policy, filter_dup, addr_resolv); - return 0; -} - -static int interleaved_discov(struct hci_request *req, unsigned long opt) -{ - int err; - - bt_dev_dbg(req->hdev, ""); - - err = active_scan(req, opt); - if (err) - return err; - - return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN); -} - -static void start_discovery(struct hci_dev *hdev, u8 *status) -{ - unsigned long timeout; - - bt_dev_dbg(hdev, "type %u", hdev->discovery.type); - - switch (hdev->discovery.type) { - case DISCOV_TYPE_BREDR: - if (!hci_dev_test_flag(hdev, HCI_INQUIRY)) - hci_req_sync(hdev, bredr_inquiry, - DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT, - status); - return; - case DISCOV_TYPE_INTERLEAVED: - /* When running simultaneous discovery, the LE scanning time - * should occupy the whole discovery time sine BR/EDR inquiry - * and LE scanning are scheduled by the controller. - * - * For interleaving discovery in comparison, BR/EDR inquiry - * and LE scanning are done sequentially with separate - * timeouts. - */ - if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, - &hdev->quirks)) { - timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT); - /* During simultaneous discovery, we double LE scan - * interval. We must leave some time for the controller - * to do BR/EDR inquiry. - */ - hci_req_sync(hdev, interleaved_discov, - hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT, - status); - break; - } - - timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout); - hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery, - HCI_CMD_TIMEOUT, status); - break; - case DISCOV_TYPE_LE: - timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT); - hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery, - HCI_CMD_TIMEOUT, status); - break; - default: - *status = HCI_ERROR_UNSPECIFIED; - return; - } - - if (*status) - return; - - bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout)); - - /* When service discovery is used and the controller has a - * strict duplicate filter, it is important to remember the - * start and duration of the scan. This is required for - * restarting scanning during the discovery phase. 
- */ - if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) && - hdev->discovery.result_filtering) { - hdev->discovery.scan_start = jiffies; - hdev->discovery.scan_duration = timeout; - } - - queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable, - timeout); -} - bool hci_req_stop_discovery(struct hci_request *req) { struct hci_dev *hdev = req->hdev; @@ -2462,180 +2250,8 @@ error: return err; } -static int stop_discovery(struct hci_request *req, unsigned long opt) -{ - hci_dev_lock(req->hdev); - hci_req_stop_discovery(req); - hci_dev_unlock(req->hdev); - - return 0; -} - -static void discov_update(struct work_struct *work) -{ - struct hci_dev *hdev = container_of(work, struct hci_dev, - discov_update); - u8 status = 0; - - switch (hdev->discovery.state) { - case DISCOVERY_STARTING: - start_discovery(hdev, &status); - mgmt_start_discovery_complete(hdev, status); - if (status) - hci_discovery_set_state(hdev, DISCOVERY_STOPPED); - else - hci_discovery_set_state(hdev, DISCOVERY_FINDING); - break; - case DISCOVERY_STOPPING: - hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status); - mgmt_stop_discovery_complete(hdev, status); - if (!status) - hci_discovery_set_state(hdev, DISCOVERY_STOPPED); - break; - case DISCOVERY_STOPPED: - default: - return; - } -} - -static void discov_off(struct work_struct *work) -{ - struct hci_dev *hdev = container_of(work, struct hci_dev, - discov_off.work); - - bt_dev_dbg(hdev, ""); - - hci_dev_lock(hdev); - - /* When discoverable timeout triggers, then just make sure - * the limited discoverable flag is cleared. Even in the case - * of a timeout triggered from general discoverable, it is - * safe to unconditionally clear the flag. - */ - hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); - hci_dev_clear_flag(hdev, HCI_DISCOVERABLE); - hdev->discov_timeout = 0; - - hci_dev_unlock(hdev); - - hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL); - mgmt_new_settings(hdev); -} - -static int powered_update_hci(struct hci_request *req, unsigned long opt) -{ - struct hci_dev *hdev = req->hdev; - u8 link_sec; - - hci_dev_lock(hdev); - - if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) && - !lmp_host_ssp_capable(hdev)) { - u8 mode = 0x01; - - hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode); - - if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) { - u8 support = 0x01; - - hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT, - sizeof(support), &support); - } - } - - if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) && - lmp_bredr_capable(hdev)) { - struct hci_cp_write_le_host_supported cp; - - cp.le = 0x01; - cp.simul = 0x00; - - /* Check first if we already have the right - * host state (host features set) - */ - if (cp.le != lmp_host_le_capable(hdev) || - cp.simul != lmp_host_le_br_capable(hdev)) - hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, - sizeof(cp), &cp); - } - - if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { - /* Make sure the controller has a good default for - * advertising data. This also applies to the case - * where BR/EDR was toggled during the AUTO_OFF phase. 
- */ - if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || - list_empty(&hdev->adv_instances)) { - int err; - - if (ext_adv_capable(hdev)) { - err = __hci_req_setup_ext_adv_instance(req, - 0x00); - if (!err) - __hci_req_update_scan_rsp_data(req, - 0x00); - } else { - err = 0; - __hci_req_update_adv_data(req, 0x00); - __hci_req_update_scan_rsp_data(req, 0x00); - } - - if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) { - if (!ext_adv_capable(hdev)) - __hci_req_enable_advertising(req); - else if (!err) - __hci_req_enable_ext_advertising(req, - 0x00); - } - } else if (!list_empty(&hdev->adv_instances)) { - struct adv_info *adv_instance; - - adv_instance = list_first_entry(&hdev->adv_instances, - struct adv_info, list); - __hci_req_schedule_adv_instance(req, - adv_instance->instance, - true); - } - } - - link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY); - if (link_sec != test_bit(HCI_AUTH, &hdev->flags)) - hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, - sizeof(link_sec), &link_sec); - - if (lmp_bredr_capable(hdev)) { - if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) - __hci_req_write_fast_connectable(req, true); - else - __hci_req_write_fast_connectable(req, false); - __hci_req_update_scan(req); - __hci_req_update_class(req); - __hci_req_update_name(req); - __hci_req_update_eir(req); - } - - hci_dev_unlock(hdev); - return 0; -} - -int __hci_req_hci_power_on(struct hci_dev *hdev) -{ - /* Register the available SMP channels (BR/EDR and LE) only when - * successfully powering on the controller. This late - * registration is required so that LE SMP can clearly decide if - * the public address or static address is used. - */ - smp_register(hdev); - - return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT, - NULL); -} - void hci_request_setup(struct hci_dev *hdev) { - INIT_WORK(&hdev->discov_update, discov_update); - INIT_WORK(&hdev->scan_update, scan_update_work); - INIT_DELAYED_WORK(&hdev->discov_off, discov_off); INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work); INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work); INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire); @@ -2646,9 +2262,6 @@ void hci_request_cancel_all(struct hci_dev *hdev) { __hci_cmd_sync_cancel(hdev, ENODEV); - cancel_work_sync(&hdev->discov_update); - cancel_work_sync(&hdev->scan_update); - cancel_delayed_work_sync(&hdev->discov_off); cancel_delayed_work_sync(&hdev->le_scan_disable); cancel_delayed_work_sync(&hdev->le_scan_restart); diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h index 7f8df258e295..39d001fa3acf 100644 --- a/net/bluetooth/hci_request.h +++ b/net/bluetooth/hci_request.h @@ -68,8 +68,6 @@ int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req, struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen, const void *param); -int __hci_req_hci_power_on(struct hci_dev *hdev); - void __hci_req_write_fast_connectable(struct hci_request *req, bool enable); void __hci_req_update_name(struct hci_request *req); void __hci_req_update_eir(struct hci_request *req); @@ -85,6 +83,9 @@ void __hci_req_enable_advertising(struct hci_request *req); void __hci_req_disable_advertising(struct hci_request *req); void __hci_req_update_adv_data(struct hci_request *req, u8 instance); int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance); +int hci_req_start_per_adv(struct hci_dev *hdev, u8 instance, u32 flags, + u16 min_interval, u16 max_interval, + u16 sync_interval); void __hci_req_update_scan_rsp_data(struct hci_request 
*req, u8 instance); int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance, @@ -94,8 +95,14 @@ void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk, bool force); int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance); +int __hci_req_setup_per_adv_instance(struct hci_request *req, u8 instance, + u16 min_interval, u16 max_interval); int __hci_req_start_ext_adv(struct hci_request *req, u8 instance); +int __hci_req_start_per_adv(struct hci_request *req, u8 instance, u32 flags, + u16 min_interval, u16 max_interval, + u16 sync_interval); int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance); +int __hci_req_enable_per_advertising(struct hci_request *req, u8 instance); int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance); int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance); void __hci_req_clear_ext_adv_sets(struct hci_request *req); @@ -110,11 +117,6 @@ bool hci_req_stop_discovery(struct hci_request *req); int hci_req_configure_datapath(struct hci_dev *hdev, struct bt_codec *codec); -static inline void hci_req_update_scan(struct hci_dev *hdev) -{ - queue_work(hdev->req_workqueue, &hdev->scan_update); -} - void __hci_req_update_scan(struct hci_request *req); int hci_update_random_address(struct hci_request *req, bool require_privacy, diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index 189e3115c8c6..0d015d4a8e41 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c @@ -869,7 +869,8 @@ static int hci_sock_release(struct socket *sock) hdev = hci_pi(sk)->hdev; if (hdev) { - if (hci_pi(sk)->channel == HCI_CHANNEL_USER) { + if (hci_pi(sk)->channel == HCI_CHANNEL_USER && + !hci_dev_test_flag(hdev, HCI_UNREGISTER)) { /* When releasing a user channel exclusive access, * call hci_dev_do_close directly instead of calling * hci_dev_close to ensure the exclusive access will @@ -878,9 +879,15 @@ static int hci_sock_release(struct socket *sock) * The checking of HCI_AUTO_OFF is not needed in this * case since it will have been cleared already when * opening the user channel. + * + * Make sure to also check that we haven't already + * unregistered since all the cleanup will have already + * been complete and hdev will get released when we put + * below. 
*/ hci_dev_do_close(hdev); hci_dev_clear_flag(hdev, HCI_USER_CHANNEL); + hci_register_suspend_notifier(hdev); mgmt_index_added(hdev); } @@ -1209,6 +1216,7 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, } mgmt_index_removed(hdev); + hci_unregister_suspend_notifier(hdev); err = hci_dev_open(hdev->id); if (err) { @@ -1223,6 +1231,7 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, err = 0; } else { hci_dev_clear_flag(hdev, HCI_USER_CHANNEL); + hci_register_suspend_notifier(hdev); mgmt_index_added(hdev); hci_dev_put(hdev); goto done; diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c index 1739e8cb3291..148ce629a59f 100644 --- a/net/bluetooth/hci_sync.c +++ b/net/bluetooth/hci_sync.c @@ -849,26 +849,38 @@ static int hci_set_ext_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance) u8 data[HCI_MAX_EXT_AD_LENGTH]; } pdu; u8 len; + struct adv_info *adv = NULL; + int err; memset(&pdu, 0, sizeof(pdu)); - len = eir_create_scan_rsp(hdev, instance, pdu.data); - - if (hdev->scan_rsp_data_len == len && - !memcmp(pdu.data, hdev->scan_rsp_data, len)) - return 0; + if (instance) { + adv = hci_find_adv_instance(hdev, instance); + if (!adv || !adv->scan_rsp_changed) + return 0; + } - memcpy(hdev->scan_rsp_data, pdu.data, len); - hdev->scan_rsp_data_len = len; + len = eir_create_scan_rsp(hdev, instance, pdu.data); pdu.cp.handle = instance; pdu.cp.length = len; pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE; pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG; - return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, - sizeof(pdu.cp) + len, &pdu.cp, - HCI_CMD_TIMEOUT); + err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, + sizeof(pdu.cp) + len, &pdu.cp, + HCI_CMD_TIMEOUT); + if (err) + return err; + + if (adv) { + adv->scan_rsp_changed = false; + } else { + memcpy(hdev->scan_rsp_data, pdu.data, len); + hdev->scan_rsp_data_len = len; + } + + return 0; } static int __hci_set_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance) @@ -965,6 +977,187 @@ int hci_start_ext_adv_sync(struct hci_dev *hdev, u8 instance) return hci_enable_ext_advertising_sync(hdev, instance); } +static int hci_disable_per_advertising_sync(struct hci_dev *hdev, u8 instance) +{ + struct hci_cp_le_set_per_adv_enable cp; + + /* If periodic advertising already disabled there is nothing to do. 
*/ + if (!hci_dev_test_flag(hdev, HCI_LE_PER_ADV)) + return 0; + + memset(&cp, 0, sizeof(cp)); + + cp.enable = 0x00; + cp.handle = instance; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +static int hci_set_per_adv_params_sync(struct hci_dev *hdev, u8 instance, + u16 min_interval, u16 max_interval) +{ + struct hci_cp_le_set_per_adv_params cp; + + memset(&cp, 0, sizeof(cp)); + + if (!min_interval) + min_interval = DISCOV_LE_PER_ADV_INT_MIN; + + if (!max_interval) + max_interval = DISCOV_LE_PER_ADV_INT_MAX; + + cp.handle = instance; + cp.min_interval = cpu_to_le16(min_interval); + cp.max_interval = cpu_to_le16(max_interval); + cp.periodic_properties = 0x0000; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +static int hci_set_per_adv_data_sync(struct hci_dev *hdev, u8 instance) +{ + struct { + struct hci_cp_le_set_per_adv_data cp; + u8 data[HCI_MAX_PER_AD_LENGTH]; + } pdu; + u8 len; + + memset(&pdu, 0, sizeof(pdu)); + + if (instance) { + struct adv_info *adv = hci_find_adv_instance(hdev, instance); + + if (!adv || !adv->periodic) + return 0; + } + + len = eir_create_per_adv_data(hdev, instance, pdu.data); + + pdu.cp.length = len; + pdu.cp.handle = instance; + pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_DATA, + sizeof(pdu.cp) + len, &pdu, + HCI_CMD_TIMEOUT); +} + +static int hci_enable_per_advertising_sync(struct hci_dev *hdev, u8 instance) +{ + struct hci_cp_le_set_per_adv_enable cp; + + /* If periodic advertising already enabled there is nothing to do. */ + if (hci_dev_test_flag(hdev, HCI_LE_PER_ADV)) + return 0; + + memset(&cp, 0, sizeof(cp)); + + cp.enable = 0x01; + cp.handle = instance; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +/* Checks if periodic advertising data contains a Basic Announcement and if it + * does generates a Broadcast ID and add Broadcast Announcement. + */ +static int hci_adv_bcast_annoucement(struct hci_dev *hdev, struct adv_info *adv) +{ + u8 bid[3]; + u8 ad[4 + 3]; + + /* Skip if NULL adv as instance 0x00 is used for general purpose + * advertising so it cannot used for the likes of Broadcast Announcement + * as it can be overwritten at any point. + */ + if (!adv) + return 0; + + /* Check if PA data doesn't contains a Basic Audio Announcement then + * there is nothing to do. + */ + if (!eir_get_service_data(adv->per_adv_data, adv->per_adv_data_len, + 0x1851, NULL)) + return 0; + + /* Check if advertising data already has a Broadcast Announcement since + * the process may want to control the Broadcast ID directly and in that + * case the kernel shall no interfere. 
+ */ + if (eir_get_service_data(adv->adv_data, adv->adv_data_len, 0x1852, + NULL)) + return 0; + + /* Generate Broadcast ID */ + get_random_bytes(bid, sizeof(bid)); + eir_append_service_data(ad, 0, 0x1852, bid, sizeof(bid)); + hci_set_adv_instance_data(hdev, adv->instance, sizeof(ad), ad, 0, NULL); + + return hci_update_adv_data_sync(hdev, adv->instance); +} + +int hci_start_per_adv_sync(struct hci_dev *hdev, u8 instance, u8 data_len, + u8 *data, u32 flags, u16 min_interval, + u16 max_interval, u16 sync_interval) +{ + struct adv_info *adv = NULL; + int err; + bool added = false; + + hci_disable_per_advertising_sync(hdev, instance); + + if (instance) { + adv = hci_find_adv_instance(hdev, instance); + /* Create an instance if that could not be found */ + if (!adv) { + adv = hci_add_per_instance(hdev, instance, flags, + data_len, data, + sync_interval, + sync_interval); + if (IS_ERR(adv)) + return PTR_ERR(adv); + added = true; + } + } + + /* Only start advertising if instance 0 or if a dedicated instance has + * been added. + */ + if (!adv || added) { + err = hci_start_ext_adv_sync(hdev, instance); + if (err < 0) + goto fail; + + err = hci_adv_bcast_annoucement(hdev, adv); + if (err < 0) + goto fail; + } + + err = hci_set_per_adv_params_sync(hdev, instance, min_interval, + max_interval); + if (err < 0) + goto fail; + + err = hci_set_per_adv_data_sync(hdev, instance); + if (err < 0) + goto fail; + + err = hci_enable_per_advertising_sync(hdev, instance); + if (err < 0) + goto fail; + + return 0; + +fail: + if (added) + hci_remove_adv_instance(hdev, instance); + + return err; +} + static int hci_start_adv_sync(struct hci_dev *hdev, u8 instance) { int err; @@ -1104,6 +1297,42 @@ int hci_remove_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance, HCI_CMD_TIMEOUT, sk); } +static int remove_ext_adv_sync(struct hci_dev *hdev, void *data) +{ + struct adv_info *adv = data; + u8 instance = 0; + + if (adv) + instance = adv->instance; + + return hci_remove_ext_adv_instance_sync(hdev, instance, NULL); +} + +int hci_remove_ext_adv_instance(struct hci_dev *hdev, u8 instance) +{ + struct adv_info *adv = NULL; + + if (instance) { + adv = hci_find_adv_instance(hdev, instance); + if (!adv) + return -EINVAL; + } + + return hci_cmd_sync_queue(hdev, remove_ext_adv_sync, adv, NULL); +} + +int hci_le_terminate_big_sync(struct hci_dev *hdev, u8 handle, u8 reason) +{ + struct hci_cp_le_term_big cp; + + memset(&cp, 0, sizeof(cp)); + cp.handle = handle; + cp.reason = reason; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_TERM_BIG, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + static void cancel_adv_timeout(struct hci_dev *hdev) { if (hdev->adv_instance_timeout) { @@ -1119,27 +1348,39 @@ static int hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance) u8 data[HCI_MAX_EXT_AD_LENGTH]; } pdu; u8 len; + struct adv_info *adv = NULL; + int err; memset(&pdu, 0, sizeof(pdu)); - len = eir_create_adv_data(hdev, instance, pdu.data); - - /* There's nothing to do if the data hasn't changed */ - if (hdev->adv_data_len == len && - memcmp(pdu.data, hdev->adv_data, len) == 0) - return 0; + if (instance) { + adv = hci_find_adv_instance(hdev, instance); + if (!adv || !adv->adv_data_changed) + return 0; + } - memcpy(hdev->adv_data, pdu.data, len); - hdev->adv_data_len = len; + len = eir_create_adv_data(hdev, instance, pdu.data); pdu.cp.length = len; pdu.cp.handle = instance; pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE; pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG; - return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_DATA, - 
sizeof(pdu.cp) + len, &pdu.cp, - HCI_CMD_TIMEOUT); + err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_DATA, + sizeof(pdu.cp) + len, &pdu.cp, + HCI_CMD_TIMEOUT); + if (err) + return err; + + /* Update data if the command succeed */ + if (adv) { + adv->adv_data_changed = false; + } else { + memcpy(hdev->adv_data, pdu.data, len); + hdev->adv_data_len = len; + } + + return 0; } static int hci_set_adv_data_sync(struct hci_dev *hdev, u8 instance) @@ -1612,6 +1853,9 @@ static int hci_le_add_resolve_list_sync(struct hci_dev *hdev, bacpy(&cp.bdaddr, ¶ms->addr); memcpy(cp.peer_irk, irk->val, 16); + /* Default privacy mode is always Network */ + params->privacy_mode = HCI_NETWORK_PRIVACY; + done: if (hci_dev_test_flag(hdev, HCI_PRIVACY)) memcpy(cp.local_irk, hdev->irk, 16); @@ -1865,12 +2109,15 @@ static u8 hci_update_accept_list_sync(struct hci_dev *hdev) } /* Go through the current accept list programmed into the - * controller one by one and check if that address is still - * in the list of pending connections or list of devices to + * controller one by one and check if that address is connected or is + * still in the list of pending connections or list of devices to * report. If not present in either list, then remove it from * the controller. */ list_for_each_entry_safe(b, t, &hdev->le_accept_list, list) { + if (hci_conn_hash_lookup_le(hdev, &b->bdaddr, b->bdaddr_type)) + continue; + pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns, &b->bdaddr, b->bdaddr_type); @@ -2171,7 +2418,8 @@ int hci_update_passive_scan_sync(struct hci_dev *hdev) if (list_empty(&hdev->pend_le_conns) && list_empty(&hdev->pend_le_reports) && - !hci_is_adv_monitoring(hdev)) { + !hci_is_adv_monitoring(hdev) && + !hci_dev_test_flag(hdev, HCI_PA_SYNC)) { /* If there is no pending LE connections or devices * to be scanned for or no ADV monitors, we should stop the * background scanning. @@ -2206,6 +2454,16 @@ int hci_update_passive_scan_sync(struct hci_dev *hdev) return err; } +static int update_scan_sync(struct hci_dev *hdev, void *data) +{ + return hci_update_scan_sync(hdev); +} + +int hci_update_scan(struct hci_dev *hdev) +{ + return hci_cmd_sync_queue(hdev, update_scan_sync, NULL, NULL); +} + static int update_passive_scan_sync(struct hci_dev *hdev, void *data) { return hci_update_passive_scan_sync(hdev); @@ -2760,6 +3018,12 @@ static const struct hci_init_stage amp_init2[] = { /* Read Buffer Size (ACL mtu, max pkt, etc.) 
*/ static int hci_read_buffer_size_sync(struct hci_dev *hdev) { + /* Use Read LE Buffer Size V2 if supported */ + if (hdev->commands[41] & 0x20) + return __hci_cmd_sync_status(hdev, + HCI_OP_LE_READ_BUFFER_SIZE_V2, + 0, NULL, HCI_CMD_TIMEOUT); + return __hci_cmd_sync_status(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL, HCI_CMD_TIMEOUT); } @@ -3011,6 +3275,10 @@ static int hci_init2_sync(struct hci_dev *hdev) if (hdev->dev_type == HCI_AMP) return hci_init_stage_sync(hdev, amp_init2); + err = hci_init_stage_sync(hdev, hci_init2); + if (err) + return err; + if (lmp_bredr_capable(hdev)) { err = hci_init_stage_sync(hdev, br_init2); if (err) @@ -3028,7 +3296,7 @@ static int hci_init2_sync(struct hci_dev *hdev) hci_dev_set_flag(hdev, HCI_LE_ENABLED); } - return hci_init_stage_sync(hdev, hci_init2); + return 0; } static int hci_set_event_mask_sync(struct hci_dev *hdev) @@ -3191,7 +3459,7 @@ static int hci_read_page_scan_activity_sync(struct hci_dev *hdev) static int hci_read_def_err_data_reporting_sync(struct hci_dev *hdev) { if (!(hdev->commands[18] & 0x04) || - test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) + !(hdev->features[0][6] & LMP_ERR_DATA_REPORTING)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_READ_DEF_ERR_DATA_REPORTING, @@ -3349,6 +3617,19 @@ static int hci_le_set_event_mask_sync(struct hci_dev *hdev) if (ext_adv_capable(hdev)) events[2] |= 0x02; /* LE Advertising Set Terminated */ + if (cis_capable(hdev)) { + events[3] |= 0x01; /* LE CIS Established */ + if (cis_peripheral_capable(hdev)) + events[3] |= 0x02; /* LE CIS Request */ + } + + if (bis_capable(hdev)) { + events[3] |= 0x04; /* LE Create BIG Complete */ + events[3] |= 0x08; /* LE Terminate BIG Complete */ + events[3] |= 0x10; /* LE BIG Sync Established */ + events[3] |= 0x20; /* LE BIG Sync Loss */ + } + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EVENT_MASK, sizeof(events), events, HCI_CMD_TIMEOUT); } @@ -3489,6 +3770,24 @@ static int hci_set_le_support_sync(struct hci_dev *hdev) sizeof(cp), &cp, HCI_CMD_TIMEOUT); } +/* LE Set Host Feature */ +static int hci_le_set_host_feature_sync(struct hci_dev *hdev) +{ + struct hci_cp_le_set_host_feature cp; + + if (!iso_capable(hdev)) + return 0; + + memset(&cp, 0, sizeof(cp)); + + /* Isochronous Channels (Host Support) */ + cp.bit_number = 32; + cp.bit_value = 1; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_HOST_FEATURE, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + /* LE Controller init stage 3 command sequence */ static const struct hci_init_stage le_init3[] = { /* HCI_OP_LE_SET_EVENT_MASK */ @@ -3515,6 +3814,8 @@ static const struct hci_init_stage le_init3[] = { HCI_INIT(hci_le_read_num_support_adv_sets_sync), /* HCI_OP_WRITE_LE_HOST_SUPPORTED */ HCI_INIT(hci_set_le_support_sync), + /* HCI_OP_LE_SET_HOST_FEATURE */ + HCI_INIT(hci_le_set_host_feature_sync), {} }; @@ -3578,7 +3879,7 @@ static int hci_set_event_mask_page_2_sync(struct hci_dev *hdev) if (lmp_cpb_central_capable(hdev)) { events[1] |= 0x40; /* Triggered Clock Capture */ events[1] |= 0x80; /* Synchronization Train Complete */ - events[2] |= 0x10; /* Peripheral Page Response Timeout */ + events[2] |= 0x08; /* Truncated Page Complete */ events[2] |= 0x20; /* CPB Channel Map Change */ changed = true; } @@ -3590,7 +3891,7 @@ static int hci_set_event_mask_page_2_sync(struct hci_dev *hdev) events[2] |= 0x01; /* Synchronization Train Received */ events[2] |= 0x02; /* CPB Receive */ events[2] |= 0x04; /* CPB Timeout */ - events[2] |= 0x08; /* Truncated Page Complete */ + events[2] |= 0x10; /* Peripheral 
Page Response Timeout */ changed = true; } @@ -3676,7 +3977,7 @@ static int hci_set_err_data_report_sync(struct hci_dev *hdev) bool enabled = hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED); if (!(hdev->commands[18] & 0x08) || - test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) + !(hdev->features[0][6] & LMP_ERR_DATA_REPORTING)) return 0; if (enabled == hdev->err_data_reporting) @@ -3835,9 +4136,6 @@ static const struct { HCI_QUIRK_BROKEN(STORED_LINK_KEY, "HCI Delete Stored Link Key command is advertised, " "but not supported."), - HCI_QUIRK_BROKEN(ERR_DATA_REPORTING, - "HCI Read Default Erroneous Data Reporting command is " - "advertised, but not supported."), HCI_QUIRK_BROKEN(READ_TRANSMIT_POWER, "HCI Read Transmit Power Level command is advertised, " "but not supported."), @@ -3848,90 +4146,40 @@ static const struct { "advertised, but not supported.") }; -int hci_dev_open_sync(struct hci_dev *hdev) +/* This function handles hdev setup stage: + * + * Calls hdev->setup + * Setup address if HCI_QUIRK_USE_BDADDR_PROPERTY is set. + */ +static int hci_dev_setup_sync(struct hci_dev *hdev) { int ret = 0; - - bt_dev_dbg(hdev, ""); - - if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) { - ret = -ENODEV; - goto done; - } + bool invalid_bdaddr; + size_t i; if (!hci_dev_test_flag(hdev, HCI_SETUP) && - !hci_dev_test_flag(hdev, HCI_CONFIG)) { - /* Check for rfkill but allow the HCI setup stage to - * proceed (which in itself doesn't cause any RF activity). - */ - if (hci_dev_test_flag(hdev, HCI_RFKILLED)) { - ret = -ERFKILL; - goto done; - } - - /* Check for valid public address or a configured static - * random address, but let the HCI setup proceed to - * be able to determine if there is a public address - * or not. - * - * In case of user channel usage, it is not important - * if a public address or static random address is - * available. - * - * This check is only valid for BR/EDR controllers - * since AMP controllers do not have an address. - */ - if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && - hdev->dev_type == HCI_PRIMARY && - !bacmp(&hdev->bdaddr, BDADDR_ANY) && - !bacmp(&hdev->static_addr, BDADDR_ANY)) { - ret = -EADDRNOTAVAIL; - goto done; - } - } - - if (test_bit(HCI_UP, &hdev->flags)) { - ret = -EALREADY; - goto done; - } - - if (hdev->open(hdev)) { - ret = -EIO; - goto done; - } - - set_bit(HCI_RUNNING, &hdev->flags); - hci_sock_dev_event(hdev, HCI_DEV_OPEN); - - atomic_set(&hdev->cmd_cnt, 1); - set_bit(HCI_INIT, &hdev->flags); + !test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) + return 0; - if (hci_dev_test_flag(hdev, HCI_SETUP) || - test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) { - bool invalid_bdaddr; - size_t i; + bt_dev_dbg(hdev, ""); - hci_sock_dev_event(hdev, HCI_DEV_SETUP); + hci_sock_dev_event(hdev, HCI_DEV_SETUP); - if (hdev->setup) - ret = hdev->setup(hdev); + if (hdev->setup) + ret = hdev->setup(hdev); - for (i = 0; i < ARRAY_SIZE(hci_broken_table); i++) { - if (test_bit(hci_broken_table[i].quirk, &hdev->quirks)) - bt_dev_warn(hdev, "%s", - hci_broken_table[i].desc); - } - - /* The transport driver can set the quirk to mark the - * BD_ADDR invalid before creating the HCI device or in - * its setup callback. 
- */ - invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR, - &hdev->quirks); + for (i = 0; i < ARRAY_SIZE(hci_broken_table); i++) { + if (test_bit(hci_broken_table[i].quirk, &hdev->quirks)) + bt_dev_warn(hdev, "%s", hci_broken_table[i].desc); + } - if (ret) - goto setup_failed; + /* The transport driver can set the quirk to mark the + * BD_ADDR invalid before creating the HCI device or in + * its setup callback. + */ + invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks); + if (!ret) { if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) { if (!bacmp(&hdev->public_addr, BDADDR_ANY)) hci_dev_get_bd_addr_from_property(hdev); @@ -3950,33 +4198,51 @@ int hci_dev_open_sync(struct hci_dev *hdev) invalid_bdaddr = false; } } + } -setup_failed: - /* The transport driver can set these quirks before - * creating the HCI device or in its setup callback. - * - * For the invalid BD_ADDR quirk it is possible that - * it becomes a valid address if the bootloader does - * provide it (see above). - * - * In case any of them is set, the controller has to - * start up as unconfigured. - */ - if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) || - invalid_bdaddr) - hci_dev_set_flag(hdev, HCI_UNCONFIGURED); + /* The transport driver can set these quirks before + * creating the HCI device or in its setup callback. + * + * For the invalid BD_ADDR quirk it is possible that + * it becomes a valid address if the bootloader does + * provide it (see above). + * + * In case any of them is set, the controller has to + * start up as unconfigured. + */ + if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) || + invalid_bdaddr) + hci_dev_set_flag(hdev, HCI_UNCONFIGURED); - /* For an unconfigured controller it is required to - * read at least the version information provided by - * the Read Local Version Information command. - * - * If the set_bdaddr driver callback is provided, then - * also the original Bluetooth public device address - * will be read using the Read BD Address command. - */ - if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) - ret = hci_unconf_init_sync(hdev); - } + /* For an unconfigured controller it is required to + * read at least the version information provided by + * the Read Local Version Information command. + * + * If the set_bdaddr driver callback is provided, then + * also the original Bluetooth public device address + * will be read using the Read BD Address command. + */ + if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) + return hci_unconf_init_sync(hdev); + + return ret; +} + +/* This function handles hdev init stage: + * + * Calls hci_dev_setup_sync to perform setup stage + * Calls hci_init_sync to perform HCI command init sequence + */ +static int hci_dev_init_sync(struct hci_dev *hdev) +{ + int ret; + + bt_dev_dbg(hdev, ""); + + atomic_set(&hdev->cmd_cnt, 1); + set_bit(HCI_INIT, &hdev->flags); + + ret = hci_dev_setup_sync(hdev); if (hci_dev_test_flag(hdev, HCI_CONFIG)) { /* If public address change is configured, ensure that @@ -4016,6 +4282,65 @@ setup_failed: clear_bit(HCI_INIT, &hdev->flags); + return ret; +} + +int hci_dev_open_sync(struct hci_dev *hdev) +{ + int ret; + + bt_dev_dbg(hdev, ""); + + if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) { + ret = -ENODEV; + goto done; + } + + if (!hci_dev_test_flag(hdev, HCI_SETUP) && + !hci_dev_test_flag(hdev, HCI_CONFIG)) { + /* Check for rfkill but allow the HCI setup stage to + * proceed (which in itself doesn't cause any RF activity). 
+ */ + if (hci_dev_test_flag(hdev, HCI_RFKILLED)) { + ret = -ERFKILL; + goto done; + } + + /* Check for valid public address or a configured static + * random address, but let the HCI setup proceed to + * be able to determine if there is a public address + * or not. + * + * In case of user channel usage, it is not important + * if a public address or static random address is + * available. + * + * This check is only valid for BR/EDR controllers + * since AMP controllers do not have an address. + */ + if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && + hdev->dev_type == HCI_PRIMARY && + !bacmp(&hdev->bdaddr, BDADDR_ANY) && + !bacmp(&hdev->static_addr, BDADDR_ANY)) { + ret = -EADDRNOTAVAIL; + goto done; + } + } + + if (test_bit(HCI_UP, &hdev->flags)) { + ret = -EALREADY; + goto done; + } + + if (hdev->open(hdev)) { + ret = -EIO; + goto done; + } + + set_bit(HCI_RUNNING, &hdev->flags); + hci_sock_dev_event(hdev, HCI_DEV_OPEN); + + ret = hci_dev_init_sync(hdev); if (!ret) { hci_dev_hold(hdev); hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); @@ -4435,8 +4760,7 @@ static int hci_reject_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } -static int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, - u8 reason) +int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason) { int err; @@ -5039,13 +5363,13 @@ static int hci_resume_scan_sync(struct hci_dev *hdev) if (!hdev->scanning_paused) return 0; + hdev->scanning_paused = false; + hci_update_scan_sync(hdev); /* Reset passive scanning to normal */ hci_update_passive_scan_sync(hdev); - hdev->scanning_paused = false; - return 0; } @@ -5064,7 +5388,6 @@ int hci_resume_sync(struct hci_dev *hdev) return 0; hdev->suspended = false; - hdev->scanning_paused = false; /* Restore event mask */ hci_set_event_mask_sync(hdev); @@ -5375,3 +5698,36 @@ done: hci_resume_advertising_sync(hdev); return err; } + +int hci_le_remove_cig_sync(struct hci_dev *hdev, u8 handle) +{ + struct hci_cp_le_remove_cig cp; + + memset(&cp, 0, sizeof(cp)); + cp.cig_id = handle; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_REMOVE_CIG, sizeof(cp), + &cp, HCI_CMD_TIMEOUT); +} + +int hci_le_big_terminate_sync(struct hci_dev *hdev, u8 handle) +{ + struct hci_cp_le_big_term_sync cp; + + memset(&cp, 0, sizeof(cp)); + cp.handle = handle; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_BIG_TERM_SYNC, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +int hci_le_pa_terminate_sync(struct hci_dev *hdev, u16 handle) +{ + struct hci_cp_le_pa_term_sync cp; + + memset(&cp, 0, sizeof(cp)); + cp.handle = cpu_to_le16(handle); + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_TERM_SYNC, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c new file mode 100644 index 000000000000..ff09c353e64e --- /dev/null +++ b/net/bluetooth/iso.c @@ -0,0 +1,1824 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * BlueZ - Bluetooth protocol stack for Linux + * + * Copyright (C) 2022 Intel Corporation + */ + +#include <linux/module.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h> +#include <linux/sched/signal.h> + +#include <net/bluetooth/bluetooth.h> +#include <net/bluetooth/hci_core.h> +#include <net/bluetooth/iso.h> + +static const struct proto_ops iso_sock_ops; + +static struct bt_sock_list iso_sk_list = { + .lock = __RW_LOCK_UNLOCKED(iso_sk_list.lock) +}; + +/* ---- ISO connections ---- */ +struct iso_conn { + struct hci_conn *hcon; + + /* @lock: spinlock protecting changes to iso_conn fields */ + 
spinlock_t lock; + struct sock *sk; + + struct delayed_work timeout_work; + + struct sk_buff *rx_skb; + __u32 rx_len; + __u16 tx_sn; +}; + +#define iso_conn_lock(c) spin_lock(&(c)->lock) +#define iso_conn_unlock(c) spin_unlock(&(c)->lock) + +static void iso_sock_close(struct sock *sk); +static void iso_sock_kill(struct sock *sk); + +/* ----- ISO socket info ----- */ +#define iso_pi(sk) ((struct iso_pinfo *)sk) + +struct iso_pinfo { + struct bt_sock bt; + bdaddr_t src; + __u8 src_type; + bdaddr_t dst; + __u8 dst_type; + __u8 bc_sid; + __u8 bc_num_bis; + __u8 bc_bis[ISO_MAX_NUM_BIS]; + __u16 sync_handle; + __u32 flags; + struct bt_iso_qos qos; + __u8 base_len; + __u8 base[HCI_MAX_PER_AD_LENGTH]; + struct iso_conn *conn; +}; + +/* ---- ISO timers ---- */ +#define ISO_CONN_TIMEOUT (HZ * 40) +#define ISO_DISCONN_TIMEOUT (HZ * 2) + +static void iso_sock_timeout(struct work_struct *work) +{ + struct iso_conn *conn = container_of(work, struct iso_conn, + timeout_work.work); + struct sock *sk; + + iso_conn_lock(conn); + sk = conn->sk; + if (sk) + sock_hold(sk); + iso_conn_unlock(conn); + + if (!sk) + return; + + BT_DBG("sock %p state %d", sk, sk->sk_state); + + lock_sock(sk); + sk->sk_err = ETIMEDOUT; + sk->sk_state_change(sk); + release_sock(sk); + sock_put(sk); +} + +static void iso_sock_set_timer(struct sock *sk, long timeout) +{ + if (!iso_pi(sk)->conn) + return; + + BT_DBG("sock %p state %d timeout %ld", sk, sk->sk_state, timeout); + cancel_delayed_work(&iso_pi(sk)->conn->timeout_work); + schedule_delayed_work(&iso_pi(sk)->conn->timeout_work, timeout); +} + +static void iso_sock_clear_timer(struct sock *sk) +{ + if (!iso_pi(sk)->conn) + return; + + BT_DBG("sock %p state %d", sk, sk->sk_state); + cancel_delayed_work(&iso_pi(sk)->conn->timeout_work); +} + +/* ---- ISO connections ---- */ +static struct iso_conn *iso_conn_add(struct hci_conn *hcon) +{ + struct iso_conn *conn = hcon->iso_data; + + if (conn) + return conn; + + conn = kzalloc(sizeof(*conn), GFP_KERNEL); + if (!conn) + return NULL; + + spin_lock_init(&conn->lock); + INIT_DELAYED_WORK(&conn->timeout_work, iso_sock_timeout); + + hcon->iso_data = conn; + conn->hcon = hcon; + conn->tx_sn = 0; + + BT_DBG("hcon %p conn %p", hcon, conn); + + return conn; +} + +/* Delete channel. Must be called on the locked socket. */ +static void iso_chan_del(struct sock *sk, int err) +{ + struct iso_conn *conn; + struct sock *parent; + + conn = iso_pi(sk)->conn; + + BT_DBG("sk %p, conn %p, err %d", sk, conn, err); + + if (conn) { + iso_conn_lock(conn); + conn->sk = NULL; + iso_pi(sk)->conn = NULL; + iso_conn_unlock(conn); + + if (conn->hcon) + hci_conn_drop(conn->hcon); + } + + sk->sk_state = BT_CLOSED; + sk->sk_err = err; + + parent = bt_sk(sk)->parent; + if (parent) { + bt_accept_unlink(sk); + parent->sk_data_ready(parent); + } else { + sk->sk_state_change(sk); + } + + sock_set_flag(sk, SOCK_ZAPPED); +} + +static void iso_conn_del(struct hci_conn *hcon, int err) +{ + struct iso_conn *conn = hcon->iso_data; + struct sock *sk; + + if (!conn) + return; + + BT_DBG("hcon %p conn %p, err %d", hcon, conn, err); + + /* Kill socket */ + iso_conn_lock(conn); + sk = conn->sk; + if (sk) + sock_hold(sk); + iso_conn_unlock(conn); + + if (sk) { + lock_sock(sk); + iso_sock_clear_timer(sk); + iso_chan_del(sk, err); + release_sock(sk); + sock_put(sk); + } + + /* Ensure no more work items will run before freeing conn. 
*/ + cancel_delayed_work_sync(&conn->timeout_work); + + hcon->iso_data = NULL; + kfree(conn); +} + +static int __iso_chan_add(struct iso_conn *conn, struct sock *sk, + struct sock *parent) +{ + BT_DBG("conn %p", conn); + + if (iso_pi(sk)->conn == conn && conn->sk == sk) + return 0; + + if (conn->sk) { + BT_ERR("conn->sk already set"); + return -EBUSY; + } + + iso_pi(sk)->conn = conn; + conn->sk = sk; + + if (parent) + bt_accept_enqueue(parent, sk, true); + + return 0; +} + +static int iso_chan_add(struct iso_conn *conn, struct sock *sk, + struct sock *parent) +{ + int err; + + iso_conn_lock(conn); + err = __iso_chan_add(conn, sk, parent); + iso_conn_unlock(conn); + + return err; +} + +static int iso_connect_bis(struct sock *sk) +{ + struct iso_conn *conn; + struct hci_conn *hcon; + struct hci_dev *hdev; + int err; + + BT_DBG("%pMR", &iso_pi(sk)->src); + + hdev = hci_get_route(&iso_pi(sk)->dst, &iso_pi(sk)->src, + iso_pi(sk)->src_type); + if (!hdev) + return -EHOSTUNREACH; + + hci_dev_lock(hdev); + + if (!bis_capable(hdev)) { + err = -EOPNOTSUPP; + goto done; + } + + /* Fail if out PHYs are marked as disabled */ + if (!iso_pi(sk)->qos.out.phy) { + err = -EINVAL; + goto done; + } + + hcon = hci_connect_bis(hdev, &iso_pi(sk)->dst, iso_pi(sk)->dst_type, + &iso_pi(sk)->qos, iso_pi(sk)->base_len, + iso_pi(sk)->base); + if (IS_ERR(hcon)) { + err = PTR_ERR(hcon); + goto done; + } + + conn = iso_conn_add(hcon); + if (!conn) { + hci_conn_drop(hcon); + err = -ENOMEM; + goto done; + } + + /* Update source addr of the socket */ + bacpy(&iso_pi(sk)->src, &hcon->src); + + err = iso_chan_add(conn, sk, NULL); + if (err) + goto done; + + if (hcon->state == BT_CONNECTED) { + iso_sock_clear_timer(sk); + sk->sk_state = BT_CONNECTED; + } else { + sk->sk_state = BT_CONNECT; + iso_sock_set_timer(sk, sk->sk_sndtimeo); + } + +done: + hci_dev_unlock(hdev); + hci_dev_put(hdev); + return err; +} + +static int iso_connect_cis(struct sock *sk) +{ + struct iso_conn *conn; + struct hci_conn *hcon; + struct hci_dev *hdev; + int err; + + BT_DBG("%pMR -> %pMR", &iso_pi(sk)->src, &iso_pi(sk)->dst); + + hdev = hci_get_route(&iso_pi(sk)->dst, &iso_pi(sk)->src, + iso_pi(sk)->src_type); + if (!hdev) + return -EHOSTUNREACH; + + hci_dev_lock(hdev); + + if (!cis_central_capable(hdev)) { + err = -EOPNOTSUPP; + goto done; + } + + /* Fail if either PHYs are marked as disabled */ + if (!iso_pi(sk)->qos.in.phy && !iso_pi(sk)->qos.out.phy) { + err = -EINVAL; + goto done; + } + + /* Just bind if DEFER_SETUP has been set */ + if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { + hcon = hci_bind_cis(hdev, &iso_pi(sk)->dst, + iso_pi(sk)->dst_type, &iso_pi(sk)->qos); + if (IS_ERR(hcon)) { + err = PTR_ERR(hcon); + goto done; + } + } else { + hcon = hci_connect_cis(hdev, &iso_pi(sk)->dst, + iso_pi(sk)->dst_type, &iso_pi(sk)->qos); + if (IS_ERR(hcon)) { + err = PTR_ERR(hcon); + goto done; + } + } + + conn = iso_conn_add(hcon); + if (!conn) { + hci_conn_drop(hcon); + err = -ENOMEM; + goto done; + } + + /* Update source addr of the socket */ + bacpy(&iso_pi(sk)->src, &hcon->src); + + err = iso_chan_add(conn, sk, NULL); + if (err) + goto done; + + if (hcon->state == BT_CONNECTED) { + iso_sock_clear_timer(sk); + sk->sk_state = BT_CONNECTED; + } else if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { + iso_sock_clear_timer(sk); + sk->sk_state = BT_CONNECT; + } else { + sk->sk_state = BT_CONNECT; + iso_sock_set_timer(sk, sk->sk_sndtimeo); + } + +done: + hci_dev_unlock(hdev); + hci_dev_put(hdev); + return err; +} + +static int iso_send_frame(struct 
sock *sk, struct sk_buff *skb) +{ + struct iso_conn *conn = iso_pi(sk)->conn; + struct hci_iso_data_hdr *hdr; + int len = 0; + + BT_DBG("sk %p len %d", sk, skb->len); + + if (skb->len > iso_pi(sk)->qos.out.sdu) + return -EMSGSIZE; + + len = skb->len; + + /* Push ISO data header */ + hdr = skb_push(skb, HCI_ISO_DATA_HDR_SIZE); + hdr->sn = cpu_to_le16(conn->tx_sn++); + hdr->slen = cpu_to_le16(hci_iso_data_len_pack(len, + HCI_ISO_STATUS_VALID)); + + if (sk->sk_state == BT_CONNECTED) + hci_send_iso(conn->hcon, skb); + else + len = -ENOTCONN; + + return len; +} + +static void iso_recv_frame(struct iso_conn *conn, struct sk_buff *skb) +{ + struct sock *sk; + + iso_conn_lock(conn); + sk = conn->sk; + iso_conn_unlock(conn); + + if (!sk) + goto drop; + + BT_DBG("sk %p len %d", sk, skb->len); + + if (sk->sk_state != BT_CONNECTED) + goto drop; + + if (!sock_queue_rcv_skb(sk, skb)) + return; + +drop: + kfree_skb(skb); +} + +/* -------- Socket interface ---------- */ +static struct sock *__iso_get_sock_listen_by_addr(bdaddr_t *ba) +{ + struct sock *sk; + + sk_for_each(sk, &iso_sk_list.head) { + if (sk->sk_state != BT_LISTEN) + continue; + + if (!bacmp(&iso_pi(sk)->src, ba)) + return sk; + } + + return NULL; +} + +static struct sock *__iso_get_sock_listen_by_sid(bdaddr_t *ba, bdaddr_t *bc, + __u8 sid) +{ + struct sock *sk; + + sk_for_each(sk, &iso_sk_list.head) { + if (sk->sk_state != BT_LISTEN) + continue; + + if (bacmp(&iso_pi(sk)->src, ba)) + continue; + + if (bacmp(&iso_pi(sk)->dst, bc)) + continue; + + if (iso_pi(sk)->bc_sid == sid) + return sk; + } + + return NULL; +} + +typedef bool (*iso_sock_match_t)(struct sock *sk, void *data); + +/* Find socket listening: + * source bdaddr (Unicast) + * destination bdaddr (Broadcast only) + * match func - pass NULL to ignore + * match func data - pass -1 to ignore + * Returns closest match. + */ +static struct sock *iso_get_sock_listen(bdaddr_t *src, bdaddr_t *dst, + iso_sock_match_t match, void *data) +{ + struct sock *sk = NULL, *sk1 = NULL; + + read_lock(&iso_sk_list.lock); + + sk_for_each(sk, &iso_sk_list.head) { + if (sk->sk_state != BT_LISTEN) + continue; + + /* Match Broadcast destination */ + if (bacmp(dst, BDADDR_ANY) && bacmp(&iso_pi(sk)->dst, dst)) + continue; + + /* Use Match function if provided */ + if (match && !match(sk, data)) + continue; + + /* Exact match. */ + if (!bacmp(&iso_pi(sk)->src, src)) + break; + + /* Closest match */ + if (!bacmp(&iso_pi(sk)->src, BDADDR_ANY)) + sk1 = sk; + } + + read_unlock(&iso_sk_list.lock); + + return sk ? sk : sk1; +} + +static void iso_sock_destruct(struct sock *sk) +{ + BT_DBG("sk %p", sk); + + skb_queue_purge(&sk->sk_receive_queue); + skb_queue_purge(&sk->sk_write_queue); +} + +static void iso_sock_cleanup_listen(struct sock *parent) +{ + struct sock *sk; + + BT_DBG("parent %p", parent); + + /* Close not yet accepted channels */ + while ((sk = bt_accept_dequeue(parent, NULL))) { + iso_sock_close(sk); + iso_sock_kill(sk); + } + + parent->sk_state = BT_CLOSED; + sock_set_flag(parent, SOCK_ZAPPED); +} + +/* Kill socket (only if zapped and orphan) + * Must be called on unlocked socket. 
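+ * It unlinks the socket from iso_sk_list, flags it SOCK_DEAD and drops + * its reference with sock_put().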
+ */ +static void iso_sock_kill(struct sock *sk) +{ + if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket || + sock_flag(sk, SOCK_DEAD)) + return; + + BT_DBG("sk %p state %d", sk, sk->sk_state); + + /* Kill poor orphan */ + bt_sock_unlink(&iso_sk_list, sk); + sock_set_flag(sk, SOCK_DEAD); + sock_put(sk); +} + +static void iso_conn_defer_reject(struct hci_conn *conn) +{ + struct hci_cp_le_reject_cis cp; + + BT_DBG("conn %p", conn); + + memset(&cp, 0, sizeof(cp)); + cp.handle = cpu_to_le16(conn->handle); + cp.reason = HCI_ERROR_REJ_BAD_ADDR; + hci_send_cmd(conn->hdev, HCI_OP_LE_REJECT_CIS, sizeof(cp), &cp); +} + +static void __iso_sock_close(struct sock *sk) +{ + BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket); + + switch (sk->sk_state) { + case BT_LISTEN: + iso_sock_cleanup_listen(sk); + break; + + case BT_CONNECTED: + case BT_CONFIG: + if (iso_pi(sk)->conn->hcon) { + sk->sk_state = BT_DISCONN; + iso_sock_set_timer(sk, ISO_DISCONN_TIMEOUT); + iso_conn_lock(iso_pi(sk)->conn); + hci_conn_drop(iso_pi(sk)->conn->hcon); + iso_pi(sk)->conn->hcon = NULL; + iso_conn_unlock(iso_pi(sk)->conn); + } else { + iso_chan_del(sk, ECONNRESET); + } + break; + + case BT_CONNECT2: + if (iso_pi(sk)->conn->hcon) + iso_conn_defer_reject(iso_pi(sk)->conn->hcon); + iso_chan_del(sk, ECONNRESET); + break; + case BT_CONNECT: + /* In case of DEFER_SETUP the hcon would be bound to CIG which + * needs to be removed so just call hci_conn_del so the cleanup + * callback do what is needed. + */ + if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags) && + iso_pi(sk)->conn->hcon) { + hci_conn_del(iso_pi(sk)->conn->hcon); + iso_pi(sk)->conn->hcon = NULL; + } + + iso_chan_del(sk, ECONNRESET); + break; + case BT_DISCONN: + iso_chan_del(sk, ECONNRESET); + break; + + default: + sock_set_flag(sk, SOCK_ZAPPED); + break; + } +} + +/* Must be called on unlocked socket. 
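+ * It clears any pending timeout, runs __iso_sock_close() under the socket + * lock and then reaps the socket with iso_sock_kill() once it is zapped + * and orphaned.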
*/ +static void iso_sock_close(struct sock *sk) +{ + iso_sock_clear_timer(sk); + lock_sock(sk); + __iso_sock_close(sk); + release_sock(sk); + iso_sock_kill(sk); +} + +static void iso_sock_init(struct sock *sk, struct sock *parent) +{ + BT_DBG("sk %p", sk); + + if (parent) { + sk->sk_type = parent->sk_type; + bt_sk(sk)->flags = bt_sk(parent)->flags; + security_sk_clone(parent, sk); + } +} + +static struct proto iso_proto = { + .name = "ISO", + .owner = THIS_MODULE, + .obj_size = sizeof(struct iso_pinfo) +}; + +#define DEFAULT_IO_QOS \ +{ \ + .interval = 10000u, \ + .latency = 10u, \ + .sdu = 40u, \ + .phy = BT_ISO_PHY_2M, \ + .rtn = 2u, \ +} + +static struct bt_iso_qos default_qos = { + .cig = BT_ISO_QOS_CIG_UNSET, + .cis = BT_ISO_QOS_CIS_UNSET, + .sca = 0x00, + .packing = 0x00, + .framing = 0x00, + .in = DEFAULT_IO_QOS, + .out = DEFAULT_IO_QOS, +}; + +static struct sock *iso_sock_alloc(struct net *net, struct socket *sock, + int proto, gfp_t prio, int kern) +{ + struct sock *sk; + + sk = sk_alloc(net, PF_BLUETOOTH, prio, &iso_proto, kern); + if (!sk) + return NULL; + + sock_init_data(sock, sk); + INIT_LIST_HEAD(&bt_sk(sk)->accept_q); + + sk->sk_destruct = iso_sock_destruct; + sk->sk_sndtimeo = ISO_CONN_TIMEOUT; + + sock_reset_flag(sk, SOCK_ZAPPED); + + sk->sk_protocol = proto; + sk->sk_state = BT_OPEN; + + /* Set address type as public as default src address is BDADDR_ANY */ + iso_pi(sk)->src_type = BDADDR_LE_PUBLIC; + + iso_pi(sk)->qos = default_qos; + + bt_sock_link(&iso_sk_list, sk); + return sk; +} + +static int iso_sock_create(struct net *net, struct socket *sock, int protocol, + int kern) +{ + struct sock *sk; + + BT_DBG("sock %p", sock); + + sock->state = SS_UNCONNECTED; + + if (sock->type != SOCK_SEQPACKET) + return -ESOCKTNOSUPPORT; + + sock->ops = &iso_sock_ops; + + sk = iso_sock_alloc(net, sock, protocol, GFP_ATOMIC, kern); + if (!sk) + return -ENOMEM; + + iso_sock_init(sk, NULL); + return 0; +} + +static int iso_sock_bind_bc(struct socket *sock, struct sockaddr *addr, + int addr_len) +{ + struct sockaddr_iso *sa = (struct sockaddr_iso *)addr; + struct sock *sk = sock->sk; + int i; + + BT_DBG("sk %p bc_sid %u bc_num_bis %u", sk, sa->iso_bc->bc_sid, + sa->iso_bc->bc_num_bis); + + if (addr_len > sizeof(*sa) + sizeof(*sa->iso_bc) || + sa->iso_bc->bc_num_bis < 0x01 || sa->iso_bc->bc_num_bis > 0x1f) + return -EINVAL; + + bacpy(&iso_pi(sk)->dst, &sa->iso_bc->bc_bdaddr); + iso_pi(sk)->dst_type = sa->iso_bc->bc_bdaddr_type; + iso_pi(sk)->sync_handle = -1; + iso_pi(sk)->bc_sid = sa->iso_bc->bc_sid; + iso_pi(sk)->bc_num_bis = sa->iso_bc->bc_num_bis; + + for (i = 0; i < iso_pi(sk)->bc_num_bis; i++) { + if (sa->iso_bc->bc_bis[i] < 0x01 || + sa->iso_bc->bc_bis[i] > 0x1f) + return -EINVAL; + + memcpy(iso_pi(sk)->bc_bis, sa->iso_bc->bc_bis, + iso_pi(sk)->bc_num_bis); + } + + return 0; +} + +static int iso_sock_bind(struct socket *sock, struct sockaddr *addr, + int addr_len) +{ + struct sockaddr_iso *sa = (struct sockaddr_iso *)addr; + struct sock *sk = sock->sk; + int err = 0; + + BT_DBG("sk %p %pMR type %u", sk, &sa->iso_bdaddr, sa->iso_bdaddr_type); + + if (!addr || addr_len < sizeof(struct sockaddr_iso) || + addr->sa_family != AF_BLUETOOTH) + return -EINVAL; + + lock_sock(sk); + + if (sk->sk_state != BT_OPEN) { + err = -EBADFD; + goto done; + } + + if (sk->sk_type != SOCK_SEQPACKET) { + err = -EINVAL; + goto done; + } + + /* Check if the address type is of LE type */ + if (!bdaddr_type_is_le(sa->iso_bdaddr_type)) { + err = -EINVAL; + goto done; + } + + bacpy(&iso_pi(sk)->src, 
&sa->iso_bdaddr); + iso_pi(sk)->src_type = sa->iso_bdaddr_type; + + /* Check for Broadcast address */ + if (addr_len > sizeof(*sa)) { + err = iso_sock_bind_bc(sock, addr, addr_len); + if (err) + goto done; + } + + sk->sk_state = BT_BOUND; + +done: + release_sock(sk); + return err; +} + +static int iso_sock_connect(struct socket *sock, struct sockaddr *addr, + int alen, int flags) +{ + struct sockaddr_iso *sa = (struct sockaddr_iso *)addr; + struct sock *sk = sock->sk; + int err; + + BT_DBG("sk %p", sk); + + if (alen < sizeof(struct sockaddr_iso) || + addr->sa_family != AF_BLUETOOTH) + return -EINVAL; + + if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) + return -EBADFD; + + if (sk->sk_type != SOCK_SEQPACKET) + return -EINVAL; + + /* Check if the address type is of LE type */ + if (!bdaddr_type_is_le(sa->iso_bdaddr_type)) + return -EINVAL; + + lock_sock(sk); + + bacpy(&iso_pi(sk)->dst, &sa->iso_bdaddr); + iso_pi(sk)->dst_type = sa->iso_bdaddr_type; + + if (bacmp(&iso_pi(sk)->dst, BDADDR_ANY)) + err = iso_connect_cis(sk); + else + err = iso_connect_bis(sk); + + if (err) + goto done; + + if (!test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { + err = bt_sock_wait_state(sk, BT_CONNECTED, + sock_sndtimeo(sk, flags & O_NONBLOCK)); + } + +done: + release_sock(sk); + return err; +} + +static int iso_listen_bis(struct sock *sk) +{ + struct hci_dev *hdev; + int err = 0; + + BT_DBG("%pMR -> %pMR (SID 0x%2.2x)", &iso_pi(sk)->src, + &iso_pi(sk)->dst, iso_pi(sk)->bc_sid); + + write_lock(&iso_sk_list.lock); + + if (__iso_get_sock_listen_by_sid(&iso_pi(sk)->src, &iso_pi(sk)->dst, + iso_pi(sk)->bc_sid)) + err = -EADDRINUSE; + + write_unlock(&iso_sk_list.lock); + + if (err) + return err; + + hdev = hci_get_route(&iso_pi(sk)->dst, &iso_pi(sk)->src, + iso_pi(sk)->src_type); + if (!hdev) + return -EHOSTUNREACH; + + hci_dev_lock(hdev); + + err = hci_pa_create_sync(hdev, &iso_pi(sk)->dst, iso_pi(sk)->dst_type, + iso_pi(sk)->bc_sid); + + hci_dev_unlock(hdev); + + return err; +} + +static int iso_listen_cis(struct sock *sk) +{ + int err = 0; + + BT_DBG("%pMR", &iso_pi(sk)->src); + + write_lock(&iso_sk_list.lock); + + if (__iso_get_sock_listen_by_addr(&iso_pi(sk)->src)) + err = -EADDRINUSE; + + write_unlock(&iso_sk_list.lock); + + return err; +} + +static int iso_sock_listen(struct socket *sock, int backlog) +{ + struct sock *sk = sock->sk; + int err = 0; + + BT_DBG("sk %p backlog %d", sk, backlog); + + lock_sock(sk); + + if (sk->sk_state != BT_BOUND) { + err = -EBADFD; + goto done; + } + + if (sk->sk_type != SOCK_SEQPACKET) { + err = -EINVAL; + goto done; + } + + if (!bacmp(&iso_pi(sk)->dst, BDADDR_ANY)) + err = iso_listen_cis(sk); + else + err = iso_listen_bis(sk); + + if (err) + goto done; + + sk->sk_max_ack_backlog = backlog; + sk->sk_ack_backlog = 0; + + sk->sk_state = BT_LISTEN; + +done: + release_sock(sk); + return err; +} + +static int iso_sock_accept(struct socket *sock, struct socket *newsock, + int flags, bool kern) +{ + DEFINE_WAIT_FUNC(wait, woken_wake_function); + struct sock *sk = sock->sk, *ch; + long timeo; + int err = 0; + + lock_sock(sk); + + timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); + + BT_DBG("sk %p timeo %ld", sk, timeo); + + /* Wait for an incoming connection. (wake-one). 
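+ * The listener sleeps on sk_sleep(sk) until bt_accept_dequeue() returns a + * child socket, the timeout expires or a signal is pending.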
*/ + add_wait_queue_exclusive(sk_sleep(sk), &wait); + while (1) { + if (sk->sk_state != BT_LISTEN) { + err = -EBADFD; + break; + } + + ch = bt_accept_dequeue(sk, newsock); + if (ch) + break; + + if (!timeo) { + err = -EAGAIN; + break; + } + + if (signal_pending(current)) { + err = sock_intr_errno(timeo); + break; + } + + release_sock(sk); + + timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo); + lock_sock(sk); + } + remove_wait_queue(sk_sleep(sk), &wait); + + if (err) + goto done; + + newsock->state = SS_CONNECTED; + + BT_DBG("new socket %p", ch); + +done: + release_sock(sk); + return err; +} + +static int iso_sock_getname(struct socket *sock, struct sockaddr *addr, + int peer) +{ + struct sockaddr_iso *sa = (struct sockaddr_iso *)addr; + struct sock *sk = sock->sk; + + BT_DBG("sock %p, sk %p", sock, sk); + + addr->sa_family = AF_BLUETOOTH; + + if (peer) { + bacpy(&sa->iso_bdaddr, &iso_pi(sk)->dst); + sa->iso_bdaddr_type = iso_pi(sk)->dst_type; + } else { + bacpy(&sa->iso_bdaddr, &iso_pi(sk)->src); + sa->iso_bdaddr_type = iso_pi(sk)->src_type; + } + + return sizeof(struct sockaddr_iso); +} + +static int iso_sock_sendmsg(struct socket *sock, struct msghdr *msg, + size_t len) +{ + struct sock *sk = sock->sk; + struct iso_conn *conn = iso_pi(sk)->conn; + struct sk_buff *skb, **frag; + int err; + + BT_DBG("sock %p, sk %p", sock, sk); + + err = sock_error(sk); + if (err) + return err; + + if (msg->msg_flags & MSG_OOB) + return -EOPNOTSUPP; + + if (sk->sk_state != BT_CONNECTED) + return -ENOTCONN; + + skb = bt_skb_sendmsg(sk, msg, len, conn->hcon->hdev->iso_mtu, + HCI_ISO_DATA_HDR_SIZE, 0); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + len -= skb->len; + + BT_DBG("skb %p len %d", sk, skb->len); + + /* Continuation fragments */ + frag = &skb_shinfo(skb)->frag_list; + while (len) { + struct sk_buff *tmp; + + tmp = bt_skb_sendmsg(sk, msg, len, conn->hcon->hdev->iso_mtu, + 0, 0); + if (IS_ERR(tmp)) { + kfree_skb(skb); + return PTR_ERR(tmp); + } + + *frag = tmp; + + len -= tmp->len; + + skb->len += tmp->len; + skb->data_len += tmp->len; + + BT_DBG("frag %p len %d", *frag, tmp->len); + + frag = &(*frag)->next; + } + + lock_sock(sk); + + if (sk->sk_state == BT_CONNECTED) + err = iso_send_frame(sk, skb); + else + err = -ENOTCONN; + + release_sock(sk); + + if (err < 0) + kfree_skb(skb); + return err; +} + +static void iso_conn_defer_accept(struct hci_conn *conn) +{ + struct hci_cp_le_accept_cis cp; + struct hci_dev *hdev = conn->hdev; + + BT_DBG("conn %p", conn); + + conn->state = BT_CONFIG; + + cp.handle = cpu_to_le16(conn->handle); + + hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp); +} + +static int iso_sock_recvmsg(struct socket *sock, struct msghdr *msg, + size_t len, int flags) +{ + struct sock *sk = sock->sk; + struct iso_pinfo *pi = iso_pi(sk); + int err; + + BT_DBG("sk %p", sk); + + lock_sock(sk); + + if (test_and_clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { + switch (sk->sk_state) { + case BT_CONNECT2: + iso_conn_defer_accept(pi->conn->hcon); + sk->sk_state = BT_CONFIG; + release_sock(sk); + return 0; + case BT_CONNECT: + err = iso_connect_cis(sk); + release_sock(sk); + return err; + } + } + + release_sock(sk); + + return bt_sock_recvmsg(sock, msg, len, flags); +} + +static bool check_io_qos(struct bt_iso_io_qos *qos) +{ + /* If no PHY is enable SDU must be 0 */ + if (!qos->phy && qos->sdu) + return false; + + if (qos->interval && (qos->interval < 0xff || qos->interval > 0xfffff)) + return false; + + if (qos->latency && (qos->latency < 0x05 || qos->latency > 0xfa0)) + return 
false; + + if (qos->phy > BT_ISO_PHY_ANY) + return false; + + return true; +} + +static bool check_qos(struct bt_iso_qos *qos) +{ + if (qos->sca > 0x07) + return false; + + if (qos->packing > 0x01) + return false; + + if (qos->framing > 0x01) + return false; + + if (!check_io_qos(&qos->in)) + return false; + + if (!check_io_qos(&qos->out)) + return false; + + return true; +} + +static int iso_sock_setsockopt(struct socket *sock, int level, int optname, + sockptr_t optval, unsigned int optlen) +{ + struct sock *sk = sock->sk; + int len, err = 0; + struct bt_iso_qos qos; + u32 opt; + + BT_DBG("sk %p", sk); + + lock_sock(sk); + + switch (optname) { + case BT_DEFER_SETUP: + if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) { + err = -EINVAL; + break; + } + + if (copy_from_sockptr(&opt, optval, sizeof(u32))) { + err = -EFAULT; + break; + } + + if (opt) + set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags); + else + clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags); + break; + + case BT_ISO_QOS: + if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND && + sk->sk_state != BT_CONNECT2) { + err = -EINVAL; + break; + } + + len = min_t(unsigned int, sizeof(qos), optlen); + if (len != sizeof(qos)) + return -EINVAL; + + memset(&qos, 0, sizeof(qos)); + + if (copy_from_sockptr(&qos, optval, len)) { + err = -EFAULT; + break; + } + + if (!check_qos(&qos)) { + err = -EINVAL; + break; + } + + iso_pi(sk)->qos = qos; + + break; + + case BT_ISO_BASE: + if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND && + sk->sk_state != BT_CONNECT2) { + err = -EINVAL; + break; + } + + if (optlen > sizeof(iso_pi(sk)->base)) { + err = -EOVERFLOW; + break; + } + + len = min_t(unsigned int, sizeof(iso_pi(sk)->base), optlen); + + if (copy_from_sockptr(iso_pi(sk)->base, optval, len)) { + err = -EFAULT; + break; + } + + iso_pi(sk)->base_len = len; + + break; + + default: + err = -ENOPROTOOPT; + break; + } + + release_sock(sk); + return err; +} + +static int iso_sock_getsockopt(struct socket *sock, int level, int optname, + char __user *optval, int __user *optlen) +{ + struct sock *sk = sock->sk; + int len, err = 0; + struct bt_iso_qos qos; + u8 base_len; + u8 *base; + + BT_DBG("sk %p", sk); + + if (get_user(len, optlen)) + return -EFAULT; + + lock_sock(sk); + + switch (optname) { + case BT_DEFER_SETUP: + if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) { + err = -EINVAL; + break; + } + + if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags), + (u32 __user *)optval)) + err = -EFAULT; + + break; + + case BT_ISO_QOS: + if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONNECT2) + qos = iso_pi(sk)->conn->hcon->iso_qos; + else + qos = iso_pi(sk)->qos; + + len = min_t(unsigned int, len, sizeof(qos)); + if (copy_to_user(optval, (char *)&qos, len)) + err = -EFAULT; + + break; + + case BT_ISO_BASE: + if (sk->sk_state == BT_CONNECTED) { + base_len = iso_pi(sk)->conn->hcon->le_per_adv_data_len; + base = iso_pi(sk)->conn->hcon->le_per_adv_data; + } else { + base_len = iso_pi(sk)->base_len; + base = iso_pi(sk)->base; + } + + len = min_t(unsigned int, len, base_len); + if (copy_to_user(optval, base, len)) + err = -EFAULT; + + break; + + default: + err = -ENOPROTOOPT; + break; + } + + release_sock(sk); + return err; +} + +static int iso_sock_shutdown(struct socket *sock, int how) +{ + struct sock *sk = sock->sk; + int err = 0; + + BT_DBG("sock %p, sk %p", sock, sk); + + if (!sk) + return 0; + + sock_hold(sk); + lock_sock(sk); + + if (!sk->sk_shutdown) { + sk->sk_shutdown = SHUTDOWN_MASK; + iso_sock_clear_timer(sk); + 
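+ /* For reference, the QoS validated by check_qos() above is supplied by user + * space through the BT_ISO_QOS option before connect(); a minimal sketch, + * assuming the usual SOL_BLUETOOTH option level and the struct bt_iso_qos + * layout shown earlier (the sockaddr_iso setup in addr is omitted): + * + *	struct bt_iso_qos qos = { + *		.cig = BT_ISO_QOS_CIG_UNSET, + *		.cis = BT_ISO_QOS_CIS_UNSET, + *		.out = { .interval = 10000, .latency = 10, .sdu = 40, + *			 .phy = BT_ISO_PHY_2M, .rtn = 2 }, + *	}; + *	int fd = socket(PF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_ISO); + * + *	setsockopt(fd, SOL_BLUETOOTH, BT_ISO_QOS, &qos, sizeof(qos)); + *	connect(fd, (struct sockaddr *)&addr, sizeof(addr)); + */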
__iso_sock_close(sk); + + if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime && + !(current->flags & PF_EXITING)) + err = bt_sock_wait_state(sk, BT_CLOSED, + sk->sk_lingertime); + } + + release_sock(sk); + sock_put(sk); + + return err; +} + +static int iso_sock_release(struct socket *sock) +{ + struct sock *sk = sock->sk; + int err = 0; + + BT_DBG("sock %p, sk %p", sock, sk); + + if (!sk) + return 0; + + iso_sock_close(sk); + + if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime && + !(current->flags & PF_EXITING)) { + lock_sock(sk); + err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime); + release_sock(sk); + } + + sock_orphan(sk); + iso_sock_kill(sk); + return err; +} + +static void iso_sock_ready(struct sock *sk) +{ + BT_DBG("sk %p", sk); + + if (!sk) + return; + + lock_sock(sk); + iso_sock_clear_timer(sk); + sk->sk_state = BT_CONNECTED; + sk->sk_state_change(sk); + release_sock(sk); +} + +struct iso_list_data { + struct hci_conn *hcon; + int count; +}; + +static bool iso_match_big(struct sock *sk, void *data) +{ + struct hci_evt_le_big_sync_estabilished *ev = data; + + return ev->handle == iso_pi(sk)->qos.big; +} + +static void iso_conn_ready(struct iso_conn *conn) +{ + struct sock *parent; + struct sock *sk = conn->sk; + struct hci_ev_le_big_sync_estabilished *ev; + + BT_DBG("conn %p", conn); + + if (sk) { + iso_sock_ready(conn->sk); + } else { + iso_conn_lock(conn); + + if (!conn->hcon) { + iso_conn_unlock(conn); + return; + } + + ev = hci_recv_event_data(conn->hcon->hdev, + HCI_EVT_LE_BIG_SYNC_ESTABILISHED); + if (ev) + parent = iso_get_sock_listen(&conn->hcon->src, + &conn->hcon->dst, + iso_match_big, ev); + else + parent = iso_get_sock_listen(&conn->hcon->src, + BDADDR_ANY, NULL, NULL); + + if (!parent) { + iso_conn_unlock(conn); + return; + } + + lock_sock(parent); + + sk = iso_sock_alloc(sock_net(parent), NULL, + BTPROTO_ISO, GFP_ATOMIC, 0); + if (!sk) { + release_sock(parent); + iso_conn_unlock(conn); + return; + } + + iso_sock_init(sk, parent); + + bacpy(&iso_pi(sk)->src, &conn->hcon->src); + iso_pi(sk)->src_type = conn->hcon->src_type; + + /* If hcon has no destination address (BDADDR_ANY) it means it + * was created by HCI_EV_LE_BIG_SYNC_ESTABILISHED so we need to + * initialize using the parent socket destination address. 
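+ * The sync_handle the parent obtained from the PA sync established event + * is copied over as well.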
+ */ + if (!bacmp(&conn->hcon->dst, BDADDR_ANY)) { + bacpy(&conn->hcon->dst, &iso_pi(parent)->dst); + conn->hcon->dst_type = iso_pi(parent)->dst_type; + conn->hcon->sync_handle = iso_pi(parent)->sync_handle; + } + + bacpy(&iso_pi(sk)->dst, &conn->hcon->dst); + iso_pi(sk)->dst_type = conn->hcon->dst_type; + + hci_conn_hold(conn->hcon); + __iso_chan_add(conn, sk, parent); + + if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) + sk->sk_state = BT_CONNECT2; + else + sk->sk_state = BT_CONNECTED; + + /* Wake up parent */ + parent->sk_data_ready(parent); + + release_sock(parent); + + iso_conn_unlock(conn); + } +} + +static bool iso_match_sid(struct sock *sk, void *data) +{ + struct hci_ev_le_pa_sync_established *ev = data; + + return ev->sid == iso_pi(sk)->bc_sid; +} + +static bool iso_match_sync_handle(struct sock *sk, void *data) +{ + struct hci_evt_le_big_info_adv_report *ev = data; + + return le16_to_cpu(ev->sync_handle) == iso_pi(sk)->sync_handle; +} + +/* ----- ISO interface with lower layer (HCI) ----- */ + +int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags) +{ + struct hci_ev_le_pa_sync_established *ev1; + struct hci_evt_le_big_info_adv_report *ev2; + struct sock *sk; + int lm = 0; + + bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr); + + /* Broadcast receiver requires handling of some events before it can + * proceed to establishing a BIG sync: + * + * 1. HCI_EV_LE_PA_SYNC_ESTABLISHED: The socket may specify a specific + * SID to listen to and once sync is established its handle needs to + * be stored in iso_pi(sk)->sync_handle so it can be matched once + * receiving the BIG Info. + * 2. HCI_EVT_LE_BIG_INFO_ADV_REPORT: When connect_ind is triggered by + * a BIG Info it attempts to check if there is any listening socket with + * the same sync_handle and if so attempts to create a sync.
+ */ + ev1 = hci_recv_event_data(hdev, HCI_EV_LE_PA_SYNC_ESTABLISHED); + if (ev1) { + sk = iso_get_sock_listen(&hdev->bdaddr, bdaddr, iso_match_sid, + ev1); + if (sk) + iso_pi(sk)->sync_handle = le16_to_cpu(ev1->handle); + + goto done; + } + + ev2 = hci_recv_event_data(hdev, HCI_EVT_LE_BIG_INFO_ADV_REPORT); + if (ev2) { + sk = iso_get_sock_listen(&hdev->bdaddr, bdaddr, + iso_match_sync_handle, ev2); + if (sk) { + int err; + + if (ev2->num_bis < iso_pi(sk)->bc_num_bis) + iso_pi(sk)->bc_num_bis = ev2->num_bis; + + err = hci_le_big_create_sync(hdev, + &iso_pi(sk)->qos, + iso_pi(sk)->sync_handle, + iso_pi(sk)->bc_num_bis, + iso_pi(sk)->bc_bis); + if (err) { + bt_dev_err(hdev, "hci_le_big_create_sync: %d", + err); + sk = NULL; + } + } + } else { + sk = iso_get_sock_listen(&hdev->bdaddr, BDADDR_ANY, NULL, NULL); + } + +done: + if (!sk) + return lm; + + lm |= HCI_LM_ACCEPT; + + if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) + *flags |= HCI_PROTO_DEFER; + + return lm; +} + +static void iso_connect_cfm(struct hci_conn *hcon, __u8 status) +{ + if (hcon->type != ISO_LINK) { + if (hcon->type != LE_LINK) + return; + + /* Check if LE link has failed */ + if (status) { + if (hcon->link) + iso_conn_del(hcon->link, bt_to_errno(status)); + return; + } + + /* Create CIS if pending */ + hci_le_create_cis(hcon); + return; + } + + BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status); + + if (!status) { + struct iso_conn *conn; + + conn = iso_conn_add(hcon); + if (conn) + iso_conn_ready(conn); + } else { + iso_conn_del(hcon, bt_to_errno(status)); + } +} + +static void iso_disconn_cfm(struct hci_conn *hcon, __u8 reason) +{ + if (hcon->type != ISO_LINK) + return; + + BT_DBG("hcon %p reason %d", hcon, reason); + + iso_conn_del(hcon, bt_to_errno(reason)); +} + +void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags) +{ + struct iso_conn *conn = hcon->iso_data; + struct hci_iso_data_hdr *hdr; + __u16 pb, ts, len; + + if (!conn) + goto drop; + + pb = hci_iso_flags_pb(flags); + ts = hci_iso_flags_ts(flags); + + BT_DBG("conn %p len %d pb 0x%x ts 0x%x", conn, skb->len, pb, ts); + + switch (pb) { + case ISO_START: + case ISO_SINGLE: + if (conn->rx_len) { + BT_ERR("Unexpected start frame (len %d)", skb->len); + kfree_skb(conn->rx_skb); + conn->rx_skb = NULL; + conn->rx_len = 0; + } + + if (ts) { + /* TODO: add timestamp to the packet? 
*/ + hdr = skb_pull_data(skb, HCI_ISO_TS_DATA_HDR_SIZE); + if (!hdr) { + BT_ERR("Frame is too short (len %d)", skb->len); + goto drop; + } + + } else { + hdr = skb_pull_data(skb, HCI_ISO_DATA_HDR_SIZE); + if (!hdr) { + BT_ERR("Frame is too short (len %d)", skb->len); + goto drop; + } + } + + len = __le16_to_cpu(hdr->slen); + flags = hci_iso_data_flags(len); + len = hci_iso_data_len(len); + + BT_DBG("Start: total len %d, frag len %d flags 0x%4.4x", len, + skb->len, flags); + + if (len == skb->len) { + /* Complete frame received */ + iso_recv_frame(conn, skb); + return; + } + + if (pb == ISO_SINGLE) { + BT_ERR("Frame malformed (len %d, expected len %d)", + skb->len, len); + goto drop; + } + + if (skb->len > len) { + BT_ERR("Frame is too long (len %d, expected len %d)", + skb->len, len); + goto drop; + } + + /* Allocate skb for the complete frame (with header) */ + conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL); + if (!conn->rx_skb) + goto drop; + + skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len), + skb->len); + conn->rx_len = len - skb->len; + break; + + case ISO_CONT: + BT_DBG("Cont: frag len %d (expecting %d)", skb->len, + conn->rx_len); + + if (!conn->rx_len) { + BT_ERR("Unexpected continuation frame (len %d)", + skb->len); + goto drop; + } + + if (skb->len > conn->rx_len) { + BT_ERR("Fragment is too long (len %d, expected %d)", + skb->len, conn->rx_len); + kfree_skb(conn->rx_skb); + conn->rx_skb = NULL; + conn->rx_len = 0; + goto drop; + } + + skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len), + skb->len); + conn->rx_len -= skb->len; + return; + + case ISO_END: + skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len), + skb->len); + conn->rx_len -= skb->len; + + if (!conn->rx_len) { + struct sk_buff *rx_skb = conn->rx_skb; + + /* Complete frame received. iso_recv_frame + * takes ownership of the skb so set the global + * rx_skb pointer to NULL first. 
+ */ + conn->rx_skb = NULL; + iso_recv_frame(conn, rx_skb); + } + break; + } + +drop: + kfree_skb(skb); +} + +static struct hci_cb iso_cb = { + .name = "ISO", + .connect_cfm = iso_connect_cfm, + .disconn_cfm = iso_disconn_cfm, +}; + +static int iso_debugfs_show(struct seq_file *f, void *p) +{ + struct sock *sk; + + read_lock(&iso_sk_list.lock); + + sk_for_each(sk, &iso_sk_list.head) { + seq_printf(f, "%pMR %pMR %d\n", &iso_pi(sk)->src, + &iso_pi(sk)->dst, sk->sk_state); + } + + read_unlock(&iso_sk_list.lock); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(iso_debugfs); + +static struct dentry *iso_debugfs; + +static const struct proto_ops iso_sock_ops = { + .family = PF_BLUETOOTH, + .owner = THIS_MODULE, + .release = iso_sock_release, + .bind = iso_sock_bind, + .connect = iso_sock_connect, + .listen = iso_sock_listen, + .accept = iso_sock_accept, + .getname = iso_sock_getname, + .sendmsg = iso_sock_sendmsg, + .recvmsg = iso_sock_recvmsg, + .poll = bt_sock_poll, + .ioctl = bt_sock_ioctl, + .mmap = sock_no_mmap, + .socketpair = sock_no_socketpair, + .shutdown = iso_sock_shutdown, + .setsockopt = iso_sock_setsockopt, + .getsockopt = iso_sock_getsockopt +}; + +static const struct net_proto_family iso_sock_family_ops = { + .family = PF_BLUETOOTH, + .owner = THIS_MODULE, + .create = iso_sock_create, +}; + +static bool iso_inited; + +bool iso_enabled(void) +{ + return iso_inited; +} + +int iso_init(void) +{ + int err; + + BUILD_BUG_ON(sizeof(struct sockaddr_iso) > sizeof(struct sockaddr)); + + if (iso_inited) + return -EALREADY; + + err = proto_register(&iso_proto, 0); + if (err < 0) + return err; + + err = bt_sock_register(BTPROTO_ISO, &iso_sock_family_ops); + if (err < 0) { + BT_ERR("ISO socket registration failed"); + goto error; + } + + err = bt_procfs_init(&init_net, "iso", &iso_sk_list, NULL); + if (err < 0) { + BT_ERR("Failed to create ISO proc file"); + bt_sock_unregister(BTPROTO_ISO); + goto error; + } + + BT_INFO("ISO socket layer initialized"); + + hci_register_cb(&iso_cb); + + if (IS_ERR_OR_NULL(bt_debugfs)) + return 0; + + if (!iso_debugfs) { + iso_debugfs = debugfs_create_file("iso", 0444, bt_debugfs, + NULL, &iso_debugfs_fops); + } + + iso_inited = true; + + return 0; + +error: + proto_unregister(&iso_proto); + return err; +} + +int iso_exit(void) +{ + if (!iso_inited) + return -EALREADY; + + bt_procfs_cleanup(&init_net, "iso"); + + debugfs_remove(iso_debugfs); + iso_debugfs = NULL; + + hci_unregister_cb(&iso_cb); + + bt_sock_unregister(BTPROTO_ISO); + + proto_unregister(&iso_proto); + + iso_inited = false; + + return 0; +} diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index ae78490ecd3d..09ecaf556de5 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -1369,6 +1369,7 @@ static void l2cap_le_connect(struct l2cap_chan *chan) l2cap_le_flowctl_init(chan, 0); + memset(&req, 0, sizeof(req)); req.psm = chan->psm; req.scid = cpu_to_le16(chan->scid); req.mtu = cpu_to_le16(chan->imtu); diff --git a/net/bluetooth/lib.c b/net/bluetooth/lib.c index 5326f41a58b7..469a0c95b6e8 100644 --- a/net/bluetooth/lib.c +++ b/net/bluetooth/lib.c @@ -135,6 +135,77 @@ int bt_to_errno(__u16 code) } EXPORT_SYMBOL(bt_to_errno); +/* Unix errno to Bluetooth error codes mapping */ +__u8 bt_status(int err) +{ + /* Don't convert if already positive value */ + if (err >= 0) + return err; + + switch (err) { + case -EBADRQC: + return 0x01; + + case -ENOTCONN: + return 0x02; + + case -EIO: + return 0x03; + + case -EHOSTDOWN: + return 0x04; + + case -EACCES: + return 0x05; + + 
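+ /* Worked example: bt_status(-ETIMEDOUT) yields 0x08 (Connection Timeout), + * while a value that is already a positive HCI status is passed through + * unchanged by the err >= 0 check above. + */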
case -EBADE: + return 0x06; + + case -ENOMEM: + return 0x07; + + case -ETIMEDOUT: + return 0x08; + + case -EMLINK: + return 0x09; + + case EALREADY: + return 0x0b; + + case -EBUSY: + return 0x0c; + + case -ECONNREFUSED: + return 0x0d; + + case -EOPNOTSUPP: + return 0x11; + + case -EINVAL: + return 0x12; + + case -ECONNRESET: + return 0x13; + + case -ECONNABORTED: + return 0x16; + + case ELOOP: + return 0x17; + + case -EPROTONOSUPPORT: + return 0x1a; + + case -EPROTO: + return 0x19; + + default: + return 0x1f; + } +} +EXPORT_SYMBOL(bt_status); + void bt_info(const char *format, ...) { struct va_format vaf; diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index ae758ab1b558..8cfafd7a0576 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -1023,11 +1023,39 @@ static void rpa_expired(struct work_struct *work) hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL); } +static void discov_off(struct work_struct *work) +{ + struct hci_dev *hdev = container_of(work, struct hci_dev, + discov_off.work); + + bt_dev_dbg(hdev, ""); + + hci_dev_lock(hdev); + + /* When discoverable timeout triggers, then just make sure + * the limited discoverable flag is cleared. Even in the case + * of a timeout triggered from general discoverable, it is + * safe to unconditionally clear the flag. + */ + hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); + hci_dev_clear_flag(hdev, HCI_DISCOVERABLE); + hdev->discov_timeout = 0; + + hci_update_discoverable(hdev); + + mgmt_new_settings(hdev); + + hci_dev_unlock(hdev); +} + static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev) { if (hci_dev_test_and_set_flag(hdev, HCI_MGMT)) return; + BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION); + + INIT_DELAYED_WORK(&hdev->discov_off, discov_off); INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off); INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired); @@ -1082,11 +1110,11 @@ static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir) eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE, hdev->appearance); - name_len = strlen(hdev->dev_name); + name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name)); eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE, hdev->dev_name, name_len); - name_len = strlen(hdev->short_name); + name_len = strnlen(hdev->short_name, sizeof(hdev->short_name)); eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT, hdev->short_name, name_len); @@ -1611,7 +1639,7 @@ static int set_connectable_update_settings(struct hci_dev *hdev, return err; if (changed) { - hci_req_update_scan(hdev); + hci_update_scan(hdev); hci_update_passive_scan(hdev); return new_settings(hdev, sk); } @@ -2528,6 +2556,37 @@ static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr, skip_sk); } +static void unpair_device_complete(struct hci_dev *hdev, void *data, int err) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_unpair_device *cp = cmd->param; + + if (!err) + device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk); + + cmd->cmd_complete(cmd, err); + mgmt_pending_free(cmd); +} + +static int unpair_device_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_unpair_device *cp = cmd->param; + struct hci_conn *conn; + + if (cp->addr.type == BDADDR_BREDR) + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, + &cp->addr.bdaddr); + else + conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, + le_addr_type(cp->addr.type)); + + if (!conn) + return 0; + + return hci_abort_conn_sync(hdev, conn, 
HCI_ERROR_REMOTE_USER_TERM); +} + static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { @@ -2638,7 +2697,7 @@ done: goto unlock; } - cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp, + cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp, sizeof(*cp)); if (!cmd) { err = -ENOMEM; @@ -2647,9 +2706,10 @@ done: cmd->cmd_complete = addr_cmd_complete; - err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM); + err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd, + unpair_device_complete); if (err < 0) - mgmt_pending_remove(cmd); + mgmt_pending_free(cmd); unlock: hci_dev_unlock(hdev); @@ -3925,10 +3985,16 @@ static const u8 rpa_resolution_uuid[16] = { 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15, }; +/* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */ +static const u8 iso_socket_uuid[16] = { + 0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98, + 0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f, +}; + static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { - char buf[102]; /* Enough space for 5 features: 2 + 20 * 5 */ + char buf[122]; /* Enough space for 6 features: 2 + 20 * 6 */ struct mgmt_rp_read_exp_features_info *rp = (void *)buf; u16 idx = 0; u32 flags; @@ -3992,6 +4058,13 @@ static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev, idx++; } + if (IS_ENABLED(CONFIG_BT_LE)) { + flags = iso_enabled() ? BIT(0) : 0; + memcpy(rp->features[idx].uuid, iso_socket_uuid, 16); + rp->features[idx].flags = cpu_to_le32(flags); + idx++; + } + rp->feature_count = cpu_to_le16(idx); /* After reading the experimental features information, enable @@ -4384,6 +4457,57 @@ static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev, return err; } +#ifdef CONFIG_BT_LE +static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev, + struct mgmt_cp_set_exp_feature *cp, u16 data_len) +{ + struct mgmt_rp_set_exp_feature rp; + bool val, changed = false; + int err; + + /* Command requires to use the non-controller index */ + if (hdev) + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_INVALID_INDEX); + + /* Parameters are limited to a single octet */ + if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1) + return mgmt_cmd_status(sk, MGMT_INDEX_NONE, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_INVALID_PARAMS); + + /* Only boolean on/off is supported */ + if (cp->param[0] != 0x00 && cp->param[0] != 0x01) + return mgmt_cmd_status(sk, MGMT_INDEX_NONE, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_INVALID_PARAMS); + + val = cp->param[0] ? true : false; + if (val) + err = iso_init(); + else + err = iso_exit(); + + if (!err) + changed = true; + + memcpy(rp.uuid, iso_socket_uuid, 16); + rp.flags = cpu_to_le32(val ? 
BIT(0) : 0); + + hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); + + err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, + MGMT_OP_SET_EXP_FEATURE, 0, + &rp, sizeof(rp)); + + if (changed) + exp_feature_changed(hdev, iso_socket_uuid, val, sk); + + return err; +} +#endif + static const struct mgmt_exp_feature { const u8 *uuid; int (*set_func)(struct sock *sk, struct hci_dev *hdev, @@ -4397,6 +4521,9 @@ static const struct mgmt_exp_feature { EXP_FEAT(quality_report_uuid, set_quality_report_func), EXP_FEAT(offload_codecs_uuid, set_offload_codec_func), EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func), +#ifdef CONFIG_BT_LE + EXP_FEAT(iso_socket_uuid, set_iso_socket_func), +#endif /* end with a null feature */ EXP_FEAT(NULL, NULL) @@ -4646,23 +4773,15 @@ static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev, return err; } -int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status) +static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, + void *data, int status) { struct mgmt_rp_add_adv_patterns_monitor rp; - struct mgmt_pending_cmd *cmd; - struct adv_monitor *monitor; - int err = 0; + struct mgmt_pending_cmd *cmd = data; + struct adv_monitor *monitor = cmd->user_data; hci_dev_lock(hdev); - cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev); - if (!cmd) { - cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev); - if (!cmd) - goto done; - } - - monitor = cmd->user_data; rp.monitor_handle = cpu_to_le16(monitor->handle); if (!status) { @@ -4673,26 +4792,29 @@ int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status) hci_update_passive_scan(hdev); } - err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, - mgmt_status(status), &rp, sizeof(rp)); + mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, + mgmt_status(status), &rp, sizeof(rp)); mgmt_pending_remove(cmd); -done: hci_dev_unlock(hdev); - bt_dev_dbg(hdev, "add monitor %d complete, status %u", + bt_dev_dbg(hdev, "add monitor %d complete, status %d", rp.monitor_handle, status); +} - return err; +static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + struct adv_monitor *monitor = cmd->user_data; + + return hci_add_adv_monitor(hdev, monitor); } static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev, struct adv_monitor *m, u8 status, void *data, u16 len, u16 op) { - struct mgmt_rp_add_adv_patterns_monitor rp; struct mgmt_pending_cmd *cmd; int err; - bool pending; hci_dev_lock(hdev); @@ -4714,12 +4836,11 @@ static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev, } cmd->user_data = m; - pending = hci_add_adv_monitor(hdev, m, &err); + err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd, + mgmt_add_adv_patterns_monitor_complete); if (err) { - if (err == -ENOSPC || err == -ENOMEM) + if (err == -ENOMEM) status = MGMT_STATUS_NO_RESOURCES; - else if (err == -EINVAL) - status = MGMT_STATUS_INVALID_PARAMS; else status = MGMT_STATUS_FAILED; @@ -4727,18 +4848,6 @@ static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev, goto unlock; } - if (!pending) { - mgmt_pending_remove(cmd); - rp.monitor_handle = cpu_to_le16(m->handle); - mgmt_adv_monitor_added(sk, hdev, m->handle); - m->state = ADV_MONITOR_STATE_REGISTERED; - hdev->adv_monitors_cnt++; - - hci_dev_unlock(hdev); - return mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_SUCCESS, - &rp, sizeof(rp)); - } - hci_dev_unlock(hdev); return 0; @@ -4879,49 +4988,46 
@@ done: MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI); } -int mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status) +static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, + void *data, int status) { struct mgmt_rp_remove_adv_monitor rp; - struct mgmt_cp_remove_adv_monitor *cp; - struct mgmt_pending_cmd *cmd; - int err = 0; + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_remove_adv_monitor *cp = cmd->param; hci_dev_lock(hdev); - cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev); - if (!cmd) - goto done; - - cp = cmd->param; rp.monitor_handle = cp->monitor_handle; if (!status) hci_update_passive_scan(hdev); - err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, - mgmt_status(status), &rp, sizeof(rp)); + mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, + mgmt_status(status), &rp, sizeof(rp)); mgmt_pending_remove(cmd); -done: hci_dev_unlock(hdev); - bt_dev_dbg(hdev, "remove monitor %d complete, status %u", + bt_dev_dbg(hdev, "remove monitor %d complete, status %d", rp.monitor_handle, status); +} - return err; +static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_remove_adv_monitor *cp = cmd->param; + u16 handle = __le16_to_cpu(cp->monitor_handle); + + if (!handle) + return hci_remove_all_adv_monitor(hdev); + + return hci_remove_single_adv_monitor(hdev, handle); } static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { - struct mgmt_cp_remove_adv_monitor *cp = data; - struct mgmt_rp_remove_adv_monitor rp; struct mgmt_pending_cmd *cmd; - u16 handle = __le16_to_cpu(cp->monitor_handle); int err, status; - bool pending; - - BT_DBG("request for %s", hdev->name); - rp.monitor_handle = cp->monitor_handle; hci_dev_lock(hdev); @@ -4939,34 +5045,23 @@ static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev, goto unlock; } - if (handle) - pending = hci_remove_single_adv_monitor(hdev, handle, &err); - else - pending = hci_remove_all_adv_monitor(hdev, &err); + err = hci_cmd_sync_queue(hdev, mgmt_remove_adv_monitor_sync, cmd, + mgmt_remove_adv_monitor_complete); if (err) { mgmt_pending_remove(cmd); - if (err == -ENOENT) - status = MGMT_STATUS_INVALID_INDEX; + if (err == -ENOMEM) + status = MGMT_STATUS_NO_RESOURCES; else status = MGMT_STATUS_FAILED; - goto unlock; - } - - /* monitor can be removed without forwarding request to controller */ - if (!pending) { mgmt_pending_remove(cmd); - hci_dev_unlock(hdev); - - return mgmt_cmd_complete(sk, hdev->id, - MGMT_OP_REMOVE_ADV_MONITOR, - MGMT_STATUS_SUCCESS, - &rp, sizeof(rp)); + goto unlock; } hci_dev_unlock(hdev); + return 0; unlock: @@ -6711,11 +6806,6 @@ static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err) mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status, &rp, sizeof(rp)); - if (conn) { - hci_conn_drop(conn); - hci_conn_put(conn); - } - mgmt_pending_free(cmd); } @@ -6734,15 +6824,10 @@ static int get_conn_info_sync(struct hci_dev *hdev, void *data) else conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr); - if (!conn || conn != cmd->user_data || conn->state != BT_CONNECTED) { - if (cmd->user_data) { - hci_conn_drop(cmd->user_data); - hci_conn_put(cmd->user_data); - cmd->user_data = NULL; - } + if (!conn || conn->state != BT_CONNECTED) return MGMT_STATUS_NOT_CONNECTED; - } + cmd->user_data = conn; handle = cpu_to_le16(conn->handle); /* Refresh RSSI each time */ @@ -6821,11 +6906,12 @@ static int get_conn_info(struct sock *sk, struct hci_dev 
*hdev, void *data, cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data, len); - if (!cmd) + if (!cmd) { err = -ENOMEM; - else + } else { err = hci_cmd_sync_queue(hdev, get_conn_info_sync, cmd, get_conn_info_complete); + } if (err < 0) { mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO, @@ -6837,9 +6923,6 @@ static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data, goto unlock; } - hci_conn_hold(conn); - cmd->user_data = hci_conn_get(conn); - conn->conn_info_timestamp = jiffies; } else { /* Cache is valid, just reply with values cached in hci_conn */ @@ -6878,8 +6961,6 @@ static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err) if (conn) { rp.piconet_clock = cpu_to_le32(conn->clock); rp.accuracy = cpu_to_le16(conn->clock_accuracy); - hci_conn_drop(conn); - hci_conn_put(conn); } complete: @@ -6894,30 +6975,21 @@ static int get_clock_info_sync(struct hci_dev *hdev, void *data) struct mgmt_pending_cmd *cmd = data; struct mgmt_cp_get_clock_info *cp = cmd->param; struct hci_cp_read_clock hci_cp; - struct hci_conn *conn = cmd->user_data; - int err; + struct hci_conn *conn; memset(&hci_cp, 0, sizeof(hci_cp)); - err = hci_read_clock_sync(hdev, &hci_cp); + hci_read_clock_sync(hdev, &hci_cp); - if (conn) { - /* Make sure connection still exists */ - conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, - &cp->addr.bdaddr); + /* Make sure connection still exists */ + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr); + if (!conn || conn->state != BT_CONNECTED) + return MGMT_STATUS_NOT_CONNECTED; - if (conn && conn == cmd->user_data && - conn->state == BT_CONNECTED) { - hci_cp.handle = cpu_to_le16(conn->handle); - hci_cp.which = 0x01; /* Piconet clock */ - err = hci_read_clock_sync(hdev, &hci_cp); - } else if (cmd->user_data) { - hci_conn_drop(cmd->user_data); - hci_conn_put(cmd->user_data); - cmd->user_data = NULL; - } - } + cmd->user_data = conn; + hci_cp.handle = cpu_to_le16(conn->handle); + hci_cp.which = 0x01; /* Piconet clock */ - return err; + return hci_read_clock_sync(hdev, &hci_cp); } static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data, @@ -6976,10 +7048,6 @@ static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data, if (cmd) mgmt_pending_free(cmd); - - } else if (conn) { - hci_conn_hold(conn); - cmd->user_data = hci_conn_get(conn); } @@ -7108,7 +7176,7 @@ static int add_device(struct sock *sk, struct hci_dev *hdev, if (err) goto unlock; - hci_req_update_scan(hdev); + hci_update_scan(hdev); goto added; } @@ -7220,7 +7288,7 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev, goto unlock; } - hci_req_update_scan(hdev); + hci_update_scan(hdev); device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type); @@ -7284,7 +7352,7 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev, kfree(b); } - hci_req_update_scan(hdev); + hci_update_scan(hdev); list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) { if (p->auto_connect == HCI_AUTO_CONN_DISABLED) @@ -8090,7 +8158,7 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev, u16 timeout, duration; unsigned int prev_instance_cnt; u8 schedule_instance = 0; - struct adv_info *next_instance; + struct adv_info *adv, *next_instance; int err; struct mgmt_pending_cmd *cmd; @@ -8141,7 +8209,7 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev, prev_instance_cnt = hdev->adv_instance_cnt; - err = hci_add_adv_instance(hdev, cp->instance, flags, + adv = hci_add_adv_instance(hdev, cp->instance, flags, 
cp->adv_data_len, cp->data, cp->scan_rsp_len, cp->data + cp->adv_data_len, @@ -8149,7 +8217,7 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev, HCI_ADV_TX_POWER_NO_PREFERENCE, hdev->le_adv_min_interval, hdev->le_adv_max_interval); - if (err < 0) { + if (IS_ERR(adv)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, MGMT_STATUS_FAILED); goto unlock; @@ -8280,6 +8348,7 @@ static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev, struct mgmt_cp_add_ext_adv_params *cp = data; struct mgmt_rp_add_ext_adv_params rp; struct mgmt_pending_cmd *cmd = NULL; + struct adv_info *adv; u32 flags, min_interval, max_interval; u16 timeout, duration; u8 status; @@ -8349,11 +8418,11 @@ static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev, HCI_ADV_TX_POWER_NO_PREFERENCE; /* Create advertising instance with no advertising or response data */ - err = hci_add_adv_instance(hdev, cp->instance, flags, - 0, NULL, 0, NULL, timeout, duration, - tx_power, min_interval, max_interval); + adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL, + timeout, duration, tx_power, min_interval, + max_interval); - if (err < 0) { + if (IS_ERR(adv)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS, MGMT_STATUS_FAILED); goto unlock; @@ -8866,6 +8935,11 @@ void mgmt_index_removed(struct hci_dev *hdev) mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev), HCI_MGMT_EXT_INDEX_EVENTS); + + /* Cancel any remaining timed work */ + cancel_delayed_work_sync(&hdev->discov_off); + cancel_delayed_work_sync(&hdev->service_cache); + cancel_delayed_work_sync(&hdev->rpa_expired); } void mgmt_power_on(struct hci_dev *hdev, int err) diff --git a/net/bluetooth/msft.c b/net/bluetooth/msft.c index f43994523b1f..14975769f678 100644 --- a/net/bluetooth/msft.c +++ b/net/bluetooth/msft.c @@ -99,18 +99,11 @@ struct msft_data { __u8 evt_prefix_len; __u8 *evt_prefix; struct list_head handle_map; - __u16 pending_add_handle; - __u16 pending_remove_handle; __u8 resuming; __u8 suspending; __u8 filter_enabled; }; -static int __msft_add_monitor_pattern(struct hci_dev *hdev, - struct adv_monitor *monitor); -static int __msft_remove_monitor(struct hci_dev *hdev, - struct adv_monitor *monitor, u16 handle); - bool msft_monitor_supported(struct hci_dev *hdev) { return !!(msft_get_features(hdev) & MSFT_FEATURE_MASK_LE_ADV_MONITOR); @@ -164,34 +157,6 @@ failed: return false; } -static void reregister_monitor(struct hci_dev *hdev, int handle) -{ - struct adv_monitor *monitor; - struct msft_data *msft = hdev->msft_data; - int err; - - while (1) { - monitor = idr_get_next(&hdev->adv_monitors_idr, &handle); - if (!monitor) { - /* All monitors have been resumed */ - msft->resuming = false; - hci_update_passive_scan(hdev); - return; - } - - msft->pending_add_handle = (u16)handle; - err = __msft_add_monitor_pattern(hdev, monitor); - - /* If success, we return and wait for monitor added callback */ - if (!err) - return; - - /* Otherwise remove the monitor and keep registering */ - hci_free_adv_monitor(hdev, monitor); - handle++; - } -} - /* is_mgmt = true matches the handle exposed to userspace via mgmt. * is_mgmt = false matches the handle used by the msft controller. 
* This function requires the caller holds hdev->lock @@ -243,34 +208,27 @@ static int msft_monitor_device_del(struct hci_dev *hdev, __u16 mgmt_handle, return count; } -static void msft_le_monitor_advertisement_cb(struct hci_dev *hdev, - u8 status, u16 opcode, - struct sk_buff *skb) +static int msft_le_monitor_advertisement_cb(struct hci_dev *hdev, u16 opcode, + struct adv_monitor *monitor, + struct sk_buff *skb) { struct msft_rp_le_monitor_advertisement *rp; - struct adv_monitor *monitor; struct msft_monitor_advertisement_handle_data *handle_data; struct msft_data *msft = hdev->msft_data; + int status = 0; hci_dev_lock(hdev); - monitor = idr_find(&hdev->adv_monitors_idr, msft->pending_add_handle); - if (!monitor) { - bt_dev_err(hdev, "msft add advmon: monitor %u is not found!", - msft->pending_add_handle); + rp = (struct msft_rp_le_monitor_advertisement *)skb->data; + if (skb->len < sizeof(*rp)) { status = HCI_ERROR_UNSPECIFIED; goto unlock; } + status = rp->status; if (status) goto unlock; - rp = (struct msft_rp_le_monitor_advertisement *)skb->data; - if (skb->len < sizeof(*rp)) { - status = HCI_ERROR_UNSPECIFIED; - goto unlock; - } - handle_data = kmalloc(sizeof(*handle_data), GFP_KERNEL); if (!handle_data) { status = HCI_ERROR_UNSPECIFIED; @@ -285,29 +243,23 @@ static void msft_le_monitor_advertisement_cb(struct hci_dev *hdev, monitor->state = ADV_MONITOR_STATE_OFFLOADED; unlock: - if (status && monitor) + if (status) hci_free_adv_monitor(hdev, monitor); hci_dev_unlock(hdev); - if (!msft->resuming) - hci_add_adv_patterns_monitor_complete(hdev, status); + return status; } -static void msft_le_cancel_monitor_advertisement_cb(struct hci_dev *hdev, - u8 status, u16 opcode, - struct sk_buff *skb) +static int msft_le_cancel_monitor_advertisement_cb(struct hci_dev *hdev, + u16 opcode, + struct adv_monitor *monitor, + struct sk_buff *skb) { - struct msft_cp_le_cancel_monitor_advertisement *cp; struct msft_rp_le_cancel_monitor_advertisement *rp; - struct adv_monitor *monitor; struct msft_monitor_advertisement_handle_data *handle_data; struct msft_data *msft = hdev->msft_data; - int err; - bool pending; - - if (status) - goto done; + int status = 0; rp = (struct msft_rp_le_cancel_monitor_advertisement *)skb->data; if (skb->len < sizeof(*rp)) { @@ -315,22 +267,22 @@ static void msft_le_cancel_monitor_advertisement_cb(struct hci_dev *hdev, goto done; } + status = rp->status; + if (status) + goto done; + hci_dev_lock(hdev); - cp = hci_sent_cmd_data(hdev, hdev->msft_opcode); - handle_data = msft_find_handle_data(hdev, cp->handle, false); + handle_data = msft_find_handle_data(hdev, monitor->handle, true); if (handle_data) { - monitor = idr_find(&hdev->adv_monitors_idr, - handle_data->mgmt_handle); - - if (monitor && monitor->state == ADV_MONITOR_STATE_OFFLOADED) + if (monitor->state == ADV_MONITOR_STATE_OFFLOADED) monitor->state = ADV_MONITOR_STATE_REGISTERED; /* Do not free the monitor if it is being removed due to * suspend. It will be re-monitored on resume. */ - if (monitor && !msft->suspending) { + if (!msft->suspending) { hci_free_adv_monitor(hdev, monitor); /* Clear any monitored devices by this Adv Monitor */ @@ -342,35 +294,19 @@ static void msft_le_cancel_monitor_advertisement_cb(struct hci_dev *hdev, kfree(handle_data); } - /* If remove all monitors is required, we need to continue the process - * here because the earlier it was paused when waiting for the - * response from controller. 
-	 */
-	if (msft->pending_remove_handle == 0) {
-		pending = hci_remove_all_adv_monitor(hdev, &err);
-		if (pending) {
-			hci_dev_unlock(hdev);
-			return;
-		}
-
-		if (err)
-			status = HCI_ERROR_UNSPECIFIED;
-	}
-
 	hci_dev_unlock(hdev);
 
 done:
-	if (!msft->suspending)
-		hci_remove_adv_monitor_complete(hdev, status);
+	return status;
 }
 
+/* This function requires the caller holds hci_req_sync_lock */
 static int msft_remove_monitor_sync(struct hci_dev *hdev,
 				    struct adv_monitor *monitor)
 {
 	struct msft_cp_le_cancel_monitor_advertisement cp;
 	struct msft_monitor_advertisement_handle_data *handle_data;
 	struct sk_buff *skb;
-	u8 status;
 
 	handle_data = msft_find_handle_data(hdev, monitor->handle, true);
 
@@ -386,13 +322,8 @@ static int msft_remove_monitor_sync(struct hci_dev *hdev,
 	if (IS_ERR(skb))
 		return PTR_ERR(skb);
 
-	status = skb->data[0];
-	skb_pull(skb, 1);
-
-	msft_le_cancel_monitor_advertisement_cb(hdev, status, hdev->msft_opcode,
-						skb);
-
-	return status;
+	return msft_le_cancel_monitor_advertisement_cb(hdev, hdev->msft_opcode,
+						       monitor, skb);
 }
 
 /* This function requires the caller holds hci_req_sync_lock */
@@ -463,7 +394,6 @@ static int msft_add_monitor_sync(struct hci_dev *hdev,
 	ptrdiff_t offset = 0;
 	u8 pattern_count = 0;
 	struct sk_buff *skb;
-	u8 status;
 
 	if (!msft_monitor_pattern_valid(monitor))
 		return -EINVAL;
@@ -505,20 +435,40 @@ static int msft_add_monitor_sync(struct hci_dev *hdev,
 	if (IS_ERR(skb))
 		return PTR_ERR(skb);
 
-	status = skb->data[0];
-	skb_pull(skb, 1);
+	return msft_le_monitor_advertisement_cb(hdev, hdev->msft_opcode,
+						monitor, skb);
+}
 
-	msft_le_monitor_advertisement_cb(hdev, status, hdev->msft_opcode, skb);
+/* This function requires the caller holds hci_req_sync_lock */
+static void reregister_monitor(struct hci_dev *hdev)
+{
+	struct adv_monitor *monitor;
+	struct msft_data *msft = hdev->msft_data;
+	int handle = 0;
 
-	return status;
+	if (!msft)
+		return;
+
+	msft->resuming = true;
+
+	while (1) {
+		monitor = idr_get_next(&hdev->adv_monitors_idr, &handle);
+		if (!monitor)
+			break;
+
+		msft_add_monitor_sync(hdev, monitor);
+
+		handle++;
+	}
+
+	/* All monitors have been reregistered */
+	msft->resuming = false;
 }
 
 /* This function requires the caller holds hci_req_sync_lock */
 int msft_resume_sync(struct hci_dev *hdev)
 {
 	struct msft_data *msft = hdev->msft_data;
-	struct adv_monitor *monitor;
-	int handle = 0;
 
 	if (!msft || !msft_monitor_supported(hdev))
 		return 0;
@@ -533,24 +483,12 @@ int msft_resume_sync(struct hci_dev *hdev)
 
 	hci_dev_unlock(hdev);
 
-	msft->resuming = true;
-
-	while (1) {
-		monitor = idr_get_next(&hdev->adv_monitors_idr, &handle);
-		if (!monitor)
-			break;
-
-		msft_add_monitor_sync(hdev, monitor);
-
-		handle++;
-	}
-
-	/* All monitors have been resumed */
-	msft->resuming = false;
+	reregister_monitor(hdev);
 
 	return 0;
 }
 
+/* This function requires the caller holds hci_req_sync_lock */
 void msft_do_open(struct hci_dev *hdev)
 {
 	struct msft_data *msft = hdev->msft_data;
@@ -583,7 +521,7 @@ void msft_do_open(struct hci_dev *hdev)
 		/* Monitors get removed on power off, so we need to explicitly
 		 * tell the controller to re-monitor.
 		 */
-		reregister_monitor(hdev, 0);
+		reregister_monitor(hdev);
 	}
 }
 
@@ -829,66 +767,7 @@ static void msft_le_set_advertisement_filter_enable_cb(struct hci_dev *hdev,
 	hci_dev_unlock(hdev);
 }
 
-/* This function requires the caller holds hdev->lock */
-static int __msft_add_monitor_pattern(struct hci_dev *hdev,
-				      struct adv_monitor *monitor)
-{
-	struct msft_cp_le_monitor_advertisement *cp;
-	struct msft_le_monitor_advertisement_pattern_data *pattern_data;
-	struct msft_le_monitor_advertisement_pattern *pattern;
-	struct adv_pattern *entry;
-	struct hci_request req;
-	struct msft_data *msft = hdev->msft_data;
-	size_t total_size = sizeof(*cp) + sizeof(*pattern_data);
-	ptrdiff_t offset = 0;
-	u8 pattern_count = 0;
-	int err = 0;
-
-	if (!msft_monitor_pattern_valid(monitor))
-		return -EINVAL;
-
-	list_for_each_entry(entry, &monitor->patterns, list) {
-		pattern_count++;
-		total_size += sizeof(*pattern) + entry->length;
-	}
-
-	cp = kmalloc(total_size, GFP_KERNEL);
-	if (!cp)
-		return -ENOMEM;
-
-	cp->sub_opcode = MSFT_OP_LE_MONITOR_ADVERTISEMENT;
-	cp->rssi_high = monitor->rssi.high_threshold;
-	cp->rssi_low = monitor->rssi.low_threshold;
-	cp->rssi_low_interval = (u8)monitor->rssi.low_threshold_timeout;
-	cp->rssi_sampling_period = monitor->rssi.sampling_period;
-
-	cp->cond_type = MSFT_MONITOR_ADVERTISEMENT_TYPE_PATTERN;
-
-	pattern_data = (void *)cp->data;
-	pattern_data->count = pattern_count;
-
-	list_for_each_entry(entry, &monitor->patterns, list) {
-		pattern = (void *)(pattern_data->data + offset);
-		/* the length also includes data_type and offset */
-		pattern->length = entry->length + 2;
-		pattern->data_type = entry->ad_type;
-		pattern->start_byte = entry->offset;
-		memcpy(pattern->pattern, entry->value, entry->length);
-		offset += sizeof(*pattern) + entry->length;
-	}
-
-	hci_req_init(&req, hdev);
-	hci_req_add(&req, hdev->msft_opcode, total_size, cp);
-	err = hci_req_run_skb(&req, msft_le_monitor_advertisement_cb);
-	kfree(cp);
-
-	if (!err)
-		msft->pending_add_handle = monitor->handle;
-
-	return err;
-}
-
-/* This function requires the caller holds hdev->lock */
+/* This function requires the caller holds hci_req_sync_lock */
 int msft_add_monitor_pattern(struct hci_dev *hdev, struct adv_monitor *monitor)
 {
 	struct msft_data *msft = hdev->msft_data;
@@ -899,41 +778,11 @@ int msft_add_monitor_pattern(struct hci_dev *hdev, struct adv_monitor *monitor)
 	if (msft->resuming || msft->suspending)
 		return -EBUSY;
 
-	return __msft_add_monitor_pattern(hdev, monitor);
+	return msft_add_monitor_sync(hdev, monitor);
 }
 
-/* This function requires the caller holds hdev->lock */
-static int __msft_remove_monitor(struct hci_dev *hdev,
-				 struct adv_monitor *monitor, u16 handle)
-{
-	struct msft_cp_le_cancel_monitor_advertisement cp;
-	struct msft_monitor_advertisement_handle_data *handle_data;
-	struct hci_request req;
-	struct msft_data *msft = hdev->msft_data;
-	int err = 0;
-
-	handle_data = msft_find_handle_data(hdev, monitor->handle, true);
-
-	/* If no matched handle, just remove without telling controller */
-	if (!handle_data)
-		return -ENOENT;
-
-	cp.sub_opcode = MSFT_OP_LE_CANCEL_MONITOR_ADVERTISEMENT;
-	cp.handle = handle_data->msft_handle;
-
-	hci_req_init(&req, hdev);
-	hci_req_add(&req, hdev->msft_opcode, sizeof(cp), &cp);
-	err = hci_req_run_skb(&req, msft_le_cancel_monitor_advertisement_cb);
-
-	if (!err)
-		msft->pending_remove_handle = handle;
-
-	return err;
-}
-
-/* This function requires the caller holds hdev->lock */
-int msft_remove_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
-			u16 handle)
+/* This function requires the caller holds hci_req_sync_lock */
+int msft_remove_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
 {
 	struct msft_data *msft = hdev->msft_data;
 
@@ -943,7 +792,7 @@ int msft_remove_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
 	if (msft->resuming || msft->suspending)
 		return -EBUSY;
 
-	return __msft_remove_monitor(hdev, monitor, handle);
+	return msft_remove_monitor_sync(hdev, monitor);
 }
 
 void msft_req_add_set_filter_enable(struct hci_request *req, bool enable)
diff --git a/net/bluetooth/msft.h b/net/bluetooth/msft.h
index afcaf7d3b1cb..2a63205b377b 100644
--- a/net/bluetooth/msft.h
+++ b/net/bluetooth/msft.h
@@ -20,8 +20,7 @@ void msft_do_close(struct hci_dev *hdev);
 void msft_vendor_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb);
 __u64 msft_get_features(struct hci_dev *hdev);
 int msft_add_monitor_pattern(struct hci_dev *hdev, struct adv_monitor *monitor);
-int msft_remove_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
-			u16 handle);
+int msft_remove_monitor(struct hci_dev *hdev, struct adv_monitor *monitor);
 void msft_req_add_set_filter_enable(struct hci_request *req, bool enable);
 int msft_set_filter_enable(struct hci_dev *hdev, bool enable);
 int msft_suspend_sync(struct hci_dev *hdev);
@@ -49,8 +48,7 @@ static inline int msft_add_monitor_pattern(struct hci_dev *hdev,
 }
 
 static inline int msft_remove_monitor(struct hci_dev *hdev,
-				      struct adv_monitor *monitor,
-				      u16 handle)
+				      struct adv_monitor *monitor)
 {
 	return -EOPNOTSUPP;
 }
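
Note on the msft.c/msft.h hunks above: they drop the asynchronous hci_request callbacks and the pending_add_handle/pending_remove_handle bookkeeping, so msft_add_monitor_pattern() and msft_remove_monitor() now call the msft_*_sync() helpers directly, must run with hci_req_sync_lock held, and return the MSFT controller status. The sketch below is a hypothetical caller, not part of the patch: it only illustrates how such a removal could be deferred onto the hci_sync work context via hci_cmd_sync_queue() from hci_sync.h; the real call sites live in hci_core.c and are not shown in this diff, and the function names remove_monitor_sync_work()/queue_monitor_removal() are made up for illustration.

/* Hypothetical caller sketch, assuming hci_cmd_sync_queue() is used to
 * reach the hci_req_sync_lock context that msft_remove_monitor() now
 * requires. Error handling is reduced to the bare minimum.
 */
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_sync.h>

#include "msft.h"

/* Runs on hdev->cmd_sync work, i.e. with hci_req_sync_lock held */
static int remove_monitor_sync_work(struct hci_dev *hdev, void *data)
{
	struct adv_monitor *monitor = data;

	/* Returns 0 on success or the MSFT/HCI status reported by the
	 * controller, which hci_cmd_sync_queue() propagates to its
	 * completion handling.
	 */
	return msft_remove_monitor(hdev, monitor);
}

static int queue_monitor_removal(struct hci_dev *hdev,
				 struct adv_monitor *monitor)
{
	/* Defer the removal to the synchronous command machinery instead
	 * of issuing an hci_request with a completion callback.
	 */
	return hci_cmd_sync_queue(hdev, remove_monitor_sync_work, monitor,
				  NULL);
}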