Merge branch 'for-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next

Johan Hedberg says:

====================
pull request: bluetooth-next 2020-07-31

Here's the main bluetooth-next pull request for 5.9:

 - Fix firmware filenames for Marvell chipsets
 - Several suspend-related fixes
 - Added mgmt commands for runtime configuration
 - Multiple fixes for Qualcomm-based controllers
 - Add new monitoring feature for mgmt
 - Fix handling of legacy cipher (E0) together with security level 4
 - Add support for Realtek 8822CE controller
 - Fix issues with Chinese controllers using fake VID/PID values
 - Multiple other smaller fixes & improvements
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2020-07-31 15:11:52 -07:00
commit 4bb540dbe4
44 changed files with 2150 additions and 325 deletions


@@ -44,7 +44,7 @@ examples:
     uart1 {
         pinctrl-names = "default";
         pinctrl-0 = <&uart1_pins>, <&uart1_rts_cts_pins>;
-        uart-has-rtscts = <1>;
+        uart-has-rtscts;

         bluetooth {
             compatible = "realtek,rtl8723bs-bt";


@@ -106,7 +106,7 @@ static void bcm203x_complete(struct urb *urb)
         }

         data->state = BCM203X_LOAD_FIRMWARE;
-        /* fall through */
+        fallthrough;
     case BCM203X_LOAD_FIRMWARE:
         if (data->fw_sent == data->fw_size) {
             usb_fill_int_urb(urb, udev, usb_rcvintpipe(udev, BCM203X_IN_EP),


@@ -295,7 +295,6 @@ static void bluecard_write_wakeup(struct bluecard_info *info)
             baud_reg = REG_CONTROL_BAUD_RATE_115200;
             break;
         case PKT_BAUD_RATE_57600:
-            /* Fall through... */
         default:
             baud_reg = REG_CONTROL_BAUD_RATE_57600;
             break;
@@ -585,7 +584,6 @@ static int bluecard_hci_set_baud_rate(struct hci_dev *hdev, int baud)
         hci_skb_pkt_type(skb) = PKT_BAUD_RATE_115200;
         break;
     case 57600:
-        /* Fall through... */
     default:
         cmd[4] = 0x03;
         hci_skb_pkt_type(skb) = PKT_BAUD_RATE_57600;


@@ -754,6 +754,65 @@ void btintel_reset_to_bootloader(struct hci_dev *hdev)
 }
 EXPORT_SYMBOL_GPL(btintel_reset_to_bootloader);

+int btintel_read_debug_features(struct hci_dev *hdev,
+                                struct intel_debug_features *features)
+{
+    struct sk_buff *skb;
+    u8 page_no = 1;
+
+    /* Intel controller supports two pages, each page is of 128-bit
+     * feature bit mask. And each bit defines specific feature support
+     */
+    skb = __hci_cmd_sync(hdev, 0xfca6, sizeof(page_no), &page_no,
+                         HCI_INIT_TIMEOUT);
+    if (IS_ERR(skb)) {
+        bt_dev_err(hdev, "Reading supported features failed (%ld)",
+                   PTR_ERR(skb));
+        return PTR_ERR(skb);
+    }
+
+    if (skb->len != (sizeof(features->page1) + 3)) {
+        bt_dev_err(hdev, "Supported features event size mismatch");
+        kfree_skb(skb);
+        return -EILSEQ;
+    }
+
+    memcpy(features->page1, skb->data + 3, sizeof(features->page1));
+
+    /* Read the supported features page2 if required in future.
+     */
+    kfree_skb(skb);
+    return 0;
+}
+EXPORT_SYMBOL_GPL(btintel_read_debug_features);
+
+int btintel_set_debug_features(struct hci_dev *hdev,
+                               const struct intel_debug_features *features)
+{
+    u8 mask[11] = { 0x0a, 0x92, 0x02, 0x07, 0x00, 0x00, 0x00, 0x00,
+                    0x00, 0x00, 0x00 };
+    struct sk_buff *skb;
+
+    if (!features)
+        return -EINVAL;
+
+    if (!(features->page1[0] & 0x3f)) {
+        bt_dev_info(hdev, "Telemetry exception format not supported");
+        return 0;
+    }
+
+    skb = __hci_cmd_sync(hdev, 0xfc8b, 11, mask, HCI_INIT_TIMEOUT);
+    if (IS_ERR(skb)) {
+        bt_dev_err(hdev, "Setting Intel telemetry ddc write event mask failed (%ld)",
+                   PTR_ERR(skb));
+        return PTR_ERR(skb);
+    }
+
+    kfree_skb(skb);
+    return 0;
+}
+EXPORT_SYMBOL_GPL(btintel_set_debug_features);
+
 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
 MODULE_DESCRIPTION("Bluetooth support for Intel devices ver " VERSION);
 MODULE_VERSION(VERSION);


@@ -62,6 +62,10 @@ struct intel_reset {
     __le32   boot_param;
 } __packed;

+struct intel_debug_features {
+    __u8    page1[16];
+} __packed;
+
 #if IS_ENABLED(CONFIG_BT_INTEL)

 int btintel_check_bdaddr(struct hci_dev *hdev);
@@ -88,6 +92,10 @@ int btintel_read_boot_params(struct hci_dev *hdev,
 int btintel_download_firmware(struct hci_dev *dev, const struct firmware *fw,
                               u32 *boot_param);
 void btintel_reset_to_bootloader(struct hci_dev *hdev);
+int btintel_read_debug_features(struct hci_dev *hdev,
+                                struct intel_debug_features *features);
+int btintel_set_debug_features(struct hci_dev *hdev,
+                                const struct intel_debug_features *features);

 #else

 static inline int btintel_check_bdaddr(struct hci_dev *hdev)
@@ -186,4 +194,17 @@ static inline int btintel_download_firmware(struct hci_dev *dev,
 static inline void btintel_reset_to_bootloader(struct hci_dev *hdev)
 {
 }
+
+static inline int btintel_read_debug_features(struct hci_dev *hdev,
+                                              struct intel_debug_features *features)
+{
+    return -EOPNOTSUPP;
+}
+
+static inline int btintel_set_debug_features(struct hci_dev *hdev,
+                                              const struct intel_debug_features *features)
+{
+    return -EOPNOTSUPP;
+}
+
 #endif


@@ -587,6 +587,14 @@ static int btmrvl_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
     return 0;
 }

+static bool btmrvl_prevent_wake(struct hci_dev *hdev)
+{
+    struct btmrvl_private *priv = hci_get_drvdata(hdev);
+    struct btmrvl_sdio_card *card = priv->btmrvl_dev.card;
+
+    return !device_may_wakeup(&card->func->dev);
+}
+
 /*
  * This function handles the event generated by firmware, rx data
  * received from firmware, and tx data sent from kernel.
@@ -669,6 +677,7 @@ static int btmrvl_service_main_thread(void *data)
 int btmrvl_register_hdev(struct btmrvl_private *priv)
 {
     struct hci_dev *hdev = NULL;
+    struct btmrvl_sdio_card *card = priv->btmrvl_dev.card;
     int ret;

     hdev = hci_alloc_dev();
@@ -687,6 +696,8 @@ int btmrvl_register_hdev(struct btmrvl_private *priv)
     hdev->send = btmrvl_send_frame;
     hdev->setup = btmrvl_setup;
     hdev->set_bdaddr = btmrvl_set_bdaddr;
+    hdev->prevent_wake = btmrvl_prevent_wake;
+    SET_HCIDEV_DEV(hdev, &card->func->dev);

     hdev->dev_type = priv->btmrvl_dev.dev_type;


@@ -111,6 +111,9 @@ static int btmrvl_sdio_probe_of(struct device *dev,
                     "Failed to request irq_bt %d (%d)\n",
                     cfg->irq_bt, ret);
             }
+
+            /* Configure wakeup (enabled by default) */
+            device_init_wakeup(dev, true);
             disable_irq(cfg->irq_bt);
         }
     }
@@ -328,7 +331,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8897 = {
 static const struct btmrvl_sdio_device btmrvl_sdio_sd8977 = {
     .helper = NULL,
-    .firmware = "mrvl/sd8977_uapsta.bin",
+    .firmware = "mrvl/sdsd8977_combo_v2.bin",
     .reg = &btmrvl_reg_8977,
     .support_pscan_win_report = true,
     .sd_blksz_fw_dl = 256,
@@ -346,7 +349,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8987 = {
 static const struct btmrvl_sdio_device btmrvl_sdio_sd8997 = {
     .helper = NULL,
-    .firmware = "mrvl/sd8997_uapsta.bin",
+    .firmware = "mrvl/sdsd8997_combo_v4.bin",
     .reg = &btmrvl_reg_8997,
     .support_pscan_win_report = true,
     .sd_blksz_fw_dl = 256,
@@ -1654,6 +1657,7 @@ static void btmrvl_sdio_remove(struct sdio_func *func)
                         MODULE_SHUTDOWN_REQ);
             btmrvl_sdio_disable_host_int(card);
         }
+
         BT_DBG("unregister dev");
         card->priv->surprise_removed = true;
         btmrvl_sdio_unregister_dev(card);
@@ -1690,7 +1694,8 @@ static int btmrvl_sdio_suspend(struct device *dev)
     }

     /* Enable platform specific wakeup interrupt */
-    if (card->plt_wake_cfg && card->plt_wake_cfg->irq_bt >= 0) {
+    if (card->plt_wake_cfg && card->plt_wake_cfg->irq_bt >= 0 &&
+        device_may_wakeup(dev)) {
         card->plt_wake_cfg->wake_by_bt = false;
         enable_irq(card->plt_wake_cfg->irq_bt);
         enable_irq_wake(card->plt_wake_cfg->irq_bt);
@@ -1707,7 +1712,8 @@ static int btmrvl_sdio_suspend(struct device *dev)
             BT_ERR("HS not activated, suspend failed!");
             /* Disable platform specific wakeup interrupt */
             if (card->plt_wake_cfg &&
-                card->plt_wake_cfg->irq_bt >= 0) {
+                card->plt_wake_cfg->irq_bt >= 0 &&
+                device_may_wakeup(dev)) {
                 disable_irq_wake(card->plt_wake_cfg->irq_bt);
                 disable_irq(card->plt_wake_cfg->irq_bt);
             }
@@ -1767,7 +1773,8 @@ static int btmrvl_sdio_resume(struct device *dev)
     hci_resume_dev(hcidev);

     /* Disable platform specific wakeup interrupt */
-    if (card->plt_wake_cfg && card->plt_wake_cfg->irq_bt >= 0) {
+    if (card->plt_wake_cfg && card->plt_wake_cfg->irq_bt >= 0 &&
+        device_may_wakeup(dev)) {
         disable_irq_wake(card->plt_wake_cfg->irq_bt);
         disable_irq(card->plt_wake_cfg->irq_bt);
         if (card->plt_wake_cfg->wake_by_bt)
@@ -1831,6 +1838,6 @@ MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin");
 MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin");
 MODULE_FIRMWARE("mrvl/sd8887_uapsta.bin");
 MODULE_FIRMWARE("mrvl/sd8897_uapsta.bin");
-MODULE_FIRMWARE("mrvl/sd8977_uapsta.bin");
+MODULE_FIRMWARE("mrvl/sdsd8977_combo_v2.bin");
 MODULE_FIRMWARE("mrvl/sd8987_uapsta.bin");
-MODULE_FIRMWARE("mrvl/sd8997_uapsta.bin");
+MODULE_FIRMWARE("mrvl/sdsd8997_combo_v4.bin");


@@ -685,7 +685,7 @@ static int mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
     const u8 *fw_ptr;
     size_t fw_size;
     int err, dlen;
-    u8 flag;
+    u8 flag, param;

     err = request_firmware(&fw, fwname, &hdev->dev);
     if (err < 0) {
@@ -693,6 +693,20 @@ static int mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
         return err;
     }

+    /* Power on data RAM the firmware relies on. */
+    param = 1;
+    wmt_params.op = MTK_WMT_FUNC_CTRL;
+    wmt_params.flag = 3;
+    wmt_params.dlen = sizeof(param);
+    wmt_params.data = &param;
+    wmt_params.status = NULL;
+
+    err = mtk_hci_wmt_sync(hdev, &wmt_params);
+    if (err < 0) {
+        bt_dev_err(hdev, "Failed to power on data RAM (%d)", err);
+        return err;
+    }
+
     fw_ptr = fw->data;
     fw_size = fw->size;


@@ -400,6 +400,27 @@ out:
     return ret;
 }

+static int qca_disable_soc_logging(struct hci_dev *hdev)
+{
+    struct sk_buff *skb;
+    u8 cmd[2];
+    int err;
+
+    cmd[0] = QCA_DISABLE_LOGGING_SUB_OP;
+    cmd[1] = 0x00;
+    skb = __hci_cmd_sync_ev(hdev, QCA_DISABLE_LOGGING, sizeof(cmd), cmd,
+                            HCI_EV_CMD_COMPLETE, HCI_INIT_TIMEOUT);
+    if (IS_ERR(skb)) {
+        err = PTR_ERR(skb);
+        bt_dev_err(hdev, "QCA Failed to disable soc logging(%d)", err);
+        return err;
+    }
+
+    kfree_skb(skb);
+
+    return 0;
+}
+
 int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr)
 {
     struct sk_buff *skb;
@@ -486,6 +507,12 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
             return err;
     }

+    if (soc_type >= QCA_WCN3991) {
+        err = qca_disable_soc_logging(hdev);
+        if (err < 0)
+            return err;
+    }
+
     /* Perform HCI reset */
     err = qca_send_reset(hdev);
     if (err < 0) {


@@ -14,6 +14,7 @@
 #define EDL_NVM_ACCESS_SET_REQ_CMD  (0x01)
 #define MAX_SIZE_PER_TLV_SEGMENT    (243)
 #define QCA_PRE_SHUTDOWN_CMD        (0xFC08)
+#define QCA_DISABLE_LOGGING         (0xFC17)

 #define EDL_CMD_REQ_RES_EVT         (0x00)
 #define EDL_PATCH_VER_RES_EVT       (0x19)
@@ -22,6 +23,7 @@
 #define EDL_CMD_EXE_STATUS_EVT      (0x00)
 #define EDL_SET_BAUDRATE_RSP_EVT    (0x92)
 #define EDL_NVM_ACCESS_CODE_EVT     (0x0B)
+#define QCA_DISABLE_LOGGING_SUB_OP  (0x14)

 #define EDL_TAG_ID_HCI              (17)
 #define EDL_TAG_ID_DEEP_SLEEP       (27)


@@ -359,6 +359,10 @@ static const struct usb_device_id blacklist_table[] = {
     { USB_VENDOR_AND_INTERFACE_INFO(0x8087, 0xe0, 0x01, 0x01),
       .driver_info = BTUSB_IGNORE },

+    /* Realtek 8822CE Bluetooth devices */
+    { USB_DEVICE(0x0bda, 0xb00c), .driver_info = BTUSB_REALTEK |
+                                                 BTUSB_WIDEBAND_SPEECH },
+
     /* Realtek Bluetooth devices */
     { USB_VENDOR_AND_INTERFACE_INFO(0x0bda, 0xe0, 0x01, 0x01),
       .driver_info = BTUSB_REALTEK },
@@ -453,6 +457,7 @@ static const struct dmi_system_id btusb_needs_reset_resume_table[] = {
 #define BTUSB_HW_RESET_ACTIVE   12
 #define BTUSB_TX_WAIT_VND_EVT   13
 #define BTUSB_WAKEUP_DISABLE    14
+#define BTUSB_USE_ALT1_FOR_WBS  15

 struct btusb_data {
     struct hci_dev       *hdev;
@@ -511,7 +516,6 @@ struct btusb_data {
     unsigned cmd_timeout_cnt;
 };
-
 static void btusb_intel_cmd_timeout(struct hci_dev *hdev)
 {
     struct btusb_data *data = hci_get_drvdata(hdev);
@@ -573,6 +577,23 @@ static void btusb_rtl_cmd_timeout(struct hci_dev *hdev)
     gpiod_set_value_cansleep(reset_gpio, 0);
 }

+static void btusb_qca_cmd_timeout(struct hci_dev *hdev)
+{
+    struct btusb_data *data = hci_get_drvdata(hdev);
+    int err;
+
+    if (++data->cmd_timeout_cnt < 5)
+        return;
+
+    bt_dev_err(hdev, "Multiple cmd timeouts seen. Resetting usb device.");
+    /* This is not an unbalanced PM reference since the device will reset */
+    err = usb_autopm_get_interface(data->intf);
+    if (!err)
+        usb_queue_reset_device(data->intf);
+    else
+        bt_dev_err(hdev, "Failed usb_autopm_get_interface with %d", err);
+}
+
 static inline void btusb_free_frags(struct btusb_data *data)
 {
     unsigned long flags;
@@ -1666,14 +1687,15 @@ static void btusb_work(struct work_struct *work)
                 new_alts = data->sco_num;
             }
         } else if (data->air_mode == HCI_NOTIFY_ENABLE_SCO_TRANSP) {
-            data->usb_alt6_packet_flow = true;
-
             /* Check if Alt 6 is supported for Transparent audio */
-            if (btusb_find_altsetting(data, 6))
+            if (btusb_find_altsetting(data, 6)) {
+                data->usb_alt6_packet_flow = true;
                 new_alts = 6;
-            else
+            } else if (test_bit(BTUSB_USE_ALT1_FOR_WBS, &data->flags)) {
+                new_alts = 1;
+            } else {
                 bt_dev_err(hdev, "Device does not support ALT setting 6");
+            }
         }

         if (btusb_switch_alt_setting(hdev, new_alts) < 0)
@@ -1720,6 +1742,7 @@ static int btusb_setup_csr(struct hci_dev *hdev)
 {
     struct hci_rp_read_local_version *rp;
     struct sk_buff *skb;
+    bool is_fake = false;

     BT_DBG("%s", hdev->name);
@@ -1739,18 +1762,69 @@ static int btusb_setup_csr(struct hci_dev *hdev)

     rp = (struct hci_rp_read_local_version *)skb->data;

-    /* Detect controllers which aren't real CSR ones. */
+    /* Detect a wide host of Chinese controllers that aren't CSR.
+     *
+     * Known fake bcdDevices: 0x0100, 0x0134, 0x1915, 0x2520, 0x7558, 0x8891
+     *
+     * The main thing they have in common is that these are really popular low-cost
+     * options that support newer Bluetooth versions but rely on heavy VID/PID
+     * squatting of this poor old Bluetooth 1.1 device. Even sold as such.
+     *
+     * We detect actual CSR devices by checking that the HCI manufacturer code
+     * is Cambridge Silicon Radio (10) and ensuring that LMP sub-version and
+     * HCI rev values always match. As they both store the firmware number.
+     */
     if (le16_to_cpu(rp->manufacturer) != 10 ||
-        le16_to_cpu(rp->lmp_subver) == 0x0c5c) {
+        le16_to_cpu(rp->hci_rev) != le16_to_cpu(rp->lmp_subver))
+        is_fake = true;
+
+    /* Known legit CSR firmware build numbers and their supported BT versions:
+     * - 1.1 (0x1) -> 0x0073, 0x020d, 0x033c, 0x034e
+     * - 1.2 (0x2) -> 0x04d9, 0x0529
+     * - 2.0 (0x3) -> 0x07a6, 0x07ad, 0x0c5c
+     * - 2.1 (0x4) -> 0x149c, 0x1735, 0x1899 (0x1899 is a BlueCore4-External)
+     * - 4.0 (0x6) -> 0x1d86, 0x2031, 0x22bb
+     *
+     * e.g. Real CSR dongles with LMP subversion 0x73 are old enough that
+     *      support BT 1.1 only; so it's a dead giveaway when some
+     *      third-party BT 4.0 dongle reuses it.
+     */
+    else if (le16_to_cpu(rp->lmp_subver) <= 0x034e &&
+             le16_to_cpu(rp->hci_ver) > BLUETOOTH_VER_1_1)
+        is_fake = true;
+
+    else if (le16_to_cpu(rp->lmp_subver) <= 0x0529 &&
+             le16_to_cpu(rp->hci_ver) > BLUETOOTH_VER_1_2)
+        is_fake = true;
+
+    else if (le16_to_cpu(rp->lmp_subver) <= 0x0c5c &&
+             le16_to_cpu(rp->hci_ver) > BLUETOOTH_VER_2_0)
+        is_fake = true;
+
+    else if (le16_to_cpu(rp->lmp_subver) <= 0x1899 &&
+             le16_to_cpu(rp->hci_ver) > BLUETOOTH_VER_2_1)
+        is_fake = true;
+
+    else if (le16_to_cpu(rp->lmp_subver) <= 0x22bb &&
+             le16_to_cpu(rp->hci_ver) > BLUETOOTH_VER_4_0)
+        is_fake = true;
+
+    if (is_fake) {
+        bt_dev_warn(hdev, "CSR: Unbranded CSR clone detected; adding workarounds...");
+
+        /* Generally these clones have big discrepancies between
+         * advertised features and what's actually supported.
+         * Probably will need to be expanded in the future;
+         * without these the controller will lock up.
+         */
+        set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks);
+        set_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks);
+
         /* Clear the reset quirk since this is not an actual
          * early Bluetooth 1.1 device from CSR.
          */
         clear_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
-
-        /* These fake CSR controllers have all a broken
-         * stored link key handling and so just disable it.
-         */
-        set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks);
+        clear_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
     }

     kfree_skb(skb);
@@ -2262,45 +2336,25 @@ static bool btusb_setup_intel_new_get_fw_name(struct intel_version *ver,
     return true;
 }

-static int btusb_setup_intel_new(struct hci_dev *hdev)
+static int btusb_intel_download_firmware(struct hci_dev *hdev,
+                                         struct intel_version *ver,
+                                         struct intel_boot_params *params)
 {
-    struct btusb_data *data = hci_get_drvdata(hdev);
-    struct intel_version ver;
-    struct intel_boot_params params;
     const struct firmware *fw;
     u32 boot_param;
     char fwname[64];
-    ktime_t calltime, delta, rettime;
-    unsigned long long duration;
     int err;
+    struct btusb_data *data = hci_get_drvdata(hdev);

-    BT_DBG("%s", hdev->name);
-
-    /* Set the default boot parameter to 0x0 and it is updated to
-     * SKU specific boot parameter after reading Intel_Write_Boot_Params
-     * command while downloading the firmware.
-     */
-    boot_param = 0x00000000;
-
-    calltime = ktime_get();
-
-    /* Read the Intel version information to determine if the device
-     * is in bootloader mode or if it already has operational firmware
-     * loaded.
-     */
-    err = btintel_read_version(hdev, &ver);
-    if (err) {
-        bt_dev_err(hdev, "Intel Read version failed (%d)", err);
-        btintel_reset_to_bootloader(hdev);
-        return err;
-    }
+    if (!ver || !params)
+        return -EINVAL;

     /* The hardware platform number has a fixed value of 0x37 and
      * for now only accept this single value.
      */
-    if (ver.hw_platform != 0x37) {
+    if (ver->hw_platform != 0x37) {
         bt_dev_err(hdev, "Unsupported Intel hardware platform (%u)",
-                   ver.hw_platform);
+                   ver->hw_platform);
         return -EINVAL;
     }
@@ -2310,7 +2364,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
      * This check has been put in place to ensure correct forward
      * compatibility options when newer hardware variants come along.
      */
-    switch (ver.hw_variant) {
+    switch (ver->hw_variant) {
     case 0x0b:  /* SfP */
     case 0x0c:  /* WsP */
     case 0x11:  /* JfP */
@@ -2320,11 +2374,11 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
         break;
     default:
         bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)",
-                   ver.hw_variant);
+                   ver->hw_variant);
         return -EINVAL;
     }

-    btintel_version_info(hdev, &ver);
+    btintel_version_info(hdev, ver);

     /* The firmware variant determines if the device is in bootloader
      * mode or is running operational firmware. The value 0x06 identifies
@@ -2339,25 +2393,25 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
      * It is not possible to use the Secure Boot Parameters in this
      * case since that command is only available in bootloader mode.
      */
-    if (ver.fw_variant == 0x23) {
+    if (ver->fw_variant == 0x23) {
         clear_bit(BTUSB_BOOTLOADER, &data->flags);
         btintel_check_bdaddr(hdev);
-        goto finish;
+        return 0;
     }

     /* If the device is not in bootloader mode, then the only possible
      * choice is to return an error and abort the device initialization.
      */
-    if (ver.fw_variant != 0x06) {
+    if (ver->fw_variant != 0x06) {
         bt_dev_err(hdev, "Unsupported Intel firmware variant (%u)",
-                   ver.fw_variant);
+                   ver->fw_variant);
         return -ENODEV;
     }

     /* Read the secure boot parameters to identify the operating
      * details of the bootloader.
      */
-    err = btintel_read_boot_params(hdev, &params);
+    err = btintel_read_boot_params(hdev, params);
     if (err)
         return err;
@@ -2365,16 +2419,16 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
      * with a command complete event. If the boot parameters indicate
      * that this bootloader does not send them, then abort the setup.
      */
-    if (params.limited_cce != 0x00) {
+    if (params->limited_cce != 0x00) {
         bt_dev_err(hdev, "Unsupported Intel firmware loading method (%u)",
-                   params.limited_cce);
+                   params->limited_cce);
         return -EINVAL;
     }

     /* If the OTP has no valid Bluetooth device address, then there will
      * also be no valid address for the operational firmware.
      */
-    if (!bacmp(&params.otp_bdaddr, BDADDR_ANY)) {
+    if (!bacmp(&params->otp_bdaddr, BDADDR_ANY)) {
         bt_dev_info(hdev, "No device address configured");
         set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
     }
@@ -2400,7 +2454,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
      * ibt-<hw_variant>-<hw_revision>-<fw_revision>.sfi.
      *
      */
-    err = btusb_setup_intel_new_get_fw_name(&ver, &params, fwname,
+    err = btusb_setup_intel_new_get_fw_name(ver, params, fwname,
                                             sizeof(fwname), "sfi");
     if (!err) {
         bt_dev_err(hdev, "Unsupported Intel firmware naming");
@@ -2415,16 +2469,6 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)

     bt_dev_info(hdev, "Found device firmware: %s", fwname);

-    /* Save the DDC file name for later use to apply once the firmware
-     * downloading is done.
-     */
-    err = btusb_setup_intel_new_get_fw_name(&ver, &params, fwname,
-                                            sizeof(fwname), "ddc");
-    if (!err) {
-        bt_dev_err(hdev, "Unsupported Intel firmware naming");
-        return -EINVAL;
-    }
-
     if (fw->size < 644) {
         bt_dev_err(hdev, "Invalid size of firmware file (%zu)",
                    fw->size);
@@ -2479,18 +2523,58 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
         goto done;
     }

+done:
+    release_firmware(fw);
+    return err;
+}
+
+static int btusb_setup_intel_new(struct hci_dev *hdev)
+{
+    struct btusb_data *data = hci_get_drvdata(hdev);
+    struct intel_version ver;
+    struct intel_boot_params params;
+    u32 boot_param;
+    char ddcname[64];
+    ktime_t calltime, delta, rettime;
+    unsigned long long duration;
+    int err;
+    struct intel_debug_features features;
+
+    BT_DBG("%s", hdev->name);
+
+    /* Set the default boot parameter to 0x0 and it is updated to
+     * SKU specific boot parameter after reading Intel_Write_Boot_Params
+     * command while downloading the firmware.
+     */
+    boot_param = 0x00000000;
+
+    calltime = ktime_get();
+
+    /* Read the Intel version information to determine if the device
+     * is in bootloader mode or if it already has operational firmware
+     * loaded.
+     */
+    err = btintel_read_version(hdev, &ver);
+    if (err) {
+        bt_dev_err(hdev, "Intel Read version failed (%d)", err);
+        btintel_reset_to_bootloader(hdev);
+        return err;
+    }
+
+    err = btusb_intel_download_firmware(hdev, &ver, &params);
+    if (err)
+        return err;
+
+    /* controller is already having an operational firmware */
+    if (ver.fw_variant == 0x23)
+        goto finish;
+
     rettime = ktime_get();
     delta = ktime_sub(rettime, calltime);
     duration = (unsigned long long) ktime_to_ns(delta) >> 10;

     bt_dev_info(hdev, "Firmware loaded in %llu usecs", duration);

-done:
-    release_firmware(fw);
-
-    if (err < 0)
-        return err;
-
     calltime = ktime_get();

     set_bit(BTUSB_BOOTING, &data->flags);
@@ -2534,13 +2618,28 @@ done:

     clear_bit(BTUSB_BOOTLOADER, &data->flags);

-    /* Once the device is running in operational mode, it needs to apply
-     * the device configuration (DDC) parameters.
-     *
-     * The device can work without DDC parameters, so even if it fails
-     * to load the file, no need to fail the setup.
+    err = btusb_setup_intel_new_get_fw_name(&ver, &params, ddcname,
+                                            sizeof(ddcname), "ddc");
+
+    if (!err) {
+        bt_dev_err(hdev, "Unsupported Intel firmware naming");
+    } else {
+        /* Once the device is running in operational mode, it needs to
+         * apply the device configuration (DDC) parameters.
+         *
+         * The device can work without DDC parameters, so even if it
+         * fails to load the file, no need to fail the setup.
+         */
+        btintel_load_ddc_config(hdev, ddcname);
+    }
+
+    /* Read the Intel supported features and if new exception formats
+     * supported, need to load the additional DDC config to enable.
      */
-    btintel_load_ddc_config(hdev, fwname);
+    btintel_read_debug_features(hdev, &features);
+
+    /* Set DDC mask for available debug features */
+    btintel_set_debug_features(hdev, &features);

     /* Read the Intel version information after loading the FW  */
     err = btintel_read_version(hdev, &ver);
@@ -2925,7 +3024,7 @@ static int btusb_mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
     const u8 *fw_ptr;
     size_t fw_size;
     int err, dlen;
-    u8 flag;
+    u8 flag, param;

     err = request_firmware(&fw, fwname, &hdev->dev);
     if (err < 0) {
@@ -2933,6 +3032,20 @@ static int btusb_mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
         return err;
     }

+    /* Power on data RAM the firmware relies on. */
+    param = 1;
+    wmt_params.op = BTMTK_WMT_FUNC_CTRL;
+    wmt_params.flag = 3;
+    wmt_params.dlen = sizeof(param);
+    wmt_params.data = &param;
+    wmt_params.status = NULL;
+
+    err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
+    if (err < 0) {
+        bt_dev_err(hdev, "Failed to power on data RAM (%d)", err);
+        return err;
+    }
+
     fw_ptr = fw->data;
     fw_size = fw->size;
@@ -3704,6 +3817,9 @@ static bool btusb_prevent_wake(struct hci_dev *hdev)
 {
     struct btusb_data *data = hci_get_drvdata(hdev);

+    if (test_bit(BTUSB_WAKEUP_DISABLE, &data->flags))
+        return true;
+
     return !device_may_wakeup(&data->udev->dev);
 }
@@ -3941,10 +4057,20 @@ static int btusb_probe(struct usb_interface *intf,
     if (id->driver_info & BTUSB_QCA_ROME) {
         data->setup_on_usb = btusb_setup_qca;
         hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
+        hdev->cmd_timeout = btusb_qca_cmd_timeout;
         set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
         btusb_check_needs_reset_resume(intf);
     }

+    if (id->driver_info & BTUSB_AMP) {
+        /* AMP controllers do not support SCO packets */
+        data->isoc = NULL;
+    } else {
+        /* Interface orders are hardcoded in the specification */
+        data->isoc = usb_ifnum_to_if(data->udev, ifnum_base + 1);
+        data->isoc_ifnum = ifnum_base + 1;
+    }
+
     if (IS_ENABLED(CONFIG_BT_HCIBTUSB_RTL) &&
         (id->driver_info & BTUSB_REALTEK)) {
         hdev->setup = btrtl_setup_realtek;
@@ -3956,19 +4082,10 @@ static int btusb_probe(struct usb_interface *intf,
          * (DEVICE_REMOTE_WAKEUP)
          */
         set_bit(BTUSB_WAKEUP_DISABLE, &data->flags);
-
-        err = usb_autopm_get_interface(intf);
-        if (err < 0)
-            goto out_free_dev;
-    }
-
-    if (id->driver_info & BTUSB_AMP) {
-        /* AMP controllers do not support SCO packets */
-        data->isoc = NULL;
-    } else {
-        /* Interface orders are hardcoded in the specification */
-        data->isoc = usb_ifnum_to_if(data->udev, ifnum_base + 1);
-        data->isoc_ifnum = ifnum_base + 1;
+        if (btusb_find_altsetting(data, 1))
+            set_bit(BTUSB_USE_ALT1_FOR_WBS, &data->flags);
+        else
+            bt_dev_err(hdev, "Device does not support ALT setting 1");
     }

     if (!reset)
@@ -4001,11 +4118,13 @@ static int btusb_probe(struct usb_interface *intf,
         if (bcdDevice < 0x117)
             set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);

-        /* Fake CSR devices with broken commands */
-        if (bcdDevice <= 0x100 || bcdDevice == 0x134)
-            hdev->setup = btusb_setup_csr;
-
+        /* This must be set first in case we disable it for fakes */
         set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
+
+        /* Fake CSR devices with broken commands */
+        if (le16_to_cpu(udev->descriptor.idVendor)  == 0x0a12 &&
+            le16_to_cpu(udev->descriptor.idProduct) == 0x0001)
+            hdev->setup = btusb_setup_csr;
     }

     if (id->driver_info & BTUSB_SNIFFER) {


@@ -793,7 +793,7 @@ static int h5_serdev_probe(struct serdev_device *serdev)
     if (!h5)
         return -ENOMEM;

-    set_bit(HCI_UART_RESET_ON_INIT, &h5->serdev_hu.flags);
+    set_bit(HCI_UART_RESET_ON_INIT, &h5->serdev_hu.hdev_flags);

     h5->hu = &h5->serdev_hu;
     h5->serdev_hu.serdev = serdev;


@@ -219,7 +219,7 @@ static void ll_device_want_to_wakeup(struct hci_uart *hu)
          * perfectly safe to always send one.
          */
         BT_DBG("dual wake-up-indication");
-        /* fall through */
+        fallthrough;
     case HCILL_ASLEEP:
         /* acknowledge device wake up */
         if (send_hcill_cmd(HCILL_WAKE_UP_ACK, hu) < 0) {


@@ -46,7 +46,7 @@

 #define HCI_MAX_IBS_SIZE            10
 #define IBS_WAKE_RETRANS_TIMEOUT_MS 100
-#define IBS_BTSOC_TX_IDLE_TIMEOUT_MS 40
+#define IBS_BTSOC_TX_IDLE_TIMEOUT_MS 200
 #define IBS_HOST_TX_IDLE_TIMEOUT_MS 2000
 #define CMD_TRANS_TIMEOUT_MS        100
 #define MEMDUMP_TIMEOUT_MS          8000
@@ -72,7 +72,8 @@ enum qca_flags {
     QCA_DROP_VENDOR_EVENT,
     QCA_SUSPENDING,
     QCA_MEMDUMP_COLLECTION,
-    QCA_HW_ERROR_EVENT
+    QCA_HW_ERROR_EVENT,
+    QCA_SSR_TRIGGERED
 };

 enum qca_capabilities {
@@ -289,25 +290,21 @@ static void serial_clock_vote(unsigned long vote, struct hci_uart *hu)
     case HCI_IBS_TX_VOTE_CLOCK_ON:
         qca->tx_vote = true;
         qca->tx_votes_on++;
-        new_vote = true;
         break;

     case HCI_IBS_RX_VOTE_CLOCK_ON:
         qca->rx_vote = true;
         qca->rx_votes_on++;
-        new_vote = true;
         break;

     case HCI_IBS_TX_VOTE_CLOCK_OFF:
         qca->tx_vote = false;
         qca->tx_votes_off++;
-        new_vote = qca->rx_vote | qca->tx_vote;
         break;

     case HCI_IBS_RX_VOTE_CLOCK_OFF:
         qca->rx_vote = false;
         qca->rx_votes_off++;
-        new_vote = qca->rx_vote | qca->tx_vote;
         break;

     default:
@@ -315,6 +312,8 @@ static void serial_clock_vote(unsigned long vote, struct hci_uart *hu)
         return;
     }

+    new_vote = qca->rx_vote | qca->tx_vote;
+
     if (new_vote != old_vote) {
         if (new_vote)
             __serial_clock_on(hu->tty);
@@ -474,8 +473,6 @@ static void hci_ibs_tx_idle_timeout(struct timer_list *t)
     case HCI_IBS_TX_ASLEEP:
     case HCI_IBS_TX_WAKING:
-        /* Fall through */
-
     default:
         BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
         break;
@@ -518,8 +515,6 @@ static void hci_ibs_wake_retrans_timeout(struct timer_list *t)
     case HCI_IBS_TX_ASLEEP:
     case HCI_IBS_TX_AWAKE:
-        /* Fall through */
-
     default:
         BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
         break;
@@ -837,8 +832,6 @@ static void device_woke_up(struct hci_uart *hu)
         break;

     case HCI_IBS_TX_ASLEEP:
-        /* Fall through */
-
     default:
         BT_ERR("Received HCI_IBS_WAKE_ACK in tx state %d",
                qca->tx_ibs_state);
@@ -862,6 +855,13 @@ static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb)
     BT_DBG("hu %p qca enq skb %p tx_ibs_state %d", hu, skb,
            qca->tx_ibs_state);

+    if (test_bit(QCA_SSR_TRIGGERED, &qca->flags)) {
+        /* As SSR is in progress, ignore the packets */
+        bt_dev_dbg(hu->hdev, "SSR is in progress");
+        kfree_skb(skb);
+        return 0;
+    }
+
     /* Prepend skb with frame type */
     memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
@@ -983,8 +983,11 @@ static void qca_controller_memdump(struct work_struct *work)
     while ((skb = skb_dequeue(&qca->rx_memdump_q))) {

         mutex_lock(&qca->hci_memdump_lock);
-        /* Skip processing the received packets if timeout detected. */
-        if (qca->memdump_state == QCA_MEMDUMP_TIMEOUT) {
+        /* Skip processing the received packets if timeout detected
+         * or memdump collection completed.
+         */
+        if (qca->memdump_state == QCA_MEMDUMP_TIMEOUT ||
+            qca->memdump_state == QCA_MEMDUMP_COLLECTED) {
             mutex_unlock(&qca->hci_memdump_lock);
             return;
         }
@@ -1128,6 +1131,7 @@ static int qca_controller_memdump_event(struct hci_dev *hdev,
     struct hci_uart *hu = hci_get_drvdata(hdev);
     struct qca_data *qca = hu->priv;

+    set_bit(QCA_SSR_TRIGGERED, &qca->flags);
     skb_queue_tail(&qca->rx_memdump_q, skb);
     queue_work(qca->workqueue, &qca->ctrl_memdump_evt);
@@ -1485,9 +1489,8 @@ static void qca_hw_error(struct hci_dev *hdev, u8 code)
 {
     struct hci_uart *hu = hci_get_drvdata(hdev);
     struct qca_data *qca = hu->priv;
-    struct qca_memdump_data *qca_memdump = qca->qca_memdump;
-    char *memdump_buf = NULL;

+    set_bit(QCA_SSR_TRIGGERED, &qca->flags);
     set_bit(QCA_HW_ERROR_EVENT, &qca->flags);
     bt_dev_info(hdev, "mem_dump_status: %d", qca->memdump_state);
@@ -1509,19 +1512,23 @@ static void qca_hw_error(struct hci_dev *hdev, u8 code)
         qca_wait_for_dump_collection(hdev);
     }

+    mutex_lock(&qca->hci_memdump_lock);
     if (qca->memdump_state != QCA_MEMDUMP_COLLECTED) {
         bt_dev_err(hu->hdev, "clearing allocated memory due to memdump timeout");
-        mutex_lock(&qca->hci_memdump_lock);
-        if (qca_memdump)
-            memdump_buf = qca_memdump->memdump_buf_head;
-        vfree(memdump_buf);
-        kfree(qca_memdump);
-        qca->qca_memdump = NULL;
+        if (qca->qca_memdump) {
+            vfree(qca->qca_memdump->memdump_buf_head);
+            kfree(qca->qca_memdump);
+            qca->qca_memdump = NULL;
+        }
         qca->memdump_state = QCA_MEMDUMP_TIMEOUT;
         cancel_delayed_work(&qca->ctrl_memdump_timeout);
-        skb_queue_purge(&qca->rx_memdump_q);
+    }
     mutex_unlock(&qca->hci_memdump_lock);
+
+    if (qca->memdump_state == QCA_MEMDUMP_TIMEOUT ||
+        qca->memdump_state == QCA_MEMDUMP_COLLECTED) {
         cancel_work_sync(&qca->ctrl_memdump_evt);
+        skb_queue_purge(&qca->rx_memdump_q);
     }

     clear_bit(QCA_HW_ERROR_EVENT, &qca->flags);
@@ -1532,10 +1539,30 @@ static void qca_cmd_timeout(struct hci_dev *hdev)
     struct hci_uart *hu = hci_get_drvdata(hdev);
     struct qca_data *qca = hu->priv;

-    if (qca->memdump_state == QCA_MEMDUMP_IDLE)
+    set_bit(QCA_SSR_TRIGGERED, &qca->flags);
+    if (qca->memdump_state == QCA_MEMDUMP_IDLE) {
+        set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
         qca_send_crashbuffer(hu);
-    else
-        bt_dev_info(hdev, "Dump collection is in process");
+        qca_wait_for_dump_collection(hdev);
+    } else if (qca->memdump_state == QCA_MEMDUMP_COLLECTING) {
+        /* Let us wait here until memory dump collected or
+         * memory dump timer expired.
+         */
+        bt_dev_info(hdev, "waiting for dump to complete");
+        qca_wait_for_dump_collection(hdev);
+    }
+
+    mutex_lock(&qca->hci_memdump_lock);
+    if (qca->memdump_state != QCA_MEMDUMP_COLLECTED) {
+        qca->memdump_state = QCA_MEMDUMP_TIMEOUT;
+        if (!test_bit(QCA_HW_ERROR_EVENT, &qca->flags)) {
+            /* Inject hw error event to reset the device
+             * and driver.
+             */
+            hci_reset_dev(hu->hdev);
+        }
+    }
+
+    mutex_unlock(&qca->hci_memdump_lock);
 }

 static int qca_wcn3990_init(struct hci_uart *hu)
@@ -1641,11 +1668,15 @@ static int qca_setup(struct hci_uart *hu)
     bt_dev_info(hdev, "setting up %s",
         qca_is_wcn399x(soc_type) ? "wcn399x" : "ROME/QCA6390");

+    qca->memdump_state = QCA_MEMDUMP_IDLE;
+
 retry:
     ret = qca_power_on(hdev);
     if (ret)
         return ret;

+    clear_bit(QCA_SSR_TRIGGERED, &qca->flags);
+
     if (qca_is_wcn399x(soc_type)) {
         set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
@@ -1788,9 +1819,6 @@ static void qca_power_shutdown(struct hci_uart *hu)
     qca_flush(hu);
     spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

-    hu->hdev->hw_error = NULL;
-    hu->hdev->cmd_timeout = NULL;
-
     /* Non-serdev device usually is powered by external power
      * and don't need additional action in driver for power down
      */
@@ -1812,6 +1840,9 @@ static int qca_power_off(struct hci_dev *hdev)
     struct qca_data *qca = hu->priv;
     enum qca_btsoc_type soc_type = qca_soc_type(hu);

+    hu->hdev->hw_error = NULL;
+    hu->hdev->cmd_timeout = NULL;
+
     /* Stop sending shutdown command if soc crashes. */
     if (soc_type != QCA_ROME
         && qca->memdump_state == QCA_MEMDUMP_IDLE) {
@@ -1819,7 +1850,6 @@ static int qca_power_off(struct hci_dev *hdev)
         usleep_range(8000, 10000);
     }

-    qca->memdump_state = QCA_MEMDUMP_IDLE;
     qca_power_shutdown(hu);
     return 0;
 }
@@ -1962,17 +1992,17 @@ static int qca_serdev_probe(struct serdev_device *serdev)
         }

         qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL);
-        if (!qcadev->susclk) {
+        if (IS_ERR(qcadev->susclk)) {
             dev_warn(&serdev->dev, "failed to acquire clk\n");
-        } else {
-            err = clk_set_rate(qcadev->susclk, SUSCLK_RATE_32KHZ);
-            if (err)
-                return err;
-
-            err = clk_prepare_enable(qcadev->susclk);
-            if (err)
-                return err;
+            return PTR_ERR(qcadev->susclk);
         }

+        err = clk_set_rate(qcadev->susclk, SUSCLK_RATE_32KHZ);
+        if (err)
+            return err;
+
+        err = clk_prepare_enable(qcadev->susclk);
+        if (err)
+            return err;
+
         err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
         if (err) {
@@ -2050,6 +2080,7 @@ static int __maybe_unused qca_suspend(struct device *dev)
     struct hci_uart *hu = &qcadev->serdev_hu;
     struct qca_data *qca = hu->priv;
     unsigned long flags;
+    bool tx_pending = false;
     int ret = 0;
     u8 cmd;
@@ -2068,7 +2099,7 @@ static int __maybe_unused qca_suspend(struct device *dev)
     switch (qca->tx_ibs_state) {
     case HCI_IBS_TX_WAKING:
         del_timer(&qca->wake_retrans_timer);
-        /* Fall through */
+        fallthrough;
     case HCI_IBS_TX_AWAKE:
         del_timer(&qca->tx_idle_timer);
@@ -2083,8 +2114,7 @@ static int __maybe_unused qca_suspend(struct device *dev)
         qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
         qca->ibs_sent_slps++;
-
-        qca_wq_serial_tx_clock_vote_off(&qca->ws_tx_vote_off);
+        tx_pending = true;
         break;

     case HCI_IBS_TX_ASLEEP:
@@ -2101,22 +2131,24 @@ static int __maybe_unused qca_suspend(struct device *dev)
     if (ret < 0)
         goto error;

-    serdev_device_wait_until_sent(hu->serdev,
-                                  msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS));
+    if (tx_pending) {
+        serdev_device_wait_until_sent(hu->serdev,
+                                      msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS));
+        serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_OFF, hu);
+    }

     /* Wait for HCI_IBS_SLEEP_IND sent by device to indicate its Tx is going
      * to sleep, so that the packet does not wake the system later.
      */
     ret = wait_event_interruptible_timeout(qca->suspend_wait_q,
             qca->rx_ibs_state == HCI_IBS_RX_ASLEEP,
             msecs_to_jiffies(IBS_BTSOC_TX_IDLE_TIMEOUT_MS));
-    if (ret == 0) {
+    if (ret > 0)
+        return 0;
+
+    if (ret == 0)
         ret = -ETIMEDOUT;
-        goto error;
-    }
-
-    return 0;

 error:
     clear_bit(QCA_SUSPENDING, &qca->flags);


@@ -355,7 +355,8 @@ void hci_uart_unregister_device(struct hci_uart *hu)
     struct hci_dev *hdev = hu->hdev;

     clear_bit(HCI_UART_PROTO_READY, &hu->flags);
-    hci_unregister_dev(hdev);
+    if (test_bit(HCI_UART_REGISTERED, &hu->flags))
+        hci_unregister_dev(hdev);
     hci_free_dev(hdev);

     cancel_work_sync(&hu->write_work);


@@ -36,9 +36,9 @@
 #define SD8897_DEFAULT_FW_NAME "mrvl/sd8897_uapsta.bin"
 #define SD8887_DEFAULT_FW_NAME "mrvl/sd8887_uapsta.bin"
 #define SD8801_DEFAULT_FW_NAME "mrvl/sd8801_uapsta.bin"
-#define SD8977_DEFAULT_FW_NAME "mrvl/sd8977_uapsta.bin"
+#define SD8977_DEFAULT_FW_NAME "mrvl/sdsd8977_combo_v2.bin"
 #define SD8987_DEFAULT_FW_NAME "mrvl/sd8987_uapsta.bin"
-#define SD8997_DEFAULT_FW_NAME "mrvl/sd8997_uapsta.bin"
+#define SD8997_DEFAULT_FW_NAME "mrvl/sdsd8997_combo_v4.bin"

 #define BLOCK_MODE 1
 #define BYTE_MODE  0


@@ -41,6 +41,8 @@
 #define BLUETOOTH_VER_1_1   1
 #define BLUETOOTH_VER_1_2   2
 #define BLUETOOTH_VER_2_0   3
+#define BLUETOOTH_VER_2_1   4
+#define BLUETOOTH_VER_4_0   6

 /* Reserv for core and drivers use */
 #define BT_SKB_RESERVE  8
@@ -147,6 +149,10 @@ struct bt_voice {
 #define BT_MODE_LE_FLOWCTL  0x03
 #define BT_MODE_EXT_FLOWCTL 0x04

+#define BT_PKT_STATUS       16
+
+#define BT_SCM_PKT_STATUS   0x03
+
 __printf(1, 2)
 void bt_info(const char *fmt, ...);
 __printf(1, 2)
@@ -286,6 +292,7 @@ struct bt_sock {
     struct sock *parent;
     unsigned long flags;
     void (*skb_msg_name)(struct sk_buff *, void *, int *);
+    void (*skb_put_cmsg)(struct sk_buff *, struct msghdr *, struct sock *);
 };

 enum {
@@ -335,6 +342,10 @@ struct l2cap_ctrl {
     struct l2cap_chan *chan;
 };

+struct sco_ctrl {
+    u8  pkt_status;
+};
+
 struct hci_dev;

 typedef void (*hci_req_complete_t)(struct hci_dev *hdev, u8 status, u16 opcode);
@@ -361,6 +372,7 @@ struct bt_skb_cb {
     u8 incoming:1;
     union {
         struct l2cap_ctrl l2cap;
+        struct sco_ctrl sco;
        struct hci_ctrl hci;
     };
 };


@@ -227,6 +227,17 @@ enum {
      * supported.
      */
     HCI_QUIRK_VALID_LE_STATES,
+
+    /* When this quirk is set, then erroneous data reporting
+     * is ignored. This is mainly due to the fact that the HCI
+     * Read Default Erroneous Data Reporting command is advertised,
+     * but not supported; these controllers often reply with unknown
+     * command and tend to lock up randomly. Needing a hard reset.
+     *
+     * This quirk can be set before hci_register_dev is called or
+     * during the hdev->setup vendor callback.
+     */
+    HCI_QUIRK_BROKEN_ERR_DATA_REPORTING,
 };

 /* HCI device flags */
@@ -307,6 +318,7 @@ enum {
     HCI_FORCE_BREDR_SMP,
     HCI_FORCE_STATIC_ADDR,
     HCI_LL_RPA_RESOLUTION,
+    HCI_ENABLE_LL_PRIVACY,
     HCI_CMD_PENDING,
     HCI_FORCE_NO_MITM,
@@ -1637,6 +1649,8 @@ struct hci_rp_le_read_resolv_list_size {

 #define HCI_OP_LE_SET_ADDR_RESOLV_ENABLE 0x202d

+#define HCI_OP_LE_SET_RPA_TIMEOUT   0x202e
+
 #define HCI_OP_LE_READ_MAX_DATA_LEN 0x202f
 struct hci_rp_le_read_max_data_len {
     __u8    status;
@@ -2268,8 +2282,10 @@ struct hci_ev_le_conn_complete {
 #define LE_EXT_ADV_SCAN_RSP     0x0008
 #define LE_EXT_ADV_LEGACY_PDU   0x0010

 #define ADDR_LE_DEV_PUBLIC          0x00
 #define ADDR_LE_DEV_RANDOM          0x01
+#define ADDR_LE_DEV_PUBLIC_RESOLVED 0x02
+#define ADDR_LE_DEV_RANDOM_RESOLVED 0x03

 #define HCI_EV_LE_ADVERTISING_REPORT    0x02
 struct hci_ev_le_advertising_info {
@@ -2516,4 +2532,12 @@ static inline struct hci_sco_hdr *hci_sco_hdr(const struct sk_buff *skb)
 #define hci_iso_data_len(h)     ((h) & 0x3fff)
 #define hci_iso_data_flags(h)   ((h) >> 14)

+/* le24 support */
+static inline void hci_cpu_to_le24(__u32 val, __u8 dst[3])
+{
+    dst[0] = val & 0xff;
+    dst[1] = (val & 0xff00) >> 8;
+    dst[2] = (val & 0xff0000) >> 16;
+}
+
 #endif /* __HCI_H */


@@ -25,6 +25,7 @@
 #ifndef __HCI_CORE_H
 #define __HCI_CORE_H

+#include <linux/idr.h>
 #include <linux/leds.h>
 #include <linux/rculist.h>
@@ -136,6 +137,23 @@ struct bdaddr_list_with_irk {
     u8 local_irk[16];
 };

+struct bdaddr_list_with_flags {
+    struct list_head list;
+    bdaddr_t bdaddr;
+    u8 bdaddr_type;
+    u32 current_flags;
+};
+
+enum hci_conn_flags {
+    HCI_CONN_FLAG_REMOTE_WAKEUP,
+    HCI_CONN_FLAG_MAX
+};
+
+#define hci_conn_test_flag(nr, flags) ((flags) & (1U << nr))
+
+/* Make sure number of flags doesn't exceed sizeof(current_flags) */
+static_assert(HCI_CONN_FLAG_MAX < 32);
+
 struct bt_uuid {
     struct list_head list;
     u8 uuid[16];
@@ -220,6 +238,24 @@ struct adv_info {
 #define HCI_MAX_ADV_INSTANCES       5
 #define HCI_DEFAULT_ADV_DURATION    2

+struct adv_pattern {
+    struct list_head list;
+    __u8 ad_type;
+    __u8 offset;
+    __u8 length;
+    __u8 value[HCI_MAX_AD_LENGTH];
+};
+
+struct adv_monitor {
+    struct list_head patterns;
+    bool    active;
+    __u16   handle;
+};
+
+#define HCI_MIN_ADV_MONITOR_HANDLE          1
+#define HCI_MAX_ADV_MONITOR_NUM_HANDLES     32
+#define HCI_MAX_ADV_MONITOR_NUM_PATTERNS    16
+
 #define HCI_MAX_SHORT_NAME_LENGTH   10

 /* Min encryption key size to match with SMP */
@@ -295,6 +331,14 @@ struct hci_dev {
     __u8    le_scan_type;
     __u16   le_scan_interval;
     __u16   le_scan_window;
+    __u16   le_scan_int_suspend;
+    __u16   le_scan_window_suspend;
+    __u16   le_scan_int_discovery;
+    __u16   le_scan_window_discovery;
+    __u16   le_scan_int_adv_monitor;
+    __u16   le_scan_window_adv_monitor;
+    __u16   le_scan_int_connect;
+    __u16   le_scan_window_connect;
     __u16   le_conn_min_interval;
     __u16   le_conn_max_interval;
     __u16   le_conn_latency;
@@ -323,6 +367,17 @@ struct hci_dev {
     __u16   devid_product;
     __u16   devid_version;

+    __u8    def_page_scan_type;
+    __u16   def_page_scan_int;
+    __u16   def_page_scan_window;
+    __u8    def_inq_scan_type;
+    __u16   def_inq_scan_int;
+    __u16   def_inq_scan_window;
+    __u16   def_br_lsto;
+    __u16   def_page_timeout;
+    __u16   def_multi_adv_rotation_duration;
+    __u16   def_le_autoconnect_timeout;
+
     __u16   pkt_type;
     __u16   esco_type;
     __u16   link_policy;
@@ -438,7 +493,6 @@ struct hci_dev {
     struct list_head    mgmt_pending;
     struct list_head    blacklist;
     struct list_head    whitelist;
-    struct list_head    wakeable;
     struct list_head    uuids;
     struct list_head    link_keys;
     struct list_head    long_term_keys;
@@ -477,6 +531,9 @@ struct hci_dev {
     __u16               adv_instance_timeout;
     struct delayed_work adv_instance_expire;

+    struct idr          adv_monitors_idr;
+    unsigned int        adv_monitors_cnt;
+
     __u8                irk[16];
     __u32               rpa_timeout;
     struct delayed_work rpa_expired;
@ -508,6 +565,12 @@ struct hci_dev {
#define HCI_PHY_HANDLE(handle) (handle & 0xff) #define HCI_PHY_HANDLE(handle) (handle & 0xff)
enum conn_reasons {
CONN_REASON_PAIR_DEVICE,
CONN_REASON_L2CAP_CHAN,
CONN_REASON_SCO_CONNECT,
};
struct hci_conn { struct hci_conn {
struct list_head list; struct list_head list;
@ -559,6 +622,8 @@ struct hci_conn {
__s8 max_tx_power; __s8 max_tx_power;
unsigned long flags; unsigned long flags;
enum conn_reasons conn_reason;
__u32 clock; __u32 clock;
__u16 clock_accuracy; __u16 clock_accuracy;
@ -626,7 +691,7 @@ struct hci_conn_params {
struct hci_conn *conn; struct hci_conn *conn;
bool explicit_connect; bool explicit_connect;
bool wakeable; u32 current_flags;
}; };
extern struct list_head hci_dev_list; extern struct list_head hci_dev_list;
@ -984,12 +1049,14 @@ struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle);
struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst, struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
u8 dst_type, u8 sec_level, u8 dst_type, u8 sec_level,
u16 conn_timeout); u16 conn_timeout,
enum conn_reasons conn_reason);
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
u8 dst_type, u8 sec_level, u16 conn_timeout, u8 dst_type, u8 sec_level, u16 conn_timeout,
u8 role, bdaddr_t *direct_rpa); u8 role, bdaddr_t *direct_rpa);
struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst, struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
u8 sec_level, u8 auth_type); u8 sec_level, u8 auth_type,
enum conn_reasons conn_reason);
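For context, a minimal sketch of how a caller might use the extended hci_connect_acl() signature; the wrapper name and call site are hypothetical, only the prototype and the CONN_REASON_PAIR_DEVICE value above come from this patch.

static struct hci_conn *example_pair_connect(struct hci_dev *hdev,
					     bdaddr_t *bdaddr,
					     u8 sec_level, u8 auth_type)
{
	/* Record why the ACL link is being created so later policy
	 * decisions can inspect conn->conn_reason.
	 */
	return hci_connect_acl(hdev, bdaddr, sec_level, auth_type,
			       CONN_REASON_PAIR_DEVICE);
}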
struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst, struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
__u16 setting); __u16 setting);
int hci_conn_check_link_mode(struct hci_conn *conn); int hci_conn_check_link_mode(struct hci_conn *conn);
@ -1151,12 +1218,19 @@ struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *list,
struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk( struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
struct list_head *list, bdaddr_t *bdaddr, struct list_head *list, bdaddr_t *bdaddr,
u8 type); u8 type);
struct bdaddr_list_with_flags *
hci_bdaddr_list_lookup_with_flags(struct list_head *list, bdaddr_t *bdaddr,
u8 type);
int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type); int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type);
int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr, int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
u8 type, u8 *peer_irk, u8 *local_irk); u8 type, u8 *peer_irk, u8 *local_irk);
int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
u8 type, u32 flags);
int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type); int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type);
int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr, int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
u8 type); u8 type);
int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
u8 type);
void hci_bdaddr_list_clear(struct list_head *list); void hci_bdaddr_list_clear(struct list_head *list);
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev, struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
@ -1217,6 +1291,12 @@ int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance); int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance);
void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired); void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired);
void hci_adv_monitors_clear(struct hci_dev *hdev);
void hci_free_adv_monitor(struct adv_monitor *monitor);
int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor);
int hci_remove_adv_monitor(struct hci_dev *hdev, u16 handle);
bool hci_is_adv_monitoring(struct hci_dev *hdev);
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb); void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
void hci_init_sysfs(struct hci_dev *hdev); void hci_init_sysfs(struct hci_dev *hdev);
@ -1279,6 +1359,9 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
#define scan_coded(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_CODED) || \ #define scan_coded(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_CODED) || \
((dev)->le_rx_def_phys & HCI_LE_SET_PHY_CODED)) ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_CODED))
/* Use LL Privacy based address resolution if supported */
#define use_ll_privacy(dev) ((dev)->le_features[0] & HCI_LE_LL_PRIVACY)
/* Use ext scanning if set ext scan param and ext scan enable is supported */ /* Use ext scanning if set ext scan param and ext scan enable is supported */
#define use_ext_scan(dev) (((dev)->commands[37] & 0x20) && \ #define use_ext_scan(dev) (((dev)->commands[37] & 0x20) && \
((dev)->commands[37] & 0x40)) ((dev)->commands[37] & 0x40))
@ -1387,7 +1470,7 @@ static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status)
__u8 encrypt; __u8 encrypt;
if (conn->state == BT_CONFIG) { if (conn->state == BT_CONFIG) {
if (status) if (!status)
conn->state = BT_CONNECTED; conn->state = BT_CONNECTED;
hci_connect_cfm(conn, status); hci_connect_cfm(conn, status);
@ -1402,11 +1485,13 @@ static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status)
else else
encrypt = 0x01; encrypt = 0x01;
if (conn->sec_level == BT_SECURITY_SDP) if (!status) {
conn->sec_level = BT_SECURITY_LOW; if (conn->sec_level == BT_SECURITY_SDP)
conn->sec_level = BT_SECURITY_LOW;
if (conn->pending_sec_level > conn->sec_level) if (conn->pending_sec_level > conn->sec_level)
conn->sec_level = conn->pending_sec_level; conn->sec_level = conn->pending_sec_level;
}
mutex_lock(&hci_cb_list_lock); mutex_lock(&hci_cb_list_lock);
list_for_each_entry(cb, &hci_cb_list, list) { list_for_each_entry(cb, &hci_cb_list, list) {

View File

@ -31,8 +31,8 @@
#define HCI_TIME_STAMP 3 #define HCI_TIME_STAMP 3
/* CMSG flags */ /* CMSG flags */
#define HCI_CMSG_DIR 0x0001 #define HCI_CMSG_DIR 0x01
#define HCI_CMSG_TSTAMP 0x0002 #define HCI_CMSG_TSTAMP 0x02
struct sockaddr_hci { struct sockaddr_hci {
sa_family_t hci_family; sa_family_t hci_family;

View File

@ -52,6 +52,12 @@ struct mgmt_hdr {
__le16 len; __le16 len;
} __packed; } __packed;
struct mgmt_tlv {
__le16 type;
__u8 length;
__u8 value[];
} __packed;
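As a rough illustration of how the new TLV container is meant to be consumed (e.g. by the Read/Set Default System Configuration commands defined below), here is a hedged sketch of walking such a buffer; the helper name and the debug print are assumptions, not code from this patch.

static int example_walk_mgmt_tlvs(const u8 *buf, u16 buf_len)
{
	u16 off = 0;

	while (off + sizeof(struct mgmt_tlv) <= buf_len) {
		const struct mgmt_tlv *tlv = (const void *)(buf + off);

		/* Reject a value that runs past the end of the buffer */
		if (off + sizeof(*tlv) + tlv->length > buf_len)
			return -EINVAL;

		pr_debug("tlv type 0x%4.4x len %u\n",
			 le16_to_cpu(tlv->type), tlv->length);

		off += sizeof(*tlv) + tlv->length;
	}

	return 0;
}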
struct mgmt_addr_info { struct mgmt_addr_info {
bdaddr_t bdaddr; bdaddr_t bdaddr;
__u8 type; __u8 type;
@ -702,6 +708,78 @@ struct mgmt_rp_set_exp_feature {
__le32 flags; __le32 flags;
} __packed; } __packed;
#define MGMT_OP_READ_DEF_SYSTEM_CONFIG 0x004b
#define MGMT_READ_DEF_SYSTEM_CONFIG_SIZE 0
#define MGMT_OP_SET_DEF_SYSTEM_CONFIG 0x004c
#define MGMT_SET_DEF_SYSTEM_CONFIG_SIZE 0
#define MGMT_OP_READ_DEF_RUNTIME_CONFIG 0x004d
#define MGMT_READ_DEF_RUNTIME_CONFIG_SIZE 0
#define MGMT_OP_SET_DEF_RUNTIME_CONFIG 0x004e
#define MGMT_SET_DEF_RUNTIME_CONFIG_SIZE 0
#define MGMT_OP_GET_DEVICE_FLAGS 0x004F
#define MGMT_GET_DEVICE_FLAGS_SIZE 7
struct mgmt_cp_get_device_flags {
struct mgmt_addr_info addr;
} __packed;
struct mgmt_rp_get_device_flags {
struct mgmt_addr_info addr;
__le32 supported_flags;
__le32 current_flags;
} __packed;
#define MGMT_OP_SET_DEVICE_FLAGS 0x0050
#define MGMT_SET_DEVICE_FLAGS_SIZE 11
struct mgmt_cp_set_device_flags {
struct mgmt_addr_info addr;
__le32 current_flags;
} __packed;
struct mgmt_rp_set_device_flags {
struct mgmt_addr_info addr;
} __packed;
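The fixed command sizes above follow directly from the packed structure layouts: mgmt_addr_info is 7 bytes (bdaddr plus type) and the __le32 flags field adds 4. A compile-time check like the following, illustrative and not part of this patch, makes that relationship explicit:

static_assert(sizeof(struct mgmt_cp_get_device_flags) ==
	      MGMT_GET_DEVICE_FLAGS_SIZE);
static_assert(sizeof(struct mgmt_cp_set_device_flags) ==
	      MGMT_SET_DEVICE_FLAGS_SIZE);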
#define MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS BIT(0)
#define MGMT_OP_READ_ADV_MONITOR_FEATURES 0x0051
#define MGMT_READ_ADV_MONITOR_FEATURES_SIZE 0
struct mgmt_rp_read_adv_monitor_features {
__le32 supported_features;
__le32 enabled_features;
__le16 max_num_handles;
__u8 max_num_patterns;
__le16 num_handles;
__le16 handles[];
} __packed;
struct mgmt_adv_pattern {
__u8 ad_type;
__u8 offset;
__u8 length;
__u8 value[31];
} __packed;
#define MGMT_OP_ADD_ADV_PATTERNS_MONITOR 0x0052
struct mgmt_cp_add_adv_patterns_monitor {
__u8 pattern_count;
struct mgmt_adv_pattern patterns[];
} __packed;
#define MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE 1
struct mgmt_rp_add_adv_patterns_monitor {
__le16 monitor_handle;
} __packed;
#define MGMT_OP_REMOVE_ADV_MONITOR 0x0053
struct mgmt_cp_remove_adv_monitor {
__le16 monitor_handle;
} __packed;
#define MGMT_REMOVE_ADV_MONITOR_SIZE 2
struct mgmt_rp_remove_adv_monitor {
__le16 monitor_handle;
} __packed;
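Since Add Advertisement Patterns Monitor is a variable-length command (a 1-byte pattern count followed by pattern_count packed 34-byte patterns), a caller validating or building it would compute the expected length roughly as follows; the helper is a hypothetical sketch, not part of this patch:

static inline size_t example_adv_monitor_cmd_len(u8 pattern_count)
{
	/* 1-byte header plus 34 bytes per mgmt_adv_pattern */
	return MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE +
	       pattern_count * sizeof(struct mgmt_adv_pattern);
}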
#define MGMT_EV_CMD_COMPLETE 0x0001 #define MGMT_EV_CMD_COMPLETE 0x0001
struct mgmt_ev_cmd_complete { struct mgmt_ev_cmd_complete {
__le16 opcode; __le16 opcode;
@ -933,3 +1011,20 @@ struct mgmt_ev_exp_feature_changed {
__u8 uuid[16]; __u8 uuid[16];
__le32 flags; __le32 flags;
} __packed; } __packed;
#define MGMT_EV_DEVICE_FLAGS_CHANGED 0x002a
struct mgmt_ev_device_flags_changed {
struct mgmt_addr_info addr;
__le32 supported_flags;
__le32 current_flags;
} __packed;
#define MGMT_EV_ADV_MONITOR_ADDED 0x002b
struct mgmt_ev_adv_monitor_added {
__le16 monitor_handle;
} __packed;
#define MGMT_EV_ADV_MONITOR_REMOVED 0x002c
struct mgmt_ev_adv_monitor_removed {
__le16 monitor_handle;
} __packed;

View File

@ -46,4 +46,6 @@ struct sco_conninfo {
__u8 dev_class[3]; __u8 dev_class[3];
}; };
#define SCO_CMSG_PKT_STATUS 0x01
#endif /* __SCO_H */ #endif /* __SCO_H */

View File

@ -50,6 +50,7 @@ static bool enable_6lowpan;
/* We are listening incoming connections via this channel /* We are listening incoming connections via this channel
*/ */
static struct l2cap_chan *listen_chan; static struct l2cap_chan *listen_chan;
static DEFINE_MUTEX(set_lock);
struct lowpan_peer { struct lowpan_peer {
struct list_head list; struct list_head list;
@ -1078,12 +1079,14 @@ static void do_enable_set(struct work_struct *work)
enable_6lowpan = set_enable->flag; enable_6lowpan = set_enable->flag;
mutex_lock(&set_lock);
if (listen_chan) { if (listen_chan) {
l2cap_chan_close(listen_chan, 0); l2cap_chan_close(listen_chan, 0);
l2cap_chan_put(listen_chan); l2cap_chan_put(listen_chan);
} }
listen_chan = bt_6lowpan_listen(); listen_chan = bt_6lowpan_listen();
mutex_unlock(&set_lock);
kfree(set_enable); kfree(set_enable);
} }
@ -1135,11 +1138,13 @@ static ssize_t lowpan_control_write(struct file *fp,
if (ret == -EINVAL) if (ret == -EINVAL)
return ret; return ret;
mutex_lock(&set_lock);
if (listen_chan) { if (listen_chan) {
l2cap_chan_close(listen_chan, 0); l2cap_chan_close(listen_chan, 0);
l2cap_chan_put(listen_chan); l2cap_chan_put(listen_chan);
listen_chan = NULL; listen_chan = NULL;
} }
mutex_unlock(&set_lock);
if (conn) { if (conn) {
struct lowpan_peer *peer; struct lowpan_peer *peer;

View File

@ -21,7 +21,7 @@ menuconfig BT
It was designed as a replacement for cables and other short-range It was designed as a replacement for cables and other short-range
technologies like IrDA. Bluetooth operates in personal area range technologies like IrDA. Bluetooth operates in personal area range
that typically extends up to 10 meters. More information about that typically extends up to 10 meters. More information about
Bluetooth can be found at <http://www.bluetooth.com/>. Bluetooth can be found at <https://www.bluetooth.com/>.
Linux Bluetooth subsystem consist of several layers: Linux Bluetooth subsystem consist of several layers:
Bluetooth Core Bluetooth Core

View File

@ -14,7 +14,7 @@ bluetooth_6lowpan-y := 6lowpan.o
bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \ bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \
hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o lib.o \ hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o lib.o \
ecdh_helper.o hci_request.o mgmt_util.o ecdh_helper.o hci_request.o mgmt_util.o mgmt_config.o
bluetooth-$(CONFIG_BT_BREDR) += sco.o bluetooth-$(CONFIG_BT_BREDR) += sco.o
bluetooth-$(CONFIG_BT_HS) += a2mp.o amp.o bluetooth-$(CONFIG_BT_HS) += a2mp.o amp.o

View File

@ -286,6 +286,9 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
if (msg->msg_name && bt_sk(sk)->skb_msg_name) if (msg->msg_name && bt_sk(sk)->skb_msg_name)
bt_sk(sk)->skb_msg_name(skb, msg->msg_name, bt_sk(sk)->skb_msg_name(skb, msg->msg_name,
&msg->msg_namelen); &msg->msg_namelen);
if (bt_sk(sk)->skb_put_cmsg)
bt_sk(sk)->skb_put_cmsg(skb, msg, sk);
} }
skb_free_datagram(sk, skb); skb_free_datagram(sk, skb);
@ -453,8 +456,6 @@ __poll_t bt_sock_poll(struct file *file, struct socket *sock,
struct sock *sk = sock->sk; struct sock *sk = sock->sk;
__poll_t mask = 0; __poll_t mask = 0;
BT_DBG("sock %p, sk %p", sock, sk);
poll_wait(file, sk_sleep(sk), wait); poll_wait(file, sk_sleep(sk), wait);
if (sk->sk_state == BT_LISTEN) if (sk->sk_state == BT_LISTEN)

View File

@ -789,11 +789,8 @@ static void set_ext_conn_params(struct hci_conn *conn,
memset(p, 0, sizeof(*p)); memset(p, 0, sizeof(*p));
/* Set window to be the same value as the interval to p->scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
* enable continuous scanning. p->scan_window = cpu_to_le16(hdev->le_scan_window_connect);
*/
p->scan_interval = cpu_to_le16(hdev->le_scan_interval);
p->scan_window = p->scan_interval;
p->conn_interval_min = cpu_to_le16(conn->le_conn_min_interval); p->conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
p->conn_interval_max = cpu_to_le16(conn->le_conn_max_interval); p->conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
p->conn_latency = cpu_to_le16(conn->le_conn_latency); p->conn_latency = cpu_to_le16(conn->le_conn_latency);
@ -875,11 +872,8 @@ static void hci_req_add_le_create_conn(struct hci_request *req,
memset(&cp, 0, sizeof(cp)); memset(&cp, 0, sizeof(cp));
/* Set window to be the same value as the interval to enable cp.scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
* continuous scanning. cp.scan_window = cpu_to_le16(hdev->le_scan_window_connect);
*/
cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
cp.scan_window = cp.scan_interval;
bacpy(&cp.peer_addr, &conn->dst); bacpy(&cp.peer_addr, &conn->dst);
cp.peer_addr_type = conn->dst_type; cp.peer_addr_type = conn->dst_type;
@ -937,7 +931,7 @@ static void hci_req_directed_advertising(struct hci_request *req,
* So it is required to remove adv set for handle 0x00. since we use * So it is required to remove adv set for handle 0x00. since we use
* instance 0 for directed adv. * instance 0 for directed adv.
*/ */
hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(cp.handle), &cp.handle); __hci_req_remove_ext_adv_instance(req, cp.handle);
hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp); hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
@ -1009,6 +1003,11 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
struct hci_request req; struct hci_request req;
int err; int err;
/* This ensures that address resolution is not disabled when the LE
 * scan is stopped right before issuing le_create_conn.
 */
bool rpa_le_conn = true;
/* Let's make sure that le is enabled.*/ /* Let's make sure that le is enabled.*/
if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
if (lmp_le_capable(hdev)) if (lmp_le_capable(hdev))
@ -1109,7 +1108,7 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
* state. * state.
*/ */
if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) { if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
hci_req_add_le_scan_disable(&req); hci_req_add_le_scan_disable(&req, rpa_le_conn);
hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED); hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
} }
@ -1180,7 +1179,8 @@ static int hci_explicit_conn_params_set(struct hci_dev *hdev,
/* This function requires the caller holds hdev->lock */ /* This function requires the caller holds hdev->lock */
struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst, struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
u8 dst_type, u8 sec_level, u8 dst_type, u8 sec_level,
u16 conn_timeout) u16 conn_timeout,
enum conn_reasons conn_reason)
{ {
struct hci_conn *conn; struct hci_conn *conn;
@ -1225,6 +1225,7 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
conn->sec_level = BT_SECURITY_LOW; conn->sec_level = BT_SECURITY_LOW;
conn->pending_sec_level = sec_level; conn->pending_sec_level = sec_level;
conn->conn_timeout = conn_timeout; conn->conn_timeout = conn_timeout;
conn->conn_reason = conn_reason;
hci_update_background_scan(hdev); hci_update_background_scan(hdev);
@ -1234,7 +1235,8 @@ done:
} }
struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst, struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
u8 sec_level, u8 auth_type) u8 sec_level, u8 auth_type,
enum conn_reasons conn_reason)
{ {
struct hci_conn *acl; struct hci_conn *acl;
@ -1254,6 +1256,7 @@ struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
hci_conn_hold(acl); hci_conn_hold(acl);
acl->conn_reason = conn_reason;
if (acl->state == BT_OPEN || acl->state == BT_CLOSED) { if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
acl->sec_level = BT_SECURITY_LOW; acl->sec_level = BT_SECURITY_LOW;
acl->pending_sec_level = sec_level; acl->pending_sec_level = sec_level;
@ -1270,7 +1273,8 @@ struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
struct hci_conn *acl; struct hci_conn *acl;
struct hci_conn *sco; struct hci_conn *sco;
acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING); acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING,
CONN_REASON_SCO_CONNECT);
if (IS_ERR(acl)) if (IS_ERR(acl))
return acl; return acl;
@ -1323,6 +1327,23 @@ int hci_conn_check_link_mode(struct hci_conn *conn)
return 0; return 0;
} }
/* AES encryption is required for Level 4:
*
* BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 3, Part C
* page 1319:
*
* 128-bit equivalent strength for link and encryption keys
* required using FIPS approved algorithms (E0 not allowed,
* SAFER+ not allowed, and P-192 not allowed; encryption key
* not shortened)
*/
if (conn->sec_level == BT_SECURITY_FIPS &&
!test_bit(HCI_CONN_AES_CCM, &conn->flags)) {
bt_dev_err(conn->hdev,
"Invalid security: Missing AES-CCM usage");
return 0;
}
if (hci_conn_ssp_enabled(conn) && if (hci_conn_ssp_enabled(conn) &&
!test_bit(HCI_CONN_ENCRYPT, &conn->flags)) !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
return 0; return 0;

View File

@ -26,7 +26,6 @@
/* Bluetooth HCI core. */ /* Bluetooth HCI core. */
#include <linux/export.h> #include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h> #include <linux/rfkill.h>
#include <linux/debugfs.h> #include <linux/debugfs.h>
#include <linux/crypto.h> #include <linux/crypto.h>
@ -606,7 +605,8 @@ static int hci_init3_req(struct hci_request *req, unsigned long opt)
if (hdev->commands[8] & 0x01) if (hdev->commands[8] & 0x01)
hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL); hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
if (hdev->commands[18] & 0x04) if (hdev->commands[18] & 0x04 &&
!test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
hci_req_add(req, HCI_OP_READ_DEF_ERR_DATA_REPORTING, 0, NULL); hci_req_add(req, HCI_OP_READ_DEF_ERR_DATA_REPORTING, 0, NULL);
/* Some older Broadcom based Bluetooth 1.2 controllers do not /* Some older Broadcom based Bluetooth 1.2 controllers do not
@ -763,6 +763,14 @@ static int hci_init3_req(struct hci_request *req, unsigned long opt)
hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL); hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
} }
if (hdev->commands[35] & 0x40) {
__le16 rpa_timeout = cpu_to_le16(hdev->rpa_timeout);
/* Set RPA timeout */
hci_req_add(req, HCI_OP_LE_SET_RPA_TIMEOUT, 2,
&rpa_timeout);
}
if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) { if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
/* Read LE Maximum Data Length */ /* Read LE Maximum Data Length */
hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL); hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
@ -851,7 +859,8 @@ static int hci_init4_req(struct hci_request *req, unsigned long opt)
/* Set erroneous data reporting if supported to the wideband speech /* Set erroneous data reporting if supported to the wideband speech
* setting value * setting value
*/ */
if (hdev->commands[18] & 0x08) { if (hdev->commands[18] & 0x08 &&
!test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) {
bool enabled = hci_dev_test_flag(hdev, bool enabled = hci_dev_test_flag(hdev,
HCI_WIDEBAND_SPEECH_ENABLED); HCI_WIDEBAND_SPEECH_ENABLED);
@ -2982,7 +2991,7 @@ int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
adv_instance->remaining_time = timeout; adv_instance->remaining_time = timeout;
if (duration == 0) if (duration == 0)
adv_instance->duration = HCI_DEFAULT_ADV_DURATION; adv_instance->duration = hdev->def_multi_adv_rotation_duration;
else else
adv_instance->duration = duration; adv_instance->duration = duration;
@ -2996,6 +3005,94 @@ int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
return 0; return 0;
} }
/* This function requires the caller holds hdev->lock */
void hci_adv_monitors_clear(struct hci_dev *hdev)
{
struct adv_monitor *monitor;
int handle;
idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
hci_free_adv_monitor(monitor);
idr_destroy(&hdev->adv_monitors_idr);
}
void hci_free_adv_monitor(struct adv_monitor *monitor)
{
struct adv_pattern *pattern;
struct adv_pattern *tmp;
if (!monitor)
return;
list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list)
kfree(pattern);
kfree(monitor);
}
/* This function requires the caller holds hdev->lock */
int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
{
int min, max, handle;
if (!monitor)
return -EINVAL;
min = HCI_MIN_ADV_MONITOR_HANDLE;
max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
GFP_KERNEL);
if (handle < 0)
return handle;
hdev->adv_monitors_cnt++;
monitor->handle = handle;
hci_update_background_scan(hdev);
return 0;
}
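To make the intended usage of the new monitor API concrete, here is a hedged sketch of assembling a single-pattern monitor and registering it; the AD type and value bytes are arbitrary examples, the list member of struct adv_pattern is inferred from hci_free_adv_monitor() below, and the caller is assumed to hold hdev->lock as required.

static int example_register_monitor(struct hci_dev *hdev)
{
	struct adv_monitor *monitor;
	struct adv_pattern *pattern;

	monitor = kzalloc(sizeof(*monitor), GFP_KERNEL);
	if (!monitor)
		return -ENOMEM;
	INIT_LIST_HEAD(&monitor->patterns);

	pattern = kzalloc(sizeof(*pattern), GFP_KERNEL);
	if (!pattern) {
		kfree(monitor);
		return -ENOMEM;
	}

	pattern->ad_type = 0x16;	/* Service Data - 16-bit UUID */
	pattern->offset = 0;
	pattern->length = 2;
	pattern->value[0] = 0xfc;
	pattern->value[1] = 0xfe;
	list_add(&pattern->list, &monitor->patterns);

	/* On success the handle comes from the 1..32 IDR range above */
	return hci_add_adv_monitor(hdev, monitor);
}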
static int free_adv_monitor(int id, void *ptr, void *data)
{
struct hci_dev *hdev = data;
struct adv_monitor *monitor = ptr;
idr_remove(&hdev->adv_monitors_idr, monitor->handle);
hci_free_adv_monitor(monitor);
return 0;
}
/* This function requires the caller holds hdev->lock */
int hci_remove_adv_monitor(struct hci_dev *hdev, u16 handle)
{
struct adv_monitor *monitor;
if (handle) {
monitor = idr_find(&hdev->adv_monitors_idr, handle);
if (!monitor)
return -ENOENT;
idr_remove(&hdev->adv_monitors_idr, monitor->handle);
hci_free_adv_monitor(monitor);
} else {
/* Remove all monitors if handle is 0. */
idr_for_each(&hdev->adv_monitors_idr, &free_adv_monitor, hdev);
}
hci_update_background_scan(hdev);
return 0;
}
/* This function requires the caller holds hdev->lock */
bool hci_is_adv_monitoring(struct hci_dev *hdev)
{
return !idr_is_empty(&hdev->adv_monitors_idr);
}
struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list, struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
bdaddr_t *bdaddr, u8 type) bdaddr_t *bdaddr, u8 type)
{ {
@ -3023,6 +3120,20 @@ struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
return NULL; return NULL;
} }
struct bdaddr_list_with_flags *
hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
bdaddr_t *bdaddr, u8 type)
{
struct bdaddr_list_with_flags *b;
list_for_each_entry(b, bdaddr_list, list) {
if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
return b;
}
return NULL;
}
void hci_bdaddr_list_clear(struct list_head *bdaddr_list) void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
{ {
struct bdaddr_list *b, *n; struct bdaddr_list *b, *n;
@ -3084,6 +3195,30 @@ int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
return 0; return 0;
} }
int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
u8 type, u32 flags)
{
struct bdaddr_list_with_flags *entry;
if (!bacmp(bdaddr, BDADDR_ANY))
return -EBADF;
if (hci_bdaddr_list_lookup(list, bdaddr, type))
return -EEXIST;
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
bacpy(&entry->bdaddr, bdaddr);
entry->bdaddr_type = type;
entry->current_flags = flags;
list_add(&entry->list, list);
return 0;
}
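A hypothetical caller of the new helper, for example marking a BR/EDR device as allowed to wake the host; the BIT() encoding of HCI_CONN_FLAG_REMOTE_WAKEUP mirrors the hci_conn_test_flag() usage later in this series but is an assumption here, as is holding hdev->lock.

static int example_add_wakeable(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	u32 flags = BIT(HCI_CONN_FLAG_REMOTE_WAKEUP);

	return hci_bdaddr_list_add_with_flags(&hdev->whitelist, bdaddr,
					      BDADDR_BREDR, flags);
}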
int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type) int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{ {
struct bdaddr_list *entry; struct bdaddr_list *entry;
@ -3123,6 +3258,26 @@ int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
return 0; return 0;
} }
int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
u8 type)
{
struct bdaddr_list_with_flags *entry;
if (!bacmp(bdaddr, BDADDR_ANY)) {
hci_bdaddr_list_clear(list);
return 0;
}
entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
if (!entry)
return -ENOENT;
list_del(&entry->list);
kfree(entry);
return 0;
}
/* This function requires the caller holds hdev->lock */ /* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev, struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
bdaddr_t *addr, u8 addr_type) bdaddr_t *addr, u8 addr_type)
@ -3145,6 +3300,15 @@ struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
{ {
struct hci_conn_params *param; struct hci_conn_params *param;
switch (addr_type) {
case ADDR_LE_DEV_PUBLIC_RESOLVED:
addr_type = ADDR_LE_DEV_PUBLIC;
break;
case ADDR_LE_DEV_RANDOM_RESOLVED:
addr_type = ADDR_LE_DEV_RANDOM;
break;
}
list_for_each_entry(param, list, action) { list_for_each_entry(param, list, action) {
if (bacmp(&param->addr, addr) == 0 && if (bacmp(&param->addr, addr) == 0 &&
param->addr_type == addr_type) param->addr_type == addr_type)
@ -3289,10 +3453,10 @@ static int hci_suspend_wait_event(struct hci_dev *hdev)
WAKE_COND, SUSPEND_NOTIFIER_TIMEOUT); WAKE_COND, SUSPEND_NOTIFIER_TIMEOUT);
if (ret == 0) { if (ret == 0) {
bt_dev_dbg(hdev, "Timed out waiting for suspend"); bt_dev_err(hdev, "Timed out waiting for suspend events");
for (i = 0; i < __SUSPEND_NUM_TASKS; ++i) { for (i = 0; i < __SUSPEND_NUM_TASKS; ++i) {
if (test_bit(i, hdev->suspend_tasks)) if (test_bit(i, hdev->suspend_tasks))
bt_dev_dbg(hdev, "Bit %d is set", i); bt_dev_err(hdev, "Suspend timeout bit: %d", i);
clear_bit(i, hdev->suspend_tasks); clear_bit(i, hdev->suspend_tasks);
} }
@ -3360,12 +3524,15 @@ static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
ret = hci_change_suspend_state(hdev, BT_RUNNING); ret = hci_change_suspend_state(hdev, BT_RUNNING);
} }
/* If suspend failed, restore it to running */
if (ret && action == PM_SUSPEND_PREPARE)
hci_change_suspend_state(hdev, BT_RUNNING);
done: done:
return ret ? notifier_from_errno(-EBUSY) : NOTIFY_STOP; /* We always allow suspend even if suspend preparation failed and
* attempt to recover in resume.
*/
if (ret)
bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
action, ret);
return NOTIFY_DONE;
} }
/* Alloc HCI device */ /* Alloc HCI device */
@ -3397,6 +3564,12 @@ struct hci_dev *hci_alloc_dev(void)
hdev->le_adv_max_interval = 0x0800; hdev->le_adv_max_interval = 0x0800;
hdev->le_scan_interval = 0x0060; hdev->le_scan_interval = 0x0060;
hdev->le_scan_window = 0x0030; hdev->le_scan_window = 0x0030;
hdev->le_scan_int_suspend = 0x0400;
hdev->le_scan_window_suspend = 0x0012;
hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
hdev->le_scan_int_connect = 0x0060;
hdev->le_scan_window_connect = 0x0060;
hdev->le_conn_min_interval = 0x0018; hdev->le_conn_min_interval = 0x0018;
hdev->le_conn_max_interval = 0x0028; hdev->le_conn_max_interval = 0x0028;
hdev->le_conn_latency = 0x0000; hdev->le_conn_latency = 0x0000;
@ -3412,6 +3585,8 @@ struct hci_dev *hci_alloc_dev(void)
hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M; hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M; hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES; hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT; hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT; hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
@ -3420,13 +3595,17 @@ struct hci_dev *hci_alloc_dev(void)
hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT; hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE; hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
/* default 1.28 sec page scan */
hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
hdev->def_page_scan_int = 0x0800;
hdev->def_page_scan_window = 0x0012;
mutex_init(&hdev->lock); mutex_init(&hdev->lock);
mutex_init(&hdev->req_lock); mutex_init(&hdev->req_lock);
INIT_LIST_HEAD(&hdev->mgmt_pending); INIT_LIST_HEAD(&hdev->mgmt_pending);
INIT_LIST_HEAD(&hdev->blacklist); INIT_LIST_HEAD(&hdev->blacklist);
INIT_LIST_HEAD(&hdev->whitelist); INIT_LIST_HEAD(&hdev->whitelist);
INIT_LIST_HEAD(&hdev->wakeable);
INIT_LIST_HEAD(&hdev->uuids); INIT_LIST_HEAD(&hdev->uuids);
INIT_LIST_HEAD(&hdev->link_keys); INIT_LIST_HEAD(&hdev->link_keys);
INIT_LIST_HEAD(&hdev->long_term_keys); INIT_LIST_HEAD(&hdev->long_term_keys);
@ -3574,6 +3753,8 @@ int hci_register_dev(struct hci_dev *hdev)
queue_work(hdev->req_workqueue, &hdev->power_on); queue_work(hdev->req_workqueue, &hdev->power_on);
idr_init(&hdev->adv_monitors_idr);
return id; return id;
err_wqueue: err_wqueue:
@ -3603,9 +3784,10 @@ void hci_unregister_dev(struct hci_dev *hdev)
cancel_work_sync(&hdev->power_on); cancel_work_sync(&hdev->power_on);
hci_dev_do_close(hdev);
unregister_pm_notifier(&hdev->suspend_notifier); unregister_pm_notifier(&hdev->suspend_notifier);
cancel_work_sync(&hdev->suspend_prepare);
hci_dev_do_close(hdev);
if (!test_bit(HCI_INIT, &hdev->flags) && if (!test_bit(HCI_INIT, &hdev->flags) &&
!hci_dev_test_flag(hdev, HCI_SETUP) && !hci_dev_test_flag(hdev, HCI_SETUP) &&
@ -3644,6 +3826,7 @@ void hci_unregister_dev(struct hci_dev *hdev)
hci_smp_irks_clear(hdev); hci_smp_irks_clear(hdev);
hci_remote_oob_data_clear(hdev); hci_remote_oob_data_clear(hdev);
hci_adv_instances_clear(hdev); hci_adv_instances_clear(hdev);
hci_adv_monitors_clear(hdev);
hci_bdaddr_list_clear(&hdev->le_white_list); hci_bdaddr_list_clear(&hdev->le_white_list);
hci_bdaddr_list_clear(&hdev->le_resolv_list); hci_bdaddr_list_clear(&hdev->le_resolv_list);
hci_conn_params_clear_all(hdev); hci_conn_params_clear_all(hdev);
@ -4551,6 +4734,7 @@ static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
if (conn) { if (conn) {
/* Send to upper protocol */ /* Send to upper protocol */
bt_cb(skb)->sco.pkt_status = flags & 0x03;
sco_recv_scodata(conn, skb); sco_recv_scodata(conn, skb);
return; return;
} else { } else {

View File

@ -2296,6 +2296,22 @@ static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
if (!conn) if (!conn)
return; return;
/* When using controller based address resolution, then the new
* address types 0x02 and 0x03 are used. These types need to be
* converted back into either public address or random address type
*/
if (use_ll_privacy(hdev) &&
hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
switch (own_address_type) {
case ADDR_LE_DEV_PUBLIC_RESOLVED:
own_address_type = ADDR_LE_DEV_PUBLIC;
break;
case ADDR_LE_DEV_RANDOM_RESOLVED:
own_address_type = ADDR_LE_DEV_RANDOM;
break;
}
}
/* Store the initiator and responder address information which /* Store the initiator and responder address information which
* is needed for SMP. These values will not change during the * is needed for SMP. These values will not change during the
* lifetime of the connection. * lifetime of the connection.
@ -2517,7 +2533,7 @@ static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
BT_DBG("%s num_rsp %d", hdev->name, num_rsp); BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
if (!num_rsp) if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
return; return;
if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
@ -2697,10 +2713,10 @@ static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
*/ */
if (hci_dev_test_flag(hdev, HCI_MGMT) && if (hci_dev_test_flag(hdev, HCI_MGMT) &&
!hci_dev_test_flag(hdev, HCI_CONNECTABLE) && !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
!hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr, !hci_bdaddr_list_lookup_with_flags(&hdev->whitelist, &ev->bdaddr,
BDADDR_BREDR)) { BDADDR_BREDR)) {
hci_reject_conn(hdev, &ev->bdaddr); hci_reject_conn(hdev, &ev->bdaddr);
return; return;
} }
/* Connection accepted */ /* Connection accepted */
@ -2825,7 +2841,7 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
case HCI_AUTO_CONN_LINK_LOSS: case HCI_AUTO_CONN_LINK_LOSS:
if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT) if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
break; break;
/* Fall through */ fallthrough;
case HCI_AUTO_CONN_DIRECT: case HCI_AUTO_CONN_DIRECT:
case HCI_AUTO_CONN_ALWAYS: case HCI_AUTO_CONN_ALWAYS:
@ -3065,27 +3081,23 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
/* Check link security requirements are met */
if (!hci_conn_check_link_mode(conn))
ev->status = HCI_ERROR_AUTH_FAILURE;
if (ev->status && conn->state == BT_CONNECTED) { if (ev->status && conn->state == BT_CONNECTED) {
if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING) if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags); set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
/* Notify upper layers so they can cleanup before
* disconnecting.
*/
hci_encrypt_cfm(conn, ev->status);
hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
hci_conn_drop(conn); hci_conn_drop(conn);
goto unlock; goto unlock;
} }
/* In Secure Connections Only mode, do not allow any connections
* that are not encrypted with AES-CCM using a P-256 authenticated
* combination key.
*/
if (hci_dev_test_flag(hdev, HCI_SC_ONLY) &&
(!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
hci_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
hci_conn_drop(conn);
goto unlock;
}
/* Try reading the encryption key size for encrypted ACL links */ /* Try reading the encryption key size for encrypted ACL links */
if (!ev->status && ev->encrypt && conn->type == ACL_LINK) { if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
struct hci_cp_read_enc_key_size cp; struct hci_cp_read_enc_key_size cp;
@ -4163,6 +4175,9 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
struct inquiry_info_with_rssi_and_pscan_mode *info; struct inquiry_info_with_rssi_and_pscan_mode *info;
info = (void *) (skb->data + 1); info = (void *) (skb->data + 1);
if (skb->len < num_rsp * sizeof(*info) + 1)
goto unlock;
for (; num_rsp; num_rsp--, info++) { for (; num_rsp; num_rsp--, info++) {
u32 flags; u32 flags;
@ -4184,6 +4199,9 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
} else { } else {
struct inquiry_info_with_rssi *info = (void *) (skb->data + 1); struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
if (skb->len < num_rsp * sizeof(*info) + 1)
goto unlock;
for (; num_rsp; num_rsp--, info++) { for (; num_rsp; num_rsp--, info++) {
u32 flags; u32 flags;
@ -4204,6 +4222,7 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
} }
} }
unlock:
hci_dev_unlock(hdev); hci_dev_unlock(hdev);
} }
@ -4324,7 +4343,7 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
if (hci_setup_sync(conn, conn->link->handle)) if (hci_setup_sync(conn, conn->link->handle))
goto unlock; goto unlock;
} }
/* fall through */ fallthrough;
default: default:
conn->state = BT_CLOSED; conn->state = BT_CLOSED;
@ -4379,7 +4398,7 @@ static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
BT_DBG("%s num_rsp %d", hdev->name, num_rsp); BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
if (!num_rsp) if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
return; return;
if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
@ -5209,6 +5228,11 @@ static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
le16_to_cpu(ev->interval), le16_to_cpu(ev->interval),
le16_to_cpu(ev->latency), le16_to_cpu(ev->latency),
le16_to_cpu(ev->supervision_timeout)); le16_to_cpu(ev->supervision_timeout));
if (use_ll_privacy(hdev) &&
hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
hci_req_disable_address_resolution(hdev);
} }
static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb) static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
@ -5319,7 +5343,7 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
} }
conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW, conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER, hdev->def_le_autoconnect_timeout, HCI_ROLE_MASTER,
direct_rpa); direct_rpa);
if (!IS_ERR(conn)) { if (!IS_ERR(conn)) {
/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
@ -5447,14 +5471,15 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
/* Passive scanning shouldn't trigger any device found events, /* Passive scanning shouldn't trigger any device found events,
* except for devices marked as CONN_REPORT for which we do send * except for devices marked as CONN_REPORT for which we do send
* device found events. * device found events, or advertisement monitoring requested.
*/ */
if (hdev->le_scan_type == LE_SCAN_PASSIVE) { if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
if (type == LE_ADV_DIRECT_IND) if (type == LE_ADV_DIRECT_IND)
return; return;
if (!hci_pend_le_action_lookup(&hdev->pend_le_reports, if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
bdaddr, bdaddr_type)) bdaddr, bdaddr_type) &&
idr_is_empty(&hdev->adv_monitors_idr))
return; return;
if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND) if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)

View File

@ -34,9 +34,6 @@
#define HCI_REQ_PEND 1 #define HCI_REQ_PEND 1
#define HCI_REQ_CANCELED 2 #define HCI_REQ_CANCELED 2
#define LE_SUSPEND_SCAN_WINDOW 0x0012
#define LE_SUSPEND_SCAN_INTERVAL 0x0400
void hci_req_init(struct hci_request *req, struct hci_dev *hdev) void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{ {
skb_queue_head_init(&req->cmd_q); skb_queue_head_init(&req->cmd_q);
@ -366,13 +363,11 @@ void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
/* 160 msec page scan interval */ /* 160 msec page scan interval */
acp.interval = cpu_to_le16(0x0100); acp.interval = cpu_to_le16(0x0100);
} else { } else {
type = PAGE_SCAN_TYPE_STANDARD; /* default */ type = hdev->def_page_scan_type;
acp.interval = cpu_to_le16(hdev->def_page_scan_int);
/* default 1.28 sec page scan */
acp.interval = cpu_to_le16(0x0800);
} }
acp.window = cpu_to_le16(0x0012); acp.window = cpu_to_le16(hdev->def_page_scan_window);
if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval || if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
__cpu_to_le16(hdev->page_scan_window) != acp.window) __cpu_to_le16(hdev->page_scan_window) != acp.window)
@ -418,18 +413,22 @@ static void __hci_update_background_scan(struct hci_request *req)
*/ */
hci_discovery_filter_clear(hdev); hci_discovery_filter_clear(hdev);
BT_DBG("%s ADV monitoring is %s", hdev->name,
hci_is_adv_monitoring(hdev) ? "on" : "off");
if (list_empty(&hdev->pend_le_conns) && if (list_empty(&hdev->pend_le_conns) &&
list_empty(&hdev->pend_le_reports)) { list_empty(&hdev->pend_le_reports) &&
!hci_is_adv_monitoring(hdev)) {
/* If there is no pending LE connections or devices /* If there is no pending LE connections or devices
* to be scanned for, we should stop the background * to be scanned for or no ADV monitors, we should stop the
* scanning. * background scanning.
*/ */
/* If controller is not scanning we are done. */ /* If controller is not scanning we are done. */
if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
return; return;
hci_req_add_le_scan_disable(req); hci_req_add_le_scan_disable(req, false);
BT_DBG("%s stopping background scanning", hdev->name); BT_DBG("%s stopping background scanning", hdev->name);
} else { } else {
@ -448,7 +447,7 @@ static void __hci_update_background_scan(struct hci_request *req)
* don't miss any advertising (due to duplicates filter). * don't miss any advertising (due to duplicates filter).
*/ */
if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
hci_req_add_le_scan_disable(req); hci_req_add_le_scan_disable(req, false);
hci_req_add_le_passive_scan(req); hci_req_add_le_passive_scan(req);
@ -653,7 +652,7 @@ void __hci_req_update_eir(struct hci_request *req)
hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp); hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
} }
void hci_req_add_le_scan_disable(struct hci_request *req) void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
{ {
struct hci_dev *hdev = req->hdev; struct hci_dev *hdev = req->hdev;
@ -676,6 +675,15 @@ void hci_req_add_le_scan_disable(struct hci_request *req)
cp.enable = LE_SCAN_DISABLE; cp.enable = LE_SCAN_DISABLE;
hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
} }
/* Disable address resolution */
if (use_ll_privacy(hdev) &&
hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
__u8 enable = 0x00;
hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
}
} }
static void del_from_white_list(struct hci_request *req, bdaddr_t *bdaddr, static void del_from_white_list(struct hci_request *req, bdaddr_t *bdaddr,
@ -689,6 +697,21 @@ static void del_from_white_list(struct hci_request *req, bdaddr_t *bdaddr,
bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from whitelist", &cp.bdaddr, bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from whitelist", &cp.bdaddr,
cp.bdaddr_type); cp.bdaddr_type);
hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(cp), &cp); hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(cp), &cp);
if (use_ll_privacy(req->hdev)) {
struct smp_irk *irk;
irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
if (irk) {
struct hci_cp_le_del_from_resolv_list cp;
cp.bdaddr_type = bdaddr_type;
bacpy(&cp.bdaddr, bdaddr);
hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
sizeof(cp), &cp);
}
}
} }
/* Adds connection to white list if needed. On error, returns -1. */ /* Adds connection to white list if needed. On error, returns -1. */
@ -709,13 +732,14 @@ static int add_to_white_list(struct hci_request *req,
return -1; return -1;
/* White list can not be used with RPAs */ /* White list can not be used with RPAs */
if (!allow_rpa && if (!allow_rpa && !use_ll_privacy(hdev) &&
hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) { hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
return -1; return -1;
} }
/* During suspend, only wakeable devices can be in whitelist */ /* During suspend, only wakeable devices can be in whitelist */
if (hdev->suspended && !params->wakeable) if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
params->current_flags))
return 0; return 0;
*num_entries += 1; *num_entries += 1;
@ -726,6 +750,28 @@ static int add_to_white_list(struct hci_request *req,
cp.bdaddr_type); cp.bdaddr_type);
hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp); hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
if (use_ll_privacy(hdev)) {
struct smp_irk *irk;
irk = hci_find_irk_by_addr(hdev, &params->addr,
params->addr_type);
if (irk) {
struct hci_cp_le_add_to_resolv_list cp;
cp.bdaddr_type = params->addr_type;
bacpy(&cp.bdaddr, &params->addr);
memcpy(cp.peer_irk, irk->val, 16);
if (hci_dev_test_flag(hdev, HCI_PRIVACY))
memcpy(cp.local_irk, hdev->irk, 16);
else
memset(cp.local_irk, 0, 16);
hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
sizeof(cp), &cp);
}
}
return 0; return 0;
} }
@ -766,7 +812,7 @@ static u8 update_white_list(struct hci_request *req)
} }
/* White list can not be used with RPAs */ /* White list can not be used with RPAs */
if (!allow_rpa && if (!allow_rpa && !use_ll_privacy(hdev) &&
hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) { hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
return 0x00; return 0x00;
} }
@ -798,6 +844,14 @@ static u8 update_white_list(struct hci_request *req)
return 0x00; return 0x00;
} }
/* Once controller offloading of advertisement monitors is in place,
 * the condition below should also check for MSFT extension support.
 * If suspend is ongoing, the whitelist should remain the default to
 * prevent waking by random advertisements.
 */
if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended)
return 0x00;
/* Select filter policy to use white list */ /* Select filter policy to use white list */
return 0x01; return 0x01;
} }
@ -808,10 +862,24 @@ static bool scan_use_rpa(struct hci_dev *hdev)
} }
static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval, static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
u16 window, u8 own_addr_type, u8 filter_policy) u16 window, u8 own_addr_type, u8 filter_policy,
bool addr_resolv)
{ {
struct hci_dev *hdev = req->hdev; struct hci_dev *hdev = req->hdev;
if (hdev->scanning_paused) {
bt_dev_dbg(hdev, "Scanning is paused for suspend");
return;
}
if (use_ll_privacy(hdev) &&
hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
addr_resolv) {
u8 enable = 0x01;
hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
}
/* Use ext scanning if set ext scan param and ext scan enable is /* Use ext scanning if set ext scan param and ext scan enable is
* supported * supported
*/ */
@ -885,12 +953,39 @@ static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
} }
} }
/* Returns true if an le connection is in the scanning state */
static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
{
struct hci_conn_hash *h = &hdev->conn_hash;
struct hci_conn *c;
rcu_read_lock();
list_for_each_entry_rcu(c, &h->list, list) {
if (c->type == LE_LINK && c->state == BT_CONNECT &&
test_bit(HCI_CONN_SCANNING, &c->flags)) {
rcu_read_unlock();
return true;
}
}
rcu_read_unlock();
return false;
}
/* Call hci_req_add_le_scan_disable() first to disable controller-based
 * address resolution, so that the resolving list can be reconfigured.
 */
void hci_req_add_le_passive_scan(struct hci_request *req) void hci_req_add_le_passive_scan(struct hci_request *req)
{ {
struct hci_dev *hdev = req->hdev; struct hci_dev *hdev = req->hdev;
u8 own_addr_type; u8 own_addr_type;
u8 filter_policy; u8 filter_policy;
u16 window, interval; u16 window, interval;
/* Background scanning should run with address resolution */
bool addr_resolv = true;
if (hdev->scanning_paused) { if (hdev->scanning_paused) {
bt_dev_dbg(hdev, "Scanning is paused for suspend"); bt_dev_dbg(hdev, "Scanning is paused for suspend");
@ -927,8 +1022,11 @@ void hci_req_add_le_passive_scan(struct hci_request *req)
filter_policy |= 0x02; filter_policy |= 0x02;
if (hdev->suspended) { if (hdev->suspended) {
window = LE_SUSPEND_SCAN_WINDOW; window = hdev->le_scan_window_suspend;
interval = LE_SUSPEND_SCAN_INTERVAL; interval = hdev->le_scan_int_suspend;
} else if (hci_is_le_conn_scanning(hdev)) {
window = hdev->le_scan_window_connect;
interval = hdev->le_scan_int_connect;
} else { } else {
window = hdev->le_scan_window; window = hdev->le_scan_window;
interval = hdev->le_scan_interval; interval = hdev->le_scan_interval;
@ -936,7 +1034,7 @@ void hci_req_add_le_passive_scan(struct hci_request *req)
bt_dev_dbg(hdev, "LE passive scan with whitelist = %d", filter_policy); bt_dev_dbg(hdev, "LE passive scan with whitelist = %d", filter_policy);
hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window, hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
own_addr_type, filter_policy); own_addr_type, filter_policy, addr_resolv);
} }
static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance) static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
@ -973,15 +1071,19 @@ static void hci_req_clear_event_filter(struct hci_request *req)
static void hci_req_set_event_filter(struct hci_request *req) static void hci_req_set_event_filter(struct hci_request *req)
{ {
struct bdaddr_list *b; struct bdaddr_list_with_flags *b;
struct hci_cp_set_event_filter f; struct hci_cp_set_event_filter f;
struct hci_dev *hdev = req->hdev; struct hci_dev *hdev = req->hdev;
u8 scan; u8 scan = SCAN_DISABLED;
/* Always clear event filter when starting */ /* Always clear event filter when starting */
hci_req_clear_event_filter(req); hci_req_clear_event_filter(req);
list_for_each_entry(b, &hdev->wakeable, list) { list_for_each_entry(b, &hdev->whitelist, list) {
if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
b->current_flags))
continue;
memset(&f, 0, sizeof(f)); memset(&f, 0, sizeof(f));
bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr); bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
f.flt_type = HCI_FLT_CONN_SETUP; f.flt_type = HCI_FLT_CONN_SETUP;
@ -990,16 +1092,17 @@ static void hci_req_set_event_filter(struct hci_request *req)
bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr); bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f); hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
scan = SCAN_PAGE;
} }
scan = !list_empty(&hdev->wakeable) ? SCAN_PAGE : SCAN_DISABLED;
hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
} }
static void hci_req_config_le_suspend_scan(struct hci_request *req) static void hci_req_config_le_suspend_scan(struct hci_request *req)
{ {
/* Can't change params without disabling first */ /* Before changing params disable scan if enabled */
hci_req_add_le_scan_disable(req); if (hci_dev_test_flag(req->hdev, HCI_LE_SCAN))
hci_req_add_le_scan_disable(req, false);
/* Configure params and enable scanning */ /* Configure params and enable scanning */
hci_req_add_le_passive_scan(req); hci_req_add_le_passive_scan(req);
@ -1065,8 +1168,9 @@ void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
page_scan = SCAN_DISABLED; page_scan = SCAN_DISABLED;
hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &page_scan); hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &page_scan);
/* Disable LE passive scan */ /* Disable LE passive scan if enabled */
hci_req_add_le_scan_disable(&req); if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
hci_req_add_le_scan_disable(&req, false);
/* Mark task needing completion */ /* Mark task needing completion */
set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks); set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
@ -1160,13 +1264,8 @@ static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
void __hci_req_disable_advertising(struct hci_request *req) void __hci_req_disable_advertising(struct hci_request *req)
{ {
if (ext_adv_capable(req->hdev)) { if (ext_adv_capable(req->hdev)) {
struct hci_cp_le_set_ext_adv_enable cp; __hci_req_disable_ext_adv_instance(req, 0x00);
cp.enable = 0x00;
/* Disable all sets since we only support one set at the moment */
cp.num_of_sets = 0x00;
hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp), &cp);
} else { } else {
u8 enable = 0x00; u8 enable = 0x00;
@ -1627,6 +1726,28 @@ int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
return hci_req_run(&req, NULL); return hci_req_run(&req, NULL);
} }
static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
u16 opcode)
{
BT_DBG("%s status %u", hdev->name, status);
}
void hci_req_disable_address_resolution(struct hci_dev *hdev)
{
struct hci_request req;
__u8 enable = 0x00;
if (!use_ll_privacy(hdev) &&
!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
return;
hci_req_init(&req, hdev);
hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
hci_req_run(&req, enable_addr_resolution_complete);
}
static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode) static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{ {
BT_DBG("%s status %u", hdev->name, status); BT_DBG("%s status %u", hdev->name, status);
@ -1786,8 +1907,6 @@ int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
int err; int err;
struct adv_info *adv_instance; struct adv_info *adv_instance;
bool secondary_adv; bool secondary_adv;
/* In ext adv set param interval is 3 octets */
const u8 adv_interval[3] = { 0x00, 0x08, 0x00 };
if (instance > 0) { if (instance > 0) {
adv_instance = hci_find_adv_instance(hdev, instance); adv_instance = hci_find_adv_instance(hdev, instance);
@ -1820,8 +1939,9 @@ int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
memset(&cp, 0, sizeof(cp)); memset(&cp, 0, sizeof(cp));
memcpy(cp.min_interval, adv_interval, sizeof(cp.min_interval)); /* In ext adv set param interval is 3 octets */
memcpy(cp.max_interval, adv_interval, sizeof(cp.max_interval)); hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK); secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
@ -1932,13 +2052,59 @@ int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
return 0; return 0;
} }
int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
{
struct hci_dev *hdev = req->hdev;
struct hci_cp_le_set_ext_adv_enable *cp;
struct hci_cp_ext_adv_set *adv_set;
u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
u8 req_size;
/* If request specifies an instance that doesn't exist, fail */
if (instance > 0 && !hci_find_adv_instance(hdev, instance))
return -EINVAL;
memset(data, 0, sizeof(data));
cp = (void *)data;
adv_set = (void *)cp->data;
/* Instance 0x00 indicates all advertising instances will be disabled */
cp->num_of_sets = !!instance;
cp->enable = 0x00;
adv_set->handle = instance;
req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);
return 0;
}
int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
{
struct hci_dev *hdev = req->hdev;
/* If request specifies an instance that doesn't exist, fail */
if (instance > 0 && !hci_find_adv_instance(hdev, instance))
return -EINVAL;
hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);
return 0;
}
int __hci_req_start_ext_adv(struct hci_request *req, u8 instance) int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
{ {
struct hci_dev *hdev = req->hdev; struct hci_dev *hdev = req->hdev;
struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
int err; int err;
if (hci_dev_test_flag(hdev, HCI_LE_ADV)) /* If instance isn't pending, the chip knows about it, and it's safe to
__hci_req_disable_advertising(req); * disable
*/
if (adv_instance && !adv_instance->pending)
__hci_req_disable_ext_adv_instance(req, instance);
err = __hci_req_setup_ext_adv_instance(req, instance); err = __hci_req_setup_ext_adv_instance(req, instance);
if (err < 0) if (err < 0)
@ -2086,7 +2252,7 @@ void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
hci_dev_test_flag(hdev, HCI_ADVERTISING)) hci_dev_test_flag(hdev, HCI_ADVERTISING))
return; return;
if (next_instance) if (next_instance && !ext_adv_capable(hdev))
__hci_req_schedule_adv_instance(req, next_instance->instance, __hci_req_schedule_adv_instance(req, next_instance->instance,
false); false);
} }
@ -2128,7 +2294,13 @@ int hci_update_random_address(struct hci_request *req, bool require_privacy,
if (use_rpa) { if (use_rpa) {
int to; int to;
*own_addr_type = ADDR_LE_DEV_RANDOM; /* If the controller supports LL Privacy, use own address type
 * 0x03 (ADDR_LE_DEV_RANDOM_RESOLVED)
 */
if (use_ll_privacy(hdev))
*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
else
*own_addr_type = ADDR_LE_DEV_RANDOM;
if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) && if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
!bacmp(&hdev->random_addr, &hdev->rpa)) !bacmp(&hdev->random_addr, &hdev->rpa))
@ -2547,7 +2719,7 @@ static void bg_scan_update(struct work_struct *work)
static int le_scan_disable(struct hci_request *req, unsigned long opt) static int le_scan_disable(struct hci_request *req, unsigned long opt)
{ {
hci_req_add_le_scan_disable(req); hci_req_add_le_scan_disable(req, false);
return 0; return 0;
} }
@ -2645,7 +2817,12 @@ static int le_scan_restart(struct hci_request *req, unsigned long opt)
if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
return 0; return 0;
hci_req_add_le_scan_disable(req); if (hdev->scanning_paused) {
bt_dev_dbg(hdev, "Scanning is paused for suspend");
return 0;
}
hci_req_add_le_scan_disable(req, false);
if (use_ext_scan(hdev)) { if (use_ext_scan(hdev)) {
struct hci_cp_le_set_ext_scan_enable ext_enable_cp; struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
@ -2725,6 +2902,8 @@ static int active_scan(struct hci_request *req, unsigned long opt)
u8 own_addr_type; u8 own_addr_type;
/* White list is not used for discovery */ /* White list is not used for discovery */
u8 filter_policy = 0x00; u8 filter_policy = 0x00;
/* Discovery doesn't require controller address resolution */
bool addr_resolv = false;
int err; int err;
BT_DBG("%s", hdev->name); BT_DBG("%s", hdev->name);
@ -2734,7 +2913,7 @@ static int active_scan(struct hci_request *req, unsigned long opt)
* discovery scanning parameters. * discovery scanning parameters.
*/ */
if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
hci_req_add_le_scan_disable(req); hci_req_add_le_scan_disable(req, false);
/* All active scans will be done with either a resolvable private /* All active scans will be done with either a resolvable private
* address (when privacy feature has been enabled) or non-resolvable * address (when privacy feature has been enabled) or non-resolvable
@ -2745,8 +2924,9 @@ static int active_scan(struct hci_request *req, unsigned long opt)
if (err < 0) if (err < 0)
own_addr_type = ADDR_LE_DEV_PUBLIC; own_addr_type = ADDR_LE_DEV_PUBLIC;
hci_req_start_scan(req, LE_SCAN_ACTIVE, interval, DISCOV_LE_SCAN_WIN,
own_addr_type, filter_policy);
hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
hdev->le_scan_window_discovery, own_addr_type,
filter_policy, addr_resolv);
return 0; return 0;
} }
@ -2793,18 +2973,18 @@ static void start_discovery(struct hci_dev *hdev, u8 *status)
* to do BR/EDR inquiry. * to do BR/EDR inquiry.
*/ */
hci_req_sync(hdev, interleaved_discov, hci_req_sync(hdev, interleaved_discov,
DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT, hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT,
status); status);
break; break;
} }
timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout); timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT, hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
HCI_CMD_TIMEOUT, status); HCI_CMD_TIMEOUT, status);
break; break;
case DISCOV_TYPE_LE: case DISCOV_TYPE_LE:
timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT); timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT, hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
HCI_CMD_TIMEOUT, status); HCI_CMD_TIMEOUT, status);
break; break;
default: default:
@ -2848,14 +3028,14 @@ bool hci_req_stop_discovery(struct hci_request *req)
if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) { if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
cancel_delayed_work(&hdev->le_scan_disable); cancel_delayed_work(&hdev->le_scan_disable);
hci_req_add_le_scan_disable(req); hci_req_add_le_scan_disable(req, false);
} }
ret = true; ret = true;
} else { } else {
/* Passive scanning */ /* Passive scanning */
if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) { if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
hci_req_add_le_scan_disable(req); hci_req_add_le_scan_disable(req, false);
ret = true; ret = true;
} }
} }

View File

@ -65,11 +65,12 @@ void __hci_req_write_fast_connectable(struct hci_request *req, bool enable);
void __hci_req_update_name(struct hci_request *req); void __hci_req_update_name(struct hci_request *req);
void __hci_req_update_eir(struct hci_request *req); void __hci_req_update_eir(struct hci_request *req);
void hci_req_add_le_scan_disable(struct hci_request *req); void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn);
void hci_req_add_le_passive_scan(struct hci_request *req); void hci_req_add_le_passive_scan(struct hci_request *req);
void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next); void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next);
void hci_req_disable_address_resolution(struct hci_dev *hdev);
void hci_req_reenable_advertising(struct hci_dev *hdev); void hci_req_reenable_advertising(struct hci_dev *hdev);
void __hci_req_enable_advertising(struct hci_request *req); void __hci_req_enable_advertising(struct hci_request *req);
void __hci_req_disable_advertising(struct hci_request *req); void __hci_req_disable_advertising(struct hci_request *req);
@ -86,6 +87,8 @@ void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance); int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance);
int __hci_req_start_ext_adv(struct hci_request *req, u8 instance); int __hci_req_start_ext_adv(struct hci_request *req, u8 instance);
int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance); int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance);
int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance);
int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance);
void __hci_req_clear_ext_adv_sets(struct hci_request *req); void __hci_req_clear_ext_adv_sets(struct hci_request *req);
int hci_get_random_address(struct hci_dev *hdev, bool require_privacy, int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
bool use_rpa, struct adv_info *adv_instance, bool use_rpa, struct adv_info *adv_instance,
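A brief sketch of how the two new helpers are meant to be chained inside a request, mirroring what remove_advertising() in mgmt.c does further down this diff: disable the set first, then remove it from the controller. The wrapper name is illustrative only.

static void example_teardown_ext_adv(struct hci_dev *hdev, u8 instance)
{
        struct hci_request req;

        hci_req_init(&req, hdev);
        /* Stop the advertising set, then tell the controller to forget it */
        __hci_req_disable_ext_adv_instance(&req, instance);
        __hci_req_remove_ext_adv_instance(&req, instance);
        hci_req_run(&req, NULL);
}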

View File

@ -52,7 +52,7 @@ struct hci_pinfo {
struct bt_sock bt; struct bt_sock bt;
struct hci_dev *hdev; struct hci_dev *hdev;
struct hci_filter filter; struct hci_filter filter;
__u32 cmsg_mask; __u8 cmsg_mask;
unsigned short channel; unsigned short channel;
unsigned long flags; unsigned long flags;
__u32 cookie; __u32 cookie;
@ -443,8 +443,7 @@ static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
case HCI_DEV_SETUP: case HCI_DEV_SETUP:
if (hdev->manufacturer == 0xffff) if (hdev->manufacturer == 0xffff)
return NULL; return NULL;
fallthrough;
/* fall through */
case HCI_DEV_UP: case HCI_DEV_UP:
skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC); skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
@ -1399,7 +1398,7 @@ done:
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
struct sk_buff *skb) struct sk_buff *skb)
{ {
__u32 mask = hci_pi(sk)->cmsg_mask; __u8 mask = hci_pi(sk)->cmsg_mask;
if (mask & HCI_CMSG_DIR) { if (mask & HCI_CMSG_DIR) {
int incoming = bt_cb(skb)->incoming; int incoming = bt_cb(skb)->incoming;

View File

@ -666,8 +666,7 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
l2cap_seq_list_free(&chan->srej_list); l2cap_seq_list_free(&chan->srej_list);
l2cap_seq_list_free(&chan->retrans_list); l2cap_seq_list_free(&chan->retrans_list);
fallthrough;
/* fall through */
case L2CAP_MODE_STREAMING: case L2CAP_MODE_STREAMING:
skb_queue_purge(&chan->tx_q); skb_queue_purge(&chan->tx_q);
@ -872,7 +871,8 @@ static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
else else
return HCI_AT_NO_BONDING; return HCI_AT_NO_BONDING;
} }
/* fall through */ fallthrough;
default: default:
switch (chan->sec_level) { switch (chan->sec_level) {
case BT_SECURITY_HIGH: case BT_SECURITY_HIGH:
@ -2983,8 +2983,7 @@ static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
break; break;
case L2CAP_EV_RECV_REQSEQ_AND_FBIT: case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
l2cap_process_reqseq(chan, control->reqseq); l2cap_process_reqseq(chan, control->reqseq);
fallthrough;
/* Fall through */
case L2CAP_EV_RECV_FBIT: case L2CAP_EV_RECV_FBIT:
if (control && control->final) { if (control && control->final) {
@ -3311,7 +3310,7 @@ static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
case L2CAP_MODE_ERTM: case L2CAP_MODE_ERTM:
if (l2cap_mode_supported(mode, remote_feat_mask)) if (l2cap_mode_supported(mode, remote_feat_mask))
return mode; return mode;
/* fall through */ fallthrough;
default: default:
return L2CAP_MODE_BASIC; return L2CAP_MODE_BASIC;
} }
@ -3447,7 +3446,7 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data
if (__l2cap_efs_supported(chan->conn)) if (__l2cap_efs_supported(chan->conn))
set_bit(FLAG_EFS_ENABLE, &chan->flags); set_bit(FLAG_EFS_ENABLE, &chan->flags);
/* fall through */ fallthrough;
default: default:
chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask); chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
break; break;
@ -4539,7 +4538,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn,
goto done; goto done;
break; break;
} }
/* fall through */ fallthrough;
default: default:
l2cap_chan_set_err(chan, ECONNRESET); l2cap_chan_set_err(chan, ECONNRESET);
@ -7719,7 +7718,7 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
conn->mtu = hcon->hdev->le_mtu; conn->mtu = hcon->hdev->le_mtu;
break; break;
} }
/* fall through */ fallthrough;
default: default:
conn->mtu = hcon->hdev->acl_mtu; conn->mtu = hcon->hdev->acl_mtu;
break; break;
@ -7841,7 +7840,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
case L2CAP_MODE_STREAMING: case L2CAP_MODE_STREAMING:
if (!disable_ertm) if (!disable_ertm)
break; break;
/* fall through */ fallthrough;
default: default:
err = -EOPNOTSUPP; err = -EOPNOTSUPP;
goto done; goto done;
@ -7893,11 +7892,13 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
else else
hcon = hci_connect_le_scan(hdev, dst, dst_type, hcon = hci_connect_le_scan(hdev, dst, dst_type,
chan->sec_level, chan->sec_level,
HCI_LE_CONN_TIMEOUT); HCI_LE_CONN_TIMEOUT,
CONN_REASON_L2CAP_CHAN);
} else { } else {
u8 auth_type = l2cap_get_auth_type(chan); u8 auth_type = l2cap_get_auth_type(chan);
hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type); hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
CONN_REASON_L2CAP_CHAN);
} }
if (IS_ERR(hcon)) { if (IS_ERR(hcon)) {

View File

@ -284,7 +284,7 @@ static int l2cap_sock_listen(struct socket *sock, int backlog)
case L2CAP_MODE_STREAMING: case L2CAP_MODE_STREAMING:
if (!disable_ertm) if (!disable_ertm)
break; break;
/* fall through */ fallthrough;
default: default:
err = -EOPNOTSUPP; err = -EOPNOTSUPP;
goto done; goto done;
@ -760,7 +760,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
case L2CAP_MODE_STREAMING: case L2CAP_MODE_STREAMING:
if (!disable_ertm) if (!disable_ertm)
break; break;
/* fall through */ fallthrough;
default: default:
err = -EINVAL; err = -EINVAL;
break; break;

View File

@ -36,9 +36,11 @@
#include "hci_request.h" #include "hci_request.h"
#include "smp.h" #include "smp.h"
#include "mgmt_util.h" #include "mgmt_util.h"
#include "mgmt_config.h"
#include "msft.h"
#define MGMT_VERSION 1 #define MGMT_VERSION 1
#define MGMT_REVISION 17 #define MGMT_REVISION 18
static const u16 mgmt_commands[] = { static const u16 mgmt_commands[] = {
MGMT_OP_READ_INDEX_LIST, MGMT_OP_READ_INDEX_LIST,
@ -111,6 +113,15 @@ static const u16 mgmt_commands[] = {
MGMT_OP_READ_SECURITY_INFO, MGMT_OP_READ_SECURITY_INFO,
MGMT_OP_READ_EXP_FEATURES_INFO, MGMT_OP_READ_EXP_FEATURES_INFO,
MGMT_OP_SET_EXP_FEATURE, MGMT_OP_SET_EXP_FEATURE,
MGMT_OP_READ_DEF_SYSTEM_CONFIG,
MGMT_OP_SET_DEF_SYSTEM_CONFIG,
MGMT_OP_READ_DEF_RUNTIME_CONFIG,
MGMT_OP_SET_DEF_RUNTIME_CONFIG,
MGMT_OP_GET_DEVICE_FLAGS,
MGMT_OP_SET_DEVICE_FLAGS,
MGMT_OP_READ_ADV_MONITOR_FEATURES,
MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
MGMT_OP_REMOVE_ADV_MONITOR,
}; };
static const u16 mgmt_events[] = { static const u16 mgmt_events[] = {
@ -151,6 +162,7 @@ static const u16 mgmt_events[] = {
MGMT_EV_EXT_INFO_CHANGED, MGMT_EV_EXT_INFO_CHANGED,
MGMT_EV_PHY_CONFIGURATION_CHANGED, MGMT_EV_PHY_CONFIGURATION_CHANGED,
MGMT_EV_EXP_FEATURE_CHANGED, MGMT_EV_EXP_FEATURE_CHANGED,
MGMT_EV_DEVICE_FLAGS_CHANGED,
}; };
static const u16 mgmt_untrusted_commands[] = { static const u16 mgmt_untrusted_commands[] = {
@ -162,6 +174,8 @@ static const u16 mgmt_untrusted_commands[] = {
MGMT_OP_READ_EXT_INFO, MGMT_OP_READ_EXT_INFO,
MGMT_OP_READ_SECURITY_INFO, MGMT_OP_READ_SECURITY_INFO,
MGMT_OP_READ_EXP_FEATURES_INFO, MGMT_OP_READ_EXP_FEATURES_INFO,
MGMT_OP_READ_DEF_SYSTEM_CONFIG,
MGMT_OP_READ_DEF_RUNTIME_CONFIG,
}; };
static const u16 mgmt_untrusted_events[] = { static const u16 mgmt_untrusted_events[] = {
@ -177,6 +191,8 @@ static const u16 mgmt_untrusted_events[] = {
MGMT_EV_EXT_INDEX_REMOVED, MGMT_EV_EXT_INDEX_REMOVED,
MGMT_EV_EXT_INFO_CHANGED, MGMT_EV_EXT_INFO_CHANGED,
MGMT_EV_EXP_FEATURE_CHANGED, MGMT_EV_EXP_FEATURE_CHANGED,
MGMT_EV_ADV_MONITOR_ADDED,
MGMT_EV_ADV_MONITOR_REMOVED,
}; };
#define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000) #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
@ -779,10 +795,15 @@ static u32 get_supported_settings(struct hci_dev *hdev)
if (lmp_le_capable(hdev)) { if (lmp_le_capable(hdev)) {
settings |= MGMT_SETTING_LE; settings |= MGMT_SETTING_LE;
settings |= MGMT_SETTING_ADVERTISING;
settings |= MGMT_SETTING_SECURE_CONN; settings |= MGMT_SETTING_SECURE_CONN;
settings |= MGMT_SETTING_PRIVACY; settings |= MGMT_SETTING_PRIVACY;
settings |= MGMT_SETTING_STATIC_ADDRESS; settings |= MGMT_SETTING_STATIC_ADDRESS;
/* When the experimental feature for LL Privacy support is
* enabled, then advertising is no longer supported.
*/
if (!hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
settings |= MGMT_SETTING_ADVERTISING;
} }
if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) || if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
@ -2915,7 +2936,7 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
if (cp->addr.type == BDADDR_BREDR) { if (cp->addr.type == BDADDR_BREDR) {
conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level, conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
auth_type); auth_type, CONN_REASON_PAIR_DEVICE);
} else { } else {
u8 addr_type = le_addr_type(cp->addr.type); u8 addr_type = le_addr_type(cp->addr.type);
struct hci_conn_params *p; struct hci_conn_params *p;
@ -2934,9 +2955,9 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT) if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
p->auto_connect = HCI_AUTO_CONN_DISABLED; p->auto_connect = HCI_AUTO_CONN_DISABLED;
conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
addr_type, sec_level, sec_level, HCI_LE_CONN_TIMEOUT,
HCI_LE_CONN_TIMEOUT); CONN_REASON_PAIR_DEVICE);
} }
if (IS_ERR(conn)) { if (IS_ERR(conn)) {
@ -3037,6 +3058,20 @@ static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0, err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
addr, sizeof(*addr)); addr, sizeof(*addr));
/* Since user doesn't want to proceed with the connection, abort any
* ongoing pairing and then terminate the link if it was created
* because of the pair device action.
*/
if (addr->type == BDADDR_BREDR)
hci_remove_link_key(hdev, &addr->bdaddr);
else
smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
le_addr_type(addr->type));
if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
unlock: unlock:
hci_dev_unlock(hdev); hci_dev_unlock(hdev);
return err; return err;
@ -3723,12 +3758,25 @@ static const u8 debug_uuid[16] = {
}; };
#endif #endif
/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 simult_central_periph_uuid[16] = {
0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};
/* 15c0a148-c273-11ea-b3de-0242ac130004 */
static const u8 rpa_resolution_uuid[16] = {
0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev, static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
void *data, u16 data_len) void *data, u16 data_len)
{ {
char buf[42]; char buf[62]; /* Enough space for 3 features */
struct mgmt_rp_read_exp_features_info *rp = (void *)buf; struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
u16 idx = 0; u16 idx = 0;
u32 flags;
bt_dev_dbg(hdev, "sock %p", sk); bt_dev_dbg(hdev, "sock %p", sk);
@ -3736,7 +3784,7 @@ static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
#ifdef CONFIG_BT_FEATURE_DEBUG #ifdef CONFIG_BT_FEATURE_DEBUG
if (!hdev) { if (!hdev) {
u32 flags = bt_dbg_get() ? BIT(0) : 0; flags = bt_dbg_get() ? BIT(0) : 0;
memcpy(rp->features[idx].uuid, debug_uuid, 16); memcpy(rp->features[idx].uuid, debug_uuid, 16);
rp->features[idx].flags = cpu_to_le32(flags); rp->features[idx].flags = cpu_to_le32(flags);
@ -3744,6 +3792,31 @@ static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
} }
#endif #endif
if (hdev) {
if (test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) &&
(hdev->le_states[4] & 0x08) && /* Central */
(hdev->le_states[4] & 0x40) && /* Peripheral */
(hdev->le_states[3] & 0x10)) /* Simultaneous */
flags = BIT(0);
else
flags = 0;
memcpy(rp->features[idx].uuid, simult_central_periph_uuid, 16);
rp->features[idx].flags = cpu_to_le32(flags);
idx++;
}
if (hdev && use_ll_privacy(hdev)) {
if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
flags = BIT(0) | BIT(1);
else
flags = BIT(1);
memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
rp->features[idx].flags = cpu_to_le32(flags);
idx++;
}
rp->feature_count = cpu_to_le16(idx); rp->feature_count = cpu_to_le16(idx);
/* After reading the experimental features information, enable /* After reading the experimental features information, enable
@ -3756,6 +3829,21 @@ static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
0, rp, sizeof(*rp) + (20 * idx)); 0, rp, sizeof(*rp) + (20 * idx));
} }
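For reference, a userspace-style sketch of walking the reply built above: a 16-bit feature count followed by 20-byte entries (16-byte UUID plus a little-endian 32-bit flags word, BIT(0) meaning enabled), which is where the sizeof(*rp) + (20 * idx) reply size comes from. The helper name and the 2-byte header offset are assumptions inferred from this function; le32toh() is the glibc byte-order helper.

#include <endian.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void example_parse_exp_features(const uint8_t *rp, size_t len)
{
        uint16_t count;
        unsigned int i;
        uint32_t flags;

        if (len < 2)
                return;
        count = rp[0] | (rp[1] << 8);           /* __le16 feature_count */

        for (i = 0; i < count && 2 + (i + 1) * 20u <= len; i++) {
                const uint8_t *entry = rp + 2 + i * 20;

                memcpy(&flags, entry + 16, sizeof(flags)); /* after 16-byte UUID */
                printf("feature %u enabled=%u\n", i, le32toh(flags) & 1);
        }
}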
static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
struct sock *skip)
{
struct mgmt_ev_exp_feature_changed ev;
memset(&ev, 0, sizeof(ev));
memcpy(ev.uuid, rpa_resolution_uuid, 16);
ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
&ev, sizeof(ev),
HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}
#ifdef CONFIG_BT_FEATURE_DEBUG #ifdef CONFIG_BT_FEATURE_DEBUG
static int exp_debug_feature_changed(bool enabled, struct sock *skip) static int exp_debug_feature_changed(bool enabled, struct sock *skip)
{ {
@ -3794,6 +3882,16 @@ static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
} }
#endif #endif
if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
bool changed = hci_dev_test_flag(hdev,
HCI_ENABLE_LL_PRIVACY);
hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);
if (changed)
exp_ll_privacy_feature_changed(false, hdev, sk);
}
hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE, return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
@ -3844,11 +3942,401 @@ static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
} }
#endif #endif
if (!memcmp(cp->uuid, rpa_resolution_uuid, 16)) {
bool val, changed;
int err;
u32 flags;
/* Command requires to use the controller index */
if (!hdev)
return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
MGMT_OP_SET_EXP_FEATURE,
MGMT_STATUS_INVALID_INDEX);
/* Changes can only be made when controller is powered down */
if (hdev_is_powered(hdev))
return mgmt_cmd_status(sk, hdev->id,
MGMT_OP_SET_EXP_FEATURE,
MGMT_STATUS_NOT_POWERED);
/* Parameters are limited to a single octet */
if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
return mgmt_cmd_status(sk, hdev->id,
MGMT_OP_SET_EXP_FEATURE,
MGMT_STATUS_INVALID_PARAMS);
/* Only boolean on/off is supported */
if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
return mgmt_cmd_status(sk, hdev->id,
MGMT_OP_SET_EXP_FEATURE,
MGMT_STATUS_INVALID_PARAMS);
val = !!cp->param[0];
if (val) {
changed = !hci_dev_test_flag(hdev,
HCI_ENABLE_LL_PRIVACY);
hci_dev_set_flag(hdev, HCI_ENABLE_LL_PRIVACY);
hci_dev_clear_flag(hdev, HCI_ADVERTISING);
/* Enable LL privacy + supported settings changed */
flags = BIT(0) | BIT(1);
} else {
changed = hci_dev_test_flag(hdev,
HCI_ENABLE_LL_PRIVACY);
hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);
/* Disable LL privacy + supported settings changed */
flags = BIT(1);
}
memcpy(rp.uuid, rpa_resolution_uuid, 16);
rp.flags = cpu_to_le32(flags);
hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
err = mgmt_cmd_complete(sk, hdev->id,
MGMT_OP_SET_EXP_FEATURE, 0,
&rp, sizeof(rp));
if (changed)
exp_ll_privacy_feature_changed(val, hdev, sk);
return err;
}
return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE, return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
MGMT_OP_SET_EXP_FEATURE, MGMT_OP_SET_EXP_FEATURE,
MGMT_STATUS_NOT_SUPPORTED); MGMT_STATUS_NOT_SUPPORTED);
} }
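For illustration, the payload the handler above accepts for the RPA-resolution feature is the 16-byte UUID followed by a single on/off octet (the length check is MGMT_SET_EXP_FEATURE_SIZE + 1, and the controller must be powered off for the change to be accepted). The builder below is a hedged userspace sketch of that buffer layout only; sending it on the mgmt socket with the right controller index is not shown.

#include <stdint.h>
#include <string.h>

static size_t example_build_set_ll_privacy(uint8_t buf[17], int enable)
{
        /* Same byte order as the rpa_resolution_uuid table above */
        static const uint8_t rpa_resolution_uuid[16] = {
                0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
                0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
        };

        memcpy(buf, rpa_resolution_uuid, sizeof(rpa_resolution_uuid));
        buf[16] = enable ? 0x01 : 0x00; /* only 0x00 and 0x01 are accepted */
        return 17;
}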
#define SUPPORTED_DEVICE_FLAGS() ((1U << HCI_CONN_FLAG_MAX) - 1)
static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
u16 data_len)
{
struct mgmt_cp_get_device_flags *cp = data;
struct mgmt_rp_get_device_flags rp;
struct bdaddr_list_with_flags *br_params;
struct hci_conn_params *params;
u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
u32 current_flags = 0;
u8 status = MGMT_STATUS_INVALID_PARAMS;
bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
&cp->addr.bdaddr, cp->addr.type);
hci_dev_lock(hdev);
if (cp->addr.type == BDADDR_BREDR) {
br_params = hci_bdaddr_list_lookup_with_flags(&hdev->whitelist,
&cp->addr.bdaddr,
cp->addr.type);
if (!br_params)
goto done;
current_flags = br_params->current_flags;
} else {
params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
le_addr_type(cp->addr.type));
if (!params)
goto done;
current_flags = params->current_flags;
}
bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
rp.addr.type = cp->addr.type;
rp.supported_flags = cpu_to_le32(supported_flags);
rp.current_flags = cpu_to_le32(current_flags);
status = MGMT_STATUS_SUCCESS;
done:
hci_dev_unlock(hdev);
return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
&rp, sizeof(rp));
}
static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
bdaddr_t *bdaddr, u8 bdaddr_type,
u32 supported_flags, u32 current_flags)
{
struct mgmt_ev_device_flags_changed ev;
bacpy(&ev.addr.bdaddr, bdaddr);
ev.addr.type = bdaddr_type;
ev.supported_flags = cpu_to_le32(supported_flags);
ev.current_flags = cpu_to_le32(current_flags);
mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
}
static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
u16 len)
{
struct mgmt_cp_set_device_flags *cp = data;
struct bdaddr_list_with_flags *br_params;
struct hci_conn_params *params;
u8 status = MGMT_STATUS_INVALID_PARAMS;
u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
u32 current_flags = __le32_to_cpu(cp->current_flags);
bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
&cp->addr.bdaddr, cp->addr.type,
__le32_to_cpu(current_flags));
if ((supported_flags | current_flags) != supported_flags) {
bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
current_flags, supported_flags);
goto done;
}
hci_dev_lock(hdev);
if (cp->addr.type == BDADDR_BREDR) {
br_params = hci_bdaddr_list_lookup_with_flags(&hdev->whitelist,
&cp->addr.bdaddr,
cp->addr.type);
if (br_params) {
br_params->current_flags = current_flags;
status = MGMT_STATUS_SUCCESS;
} else {
bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
&cp->addr.bdaddr, cp->addr.type);
}
} else {
params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
le_addr_type(cp->addr.type));
if (params) {
params->current_flags = current_flags;
status = MGMT_STATUS_SUCCESS;
} else {
bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
&cp->addr.bdaddr,
le_addr_type(cp->addr.type));
}
}
done:
hci_dev_unlock(hdev);
if (status == MGMT_STATUS_SUCCESS)
device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
supported_flags, current_flags);
return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
&cp->addr, sizeof(cp->addr));
}
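A minimal sketch of the flag validation used by set_device_flags() above: the supported mask is simply (1 << HCI_CONN_FLAG_MAX) - 1, and any requested bit outside that mask rejects the whole command. Helper name and types are illustrative.

#include <stdbool.h>
#include <stdint.h>

static bool example_device_flags_ok(uint32_t requested, unsigned int max_flag)
{
        uint32_t supported = (1U << max_flag) - 1; /* SUPPORTED_DEVICE_FLAGS() */

        /* Same rule as set_device_flags(): no bits outside the mask */
        return (supported | requested) == supported;
}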
static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
u16 handle)
{
struct mgmt_ev_adv_monitor_added ev;
ev.monitor_handle = cpu_to_le16(handle);
mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
}
static void mgmt_adv_monitor_removed(struct sock *sk, struct hci_dev *hdev,
u16 handle)
{
struct mgmt_ev_adv_monitor_removed ev;
ev.monitor_handle = cpu_to_le16(handle);
mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk);
}
static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
{
struct adv_monitor *monitor = NULL;
struct mgmt_rp_read_adv_monitor_features *rp = NULL;
int handle;
size_t rp_size = 0;
__u32 supported = 0;
__u16 num_handles = 0;
__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
BT_DBG("request for %s", hdev->name);
hci_dev_lock(hdev);
if (msft_get_features(hdev) & MSFT_FEATURE_MASK_LE_ADV_MONITOR)
supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle) {
handles[num_handles++] = monitor->handle;
}
hci_dev_unlock(hdev);
rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
rp = kmalloc(rp_size, GFP_KERNEL);
if (!rp)
return -ENOMEM;
/* Once controller-based monitoring is in place, the enabled_features
* should reflect the use.
*/
rp->supported_features = cpu_to_le32(supported);
rp->enabled_features = 0;
rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
rp->num_handles = cpu_to_le16(num_handles);
if (num_handles)
memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
return mgmt_cmd_complete(sk, hdev->id,
MGMT_OP_READ_ADV_MONITOR_FEATURES,
MGMT_STATUS_SUCCESS, rp, rp_size);
}
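The reply above is a fixed header plus one 16-bit handle per registered monitor (rp_size = sizeof(*rp) + num_handles * sizeof(u16)). Below is a self-contained toy version of that allocation pattern; the field order follows the handler, but the exact on-the-wire packing of the kernel struct is an assumption.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct example_adv_mon_features_rp {
        uint32_t supported_features;
        uint32_t enabled_features;
        uint16_t max_num_handles;
        uint8_t  max_num_patterns;
        uint16_t num_handles;
        uint16_t handles[];             /* one entry per monitor */
};

static struct example_adv_mon_features_rp *
example_build_adv_mon_rp(const uint16_t *handles, uint16_t num_handles)
{
        size_t size = sizeof(struct example_adv_mon_features_rp) +
                      num_handles * sizeof(uint16_t);
        struct example_adv_mon_features_rp *rp = calloc(1, size);

        if (!rp)
                return NULL;

        rp->num_handles = num_handles;
        memcpy(rp->handles, handles, num_handles * sizeof(uint16_t));
        return rp;
}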
static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
{
struct mgmt_cp_add_adv_patterns_monitor *cp = data;
struct mgmt_rp_add_adv_patterns_monitor rp;
struct adv_monitor *m = NULL;
struct adv_pattern *p = NULL;
unsigned int mp_cnt = 0, prev_adv_monitors_cnt;
__u8 cp_ofst = 0, cp_len = 0;
int err, i;
BT_DBG("request for %s", hdev->name);
if (len <= sizeof(*cp) || cp->pattern_count == 0) {
err = mgmt_cmd_status(sk, hdev->id,
MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
MGMT_STATUS_INVALID_PARAMS);
goto failed;
}
m = kmalloc(sizeof(*m), GFP_KERNEL);
if (!m) {
err = -ENOMEM;
goto failed;
}
INIT_LIST_HEAD(&m->patterns);
m->active = false;
for (i = 0; i < cp->pattern_count; i++) {
if (++mp_cnt > HCI_MAX_ADV_MONITOR_NUM_PATTERNS) {
err = mgmt_cmd_status(sk, hdev->id,
MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
MGMT_STATUS_INVALID_PARAMS);
goto failed;
}
cp_ofst = cp->patterns[i].offset;
cp_len = cp->patterns[i].length;
if (cp_ofst >= HCI_MAX_AD_LENGTH ||
cp_len > HCI_MAX_AD_LENGTH ||
(cp_ofst + cp_len) > HCI_MAX_AD_LENGTH) {
err = mgmt_cmd_status(sk, hdev->id,
MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
MGMT_STATUS_INVALID_PARAMS);
goto failed;
}
p = kmalloc(sizeof(*p), GFP_KERNEL);
if (!p) {
err = -ENOMEM;
goto failed;
}
p->ad_type = cp->patterns[i].ad_type;
p->offset = cp->patterns[i].offset;
p->length = cp->patterns[i].length;
memcpy(p->value, cp->patterns[i].value, p->length);
INIT_LIST_HEAD(&p->list);
list_add(&p->list, &m->patterns);
}
if (mp_cnt != cp->pattern_count) {
err = mgmt_cmd_status(sk, hdev->id,
MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
MGMT_STATUS_INVALID_PARAMS);
goto failed;
}
hci_dev_lock(hdev);
prev_adv_monitors_cnt = hdev->adv_monitors_cnt;
err = hci_add_adv_monitor(hdev, m);
if (err) {
if (err == -ENOSPC) {
mgmt_cmd_status(sk, hdev->id,
MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
MGMT_STATUS_NO_RESOURCES);
}
goto unlock;
}
if (hdev->adv_monitors_cnt > prev_adv_monitors_cnt)
mgmt_adv_monitor_added(sk, hdev, m->handle);
hci_dev_unlock(hdev);
rp.monitor_handle = cpu_to_le16(m->handle);
return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
unlock:
hci_dev_unlock(hdev);
failed:
hci_free_adv_monitor(m);
return err;
}
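Each pattern is checked against the advertising-data length limit before being queued. A standalone sketch of that bound check follows; the 31-byte advertising data limit is stated here as an assumption for what HCI_MAX_AD_LENGTH expands to.

#include <stdbool.h>
#include <stdint.h>

#define EXAMPLE_MAX_AD_LENGTH 31        /* assumed value of HCI_MAX_AD_LENGTH */

struct example_adv_pattern {
        uint8_t ad_type;
        uint8_t offset;
        uint8_t length;
        uint8_t value[EXAMPLE_MAX_AD_LENGTH];
};

static bool example_pattern_in_bounds(const struct example_adv_pattern *p)
{
        /* Same three bounds add_adv_patterns_monitor() enforces */
        return p->offset < EXAMPLE_MAX_AD_LENGTH &&
               p->length <= EXAMPLE_MAX_AD_LENGTH &&
               p->offset + p->length <= EXAMPLE_MAX_AD_LENGTH;
}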
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
{
struct mgmt_cp_remove_adv_monitor *cp = data;
struct mgmt_rp_remove_adv_monitor rp;
unsigned int prev_adv_monitors_cnt;
u16 handle;
int err;
BT_DBG("request for %s", hdev->name);
hci_dev_lock(hdev);
handle = __le16_to_cpu(cp->monitor_handle);
prev_adv_monitors_cnt = hdev->adv_monitors_cnt;
err = hci_remove_adv_monitor(hdev, handle);
if (err == -ENOENT) {
err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
MGMT_STATUS_INVALID_INDEX);
goto unlock;
}
if (hdev->adv_monitors_cnt < prev_adv_monitors_cnt)
mgmt_adv_monitor_removed(sk, hdev, handle);
hci_dev_unlock(hdev);
rp.monitor_handle = cp->monitor_handle;
return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
unlock:
hci_dev_unlock(hdev);
return err;
}
static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status, static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
u16 opcode, struct sk_buff *skb) u16 opcode, struct sk_buff *skb)
{ {
@ -4147,7 +4635,7 @@ static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
*mgmt_status = mgmt_le_support(hdev); *mgmt_status = mgmt_le_support(hdev);
if (*mgmt_status) if (*mgmt_status)
return false; return false;
/* Intentional fall-through */ fallthrough;
case DISCOV_TYPE_BREDR: case DISCOV_TYPE_BREDR:
*mgmt_status = mgmt_bredr_support(hdev); *mgmt_status = mgmt_bredr_support(hdev);
if (*mgmt_status) if (*mgmt_status)
@ -4662,6 +5150,13 @@ static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
status); status);
/* Enabling the experimental LL Privacy support disables support for
* advertising.
*/
if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
MGMT_STATUS_NOT_SUPPORTED);
if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02) if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
MGMT_STATUS_INVALID_PARAMS); MGMT_STATUS_INVALID_PARAMS);
@ -4848,7 +5343,7 @@ static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
hci_req_init(&req, hdev); hci_req_init(&req, hdev);
hci_req_add_le_scan_disable(&req); hci_req_add_le_scan_disable(&req, false);
hci_req_add_le_passive_scan(&req); hci_req_add_le_passive_scan(&req);
hci_req_run(&req, NULL); hci_req_run(&req, NULL);
@ -5523,7 +6018,7 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
case MGMT_LTK_P256_DEBUG: case MGMT_LTK_P256_DEBUG:
authenticated = 0x00; authenticated = 0x00;
type = SMP_LTK_P256_DEBUG; type = SMP_LTK_P256_DEBUG;
/* fall through */ fallthrough;
default: default:
continue; continue;
} }
@ -5966,7 +6461,9 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
{ {
struct mgmt_cp_add_device *cp = data; struct mgmt_cp_add_device *cp = data;
u8 auto_conn, addr_type; u8 auto_conn, addr_type;
struct hci_conn_params *params;
int err; int err;
u32 current_flags = 0;
bt_dev_dbg(hdev, "sock %p", sk); bt_dev_dbg(hdev, "sock %p", sk);
@ -5993,8 +6490,9 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
goto unlock; goto unlock;
} }
err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
cp->addr.type);
err = hci_bdaddr_list_add_with_flags(&hdev->whitelist,
&cp->addr.bdaddr,
cp->addr.type, 0);
if (err) if (err)
goto unlock; goto unlock;
@ -6033,12 +6531,19 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
MGMT_STATUS_FAILED, &cp->addr, MGMT_STATUS_FAILED, &cp->addr,
sizeof(cp->addr)); sizeof(cp->addr));
goto unlock; goto unlock;
} else {
params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
addr_type);
if (params)
current_flags = params->current_flags;
} }
hci_update_background_scan(hdev); hci_update_background_scan(hdev);
added: added:
device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action); device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
SUPPORTED_DEVICE_FLAGS(), current_flags);
err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE, err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
MGMT_STATUS_SUCCESS, &cp->addr, MGMT_STATUS_SUCCESS, &cp->addr,
@ -6724,6 +7229,13 @@ static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES, return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
MGMT_STATUS_REJECTED); MGMT_STATUS_REJECTED);
/* Enabling the experimental LL Privacy support disables support for
* advertising.
*/
if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
MGMT_STATUS_NOT_SUPPORTED);
hci_dev_lock(hdev); hci_dev_lock(hdev);
rp_len = sizeof(*rp) + hdev->adv_instance_cnt; rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
@ -6927,6 +7439,13 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev,
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
status); status);
/* Enabling the experimental LL Privacy support disables support for
* advertising.
*/
if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
MGMT_STATUS_NOT_SUPPORTED);
if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES) if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
MGMT_STATUS_INVALID_PARAMS); MGMT_STATUS_INVALID_PARAMS);
@ -7091,6 +7610,13 @@ static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
bt_dev_dbg(hdev, "sock %p", sk); bt_dev_dbg(hdev, "sock %p", sk);
/* Enabling the experimental LL Privacy support disables support for
* advertising.
*/
if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
MGMT_STATUS_NOT_SUPPORTED);
hci_dev_lock(hdev); hci_dev_lock(hdev);
if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) { if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
@ -7116,6 +7642,12 @@ static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
hci_req_init(&req, hdev); hci_req_init(&req, hdev);
/* If we use extended advertising, instance is disabled and removed */
if (ext_adv_capable(hdev)) {
__hci_req_disable_ext_adv_instance(&req, cp->instance);
__hci_req_remove_ext_adv_instance(&req, cp->instance);
}
hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true); hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);
if (list_empty(&hdev->adv_instances)) if (list_empty(&hdev->adv_instances))
@ -7297,6 +7829,20 @@ static const struct hci_mgmt_handler mgmt_handlers[] = {
{ set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE, { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
HCI_MGMT_VAR_LEN | HCI_MGMT_VAR_LEN |
HCI_MGMT_HDEV_OPTIONAL }, HCI_MGMT_HDEV_OPTIONAL },
{ read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
HCI_MGMT_UNTRUSTED },
{ set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
HCI_MGMT_VAR_LEN },
{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
HCI_MGMT_UNTRUSTED },
{ set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
HCI_MGMT_VAR_LEN },
{ get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
{ set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
{ read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
HCI_MGMT_VAR_LEN },
{ remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
}; };
void mgmt_index_added(struct hci_dev *hdev) void mgmt_index_added(struct hci_dev *hdev)
@ -8216,8 +8762,11 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
if (!hci_discovery_active(hdev)) { if (!hci_discovery_active(hdev)) {
if (link_type == ACL_LINK) if (link_type == ACL_LINK)
return; return;
if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
return;
if (link_type == LE_LINK &&
list_empty(&hdev->pend_le_reports) &&
!hci_is_adv_monitoring(hdev)) {
return;
}
} }
if (hdev->discovery.result_filtering) { if (hdev->discovery.result_filtering) {

283 net/bluetooth/mgmt_config.c Normal file
View File

@ -0,0 +1,283 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020 Google Corporation
*/
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>
#include "mgmt_util.h"
#include "mgmt_config.h"
#define HDEV_PARAM_U16(_param_code_, _param_name_) \
{ \
{ cpu_to_le16(_param_code_), sizeof(__u16) }, \
{ cpu_to_le16(hdev->_param_name_) } \
}
#define HDEV_PARAM_U16_JIFFIES_TO_MSECS(_param_code_, _param_name_) \
{ \
{ cpu_to_le16(_param_code_), sizeof(__u16) }, \
{ cpu_to_le16(jiffies_to_msecs(hdev->_param_name_)) } \
}
int read_def_system_config(struct sock *sk, struct hci_dev *hdev, void *data,
u16 data_len)
{
struct {
struct mgmt_tlv entry;
union {
/* This is a simplification for now since all values
* are 16 bits. In the future, this code may need
* refactoring to account for variable length values
* and properly calculate the required buffer size.
*/
__le16 value;
};
} __packed params[] = {
/* Please see mgmt-api.txt for documentation of these values */
HDEV_PARAM_U16(0x0000, def_page_scan_type),
HDEV_PARAM_U16(0x0001, def_page_scan_int),
HDEV_PARAM_U16(0x0002, def_page_scan_window),
HDEV_PARAM_U16(0x0003, def_inq_scan_type),
HDEV_PARAM_U16(0x0004, def_inq_scan_int),
HDEV_PARAM_U16(0x0005, def_inq_scan_window),
HDEV_PARAM_U16(0x0006, def_br_lsto),
HDEV_PARAM_U16(0x0007, def_page_timeout),
HDEV_PARAM_U16(0x0008, sniff_min_interval),
HDEV_PARAM_U16(0x0009, sniff_max_interval),
HDEV_PARAM_U16(0x000a, le_adv_min_interval),
HDEV_PARAM_U16(0x000b, le_adv_max_interval),
HDEV_PARAM_U16(0x000c, def_multi_adv_rotation_duration),
HDEV_PARAM_U16(0x000d, le_scan_interval),
HDEV_PARAM_U16(0x000e, le_scan_window),
HDEV_PARAM_U16(0x000f, le_scan_int_suspend),
HDEV_PARAM_U16(0x0010, le_scan_window_suspend),
HDEV_PARAM_U16(0x0011, le_scan_int_discovery),
HDEV_PARAM_U16(0x0012, le_scan_window_discovery),
HDEV_PARAM_U16(0x0013, le_scan_int_adv_monitor),
HDEV_PARAM_U16(0x0014, le_scan_window_adv_monitor),
HDEV_PARAM_U16(0x0015, le_scan_int_connect),
HDEV_PARAM_U16(0x0016, le_scan_window_connect),
HDEV_PARAM_U16(0x0017, le_conn_min_interval),
HDEV_PARAM_U16(0x0018, le_conn_max_interval),
HDEV_PARAM_U16(0x0019, le_conn_latency),
HDEV_PARAM_U16(0x001a, le_supv_timeout),
HDEV_PARAM_U16_JIFFIES_TO_MSECS(0x001b,
def_le_autoconnect_timeout),
};
struct mgmt_rp_read_def_system_config *rp = (void *)params;
bt_dev_dbg(hdev, "sock %p", sk);
return mgmt_cmd_complete(sk, hdev->id,
MGMT_OP_READ_DEF_SYSTEM_CONFIG,
0, rp, sizeof(params));
}
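The table above is emitted as a run of TLVs: a little-endian 16-bit parameter type, an 8-bit length, then the value, with every current parameter being 16 bits wide. Below is a hedged userspace-style encoder for one such entry, inferred from the struct initializers here and from the parser in set_def_system_config(); the helper name is illustrative.

#include <stddef.h>
#include <stdint.h>

static size_t example_put_u16_tlv(uint8_t *buf, uint16_t type, uint16_t value)
{
        buf[0] = type & 0xff;           /* __le16 type  */
        buf[1] = type >> 8;
        buf[2] = sizeof(uint16_t);      /* __u8  length */
        buf[3] = value & 0xff;          /* __le16 value */
        buf[4] = value >> 8;
        return 5;
}

For example, example_put_u16_tlv(buf, 0x0017, 0x0018) would request an LE connection minimum interval of 0x0018 (30 ms in the spec's 1.25 ms units), matching parameter 0x0017 in the table above.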
#define TO_TLV(x) ((struct mgmt_tlv *)(x))
#define TLV_GET_LE16(tlv) le16_to_cpu(*((__le16 *)(TO_TLV(tlv)->value)))
int set_def_system_config(struct sock *sk, struct hci_dev *hdev, void *data,
u16 data_len)
{
u16 buffer_left = data_len;
u8 *buffer = data;
if (buffer_left < sizeof(struct mgmt_tlv)) {
return mgmt_cmd_status(sk, hdev->id,
MGMT_OP_SET_DEF_SYSTEM_CONFIG,
MGMT_STATUS_INVALID_PARAMS);
}
/* First pass to validate the tlv */
while (buffer_left >= sizeof(struct mgmt_tlv)) {
const u8 len = TO_TLV(buffer)->length;
const u16 exp_len = sizeof(struct mgmt_tlv) +
len;
const u16 type = le16_to_cpu(TO_TLV(buffer)->type);
if (buffer_left < exp_len) {
bt_dev_warn(hdev, "invalid len left %d, exp >= %d",
buffer_left, exp_len);
return mgmt_cmd_status(sk, hdev->id,
MGMT_OP_SET_DEF_SYSTEM_CONFIG,
MGMT_STATUS_INVALID_PARAMS);
}
/* Please see mgmt-api.txt for documentation of these values */
switch (type) {
case 0x0000:
case 0x0001:
case 0x0002:
case 0x0003:
case 0x0004:
case 0x0005:
case 0x0006:
case 0x0007:
case 0x0008:
case 0x0009:
case 0x000a:
case 0x000b:
case 0x000c:
case 0x000d:
case 0x000e:
case 0x000f:
case 0x0010:
case 0x0011:
case 0x0012:
case 0x0013:
case 0x0014:
case 0x0015:
case 0x0016:
case 0x0017:
case 0x0018:
case 0x0019:
case 0x001a:
case 0x001b:
if (len != sizeof(u16)) {
bt_dev_warn(hdev, "invalid length %d, exp %zu for type %d",
len, sizeof(u16), type);
return mgmt_cmd_status(sk, hdev->id,
MGMT_OP_SET_DEF_SYSTEM_CONFIG,
MGMT_STATUS_INVALID_PARAMS);
}
break;
default:
bt_dev_warn(hdev, "unsupported parameter %u", type);
break;
}
buffer_left -= exp_len;
buffer += exp_len;
}
buffer_left = data_len;
buffer = data;
while (buffer_left >= sizeof(struct mgmt_tlv)) {
const u8 len = TO_TLV(buffer)->length;
const u16 exp_len = sizeof(struct mgmt_tlv) +
len;
const u16 type = le16_to_cpu(TO_TLV(buffer)->type);
switch (type) {
case 0x0000:
hdev->def_page_scan_type = TLV_GET_LE16(buffer);
break;
case 0x0001:
hdev->def_page_scan_int = TLV_GET_LE16(buffer);
break;
case 0x0002:
hdev->def_page_scan_window = TLV_GET_LE16(buffer);
break;
case 0x0003:
hdev->def_inq_scan_type = TLV_GET_LE16(buffer);
break;
case 0x0004:
hdev->def_inq_scan_int = TLV_GET_LE16(buffer);
break;
case 0x0005:
hdev->def_inq_scan_window = TLV_GET_LE16(buffer);
break;
case 0x0006:
hdev->def_br_lsto = TLV_GET_LE16(buffer);
break;
case 0x0007:
hdev->def_page_timeout = TLV_GET_LE16(buffer);
break;
case 0x0008:
hdev->sniff_min_interval = TLV_GET_LE16(buffer);
break;
case 0x0009:
hdev->sniff_max_interval = TLV_GET_LE16(buffer);
break;
case 0x000a:
hdev->le_adv_min_interval = TLV_GET_LE16(buffer);
break;
case 0x000b:
hdev->le_adv_max_interval = TLV_GET_LE16(buffer);
break;
case 0x000c:
hdev->def_multi_adv_rotation_duration =
TLV_GET_LE16(buffer);
break;
case 0x000d:
hdev->le_scan_interval = TLV_GET_LE16(buffer);
break;
case 0x000e:
hdev->le_scan_window = TLV_GET_LE16(buffer);
break;
case 0x000f:
hdev->le_scan_int_suspend = TLV_GET_LE16(buffer);
break;
case 0x0010:
hdev->le_scan_window_suspend = TLV_GET_LE16(buffer);
break;
case 0x0011:
hdev->le_scan_int_discovery = TLV_GET_LE16(buffer);
break;
case 0x0012:
hdev->le_scan_window_discovery = TLV_GET_LE16(buffer);
break;
case 0x0013:
hdev->le_scan_int_adv_monitor = TLV_GET_LE16(buffer);
break;
case 0x0014:
hdev->le_scan_window_adv_monitor = TLV_GET_LE16(buffer);
break;
case 0x0015:
hdev->le_scan_int_connect = TLV_GET_LE16(buffer);
break;
case 0x0016:
hdev->le_scan_window_connect = TLV_GET_LE16(buffer);
break;
case 0x0017:
hdev->le_conn_min_interval = TLV_GET_LE16(buffer);
break;
case 0x0018:
hdev->le_conn_max_interval = TLV_GET_LE16(buffer);
break;
case 0x0019:
hdev->le_conn_latency = TLV_GET_LE16(buffer);
break;
case 0x001a:
hdev->le_supv_timeout = TLV_GET_LE16(buffer);
break;
case 0x001b:
hdev->def_le_autoconnect_timeout =
msecs_to_jiffies(TLV_GET_LE16(buffer));
break;
default:
bt_dev_warn(hdev, "unsupported parameter %u", type);
break;
}
buffer_left -= exp_len;
buffer += exp_len;
}
return mgmt_cmd_complete(sk, hdev->id,
MGMT_OP_SET_DEF_SYSTEM_CONFIG, 0, NULL, 0);
}
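The handler walks the buffer twice: the first loop rejects the whole command if any TLV is truncated or carries the wrong length, and only the second loop applies values, so a malformed trailing entry never leaves the settings half-applied. A toy version of the validation pass, assuming the 3-byte TLV header used above:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool example_validate_tlvs(const uint8_t *buf, size_t len)
{
        while (len >= 3) {                      /* le16 type + u8 length */
                uint8_t vlen = buf[2];

                if (len < 3u + vlen)
                        return false;           /* truncated TLV: reject all */
                buf += 3 + vlen;
                len -= 3 + vlen;
        }
        return true;
}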
int read_def_runtime_config(struct sock *sk, struct hci_dev *hdev, void *data,
u16 data_len)
{
bt_dev_dbg(hdev, "sock %p", sk);
return mgmt_cmd_complete(sk, hdev->id,
MGMT_OP_READ_DEF_RUNTIME_CONFIG, 0, NULL, 0);
}
int set_def_runtime_config(struct sock *sk, struct hci_dev *hdev, void *data,
u16 data_len)
{
bt_dev_dbg(hdev, "sock %p", sk);
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEF_RUNTIME_CONFIG,
MGMT_STATUS_INVALID_PARAMS);
}

View File

@ -0,0 +1,17 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020 Google Corporation
*/
int read_def_system_config(struct sock *sk, struct hci_dev *hdev, void *data,
u16 data_len);
int set_def_system_config(struct sock *sk, struct hci_dev *hdev, void *data,
u16 data_len);
int read_def_runtime_config(struct sock *sk, struct hci_dev *hdev, void *data,
u16 data_len);
int set_def_runtime_config(struct sock *sk, struct hci_dev *hdev, void *data,
u16 data_len);

View File

@ -139,3 +139,10 @@ void msft_vendor_evt(struct hci_dev *hdev, struct sk_buff *skb)
bt_dev_dbg(hdev, "MSFT vendor event %u", event); bt_dev_dbg(hdev, "MSFT vendor event %u", event);
} }
__u64 msft_get_features(struct hci_dev *hdev)
{
struct msft_data *msft = hdev->msft_data;
return msft ? msft->features : 0;
}

View File

@ -3,16 +3,25 @@
* Copyright (C) 2020 Google Corporation * Copyright (C) 2020 Google Corporation
*/ */
#define MSFT_FEATURE_MASK_BREDR_RSSI_MONITOR BIT(0)
#define MSFT_FEATURE_MASK_LE_CONN_RSSI_MONITOR BIT(1)
#define MSFT_FEATURE_MASK_LE_ADV_RSSI_MONITOR BIT(2)
#define MSFT_FEATURE_MASK_LE_ADV_MONITOR BIT(3)
#define MSFT_FEATURE_MASK_CURVE_VALIDITY BIT(4)
#define MSFT_FEATURE_MASK_CONCURRENT_ADV_MONITOR BIT(5)
#if IS_ENABLED(CONFIG_BT_MSFTEXT) #if IS_ENABLED(CONFIG_BT_MSFTEXT)
void msft_do_open(struct hci_dev *hdev); void msft_do_open(struct hci_dev *hdev);
void msft_do_close(struct hci_dev *hdev); void msft_do_close(struct hci_dev *hdev);
void msft_vendor_evt(struct hci_dev *hdev, struct sk_buff *skb); void msft_vendor_evt(struct hci_dev *hdev, struct sk_buff *skb);
__u64 msft_get_features(struct hci_dev *hdev);
#else #else
static inline void msft_do_open(struct hci_dev *hdev) {} static inline void msft_do_open(struct hci_dev *hdev) {}
static inline void msft_do_close(struct hci_dev *hdev) {} static inline void msft_do_close(struct hci_dev *hdev) {}
static inline void msft_vendor_evt(struct hci_dev *hdev, struct sk_buff *skb) {} static inline void msft_vendor_evt(struct hci_dev *hdev, struct sk_buff *skb) {}
static inline __u64 msft_get_features(struct hci_dev *hdev) { return 0; }
#endif #endif
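A small sketch of how a caller can gate behaviour on one of these feature bits, in the same way read_adv_mon_features() in mgmt.c does above; it works whether or not CONFIG_BT_MSFTEXT is enabled because the stub returns 0. The helper is illustrative and assumes the surrounding code already includes hci_core.h and msft.h.

static inline bool example_msft_can_monitor_adv(struct hci_dev *hdev)
{
        return msft_get_features(hdev) & MSFT_FEATURE_MASK_LE_ADV_MONITOR;
}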

View File

@ -479,7 +479,7 @@ static int __rfcomm_dlc_close(struct rfcomm_dlc *d, int err)
/* if closing a dlc in a session that hasn't been started, /* if closing a dlc in a session that hasn't been started,
* just close and unlink the dlc * just close and unlink the dlc
*/ */
/* fall through */ fallthrough;
default: default:
rfcomm_dlc_clear_timer(d); rfcomm_dlc_clear_timer(d);

View File

@ -218,7 +218,7 @@ static void __rfcomm_sock_close(struct sock *sk)
case BT_CONFIG: case BT_CONFIG:
case BT_CONNECTED: case BT_CONNECTED:
rfcomm_dlc_close(d, 0); rfcomm_dlc_close(d, 0);
/* fall through */ fallthrough;
default: default:
sock_set_flag(sk, SOCK_ZAPPED); sock_set_flag(sk, SOCK_ZAPPED);

View File

@ -66,6 +66,7 @@ struct sco_pinfo {
bdaddr_t dst; bdaddr_t dst;
__u32 flags; __u32 flags;
__u16 setting; __u16 setting;
__u8 cmsg_mask;
struct sco_conn *conn; struct sco_conn *conn;
}; };
@ -449,6 +450,15 @@ static void sco_sock_close(struct sock *sk)
sco_sock_kill(sk); sco_sock_kill(sk);
} }
static void sco_skb_put_cmsg(struct sk_buff *skb, struct msghdr *msg,
struct sock *sk)
{
if (sco_pi(sk)->cmsg_mask & SCO_CMSG_PKT_STATUS)
put_cmsg(msg, SOL_BLUETOOTH, BT_SCM_PKT_STATUS,
sizeof(bt_cb(skb)->sco.pkt_status),
&bt_cb(skb)->sco.pkt_status);
}
static void sco_sock_init(struct sock *sk, struct sock *parent) static void sco_sock_init(struct sock *sk, struct sock *parent)
{ {
BT_DBG("sk %p", sk); BT_DBG("sk %p", sk);
@ -457,6 +467,8 @@ static void sco_sock_init(struct sock *sk, struct sock *parent)
sk->sk_type = parent->sk_type; sk->sk_type = parent->sk_type;
bt_sk(sk)->flags = bt_sk(parent)->flags; bt_sk(sk)->flags = bt_sk(parent)->flags;
security_sk_clone(parent, sk); security_sk_clone(parent, sk);
} else {
bt_sk(sk)->skb_put_cmsg = sco_skb_put_cmsg;
} }
} }
@ -846,6 +858,18 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
sco_pi(sk)->setting = voice.setting; sco_pi(sk)->setting = voice.setting;
break; break;
case BT_PKT_STATUS:
if (get_user(opt, (u32 __user *)optval)) {
err = -EFAULT;
break;
}
if (opt)
sco_pi(sk)->cmsg_mask |= SCO_CMSG_PKT_STATUS;
else
sco_pi(sk)->cmsg_mask &= ~SCO_CMSG_PKT_STATUS;
break;
default: default:
err = -ENOPROTOOPT; err = -ENOPROTOOPT;
break; break;
@ -923,6 +947,7 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname,
int len, err = 0; int len, err = 0;
struct bt_voice voice; struct bt_voice voice;
u32 phys; u32 phys;
int pkt_status;
BT_DBG("sk %p", sk); BT_DBG("sk %p", sk);
@ -969,6 +994,13 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname,
err = -EFAULT; err = -EFAULT;
break; break;
case BT_PKT_STATUS:
pkt_status = (sco_pi(sk)->cmsg_mask & SCO_CMSG_PKT_STATUS);
if (put_user(pkt_status, (int __user *)optval))
err = -EFAULT;
break;
default: default:
err = -ENOPROTOOPT; err = -ENOPROTOOPT;
break; break;
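Putting the two halves together from a userspace point of view: enable BT_PKT_STATUS once on the SCO socket, then read the per-packet status from the BT_SCM_PKT_STATUS control message on each recvmsg(). This is a hedged sketch; it assumes the new constants are visible through your Bluetooth socket headers and that the status is a single byte, as suggested by sco_skb_put_cmsg() above.

#include <stdint.h>
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <bluetooth/bluetooth.h> /* SOL_BLUETOOTH, BT_PKT_STATUS, BT_SCM_PKT_STATUS */

static int example_enable_pkt_status(int sk)
{
        uint32_t opt = 1;

        return setsockopt(sk, SOL_BLUETOOTH, BT_PKT_STATUS, &opt, sizeof(opt));
}

static ssize_t example_recv_with_status(int sk, void *buf, size_t len,
                                        uint8_t *pkt_status)
{
        char control[64];
        struct iovec iov = { .iov_base = buf, .iov_len = len };
        struct msghdr msg = {
                .msg_iov = &iov, .msg_iovlen = 1,
                .msg_control = control, .msg_controllen = sizeof(control),
        };
        struct cmsghdr *cmsg;
        ssize_t ret = recvmsg(sk, &msg, 0);

        if (ret < 0)
                return ret;

        for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
                if (cmsg->cmsg_level == SOL_BLUETOOTH &&
                    cmsg->cmsg_type == BT_SCM_PKT_STATUS)
                        memcpy(pkt_status, CMSG_DATA(cmsg), sizeof(*pkt_status));
        }
        return ret;
}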

View File

@ -205,7 +205,7 @@ static int __init test_ecdh(void)
calltime = ktime_get(); calltime = ktime_get();
tfm = crypto_alloc_kpp("ecdh", CRYPTO_ALG_INTERNAL, 0); tfm = crypto_alloc_kpp("ecdh", 0, 0);
if (IS_ERR(tfm)) { if (IS_ERR(tfm)) {
BT_ERR("Unable to create ECDH crypto context"); BT_ERR("Unable to create ECDH crypto context");
err = PTR_ERR(tfm); err = PTR_ERR(tfm);

View File

@ -1387,7 +1387,7 @@ static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
goto zfree_smp; goto zfree_smp;
} }
smp->tfm_ecdh = crypto_alloc_kpp("ecdh", CRYPTO_ALG_INTERNAL, 0); smp->tfm_ecdh = crypto_alloc_kpp("ecdh", 0, 0);
if (IS_ERR(smp->tfm_ecdh)) { if (IS_ERR(smp->tfm_ecdh)) {
BT_ERR("Unable to create ECDH crypto context"); BT_ERR("Unable to create ECDH crypto context");
goto free_shash; goto free_shash;
@ -1654,7 +1654,7 @@ int smp_user_confirm_reply(struct hci_conn *hcon, u16 mgmt_op, __le32 passkey)
memset(smp->tk, 0, sizeof(smp->tk)); memset(smp->tk, 0, sizeof(smp->tk));
BT_DBG("PassKey: %d", value); BT_DBG("PassKey: %d", value);
put_unaligned_le32(value, smp->tk); put_unaligned_le32(value, smp->tk);
/* Fall Through */ fallthrough;
case MGMT_OP_USER_CONFIRM_REPLY: case MGMT_OP_USER_CONFIRM_REPLY:
set_bit(SMP_FLAG_TK_VALID, &smp->flags); set_bit(SMP_FLAG_TK_VALID, &smp->flags);
break; break;
@ -3282,7 +3282,7 @@ static struct l2cap_chan *smp_add_cid(struct hci_dev *hdev, u16 cid)
return ERR_CAST(tfm_cmac); return ERR_CAST(tfm_cmac);
} }
tfm_ecdh = crypto_alloc_kpp("ecdh", CRYPTO_ALG_INTERNAL, 0); tfm_ecdh = crypto_alloc_kpp("ecdh", 0, 0);
if (IS_ERR(tfm_ecdh)) { if (IS_ERR(tfm_ecdh)) {
BT_ERR("Unable to create ECDH crypto context"); BT_ERR("Unable to create ECDH crypto context");
crypto_free_shash(tfm_cmac); crypto_free_shash(tfm_cmac);
@ -3847,7 +3847,7 @@ int __init bt_selftest_smp(void)
return PTR_ERR(tfm_cmac); return PTR_ERR(tfm_cmac);
} }
tfm_ecdh = crypto_alloc_kpp("ecdh", CRYPTO_ALG_INTERNAL, 0); tfm_ecdh = crypto_alloc_kpp("ecdh", 0, 0);
if (IS_ERR(tfm_ecdh)) { if (IS_ERR(tfm_ecdh)) {
BT_ERR("Unable to create ECDH crypto context"); BT_ERR("Unable to create ECDH crypto context");
crypto_free_shash(tfm_cmac); crypto_free_shash(tfm_cmac);