Merge branch 'for-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next

Johan Hedberg says:

====================
pull request: bluetooth-next 2018-08-05

Here's the main bluetooth-next pull request for the 4.19 kernel.

 - Added support for Bluetooth Advertising Extensions
 - Added vendor driver support to hci_h5 HCI driver
 - Added serdev support to hci_h5 driver
 - Added support for Qualcomm wcn3990 controller
 - Added support for RTL8723BS and RTL8723DS controllers
 - btusb: Added new ID for Realtek 8723DE
 - Several other smaller fixes & cleanups

Please let me know if there are any issues pulling. Thanks.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committer: David S. Miller, 2018-08-05 17:29:27 -07:00
Commit: 6277547f33
29 changed files with 3247 additions and 585 deletions

Documentation/devicetree/bindings/net/qualcomm-bluetooth.txt

@@ -10,12 +10,25 @@ device the slave device is attached to.
 Required properties:
 - compatible: should contain one of the following:
   * "qcom,qca6174-bt"
+  * "qcom,wcn3990-bt"
+
+Optional properties for compatible string qcom,qca6174-bt:
 
-Optional properties:
 - enable-gpios: gpio specifier used to enable chip
 - clocks: clock provided to the controller (SUSCLK_32KHZ)
 
-Example:
+Required properties for compatible string qcom,wcn3990-bt:
+
+- vddio-supply: VDD_IO supply regulator handle.
+- vddxo-supply: VDD_XO supply regulator handle.
+- vddrf-supply: VDD_RF supply regulator handle.
+- vddch0-supply: VDD_CH0 supply regulator handle.
+
+Optional properties for compatible string qcom,wcn3990-bt:
+
+- max-speed: see Documentation/devicetree/bindings/serial/slave-device.txt
+
+Examples:
 
 serial@7570000 {
 	label = "BT-UART";
@@ -28,3 +41,15 @@ serial@7570000 {
 		clocks = <&divclk4>;
 	};
 };
+
+serial@898000 {
+	bluetooth {
+		compatible = "qcom,wcn3990-bt";
+		vddio-supply = <&vreg_s4a_1p8>;
+		vddxo-supply = <&vreg_l7a_1p8>;
+		vddrf-supply = <&vreg_l17a_1p3>;
+		vddch0-supply = <&vreg_l25a_3p3>;
+		max-speed = <3200000>;
+	};
+};

drivers/bluetooth/Kconfig

@@ -159,6 +159,7 @@ config BT_HCIUART_LL
 config BT_HCIUART_3WIRE
 	bool "Three-wire UART (H5) protocol support"
 	depends on BT_HCIUART
+	depends on BT_HCIUART_SERDEV
 	help
 	  The HCI Three-wire UART Transport Layer makes it possible to
 	  user the Bluetooth HCI over a serial port interface. The HCI

drivers/bluetooth/bfusb.c

@@ -490,7 +490,7 @@ static int bfusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
 	count = skb->len;
 
 	/* Max HCI frame size seems to be 1511 + 1 */
-	nskb = bt_skb_alloc(count + 32, GFP_ATOMIC);
+	nskb = bt_skb_alloc(count + 32, GFP_KERNEL);
 	if (!nskb) {
 		BT_ERR("Can't allocate memory for new packet");
 		return -ENOMEM;

drivers/bluetooth/bluecard_cs.c

@@ -565,7 +565,7 @@ static int bluecard_hci_set_baud_rate(struct hci_dev *hdev, int baud)
 	/* Ericsson baud rate command */
 	unsigned char cmd[] = { HCI_COMMAND_PKT, 0x09, 0xfc, 0x01, 0x03 };
 
-	skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
+	skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_KERNEL);
 	if (!skb) {
 		BT_ERR("Can't allocate mem for new packet");
 		return -1;

drivers/bluetooth/bpa10x.c

@@ -289,7 +289,7 @@ static int bpa10x_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
 
 	skb->dev = (void *) hdev;
 
-	urb = usb_alloc_urb(0, GFP_ATOMIC);
+	urb = usb_alloc_urb(0, GFP_KERNEL);
 	if (!urb)
 		return -ENOMEM;
 
@@ -298,7 +298,7 @@ static int bpa10x_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
 
 	switch (hci_skb_pkt_type(skb)) {
 	case HCI_COMMAND_PKT:
-		dr = kmalloc(sizeof(*dr), GFP_ATOMIC);
+		dr = kmalloc(sizeof(*dr), GFP_KERNEL);
 		if (!dr) {
 			usb_free_urb(urb);
 			return -ENOMEM;
@@ -343,7 +343,7 @@ static int bpa10x_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
 
 	usb_anchor_urb(urb, &data->tx_anchor);
 
-	err = usb_submit_urb(urb, GFP_ATOMIC);
+	err = usb_submit_urb(urb, GFP_KERNEL);
 	if (err < 0) {
 		bt_dev_err(hdev, "urb %p submission failed", urb);
 		kfree(urb->setup_packet);

drivers/bluetooth/btmrvl_sdio.c

@@ -718,7 +718,7 @@ static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv)
 	}
 
 	/* Allocate buffer */
-	skb = bt_skb_alloc(num_blocks * blksz + BTSDIO_DMA_ALIGN, GFP_ATOMIC);
+	skb = bt_skb_alloc(num_blocks * blksz + BTSDIO_DMA_ALIGN, GFP_KERNEL);
 	if (!skb) {
 		BT_ERR("No free skb");
 		ret = -ENOMEM;

drivers/bluetooth/btqca.c

@ -27,7 +27,7 @@
#define VERSION "0.1" #define VERSION "0.1"
static int rome_patch_ver_req(struct hci_dev *hdev, u32 *rome_version) int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version)
{ {
struct sk_buff *skb; struct sk_buff *skb;
struct edl_event_hdr *edl; struct edl_event_hdr *edl;
@ -35,36 +35,35 @@ static int rome_patch_ver_req(struct hci_dev *hdev, u32 *rome_version)
char cmd; char cmd;
int err = 0; int err = 0;
BT_DBG("%s: ROME Patch Version Request", hdev->name); bt_dev_dbg(hdev, "QCA Version Request");
cmd = EDL_PATCH_VER_REQ_CMD; cmd = EDL_PATCH_VER_REQ_CMD;
skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, EDL_PATCH_CMD_LEN, skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, EDL_PATCH_CMD_LEN,
&cmd, HCI_VENDOR_PKT, HCI_INIT_TIMEOUT); &cmd, HCI_VENDOR_PKT, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) { if (IS_ERR(skb)) {
err = PTR_ERR(skb); err = PTR_ERR(skb);
BT_ERR("%s: Failed to read version of ROME (%d)", hdev->name, bt_dev_err(hdev, "Reading QCA version information failed (%d)",
err); err);
return err; return err;
} }
if (skb->len != sizeof(*edl) + sizeof(*ver)) { if (skb->len != sizeof(*edl) + sizeof(*ver)) {
BT_ERR("%s: Version size mismatch len %d", hdev->name, bt_dev_err(hdev, "QCA Version size mismatch len %d", skb->len);
skb->len);
err = -EILSEQ; err = -EILSEQ;
goto out; goto out;
} }
edl = (struct edl_event_hdr *)(skb->data); edl = (struct edl_event_hdr *)(skb->data);
if (!edl) { if (!edl) {
BT_ERR("%s: TLV with no header", hdev->name); bt_dev_err(hdev, "QCA TLV with no header");
err = -EILSEQ; err = -EILSEQ;
goto out; goto out;
} }
if (edl->cresp != EDL_CMD_REQ_RES_EVT || if (edl->cresp != EDL_CMD_REQ_RES_EVT ||
edl->rtype != EDL_APP_VER_RES_EVT) { edl->rtype != EDL_APP_VER_RES_EVT) {
BT_ERR("%s: Wrong packet received %d %d", hdev->name, bt_dev_err(hdev, "QCA Wrong packet received %d %d", edl->cresp,
edl->cresp, edl->rtype); edl->rtype);
err = -EIO; err = -EIO;
goto out; goto out;
} }
@ -76,30 +75,35 @@ static int rome_patch_ver_req(struct hci_dev *hdev, u32 *rome_version)
BT_DBG("%s: ROM :0x%08x", hdev->name, le16_to_cpu(ver->rome_ver)); BT_DBG("%s: ROM :0x%08x", hdev->name, le16_to_cpu(ver->rome_ver));
BT_DBG("%s: SOC :0x%08x", hdev->name, le32_to_cpu(ver->soc_id)); BT_DBG("%s: SOC :0x%08x", hdev->name, le32_to_cpu(ver->soc_id));
/* ROME chipset version can be decided by patch and SoC /* QCA chipset version can be decided by patch and SoC
* version, combination with upper 2 bytes from SoC * version, combination with upper 2 bytes from SoC
* and lower 2 bytes from patch will be used. * and lower 2 bytes from patch will be used.
*/ */
*rome_version = (le32_to_cpu(ver->soc_id) << 16) | *soc_version = (le32_to_cpu(ver->soc_id) << 16) |
(le16_to_cpu(ver->rome_ver) & 0x0000ffff); (le16_to_cpu(ver->rome_ver) & 0x0000ffff);
if (*soc_version == 0)
err = -EILSEQ;
out: out:
kfree_skb(skb); kfree_skb(skb);
if (err)
bt_dev_err(hdev, "QCA Failed to get version (%d)", err);
return err; return err;
} }
EXPORT_SYMBOL_GPL(qca_read_soc_version);
static int rome_reset(struct hci_dev *hdev) static int qca_send_reset(struct hci_dev *hdev)
{ {
struct sk_buff *skb; struct sk_buff *skb;
int err; int err;
BT_DBG("%s: ROME HCI_RESET", hdev->name); bt_dev_dbg(hdev, "QCA HCI_RESET");
skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT); skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) { if (IS_ERR(skb)) {
err = PTR_ERR(skb); err = PTR_ERR(skb);
BT_ERR("%s: Reset failed (%d)", hdev->name, err); bt_dev_err(hdev, "QCA Reset failed (%d)", err);
return err; return err;
} }
@ -108,7 +112,7 @@ static int rome_reset(struct hci_dev *hdev)
return 0; return 0;
} }
static void rome_tlv_check_data(struct rome_config *config, static void qca_tlv_check_data(struct rome_config *config,
const struct firmware *fw) const struct firmware *fw)
{ {
const u8 *data; const u8 *data;
@ -207,7 +211,7 @@ static void rome_tlv_check_data(struct rome_config *config,
} }
} }
static int rome_tlv_send_segment(struct hci_dev *hdev, int seg_size, static int qca_tlv_send_segment(struct hci_dev *hdev, int seg_size,
const u8 *data, enum rome_tlv_dnld_mode mode) const u8 *data, enum rome_tlv_dnld_mode mode)
{ {
struct sk_buff *skb; struct sk_buff *skb;
@ -228,19 +232,19 @@ static int rome_tlv_send_segment(struct hci_dev *hdev, int seg_size,
HCI_VENDOR_PKT, HCI_INIT_TIMEOUT); HCI_VENDOR_PKT, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) { if (IS_ERR(skb)) {
err = PTR_ERR(skb); err = PTR_ERR(skb);
BT_ERR("%s: Failed to send TLV segment (%d)", hdev->name, err); bt_dev_err(hdev, "QCA Failed to send TLV segment (%d)", err);
return err; return err;
} }
if (skb->len != sizeof(*edl) + sizeof(*tlv_resp)) { if (skb->len != sizeof(*edl) + sizeof(*tlv_resp)) {
BT_ERR("%s: TLV response size mismatch", hdev->name); bt_dev_err(hdev, "QCA TLV response size mismatch");
err = -EILSEQ; err = -EILSEQ;
goto out; goto out;
} }
edl = (struct edl_event_hdr *)(skb->data); edl = (struct edl_event_hdr *)(skb->data);
if (!edl) { if (!edl) {
BT_ERR("%s: TLV with no header", hdev->name); bt_dev_err(hdev, "TLV with no header");
err = -EILSEQ; err = -EILSEQ;
goto out; goto out;
} }
@ -249,8 +253,8 @@ static int rome_tlv_send_segment(struct hci_dev *hdev, int seg_size,
if (edl->cresp != EDL_CMD_REQ_RES_EVT || if (edl->cresp != EDL_CMD_REQ_RES_EVT ||
edl->rtype != EDL_TVL_DNLD_RES_EVT || tlv_resp->result != 0x00) { edl->rtype != EDL_TVL_DNLD_RES_EVT || tlv_resp->result != 0x00) {
BT_ERR("%s: TLV with error stat 0x%x rtype 0x%x (0x%x)", bt_dev_err(hdev, "QCA TLV with error stat 0x%x rtype 0x%x (0x%x)",
hdev->name, edl->cresp, edl->rtype, tlv_resp->result); edl->cresp, edl->rtype, tlv_resp->result);
err = -EIO; err = -EIO;
} }
@ -260,23 +264,23 @@ out:
return err; return err;
} }
static int rome_download_firmware(struct hci_dev *hdev, static int qca_download_firmware(struct hci_dev *hdev,
struct rome_config *config) struct rome_config *config)
{ {
const struct firmware *fw; const struct firmware *fw;
const u8 *segment; const u8 *segment;
int ret, remain, i = 0; int ret, remain, i = 0;
bt_dev_info(hdev, "ROME Downloading %s", config->fwname); bt_dev_info(hdev, "QCA Downloading %s", config->fwname);
ret = request_firmware(&fw, config->fwname, &hdev->dev); ret = request_firmware(&fw, config->fwname, &hdev->dev);
if (ret) { if (ret) {
BT_ERR("%s: Failed to request file: %s (%d)", hdev->name, bt_dev_err(hdev, "QCA Failed to request file: %s (%d)",
config->fwname, ret); config->fwname, ret);
return ret; return ret;
} }
rome_tlv_check_data(config, fw); qca_tlv_check_data(config, fw);
segment = fw->data; segment = fw->data;
remain = fw->size; remain = fw->size;
@ -290,7 +294,7 @@ static int rome_download_firmware(struct hci_dev *hdev,
if (!remain || segsize < MAX_SIZE_PER_TLV_SEGMENT) if (!remain || segsize < MAX_SIZE_PER_TLV_SEGMENT)
config->dnld_mode = ROME_SKIP_EVT_NONE; config->dnld_mode = ROME_SKIP_EVT_NONE;
ret = rome_tlv_send_segment(hdev, segsize, segment, ret = qca_tlv_send_segment(hdev, segsize, segment,
config->dnld_mode); config->dnld_mode);
if (ret) if (ret)
break; break;
@ -317,8 +321,7 @@ int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr)
HCI_VENDOR_PKT, HCI_INIT_TIMEOUT); HCI_VENDOR_PKT, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) { if (IS_ERR(skb)) {
err = PTR_ERR(skb); err = PTR_ERR(skb);
BT_ERR("%s: Change address command failed (%d)", bt_dev_err(hdev, "QCA Change address command failed (%d)", err);
hdev->name, err);
return err; return err;
} }
@ -328,57 +331,65 @@ int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr)
} }
EXPORT_SYMBOL_GPL(qca_set_bdaddr_rome); EXPORT_SYMBOL_GPL(qca_set_bdaddr_rome);
int qca_uart_setup_rome(struct hci_dev *hdev, uint8_t baudrate) int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
enum qca_btsoc_type soc_type, u32 soc_ver)
{ {
u32 rome_ver = 0;
struct rome_config config; struct rome_config config;
int err; int err;
u8 rom_ver;
BT_DBG("%s: ROME setup on UART", hdev->name); bt_dev_dbg(hdev, "QCA setup on UART");
config.user_baud_rate = baudrate; config.user_baud_rate = baudrate;
/* Get ROME version information */
err = rome_patch_ver_req(hdev, &rome_ver);
if (err < 0 || rome_ver == 0) {
BT_ERR("%s: Failed to get version 0x%x", hdev->name, err);
return err;
}
bt_dev_info(hdev, "ROME controller version 0x%08x", rome_ver);
/* Download rampatch file */ /* Download rampatch file */
config.type = TLV_TYPE_PATCH; config.type = TLV_TYPE_PATCH;
snprintf(config.fwname, sizeof(config.fwname), "qca/rampatch_%08x.bin", if (soc_type == QCA_WCN3990) {
rome_ver); /* Firmware files to download are based on ROM version.
err = rome_download_firmware(hdev, &config); * ROM version is derived from last two bytes of soc_ver.
*/
rom_ver = ((soc_ver & 0x00000f00) >> 0x04) |
(soc_ver & 0x0000000f);
snprintf(config.fwname, sizeof(config.fwname),
"qca/crbtfw%02x.tlv", rom_ver);
} else {
snprintf(config.fwname, sizeof(config.fwname),
"qca/rampatch_%08x.bin", soc_ver);
}
err = qca_download_firmware(hdev, &config);
if (err < 0) { if (err < 0) {
BT_ERR("%s: Failed to download patch (%d)", hdev->name, err); bt_dev_err(hdev, "QCA Failed to download patch (%d)", err);
return err; return err;
} }
/* Download NVM configuration */ /* Download NVM configuration */
config.type = TLV_TYPE_NVM; config.type = TLV_TYPE_NVM;
snprintf(config.fwname, sizeof(config.fwname), "qca/nvm_%08x.bin", if (soc_type == QCA_WCN3990)
rome_ver); snprintf(config.fwname, sizeof(config.fwname),
err = rome_download_firmware(hdev, &config); "qca/crnv%02x.bin", rom_ver);
else
snprintf(config.fwname, sizeof(config.fwname),
"qca/nvm_%08x.bin", soc_ver);
err = qca_download_firmware(hdev, &config);
if (err < 0) { if (err < 0) {
BT_ERR("%s: Failed to download NVM (%d)", hdev->name, err); bt_dev_err(hdev, "QCA Failed to download NVM (%d)", err);
return err; return err;
} }
/* Perform HCI reset */ /* Perform HCI reset */
err = rome_reset(hdev); err = qca_send_reset(hdev);
if (err < 0) { if (err < 0) {
BT_ERR("%s: Failed to run HCI_RESET (%d)", hdev->name, err); bt_dev_err(hdev, "QCA Failed to run HCI_RESET (%d)", err);
return err; return err;
} }
bt_dev_info(hdev, "ROME setup on UART is completed"); bt_dev_info(hdev, "QCA setup on UART is completed");
return 0; return 0;
} }
EXPORT_SYMBOL_GPL(qca_uart_setup_rome); EXPORT_SYMBOL_GPL(qca_uart_setup);
MODULE_AUTHOR("Ben Young Tae Kim <ytkim@qca.qualcomm.com>"); MODULE_AUTHOR("Ben Young Tae Kim <ytkim@qca.qualcomm.com>");
MODULE_DESCRIPTION("Bluetooth support for Qualcomm Atheros family ver " VERSION); MODULE_DESCRIPTION("Bluetooth support for Qualcomm Atheros family ver " VERSION);
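The WCN3990 firmware-name selection above hinges on the rom_ver bit manipulation. Below is a small stand-alone sketch of that derivation, not part of the series itself; the soc_ver value is invented purely for illustration (the real one comes from qca_read_soc_version()).

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Invented example value; the upper 16 bits carry the SoC ID and the
	 * lower 16 bits the patch version, as assembled in
	 * qca_read_soc_version().
	 */
	uint32_t soc_ver = 0x40010214;

	/* Same derivation as in qca_uart_setup(): bits 11:8 and 3:0 of
	 * soc_ver packed into one byte.
	 */
	uint8_t rom_ver = ((soc_ver & 0x00000f00) >> 0x04) | (soc_ver & 0x0000000f);

	char fwname[64];

	/* For this soc_ver the rampatch file becomes "qca/crbtfw24.tlv" and
	 * the matching NVM file "qca/crnv24.bin".
	 */
	snprintf(fwname, sizeof(fwname), "qca/crbtfw%02x.tlv", rom_ver);
	printf("rom_ver=0x%02x fw=%s\n", rom_ver, fwname);
	return 0;
}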

drivers/bluetooth/btqca.h

@@ -37,6 +37,9 @@
 #define EDL_TAG_ID_HCI			(17)
 #define EDL_TAG_ID_DEEP_SLEEP		(27)
 
+#define QCA_WCN3990_POWERON_PULSE	0xFC
+#define QCA_WCN3990_POWEROFF_PULSE	0xC0
+
 enum qca_bardrate {
 	QCA_BAUDRATE_115200 = 0,
 	QCA_BAUDRATE_57600,
@@ -124,10 +127,19 @@ struct tlv_type_hdr {
 	__u8   data[0];
 } __packed;
 
+enum qca_btsoc_type {
+	QCA_INVALID = -1,
+	QCA_AR3002,
+	QCA_ROME,
+	QCA_WCN3990
+};
+
 #if IS_ENABLED(CONFIG_BT_QCA)
 
 int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr);
-int qca_uart_setup_rome(struct hci_dev *hdev, uint8_t baudrate);
+int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
+		   enum qca_btsoc_type soc_type, u32 soc_ver);
+int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version);
 
 #else
 
@@ -136,7 +148,13 @@ static inline int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdad
 	return -EOPNOTSUPP;
 }
 
-static inline int qca_uart_setup_rome(struct hci_dev *hdev, int speed)
+static inline int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
+				 enum qca_btsoc_type soc_type, u32 soc_ver)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version)
 {
 	return -EOPNOTSUPP;
 }
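To show how the widened interface is meant to be used, here is a hedged sketch of a caller for the WCN3990 case. It is not code from this series, example_qca_setup() is a hypothetical function name, and the baud-rate constant is only an example.

/* Hypothetical caller, e.g. a ->setup() hook in a serdev-based HCI driver. */
static int example_qca_setup(struct hci_dev *hdev)
{
	u32 soc_ver = 0;
	int err;

	/* Query the controller first; qca_uart_setup() needs the version to
	 * pick the crbtfw/crnv firmware names for QCA_WCN3990.
	 */
	err = qca_read_soc_version(hdev, &soc_ver);
	if (err < 0)
		return err;

	return qca_uart_setup(hdev, QCA_BAUDRATE_3000000, QCA_WCN3990, soc_ver);
}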

drivers/bluetooth/btrtl.c

@ -34,9 +34,12 @@
#define RTL_ROM_LMP_8821A 0x8821 #define RTL_ROM_LMP_8821A 0x8821
#define RTL_ROM_LMP_8761A 0x8761 #define RTL_ROM_LMP_8761A 0x8761
#define RTL_ROM_LMP_8822B 0x8822 #define RTL_ROM_LMP_8822B 0x8822
#define RTL_CONFIG_MAGIC 0x8723ab55
#define IC_MATCH_FL_LMPSUBV (1 << 0) #define IC_MATCH_FL_LMPSUBV (1 << 0)
#define IC_MATCH_FL_HCIREV (1 << 1) #define IC_MATCH_FL_HCIREV (1 << 1)
#define IC_MATCH_FL_HCIVER (1 << 2)
#define IC_MATCH_FL_HCIBUS (1 << 3)
#define IC_INFO(lmps, hcir) \ #define IC_INFO(lmps, hcir) \
.match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_HCIREV, \ .match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_HCIREV, \
.lmp_subver = (lmps), \ .lmp_subver = (lmps), \
@ -46,49 +49,130 @@ struct id_table {
__u16 match_flags; __u16 match_flags;
__u16 lmp_subver; __u16 lmp_subver;
__u16 hci_rev; __u16 hci_rev;
__u8 hci_ver;
__u8 hci_bus;
bool config_needed; bool config_needed;
bool has_rom_version;
char *fw_name; char *fw_name;
char *cfg_name; char *cfg_name;
}; };
struct btrtl_device_info {
const struct id_table *ic_info;
u8 rom_version;
u8 *fw_data;
int fw_len;
u8 *cfg_data;
int cfg_len;
};
static const struct id_table ic_id_table[] = { static const struct id_table ic_id_table[] = {
{ IC_MATCH_FL_LMPSUBV, RTL_ROM_LMP_8723A, 0x0,
.config_needed = false,
.has_rom_version = false,
.fw_name = "rtl_bt/rtl8723a_fw.bin",
.cfg_name = NULL },
{ IC_MATCH_FL_LMPSUBV, RTL_ROM_LMP_3499, 0x0,
.config_needed = false,
.has_rom_version = false,
.fw_name = "rtl_bt/rtl8723a_fw.bin",
.cfg_name = NULL },
/* 8723BS */
{ .match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_HCIREV |
IC_MATCH_FL_HCIVER | IC_MATCH_FL_HCIBUS,
.lmp_subver = RTL_ROM_LMP_8723B,
.hci_rev = 0xb,
.hci_ver = 6,
.hci_bus = HCI_UART,
.config_needed = true,
.has_rom_version = true,
.fw_name = "rtl_bt/rtl8723bs_fw.bin",
.cfg_name = "rtl_bt/rtl8723bs_config" },
/* 8723B */ /* 8723B */
{ IC_INFO(RTL_ROM_LMP_8723B, 0xb), { IC_INFO(RTL_ROM_LMP_8723B, 0xb),
.config_needed = false, .config_needed = false,
.has_rom_version = true,
.fw_name = "rtl_bt/rtl8723b_fw.bin", .fw_name = "rtl_bt/rtl8723b_fw.bin",
.cfg_name = "rtl_bt/rtl8723b_config.bin" }, .cfg_name = "rtl_bt/rtl8723b_config" },
/* 8723D */ /* 8723D */
{ IC_INFO(RTL_ROM_LMP_8723B, 0xd), { IC_INFO(RTL_ROM_LMP_8723B, 0xd),
.config_needed = true, .config_needed = true,
.has_rom_version = true,
.fw_name = "rtl_bt/rtl8723d_fw.bin", .fw_name = "rtl_bt/rtl8723d_fw.bin",
.cfg_name = "rtl_bt/rtl8723d_config.bin" }, .cfg_name = "rtl_bt/rtl8723d_config" },
/* 8723DS */
{ .match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_HCIREV |
IC_MATCH_FL_HCIVER | IC_MATCH_FL_HCIBUS,
.lmp_subver = RTL_ROM_LMP_8723B,
.hci_rev = 0xd,
.hci_ver = 8,
.hci_bus = HCI_UART,
.config_needed = true,
.has_rom_version = true,
.fw_name = "rtl_bt/rtl8723ds_fw.bin",
.cfg_name = "rtl_bt/rtl8723ds_config" },
/* 8821A */ /* 8821A */
{ IC_INFO(RTL_ROM_LMP_8821A, 0xa), { IC_INFO(RTL_ROM_LMP_8821A, 0xa),
.config_needed = false, .config_needed = false,
.has_rom_version = true,
.fw_name = "rtl_bt/rtl8821a_fw.bin", .fw_name = "rtl_bt/rtl8821a_fw.bin",
.cfg_name = "rtl_bt/rtl8821a_config.bin" }, .cfg_name = "rtl_bt/rtl8821a_config" },
/* 8821C */ /* 8821C */
{ IC_INFO(RTL_ROM_LMP_8821A, 0xc), { IC_INFO(RTL_ROM_LMP_8821A, 0xc),
.config_needed = false, .config_needed = false,
.has_rom_version = true,
.fw_name = "rtl_bt/rtl8821c_fw.bin", .fw_name = "rtl_bt/rtl8821c_fw.bin",
.cfg_name = "rtl_bt/rtl8821c_config.bin" }, .cfg_name = "rtl_bt/rtl8821c_config" },
/* 8761A */ /* 8761A */
{ IC_MATCH_FL_LMPSUBV, RTL_ROM_LMP_8761A, 0x0, { IC_MATCH_FL_LMPSUBV, RTL_ROM_LMP_8761A, 0x0,
.config_needed = false, .config_needed = false,
.has_rom_version = true,
.fw_name = "rtl_bt/rtl8761a_fw.bin", .fw_name = "rtl_bt/rtl8761a_fw.bin",
.cfg_name = "rtl_bt/rtl8761a_config.bin" }, .cfg_name = "rtl_bt/rtl8761a_config" },
/* 8822B */ /* 8822B */
{ IC_INFO(RTL_ROM_LMP_8822B, 0xb), { IC_INFO(RTL_ROM_LMP_8822B, 0xb),
.config_needed = true, .config_needed = true,
.has_rom_version = true,
.fw_name = "rtl_bt/rtl8822b_fw.bin", .fw_name = "rtl_bt/rtl8822b_fw.bin",
.cfg_name = "rtl_bt/rtl8822b_config.bin" }, .cfg_name = "rtl_bt/rtl8822b_config" },
}; };
static const struct id_table *btrtl_match_ic(u16 lmp_subver, u16 hci_rev,
u8 hci_ver, u8 hci_bus)
{
int i;
for (i = 0; i < ARRAY_SIZE(ic_id_table); i++) {
if ((ic_id_table[i].match_flags & IC_MATCH_FL_LMPSUBV) &&
(ic_id_table[i].lmp_subver != lmp_subver))
continue;
if ((ic_id_table[i].match_flags & IC_MATCH_FL_HCIREV) &&
(ic_id_table[i].hci_rev != hci_rev))
continue;
if ((ic_id_table[i].match_flags & IC_MATCH_FL_HCIVER) &&
(ic_id_table[i].hci_ver != hci_ver))
continue;
if ((ic_id_table[i].match_flags & IC_MATCH_FL_HCIBUS) &&
(ic_id_table[i].hci_bus != hci_bus))
continue;
break;
}
if (i >= ARRAY_SIZE(ic_id_table))
return NULL;
return &ic_id_table[i];
}
static int rtl_read_rom_version(struct hci_dev *hdev, u8 *version) static int rtl_read_rom_version(struct hci_dev *hdev, u8 *version)
{ {
struct rtl_rom_version_evt *rom_version; struct rtl_rom_version_evt *rom_version;
@ -97,20 +181,20 @@ static int rtl_read_rom_version(struct hci_dev *hdev, u8 *version)
/* Read RTL ROM version command */ /* Read RTL ROM version command */
skb = __hci_cmd_sync(hdev, 0xfc6d, 0, NULL, HCI_INIT_TIMEOUT); skb = __hci_cmd_sync(hdev, 0xfc6d, 0, NULL, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) { if (IS_ERR(skb)) {
BT_ERR("%s: Read ROM version failed (%ld)", rtl_dev_err(hdev, "Read ROM version failed (%ld)\n",
hdev->name, PTR_ERR(skb)); PTR_ERR(skb));
return PTR_ERR(skb); return PTR_ERR(skb);
} }
if (skb->len != sizeof(*rom_version)) { if (skb->len != sizeof(*rom_version)) {
BT_ERR("%s: RTL version event length mismatch", hdev->name); rtl_dev_err(hdev, "RTL version event length mismatch\n");
kfree_skb(skb); kfree_skb(skb);
return -EIO; return -EIO;
} }
rom_version = (struct rtl_rom_version_evt *)skb->data; rom_version = (struct rtl_rom_version_evt *)skb->data;
bt_dev_info(hdev, "rom_version status=%x version=%x", rtl_dev_info(hdev, "rom_version status=%x version=%x\n",
rom_version->status, rom_version->version); rom_version->status, rom_version->version);
*version = rom_version->version; *version = rom_version->version;
@ -118,16 +202,16 @@ static int rtl_read_rom_version(struct hci_dev *hdev, u8 *version)
return 0; return 0;
} }
static int rtlbt_parse_firmware(struct hci_dev *hdev, u16 lmp_subver, static int rtlbt_parse_firmware(struct hci_dev *hdev,
const struct firmware *fw, struct btrtl_device_info *btrtl_dev,
unsigned char **_buf) unsigned char **_buf)
{ {
const u8 extension_sig[] = { 0x51, 0x04, 0xfd, 0x77 }; const u8 extension_sig[] = { 0x51, 0x04, 0xfd, 0x77 };
struct rtl_epatch_header *epatch_info; struct rtl_epatch_header *epatch_info;
unsigned char *buf; unsigned char *buf;
int i, ret, len; int i, len;
size_t min_size; size_t min_size;
u8 opcode, length, data, rom_version = 0; u8 opcode, length, data;
int project_id = -1; int project_id = -1;
const unsigned char *fwptr, *chip_id_base; const unsigned char *fwptr, *chip_id_base;
const unsigned char *patch_length_base, *patch_offset_base; const unsigned char *patch_length_base, *patch_offset_base;
@ -146,17 +230,13 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
{ RTL_ROM_LMP_8821A, 10 }, /* 8821C */ { RTL_ROM_LMP_8821A, 10 }, /* 8821C */
}; };
ret = rtl_read_rom_version(hdev, &rom_version);
if (ret)
return ret;
min_size = sizeof(struct rtl_epatch_header) + sizeof(extension_sig) + 3; min_size = sizeof(struct rtl_epatch_header) + sizeof(extension_sig) + 3;
if (fw->size < min_size) if (btrtl_dev->fw_len < min_size)
return -EINVAL; return -EINVAL;
fwptr = fw->data + fw->size - sizeof(extension_sig); fwptr = btrtl_dev->fw_data + btrtl_dev->fw_len - sizeof(extension_sig);
if (memcmp(fwptr, extension_sig, sizeof(extension_sig)) != 0) { if (memcmp(fwptr, extension_sig, sizeof(extension_sig)) != 0) {
BT_ERR("%s: extension section signature mismatch", hdev->name); rtl_dev_err(hdev, "extension section signature mismatch\n");
return -EINVAL; return -EINVAL;
} }
@ -166,7 +246,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
* Once we have that, we double-check that that project_id is suitable * Once we have that, we double-check that that project_id is suitable
* for the hardware we are working with. * for the hardware we are working with.
*/ */
while (fwptr >= fw->data + (sizeof(struct rtl_epatch_header) + 3)) { while (fwptr >= btrtl_dev->fw_data + (sizeof(*epatch_info) + 3)) {
opcode = *--fwptr; opcode = *--fwptr;
length = *--fwptr; length = *--fwptr;
data = *--fwptr; data = *--fwptr;
@ -177,8 +257,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
break; break;
if (length == 0) { if (length == 0) {
BT_ERR("%s: found instruction with length 0", rtl_dev_err(hdev, "found instruction with length 0\n");
hdev->name);
return -EINVAL; return -EINVAL;
} }
@ -191,7 +270,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
} }
if (project_id < 0) { if (project_id < 0) {
BT_ERR("%s: failed to find version instruction", hdev->name); rtl_dev_err(hdev, "failed to find version instruction\n");
return -EINVAL; return -EINVAL;
} }
@ -202,19 +281,21 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
} }
if (i >= ARRAY_SIZE(project_id_to_lmp_subver)) { if (i >= ARRAY_SIZE(project_id_to_lmp_subver)) {
BT_ERR("%s: unknown project id %d", hdev->name, project_id); rtl_dev_err(hdev, "unknown project id %d\n", project_id);
return -EINVAL; return -EINVAL;
} }
if (lmp_subver != project_id_to_lmp_subver[i].lmp_subver) { if (btrtl_dev->ic_info->lmp_subver !=
BT_ERR("%s: firmware is for %x but this is a %x", hdev->name, project_id_to_lmp_subver[i].lmp_subver) {
project_id_to_lmp_subver[i].lmp_subver, lmp_subver); rtl_dev_err(hdev, "firmware is for %x but this is a %x\n",
project_id_to_lmp_subver[i].lmp_subver,
btrtl_dev->ic_info->lmp_subver);
return -EINVAL; return -EINVAL;
} }
epatch_info = (struct rtl_epatch_header *)fw->data; epatch_info = (struct rtl_epatch_header *)btrtl_dev->fw_data;
if (memcmp(epatch_info->signature, RTL_EPATCH_SIGNATURE, 8) != 0) { if (memcmp(epatch_info->signature, RTL_EPATCH_SIGNATURE, 8) != 0) {
BT_ERR("%s: bad EPATCH signature", hdev->name); rtl_dev_err(hdev, "bad EPATCH signature\n");
return -EINVAL; return -EINVAL;
} }
@ -229,16 +310,16 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
* Find the right patch for this chip. * Find the right patch for this chip.
*/ */
min_size += 8 * num_patches; min_size += 8 * num_patches;
if (fw->size < min_size) if (btrtl_dev->fw_len < min_size)
return -EINVAL; return -EINVAL;
chip_id_base = fw->data + sizeof(struct rtl_epatch_header); chip_id_base = btrtl_dev->fw_data + sizeof(struct rtl_epatch_header);
patch_length_base = chip_id_base + (sizeof(u16) * num_patches); patch_length_base = chip_id_base + (sizeof(u16) * num_patches);
patch_offset_base = patch_length_base + (sizeof(u16) * num_patches); patch_offset_base = patch_length_base + (sizeof(u16) * num_patches);
for (i = 0; i < num_patches; i++) { for (i = 0; i < num_patches; i++) {
u16 chip_id = get_unaligned_le16(chip_id_base + u16 chip_id = get_unaligned_le16(chip_id_base +
(i * sizeof(u16))); (i * sizeof(u16)));
if (chip_id == rom_version + 1) { if (chip_id == btrtl_dev->rom_version + 1) {
patch_length = get_unaligned_le16(patch_length_base + patch_length = get_unaligned_le16(patch_length_base +
(i * sizeof(u16))); (i * sizeof(u16)));
patch_offset = get_unaligned_le32(patch_offset_base + patch_offset = get_unaligned_le32(patch_offset_base +
@ -248,21 +329,22 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
} }
if (!patch_offset) { if (!patch_offset) {
BT_ERR("%s: didn't find patch for chip id %d", rtl_dev_err(hdev, "didn't find patch for chip id %d",
hdev->name, rom_version); btrtl_dev->rom_version);
return -EINVAL; return -EINVAL;
} }
BT_DBG("length=%x offset=%x index %d", patch_length, patch_offset, i); BT_DBG("length=%x offset=%x index %d", patch_length, patch_offset, i);
min_size = patch_offset + patch_length; min_size = patch_offset + patch_length;
if (fw->size < min_size) if (btrtl_dev->fw_len < min_size)
return -EINVAL; return -EINVAL;
/* Copy the firmware into a new buffer and write the version at /* Copy the firmware into a new buffer and write the version at
* the end. * the end.
*/ */
len = patch_length; len = patch_length;
buf = kmemdup(fw->data + patch_offset, patch_length, GFP_KERNEL); buf = kmemdup(btrtl_dev->fw_data + patch_offset, patch_length,
GFP_KERNEL);
if (!buf) if (!buf)
return -ENOMEM; return -ENOMEM;
@ -301,15 +383,14 @@ static int rtl_download_firmware(struct hci_dev *hdev,
skb = __hci_cmd_sync(hdev, 0xfc20, frag_len + 1, dl_cmd, skb = __hci_cmd_sync(hdev, 0xfc20, frag_len + 1, dl_cmd,
HCI_INIT_TIMEOUT); HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) { if (IS_ERR(skb)) {
BT_ERR("%s: download fw command failed (%ld)", rtl_dev_err(hdev, "download fw command failed (%ld)\n",
hdev->name, PTR_ERR(skb)); PTR_ERR(skb));
ret = -PTR_ERR(skb); ret = -PTR_ERR(skb);
goto out; goto out;
} }
if (skb->len != sizeof(struct rtl_download_response)) { if (skb->len != sizeof(struct rtl_download_response)) {
BT_ERR("%s: download fw event length mismatch", rtl_dev_err(hdev, "download fw event length mismatch\n");
hdev->name);
kfree_skb(skb); kfree_skb(skb);
ret = -EIO; ret = -EIO;
goto out; goto out;
@ -324,12 +405,12 @@ out:
return ret; return ret;
} }
static int rtl_load_config(struct hci_dev *hdev, const char *name, u8 **buff) static int rtl_load_file(struct hci_dev *hdev, const char *name, u8 **buff)
{ {
const struct firmware *fw; const struct firmware *fw;
int ret; int ret;
bt_dev_info(hdev, "rtl: loading %s", name); rtl_dev_info(hdev, "rtl: loading %s\n", name);
ret = request_firmware(&fw, name, &hdev->dev); ret = request_firmware(&fw, name, &hdev->dev);
if (ret < 0) if (ret < 0)
return ret; return ret;
@ -343,96 +424,37 @@ static int rtl_load_config(struct hci_dev *hdev, const char *name, u8 **buff)
return ret; return ret;
} }
static int btrtl_setup_rtl8723a(struct hci_dev *hdev) static int btrtl_setup_rtl8723a(struct hci_dev *hdev,
struct btrtl_device_info *btrtl_dev)
{ {
const struct firmware *fw; if (btrtl_dev->fw_len < 8)
int ret; return -EINVAL;
bt_dev_info(hdev, "rtl: loading rtl_bt/rtl8723a_fw.bin");
ret = request_firmware(&fw, "rtl_bt/rtl8723a_fw.bin", &hdev->dev);
if (ret < 0) {
BT_ERR("%s: Failed to load rtl_bt/rtl8723a_fw.bin", hdev->name);
return ret;
}
if (fw->size < 8) {
ret = -EINVAL;
goto out;
}
/* Check that the firmware doesn't have the epatch signature /* Check that the firmware doesn't have the epatch signature
* (which is only for RTL8723B and newer). * (which is only for RTL8723B and newer).
*/ */
if (!memcmp(fw->data, RTL_EPATCH_SIGNATURE, 8)) { if (!memcmp(btrtl_dev->fw_data, RTL_EPATCH_SIGNATURE, 8)) {
BT_ERR("%s: unexpected EPATCH signature!", hdev->name); rtl_dev_err(hdev, "unexpected EPATCH signature!\n");
ret = -EINVAL;
goto out;
}
ret = rtl_download_firmware(hdev, fw->data, fw->size);
out:
release_firmware(fw);
return ret;
}
static int btrtl_setup_rtl8723b(struct hci_dev *hdev, u16 hci_rev,
u16 lmp_subver)
{
unsigned char *fw_data = NULL;
const struct firmware *fw;
int ret;
int cfg_sz;
u8 *cfg_buff = NULL;
u8 *tbuff;
char *cfg_name = NULL;
char *fw_name = NULL;
int i;
for (i = 0; i < ARRAY_SIZE(ic_id_table); i++) {
if ((ic_id_table[i].match_flags & IC_MATCH_FL_LMPSUBV) &&
(ic_id_table[i].lmp_subver != lmp_subver))
continue;
if ((ic_id_table[i].match_flags & IC_MATCH_FL_HCIREV) &&
(ic_id_table[i].hci_rev != hci_rev))
continue;
break;
}
if (i >= ARRAY_SIZE(ic_id_table)) {
BT_ERR("%s: unknown IC info, lmp subver %04x, hci rev %04x",
hdev->name, lmp_subver, hci_rev);
return -EINVAL; return -EINVAL;
} }
cfg_name = ic_id_table[i].cfg_name; return rtl_download_firmware(hdev, btrtl_dev->fw_data,
btrtl_dev->fw_len);
}
if (cfg_name) { static int btrtl_setup_rtl8723b(struct hci_dev *hdev,
cfg_sz = rtl_load_config(hdev, cfg_name, &cfg_buff); struct btrtl_device_info *btrtl_dev)
if (cfg_sz < 0) { {
cfg_sz = 0; unsigned char *fw_data = NULL;
if (ic_id_table[i].config_needed) int ret;
BT_ERR("Necessary config file %s not found\n", u8 *tbuff;
cfg_name);
}
} else
cfg_sz = 0;
fw_name = ic_id_table[i].fw_name; ret = rtlbt_parse_firmware(hdev, btrtl_dev, &fw_data);
bt_dev_info(hdev, "rtl: loading %s", fw_name);
ret = request_firmware(&fw, fw_name, &hdev->dev);
if (ret < 0) {
BT_ERR("%s: Failed to load %s", hdev->name, fw_name);
goto err_req_fw;
}
ret = rtlbt_parse_firmware(hdev, lmp_subver, fw, &fw_data);
if (ret < 0) if (ret < 0)
goto out; goto out;
if (cfg_sz) { if (btrtl_dev->cfg_len > 0) {
tbuff = kzalloc(ret + cfg_sz, GFP_KERNEL); tbuff = kzalloc(ret + btrtl_dev->cfg_len, GFP_KERNEL);
if (!tbuff) { if (!tbuff) {
ret = -ENOMEM; ret = -ENOMEM;
goto out; goto out;
@ -441,22 +463,18 @@ static int btrtl_setup_rtl8723b(struct hci_dev *hdev, u16 hci_rev,
memcpy(tbuff, fw_data, ret); memcpy(tbuff, fw_data, ret);
kfree(fw_data); kfree(fw_data);
memcpy(tbuff + ret, cfg_buff, cfg_sz); memcpy(tbuff + ret, btrtl_dev->cfg_data, btrtl_dev->cfg_len);
ret += cfg_sz; ret += btrtl_dev->cfg_len;
fw_data = tbuff; fw_data = tbuff;
} }
bt_dev_info(hdev, "cfg_sz %d, total size %d", cfg_sz, ret); rtl_dev_info(hdev, "cfg_sz %d, total sz %d\n", btrtl_dev->cfg_len, ret);
ret = rtl_download_firmware(hdev, fw_data, ret); ret = rtl_download_firmware(hdev, fw_data, ret);
out: out:
release_firmware(fw);
kfree(fw_data); kfree(fw_data);
err_req_fw:
if (cfg_sz)
kfree(cfg_buff);
return ret; return ret;
} }
@ -467,14 +485,13 @@ static struct sk_buff *btrtl_read_local_version(struct hci_dev *hdev)
skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL, skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
HCI_INIT_TIMEOUT); HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) { if (IS_ERR(skb)) {
BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION failed (%ld)", rtl_dev_err(hdev, "HCI_OP_READ_LOCAL_VERSION failed (%ld)\n",
hdev->name, PTR_ERR(skb)); PTR_ERR(skb));
return skb; return skb;
} }
if (skb->len != sizeof(struct hci_rp_read_local_version)) { if (skb->len != sizeof(struct hci_rp_read_local_version)) {
BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION event length mismatch", rtl_dev_err(hdev, "HCI_OP_READ_LOCAL_VERSION event length mismatch\n");
hdev->name);
kfree_skb(skb); kfree_skb(skb);
return ERR_PTR(-EIO); return ERR_PTR(-EIO);
} }
@ -482,49 +499,264 @@ static struct sk_buff *btrtl_read_local_version(struct hci_dev *hdev)
return skb; return skb;
} }
int btrtl_setup_realtek(struct hci_dev *hdev) void btrtl_free(struct btrtl_device_info *btrtl_dev)
{ {
kfree(btrtl_dev->fw_data);
kfree(btrtl_dev->cfg_data);
kfree(btrtl_dev);
}
EXPORT_SYMBOL_GPL(btrtl_free);
struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev,
const char *postfix)
{
struct btrtl_device_info *btrtl_dev;
struct sk_buff *skb; struct sk_buff *skb;
struct hci_rp_read_local_version *resp; struct hci_rp_read_local_version *resp;
char cfg_name[40];
u16 hci_rev, lmp_subver; u16 hci_rev, lmp_subver;
u8 hci_ver;
int ret;
btrtl_dev = kzalloc(sizeof(*btrtl_dev), GFP_KERNEL);
if (!btrtl_dev) {
ret = -ENOMEM;
goto err_alloc;
}
skb = btrtl_read_local_version(hdev); skb = btrtl_read_local_version(hdev);
if (IS_ERR(skb)) if (IS_ERR(skb)) {
return -PTR_ERR(skb); ret = PTR_ERR(skb);
goto err_free;
}
resp = (struct hci_rp_read_local_version *)skb->data; resp = (struct hci_rp_read_local_version *)skb->data;
bt_dev_info(hdev, "rtl: examining hci_ver=%02x hci_rev=%04x " rtl_dev_info(hdev, "rtl: examining hci_ver=%02x hci_rev=%04x lmp_ver=%02x lmp_subver=%04x\n",
"lmp_ver=%02x lmp_subver=%04x", resp->hci_ver, resp->hci_rev,
resp->hci_ver, resp->hci_rev, resp->lmp_ver, resp->lmp_subver);
resp->lmp_ver, resp->lmp_subver);
hci_ver = resp->hci_ver;
hci_rev = le16_to_cpu(resp->hci_rev); hci_rev = le16_to_cpu(resp->hci_rev);
lmp_subver = le16_to_cpu(resp->lmp_subver); lmp_subver = le16_to_cpu(resp->lmp_subver);
kfree_skb(skb); kfree_skb(skb);
btrtl_dev->ic_info = btrtl_match_ic(lmp_subver, hci_rev, hci_ver,
hdev->bus);
if (!btrtl_dev->ic_info) {
rtl_dev_err(hdev, "rtl: unknown IC info, lmp subver %04x, hci rev %04x, hci ver %04x",
lmp_subver, hci_rev, hci_ver);
ret = -EINVAL;
goto err_free;
}
if (btrtl_dev->ic_info->has_rom_version) {
ret = rtl_read_rom_version(hdev, &btrtl_dev->rom_version);
if (ret)
goto err_free;
}
btrtl_dev->fw_len = rtl_load_file(hdev, btrtl_dev->ic_info->fw_name,
&btrtl_dev->fw_data);
if (btrtl_dev->fw_len < 0) {
rtl_dev_err(hdev, "firmware file %s not found\n",
btrtl_dev->ic_info->fw_name);
ret = btrtl_dev->fw_len;
goto err_free;
}
if (btrtl_dev->ic_info->cfg_name) {
if (postfix) {
snprintf(cfg_name, sizeof(cfg_name), "%s-%s.bin",
btrtl_dev->ic_info->cfg_name, postfix);
} else {
snprintf(cfg_name, sizeof(cfg_name), "%s.bin",
btrtl_dev->ic_info->cfg_name);
}
btrtl_dev->cfg_len = rtl_load_file(hdev, cfg_name,
&btrtl_dev->cfg_data);
if (btrtl_dev->ic_info->config_needed &&
btrtl_dev->cfg_len <= 0) {
rtl_dev_err(hdev, "mandatory config file %s not found\n",
btrtl_dev->ic_info->cfg_name);
ret = btrtl_dev->cfg_len;
goto err_free;
}
}
return btrtl_dev;
err_free:
btrtl_free(btrtl_dev);
err_alloc:
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(btrtl_initialize);
int btrtl_download_firmware(struct hci_dev *hdev,
struct btrtl_device_info *btrtl_dev)
{
/* Match a set of subver values that correspond to stock firmware, /* Match a set of subver values that correspond to stock firmware,
* which is not compatible with standard btusb. * which is not compatible with standard btusb.
* If matched, upload an alternative firmware that does conform to * If matched, upload an alternative firmware that does conform to
* standard btusb. Once that firmware is uploaded, the subver changes * standard btusb. Once that firmware is uploaded, the subver changes
* to a different value. * to a different value.
*/ */
switch (lmp_subver) { switch (btrtl_dev->ic_info->lmp_subver) {
case RTL_ROM_LMP_8723A: case RTL_ROM_LMP_8723A:
case RTL_ROM_LMP_3499: case RTL_ROM_LMP_3499:
return btrtl_setup_rtl8723a(hdev); return btrtl_setup_rtl8723a(hdev, btrtl_dev);
case RTL_ROM_LMP_8723B: case RTL_ROM_LMP_8723B:
case RTL_ROM_LMP_8821A: case RTL_ROM_LMP_8821A:
case RTL_ROM_LMP_8761A: case RTL_ROM_LMP_8761A:
case RTL_ROM_LMP_8822B: case RTL_ROM_LMP_8822B:
return btrtl_setup_rtl8723b(hdev, hci_rev, lmp_subver); return btrtl_setup_rtl8723b(hdev, btrtl_dev);
default: default:
bt_dev_info(hdev, "rtl: assuming no firmware upload needed"); rtl_dev_info(hdev, "rtl: assuming no firmware upload needed\n");
return 0; return 0;
} }
} }
EXPORT_SYMBOL_GPL(btrtl_download_firmware);
int btrtl_setup_realtek(struct hci_dev *hdev)
{
struct btrtl_device_info *btrtl_dev;
int ret;
btrtl_dev = btrtl_initialize(hdev, NULL);
if (IS_ERR(btrtl_dev))
return PTR_ERR(btrtl_dev);
ret = btrtl_download_firmware(hdev, btrtl_dev);
btrtl_free(btrtl_dev);
return ret;
}
EXPORT_SYMBOL_GPL(btrtl_setup_realtek); EXPORT_SYMBOL_GPL(btrtl_setup_realtek);
static unsigned int btrtl_convert_baudrate(u32 device_baudrate)
{
switch (device_baudrate) {
case 0x0252a00a:
return 230400;
case 0x05f75004:
return 921600;
case 0x00005004:
return 1000000;
case 0x04928002:
case 0x01128002:
return 1500000;
case 0x00005002:
return 2000000;
case 0x0000b001:
return 2500000;
case 0x04928001:
return 3000000;
case 0x052a6001:
return 3500000;
case 0x00005001:
return 4000000;
case 0x0252c014:
default:
return 115200;
}
}
int btrtl_get_uart_settings(struct hci_dev *hdev,
struct btrtl_device_info *btrtl_dev,
unsigned int *controller_baudrate,
u32 *device_baudrate, bool *flow_control)
{
struct rtl_vendor_config *config;
struct rtl_vendor_config_entry *entry;
int i, total_data_len;
bool found = false;
total_data_len = btrtl_dev->cfg_len - sizeof(*config);
if (total_data_len <= 0) {
rtl_dev_warn(hdev, "no config loaded\n");
return -EINVAL;
}
config = (struct rtl_vendor_config *)btrtl_dev->cfg_data;
if (le32_to_cpu(config->signature) != RTL_CONFIG_MAGIC) {
rtl_dev_err(hdev, "invalid config magic\n");
return -EINVAL;
}
if (total_data_len < le16_to_cpu(config->total_len)) {
rtl_dev_err(hdev, "config is too short\n");
return -EINVAL;
}
for (i = 0; i < total_data_len; ) {
entry = ((void *)config->entry) + i;
switch (le16_to_cpu(entry->offset)) {
case 0xc:
if (entry->len < sizeof(*device_baudrate)) {
rtl_dev_err(hdev, "invalid UART config entry\n");
return -EINVAL;
}
*device_baudrate = get_unaligned_le32(entry->data);
*controller_baudrate = btrtl_convert_baudrate(
*device_baudrate);
if (entry->len >= 13)
*flow_control = !!(entry->data[12] & BIT(2));
else
*flow_control = false;
found = true;
break;
default:
rtl_dev_dbg(hdev, "skipping config entry 0x%x (len %u)\n",
le16_to_cpu(entry->offset), entry->len);
break;
};
i += sizeof(*entry) + entry->len;
}
if (!found) {
rtl_dev_err(hdev, "no UART config entry found\n");
return -ENOENT;
}
rtl_dev_dbg(hdev, "device baudrate = 0x%08x\n", *device_baudrate);
rtl_dev_dbg(hdev, "controller baudrate = %u\n", *controller_baudrate);
rtl_dev_dbg(hdev, "flow control %d\n", *flow_control);
return 0;
}
EXPORT_SYMBOL_GPL(btrtl_get_uart_settings);
MODULE_AUTHOR("Daniel Drake <drake@endlessm.com>"); MODULE_AUTHOR("Daniel Drake <drake@endlessm.com>");
MODULE_DESCRIPTION("Bluetooth support for Realtek devices ver " VERSION); MODULE_DESCRIPTION("Bluetooth support for Realtek devices ver " VERSION);
MODULE_VERSION(VERSION); MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
MODULE_FIRMWARE("rtl_bt/rtl8723a_fw.bin");
MODULE_FIRMWARE("rtl_bt/rtl8723b_fw.bin");
MODULE_FIRMWARE("rtl_bt/rtl8723b_config.bin");
MODULE_FIRMWARE("rtl_bt/rtl8723bs_fw.bin");
MODULE_FIRMWARE("rtl_bt/rtl8723bs_config.bin");
MODULE_FIRMWARE("rtl_bt/rtl8723ds_fw.bin");
MODULE_FIRMWARE("rtl_bt/rtl8723ds_config.bin");
MODULE_FIRMWARE("rtl_bt/rtl8761a_fw.bin");
MODULE_FIRMWARE("rtl_bt/rtl8761a_config.bin");
MODULE_FIRMWARE("rtl_bt/rtl8821a_fw.bin");
MODULE_FIRMWARE("rtl_bt/rtl8821a_config.bin");
MODULE_FIRMWARE("rtl_bt/rtl8822b_fw.bin");
MODULE_FIRMWARE("rtl_bt/rtl8822b_config.bin");

drivers/bluetooth/btrtl.h

@@ -17,6 +17,13 @@
 
 #define RTL_FRAG_LEN		252
 
+#define rtl_dev_err(dev, fmt, ...) bt_dev_err(dev, "RTL: " fmt, ##__VA_ARGS__)
+#define rtl_dev_warn(dev, fmt, ...) bt_dev_warn(dev, "RTL: " fmt, ##__VA_ARGS__)
+#define rtl_dev_info(dev, fmt, ...) bt_dev_info(dev, "RTL: " fmt, ##__VA_ARGS__)
+#define rtl_dev_dbg(dev, fmt, ...) bt_dev_dbg(dev, "RTL: " fmt, ##__VA_ARGS__)
+
+struct btrtl_device_info;
+
 struct rtl_download_cmd {
 	__u8 index;
 	__u8 data[RTL_FRAG_LEN];
@@ -38,15 +45,61 @@ struct rtl_epatch_header
 	__le16 num_patches;
 } __packed;
 
+struct rtl_vendor_config_entry {
+	__le16 offset;
+	__u8 len;
+	__u8 data[0];
+} __packed;
+
+struct rtl_vendor_config {
+	__le32 signature;
+	__le16 total_len;
+	struct rtl_vendor_config_entry entry[0];
+} __packed;
+
 #if IS_ENABLED(CONFIG_BT_RTL)
 
+struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev,
+					   const char *postfix);
+void btrtl_free(struct btrtl_device_info *btrtl_dev);
+int btrtl_download_firmware(struct hci_dev *hdev,
+			    struct btrtl_device_info *btrtl_dev);
 int btrtl_setup_realtek(struct hci_dev *hdev);
+int btrtl_get_uart_settings(struct hci_dev *hdev,
+			    struct btrtl_device_info *btrtl_dev,
+			    unsigned int *controller_baudrate,
+			    u32 *device_baudrate, bool *flow_control);
 
 #else
 
+static inline struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev,
+							 const char *postfix)
+{
+	return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void btrtl_free(struct btrtl_device_info *btrtl_dev)
+{
+}
+
+static inline int btrtl_download_firmware(struct hci_dev *hdev,
+					  struct btrtl_device_info *btrtl_dev)
+{
+	return -EOPNOTSUPP;
+}
+
 static inline int btrtl_setup_realtek(struct hci_dev *hdev)
 {
 	return -EOPNOTSUPP;
 }
 
+static inline int btrtl_get_uart_settings(struct hci_dev *hdev,
+					  struct btrtl_device_info *btrtl_dev,
+					  unsigned int *controller_baudrate,
+					  u32 *device_baudrate,
+					  bool *flow_control)
+{
+	return -ENOENT;
+}
+
 #endif

drivers/bluetooth/btusb.c

@ -374,6 +374,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x7392, 0xa611), .driver_info = BTUSB_REALTEK }, { USB_DEVICE(0x7392, 0xa611), .driver_info = BTUSB_REALTEK },
/* Additional Realtek 8723DE Bluetooth devices */ /* Additional Realtek 8723DE Bluetooth devices */
{ USB_DEVICE(0x0bda, 0xb009), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x2ff8, 0xb011), .driver_info = BTUSB_REALTEK }, { USB_DEVICE(0x2ff8, 0xb011), .driver_info = BTUSB_REALTEK },
/* Additional Realtek 8821AE Bluetooth devices */ /* Additional Realtek 8821AE Bluetooth devices */
@ -509,9 +510,10 @@ static inline void btusb_free_frags(struct btusb_data *data)
static int btusb_recv_intr(struct btusb_data *data, void *buffer, int count) static int btusb_recv_intr(struct btusb_data *data, void *buffer, int count)
{ {
struct sk_buff *skb; struct sk_buff *skb;
unsigned long flags;
int err = 0; int err = 0;
spin_lock(&data->rxlock); spin_lock_irqsave(&data->rxlock, flags);
skb = data->evt_skb; skb = data->evt_skb;
while (count) { while (count) {
@ -556,7 +558,7 @@ static int btusb_recv_intr(struct btusb_data *data, void *buffer, int count)
} }
data->evt_skb = skb; data->evt_skb = skb;
spin_unlock(&data->rxlock); spin_unlock_irqrestore(&data->rxlock, flags);
return err; return err;
} }
@ -564,9 +566,10 @@ static int btusb_recv_intr(struct btusb_data *data, void *buffer, int count)
static int btusb_recv_bulk(struct btusb_data *data, void *buffer, int count) static int btusb_recv_bulk(struct btusb_data *data, void *buffer, int count)
{ {
struct sk_buff *skb; struct sk_buff *skb;
unsigned long flags;
int err = 0; int err = 0;
spin_lock(&data->rxlock); spin_lock_irqsave(&data->rxlock, flags);
skb = data->acl_skb; skb = data->acl_skb;
while (count) { while (count) {
@ -613,7 +616,7 @@ static int btusb_recv_bulk(struct btusb_data *data, void *buffer, int count)
} }
data->acl_skb = skb; data->acl_skb = skb;
spin_unlock(&data->rxlock); spin_unlock_irqrestore(&data->rxlock, flags);
return err; return err;
} }
@ -621,9 +624,10 @@ static int btusb_recv_bulk(struct btusb_data *data, void *buffer, int count)
static int btusb_recv_isoc(struct btusb_data *data, void *buffer, int count) static int btusb_recv_isoc(struct btusb_data *data, void *buffer, int count)
{ {
struct sk_buff *skb; struct sk_buff *skb;
unsigned long flags;
int err = 0; int err = 0;
spin_lock(&data->rxlock); spin_lock_irqsave(&data->rxlock, flags);
skb = data->sco_skb; skb = data->sco_skb;
while (count) { while (count) {
@ -668,7 +672,7 @@ static int btusb_recv_isoc(struct btusb_data *data, void *buffer, int count)
} }
data->sco_skb = skb; data->sco_skb = skb;
spin_unlock(&data->rxlock); spin_unlock_irqrestore(&data->rxlock, flags);
return err; return err;
} }
@ -1066,6 +1070,7 @@ static void btusb_tx_complete(struct urb *urb)
struct sk_buff *skb = urb->context; struct sk_buff *skb = urb->context;
struct hci_dev *hdev = (struct hci_dev *)skb->dev; struct hci_dev *hdev = (struct hci_dev *)skb->dev;
struct btusb_data *data = hci_get_drvdata(hdev); struct btusb_data *data = hci_get_drvdata(hdev);
unsigned long flags;
BT_DBG("%s urb %p status %d count %d", hdev->name, urb, urb->status, BT_DBG("%s urb %p status %d count %d", hdev->name, urb, urb->status,
urb->actual_length); urb->actual_length);
@ -1079,9 +1084,9 @@ static void btusb_tx_complete(struct urb *urb)
hdev->stat.err_tx++; hdev->stat.err_tx++;
done: done:
spin_lock(&data->txlock); spin_lock_irqsave(&data->txlock, flags);
data->tx_in_flight--; data->tx_in_flight--;
spin_unlock(&data->txlock); spin_unlock_irqrestore(&data->txlock, flags);
kfree(urb->setup_packet); kfree(urb->setup_packet);
@ -1593,13 +1598,13 @@ static const struct firmware *btusb_setup_intel_get_fw(struct hci_dev *hdev,
ret = request_firmware(&fw, fwname, &hdev->dev); ret = request_firmware(&fw, fwname, &hdev->dev);
if (ret < 0) { if (ret < 0) {
if (ret == -EINVAL) { if (ret == -EINVAL) {
BT_ERR("%s Intel firmware file request failed (%d)", bt_dev_err(hdev, "Intel firmware file request failed (%d)",
hdev->name, ret); ret);
return NULL; return NULL;
} }
BT_ERR("%s failed to open Intel firmware file: %s(%d)", bt_dev_err(hdev, "failed to open Intel firmware file: %s (%d)",
hdev->name, fwname, ret); fwname, ret);
/* If the correct firmware patch file is not found, use the /* If the correct firmware patch file is not found, use the
* default firmware patch file instead * default firmware patch file instead
@ -1607,8 +1612,8 @@ static const struct firmware *btusb_setup_intel_get_fw(struct hci_dev *hdev,
snprintf(fwname, sizeof(fwname), "intel/ibt-hw-%x.%x.bseq", snprintf(fwname, sizeof(fwname), "intel/ibt-hw-%x.%x.bseq",
ver->hw_platform, ver->hw_variant); ver->hw_platform, ver->hw_variant);
if (request_firmware(&fw, fwname, &hdev->dev) < 0) { if (request_firmware(&fw, fwname, &hdev->dev) < 0) {
BT_ERR("%s failed to open default Intel fw file: %s", bt_dev_err(hdev, "failed to open default fw file: %s",
hdev->name, fwname); fwname);
return NULL; return NULL;
} }
} }
@ -1637,7 +1642,7 @@ static int btusb_setup_intel_patching(struct hci_dev *hdev,
* process. * process.
*/ */
if (remain > HCI_COMMAND_HDR_SIZE && *fw_ptr[0] != 0x01) { if (remain > HCI_COMMAND_HDR_SIZE && *fw_ptr[0] != 0x01) {
BT_ERR("%s Intel fw corrupted: invalid cmd read", hdev->name); bt_dev_err(hdev, "Intel fw corrupted: invalid cmd read");
return -EINVAL; return -EINVAL;
} }
(*fw_ptr)++; (*fw_ptr)++;
@ -1651,7 +1656,7 @@ static int btusb_setup_intel_patching(struct hci_dev *hdev,
* of command parameter. If not, the firmware file is corrupted. * of command parameter. If not, the firmware file is corrupted.
*/ */
if (remain < cmd->plen) { if (remain < cmd->plen) {
BT_ERR("%s Intel fw corrupted: invalid cmd len", hdev->name); bt_dev_err(hdev, "Intel fw corrupted: invalid cmd len");
return -EFAULT; return -EFAULT;
} }
@ -1684,8 +1689,7 @@ static int btusb_setup_intel_patching(struct hci_dev *hdev,
remain -= sizeof(*evt); remain -= sizeof(*evt);
if (remain < evt->plen) { if (remain < evt->plen) {
BT_ERR("%s Intel fw corrupted: invalid evt len", bt_dev_err(hdev, "Intel fw corrupted: invalid evt len");
hdev->name);
return -EFAULT; return -EFAULT;
} }
@ -1699,15 +1703,15 @@ static int btusb_setup_intel_patching(struct hci_dev *hdev,
* file is corrupted. * file is corrupted.
*/ */
if (!evt || !evt_param || remain < 0) { if (!evt || !evt_param || remain < 0) {
BT_ERR("%s Intel fw corrupted: invalid evt read", hdev->name); bt_dev_err(hdev, "Intel fw corrupted: invalid evt read");
return -EFAULT; return -EFAULT;
} }
skb = __hci_cmd_sync_ev(hdev, le16_to_cpu(cmd->opcode), cmd->plen, skb = __hci_cmd_sync_ev(hdev, le16_to_cpu(cmd->opcode), cmd->plen,
cmd_param, evt->evt, HCI_INIT_TIMEOUT); cmd_param, evt->evt, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) { if (IS_ERR(skb)) {
BT_ERR("%s sending Intel patch command (0x%4.4x) failed (%ld)", bt_dev_err(hdev, "sending Intel patch command (0x%4.4x) failed (%ld)",
hdev->name, cmd->opcode, PTR_ERR(skb)); cmd->opcode, PTR_ERR(skb));
return PTR_ERR(skb); return PTR_ERR(skb);
} }
@ -1716,15 +1720,15 @@ static int btusb_setup_intel_patching(struct hci_dev *hdev,
* the contents of the event. * the contents of the event.
*/ */
if (skb->len != evt->plen) { if (skb->len != evt->plen) {
BT_ERR("%s mismatch event length (opcode 0x%4.4x)", hdev->name, bt_dev_err(hdev, "mismatch event length (opcode 0x%4.4x)",
le16_to_cpu(cmd->opcode)); le16_to_cpu(cmd->opcode));
kfree_skb(skb); kfree_skb(skb);
return -EFAULT; return -EFAULT;
} }
if (memcmp(skb->data, evt_param, evt->plen)) { if (memcmp(skb->data, evt_param, evt->plen)) {
BT_ERR("%s mismatch event parameter (opcode 0x%4.4x)", bt_dev_err(hdev, "mismatch event parameter (opcode 0x%4.4x)",
hdev->name, le16_to_cpu(cmd->opcode)); le16_to_cpu(cmd->opcode));
kfree_skb(skb); kfree_skb(skb);
return -EFAULT; return -EFAULT;
} }
@ -1753,8 +1757,8 @@ static int btusb_setup_intel(struct hci_dev *hdev)
*/ */
skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT); skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) { if (IS_ERR(skb)) {
BT_ERR("%s sending initial HCI reset command failed (%ld)", bt_dev_err(hdev, "sending initial HCI reset command failed (%ld)",
hdev->name, PTR_ERR(skb)); PTR_ERR(skb));
return PTR_ERR(skb); return PTR_ERR(skb);
} }
kfree_skb(skb); kfree_skb(skb);
@ -1890,7 +1894,7 @@ static int inject_cmd_complete(struct hci_dev *hdev, __u16 opcode)
struct hci_event_hdr *hdr; struct hci_event_hdr *hdr;
struct hci_ev_cmd_complete *evt; struct hci_ev_cmd_complete *evt;
skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_ATOMIC); skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_KERNEL);
if (!skb) if (!skb)
return -ENOMEM; return -ENOMEM;
@ -2084,8 +2088,8 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
* for now only accept this single value. * for now only accept this single value.
*/ */
if (ver.hw_platform != 0x37) { if (ver.hw_platform != 0x37) {
BT_ERR("%s: Unsupported Intel hardware platform (%u)", bt_dev_err(hdev, "Unsupported Intel hardware platform (%u)",
hdev->name, ver.hw_platform); ver.hw_platform);
return -EINVAL; return -EINVAL;
} }
@ -2104,8 +2108,8 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
case 0x14: /* QnJ, IcP */ case 0x14: /* QnJ, IcP */
break; break;
default: default:
BT_ERR("%s: Unsupported Intel hardware variant (%u)", bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)",
hdev->name, ver.hw_variant); ver.hw_variant);
return -EINVAL; return -EINVAL;
} }
@ -2134,8 +2138,8 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
* choice is to return an error and abort the device initialization. * choice is to return an error and abort the device initialization.
*/ */
if (ver.fw_variant != 0x06) { if (ver.fw_variant != 0x06) {
BT_ERR("%s: Unsupported Intel firmware variant (%u)", bt_dev_err(hdev, "Unsupported Intel firmware variant (%u)",
hdev->name, ver.fw_variant); ver.fw_variant);
return -ENODEV; return -ENODEV;
} }
@ -2151,8 +2155,8 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
* that this bootloader does not send them, then abort the setup. * that this bootloader does not send them, then abort the setup.
*/ */
if (params.limited_cce != 0x00) { if (params.limited_cce != 0x00) {
BT_ERR("%s: Unsupported Intel firmware loading method (%u)", bt_dev_err(hdev, "Unsupported Intel firmware loading method (%u)",
hdev->name, params.limited_cce); params.limited_cce);
return -EINVAL; return -EINVAL;
} }
@ -2202,14 +2206,13 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
le16_to_cpu(ver.fw_revision)); le16_to_cpu(ver.fw_revision));
break; break;
default: default:
BT_ERR("%s: Unsupported Intel firmware naming", hdev->name); bt_dev_err(hdev, "Unsupported Intel firmware naming");
return -EINVAL; return -EINVAL;
} }
err = request_firmware(&fw, fwname, &hdev->dev); err = request_firmware(&fw, fwname, &hdev->dev);
if (err < 0) { if (err < 0) {
BT_ERR("%s: Failed to load Intel firmware file (%d)", bt_dev_err(hdev, "Failed to load Intel firmware file (%d)", err);
hdev->name, err);
return err; return err;
} }
@ -2235,13 +2238,13 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
le16_to_cpu(ver.fw_revision)); le16_to_cpu(ver.fw_revision));
break; break;
default: default:
BT_ERR("%s: Unsupported Intel firmware naming", hdev->name); bt_dev_err(hdev, "Unsupported Intel firmware naming");
return -EINVAL; return -EINVAL;
} }
if (fw->size < 644) { if (fw->size < 644) {
BT_ERR("%s: Invalid size of firmware file (%zu)", bt_dev_err(hdev, "Invalid size of firmware file (%zu)",
hdev->name, fw->size); fw->size);
err = -EBADF; err = -EBADF;
goto done; goto done;
} }
@ -2272,18 +2275,18 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
TASK_INTERRUPTIBLE, TASK_INTERRUPTIBLE,
msecs_to_jiffies(5000)); msecs_to_jiffies(5000));
if (err == -EINTR) { if (err == -EINTR) {
BT_ERR("%s: Firmware loading interrupted", hdev->name); bt_dev_err(hdev, "Firmware loading interrupted");
goto done; goto done;
} }
if (err) { if (err) {
BT_ERR("%s: Firmware loading timeout", hdev->name); bt_dev_err(hdev, "Firmware loading timeout");
err = -ETIMEDOUT; err = -ETIMEDOUT;
goto done; goto done;
} }
if (test_bit(BTUSB_FIRMWARE_FAILED, &data->flags)) { if (test_bit(BTUSB_FIRMWARE_FAILED, &data->flags)) {
BT_ERR("%s: Firmware loading failed", hdev->name); bt_dev_err(hdev, "Firmware loading failed");
err = -ENOEXEC; err = -ENOEXEC;
goto done; goto done;
} }
@ -2322,12 +2325,12 @@ done:
msecs_to_jiffies(1000)); msecs_to_jiffies(1000));
if (err == -EINTR) { if (err == -EINTR) {
BT_ERR("%s: Device boot interrupted", hdev->name); bt_dev_err(hdev, "Device boot interrupted");
return -EINTR; return -EINTR;
} }
if (err) { if (err) {
BT_ERR("%s: Device boot timeout", hdev->name); bt_dev_err(hdev, "Device boot timeout");
return -ETIMEDOUT; return -ETIMEDOUT;
} }
@ -2364,6 +2367,22 @@ static int btusb_shutdown_intel(struct hci_dev *hdev)
struct sk_buff *skb; struct sk_buff *skb;
long ret; long ret;
/* In the shutdown sequence where Bluetooth is turned off followed
* by WiFi being turned off, turning WiFi back on causes issue with
* the RF calibration.
*
* To ensure that any RF activity has been stopped, issue HCI Reset
* command to clear all ongoing activity including advertising,
* scanning etc.
*/
skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
ret = PTR_ERR(skb);
bt_dev_err(hdev, "HCI reset during shutdown failed");
return ret;
}
kfree_skb(skb);
/* Some platforms have an issue with BT LED when the interface is /* Some platforms have an issue with BT LED when the interface is
* down or BT radio is turned off, which takes 5 seconds to BT LED * down or BT radio is turned off, which takes 5 seconds to BT LED
* goes off. This command turns off the BT LED immediately. * goes off. This command turns off the BT LED immediately.
@ -2371,8 +2390,7 @@ static int btusb_shutdown_intel(struct hci_dev *hdev)
skb = __hci_cmd_sync(hdev, 0xfc3f, 0, NULL, HCI_INIT_TIMEOUT); skb = __hci_cmd_sync(hdev, 0xfc3f, 0, NULL, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) { if (IS_ERR(skb)) {
ret = PTR_ERR(skb); ret = PTR_ERR(skb);
BT_ERR("%s: turning off Intel device LED failed (%ld)", bt_dev_err(hdev, "turning off Intel device LED failed");
hdev->name, ret);
return ret; return ret;
} }
kfree_skb(skb); kfree_skb(skb);


@ -21,13 +21,18 @@
* *
*/ */
#include <linux/kernel.h> #include <linux/acpi.h>
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/serdev.h>
#include <linux/skbuff.h> #include <linux/skbuff.h>
#include <net/bluetooth/bluetooth.h> #include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h> #include <net/bluetooth/hci_core.h>
#include "btrtl.h"
#include "hci_uart.h" #include "hci_uart.h"
#define HCI_3WIRE_ACK_PKT 0 #define HCI_3WIRE_ACK_PKT 0
@ -65,6 +70,9 @@ enum {
}; };
struct h5 { struct h5 {
/* Must be the first member, hci_serdev.c expects this. */
struct hci_uart serdev_hu;
struct sk_buff_head unack; /* Unack'ed packets queue */ struct sk_buff_head unack; /* Unack'ed packets queue */
struct sk_buff_head rel; /* Reliable packets queue */ struct sk_buff_head rel; /* Reliable packets queue */
struct sk_buff_head unrel; /* Unreliable packets queue */ struct sk_buff_head unrel; /* Unreliable packets queue */
@ -95,6 +103,19 @@ struct h5 {
H5_SLEEPING, H5_SLEEPING,
H5_WAKING_UP, H5_WAKING_UP,
} sleep; } sleep;
const struct h5_vnd *vnd;
const char *id;
struct gpio_desc *enable_gpio;
struct gpio_desc *device_wake_gpio;
};
struct h5_vnd {
int (*setup)(struct h5 *h5);
void (*open)(struct h5 *h5);
void (*close)(struct h5 *h5);
const struct acpi_gpio_mapping *acpi_gpio_map;
}; };
static void h5_reset_rx(struct h5 *h5); static void h5_reset_rx(struct h5 *h5);
@ -193,9 +214,13 @@ static int h5_open(struct hci_uart *hu)
BT_DBG("hu %p", hu); BT_DBG("hu %p", hu);
h5 = kzalloc(sizeof(*h5), GFP_KERNEL); if (hu->serdev) {
if (!h5) h5 = serdev_device_get_drvdata(hu->serdev);
return -ENOMEM; } else {
h5 = kzalloc(sizeof(*h5), GFP_KERNEL);
if (!h5)
return -ENOMEM;
}
hu->priv = h5; hu->priv = h5;
h5->hu = hu; h5->hu = hu;
@ -210,6 +235,9 @@ static int h5_open(struct hci_uart *hu)
h5->tx_win = H5_TX_WIN_MAX; h5->tx_win = H5_TX_WIN_MAX;
if (h5->vnd && h5->vnd->open)
h5->vnd->open(h5);
set_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags); set_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags);
/* Send initial sync request */ /* Send initial sync request */
@ -229,7 +257,21 @@ static int h5_close(struct hci_uart *hu)
skb_queue_purge(&h5->rel); skb_queue_purge(&h5->rel);
skb_queue_purge(&h5->unrel); skb_queue_purge(&h5->unrel);
kfree(h5); if (h5->vnd && h5->vnd->close)
h5->vnd->close(h5);
if (!hu->serdev)
kfree(h5);
return 0;
}
static int h5_setup(struct hci_uart *hu)
{
struct h5 *h5 = hu->priv;
if (h5->vnd && h5->vnd->setup)
return h5->vnd->setup(h5);
return 0; return 0;
} }
@ -744,18 +786,168 @@ static const struct hci_uart_proto h5p = {
.name = "Three-wire (H5)", .name = "Three-wire (H5)",
.open = h5_open, .open = h5_open,
.close = h5_close, .close = h5_close,
.setup = h5_setup,
.recv = h5_recv, .recv = h5_recv,
.enqueue = h5_enqueue, .enqueue = h5_enqueue,
.dequeue = h5_dequeue, .dequeue = h5_dequeue,
.flush = h5_flush, .flush = h5_flush,
}; };
static int h5_serdev_probe(struct serdev_device *serdev)
{
const struct acpi_device_id *match;
struct device *dev = &serdev->dev;
struct h5 *h5;
h5 = devm_kzalloc(dev, sizeof(*h5), GFP_KERNEL);
if (!h5)
return -ENOMEM;
set_bit(HCI_UART_RESET_ON_INIT, &h5->serdev_hu.flags);
h5->hu = &h5->serdev_hu;
h5->serdev_hu.serdev = serdev;
serdev_device_set_drvdata(serdev, h5);
if (has_acpi_companion(dev)) {
match = acpi_match_device(dev->driver->acpi_match_table, dev);
if (!match)
return -ENODEV;
h5->vnd = (const struct h5_vnd *)match->driver_data;
h5->id = (char *)match->id;
if (h5->vnd->acpi_gpio_map)
devm_acpi_dev_add_driver_gpios(dev,
h5->vnd->acpi_gpio_map);
}
h5->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
if (IS_ERR(h5->enable_gpio))
return PTR_ERR(h5->enable_gpio);
h5->device_wake_gpio = devm_gpiod_get_optional(dev, "device-wake",
GPIOD_OUT_LOW);
if (IS_ERR(h5->device_wake_gpio))
return PTR_ERR(h5->device_wake_gpio);
return hci_uart_register_device(&h5->serdev_hu, &h5p);
}
static void h5_serdev_remove(struct serdev_device *serdev)
{
struct h5 *h5 = serdev_device_get_drvdata(serdev);
hci_uart_unregister_device(&h5->serdev_hu);
}
static int h5_btrtl_setup(struct h5 *h5)
{
struct btrtl_device_info *btrtl_dev;
struct sk_buff *skb;
__le32 baudrate_data;
u32 device_baudrate;
unsigned int controller_baudrate;
bool flow_control;
int err;
btrtl_dev = btrtl_initialize(h5->hu->hdev, h5->id);
if (IS_ERR(btrtl_dev))
return PTR_ERR(btrtl_dev);
err = btrtl_get_uart_settings(h5->hu->hdev, btrtl_dev,
&controller_baudrate, &device_baudrate,
&flow_control);
if (err)
goto out_free;
baudrate_data = cpu_to_le32(device_baudrate);
skb = __hci_cmd_sync(h5->hu->hdev, 0xfc17, sizeof(baudrate_data),
&baudrate_data, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
rtl_dev_err(h5->hu->hdev, "set baud rate command failed\n");
err = PTR_ERR(skb);
goto out_free;
} else {
kfree_skb(skb);
}
/* Give the device some time to set up the new baudrate. */
usleep_range(10000, 20000);
serdev_device_set_baudrate(h5->hu->serdev, controller_baudrate);
serdev_device_set_flow_control(h5->hu->serdev, flow_control);
err = btrtl_download_firmware(h5->hu->hdev, btrtl_dev);
/* Give the device some time before the hci-core sends it a reset */
usleep_range(10000, 20000);
out_free:
btrtl_free(btrtl_dev);
return err;
}
static void h5_btrtl_open(struct h5 *h5)
{
/* Devices always start with these fixed parameters */
serdev_device_set_flow_control(h5->hu->serdev, false);
serdev_device_set_parity(h5->hu->serdev, SERDEV_PARITY_EVEN);
serdev_device_set_baudrate(h5->hu->serdev, 115200);
/* The controller needs up to 500ms to wakeup */
gpiod_set_value_cansleep(h5->enable_gpio, 1);
gpiod_set_value_cansleep(h5->device_wake_gpio, 1);
msleep(500);
}
static void h5_btrtl_close(struct h5 *h5)
{
gpiod_set_value_cansleep(h5->device_wake_gpio, 0);
gpiod_set_value_cansleep(h5->enable_gpio, 0);
}
static const struct acpi_gpio_params btrtl_device_wake_gpios = { 0, 0, false };
static const struct acpi_gpio_params btrtl_enable_gpios = { 1, 0, false };
static const struct acpi_gpio_params btrtl_host_wake_gpios = { 2, 0, false };
static const struct acpi_gpio_mapping acpi_btrtl_gpios[] = {
{ "device-wake-gpios", &btrtl_device_wake_gpios, 1 },
{ "enable-gpios", &btrtl_enable_gpios, 1 },
{ "host-wake-gpios", &btrtl_host_wake_gpios, 1 },
{},
};
static struct h5_vnd rtl_vnd = {
.setup = h5_btrtl_setup,
.open = h5_btrtl_open,
.close = h5_btrtl_close,
.acpi_gpio_map = acpi_btrtl_gpios,
};
#ifdef CONFIG_ACPI
static const struct acpi_device_id h5_acpi_match[] = {
{ "OBDA8723", (kernel_ulong_t)&rtl_vnd },
{ },
};
MODULE_DEVICE_TABLE(acpi, h5_acpi_match);
#endif
static struct serdev_device_driver h5_serdev_driver = {
.probe = h5_serdev_probe,
.remove = h5_serdev_remove,
.driver = {
.name = "hci_uart_h5",
.acpi_match_table = ACPI_PTR(h5_acpi_match),
},
};
int __init h5_init(void) int __init h5_init(void)
{ {
serdev_device_driver_register(&h5_serdev_driver);
return hci_uart_register_proto(&h5p); return hci_uart_register_proto(&h5p);
} }
int __exit h5_deinit(void) int __exit h5_deinit(void)
{ {
serdev_device_driver_unregister(&h5_serdev_driver);
return hci_uart_unregister_proto(&h5p); return hci_uart_unregister_proto(&h5p);
} }
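
The vendor hook introduced above (struct h5_vnd plus the ACPI match table) is what lets the Realtek-specific setup/open/close callbacks live alongside the generic H5 state machine. As a rough sketch only, a further ACPI-enumerated controller could reuse the same shape; the "EXMP0000" ID and the h5_exmp_* names below are invented for illustration and are not part of this patch:

static int h5_exmp_setup(struct h5 *h5)
{
	/* Vendor firmware download / baud-rate negotiation would go here,
	 * driven through h5->hu->hdev and h5->hu->serdev in the same way
	 * h5_btrtl_setup() does above.
	 */
	return 0;
}

static void h5_exmp_open(struct h5 *h5)
{
	/* Fixed link parameters before the first SYNC exchange. */
	serdev_device_set_flow_control(h5->hu->serdev, false);
	serdev_device_set_baudrate(h5->hu->serdev, 115200);
	gpiod_set_value_cansleep(h5->enable_gpio, 1);
}

static void h5_exmp_close(struct h5 *h5)
{
	gpiod_set_value_cansleep(h5->enable_gpio, 0);
}

static struct h5_vnd exmp_vnd = {
	.setup = h5_exmp_setup,
	.open  = h5_exmp_open,
	.close = h5_exmp_close,
};

/* ...with a matching entry appended to h5_acpi_match:
 *	{ "EXMP0000", (kernel_ulong_t)&exmp_vnd },
 */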


@ -458,7 +458,7 @@ static int inject_cmd_complete(struct hci_dev *hdev, __u16 opcode)
struct hci_event_hdr *hdr; struct hci_event_hdr *hdr;
struct hci_ev_cmd_complete *evt; struct hci_ev_cmd_complete *evt;
skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_ATOMIC); skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_KERNEL);
if (!skb) if (!skb)
return -ENOMEM; return -ENOMEM;


@ -5,7 +5,7 @@
* protocol extension to H4. * protocol extension to H4.
* *
* Copyright (C) 2007 Texas Instruments, Inc. * Copyright (C) 2007 Texas Instruments, Inc.
* Copyright (c) 2010, 2012 The Linux Foundation. All rights reserved. * Copyright (c) 2010, 2012, 2018 The Linux Foundation. All rights reserved.
* *
* Acknowledgements: * Acknowledgements:
* This file is based on hci_ll.c, which was... * This file is based on hci_ll.c, which was...
@ -31,9 +31,14 @@
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/clk.h> #include <linux/clk.h>
#include <linux/debugfs.h> #include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/gpio/consumer.h> #include <linux/gpio/consumer.h>
#include <linux/mod_devicetable.h> #include <linux/mod_devicetable.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/serdev.h> #include <linux/serdev.h>
#include <net/bluetooth/bluetooth.h> #include <net/bluetooth/bluetooth.h>
@ -119,12 +124,51 @@ struct qca_data {
u64 votes_off; u64 votes_off;
}; };
enum qca_speed_type {
QCA_INIT_SPEED = 1,
QCA_OPER_SPEED
};
/*
* Voltage regulator information required for configuring the
* QCA Bluetooth chipset
*/
struct qca_vreg {
const char *name;
unsigned int min_uV;
unsigned int max_uV;
unsigned int load_uA;
};
struct qca_vreg_data {
enum qca_btsoc_type soc_type;
struct qca_vreg *vregs;
size_t num_vregs;
};
/*
* Platform data for the QCA Bluetooth power driver.
*/
struct qca_power {
struct device *dev;
const struct qca_vreg_data *vreg_data;
struct regulator_bulk_data *vreg_bulk;
bool vregs_on;
};
struct qca_serdev { struct qca_serdev {
struct hci_uart serdev_hu; struct hci_uart serdev_hu;
struct gpio_desc *bt_en; struct gpio_desc *bt_en;
struct clk *susclk; struct clk *susclk;
enum qca_btsoc_type btsoc_type;
struct qca_power *bt_power;
u32 init_speed;
u32 oper_speed;
}; };
static int qca_power_setup(struct hci_uart *hu, bool on);
static void qca_power_shutdown(struct hci_dev *hdev);
static void __serial_clock_on(struct tty_struct *tty) static void __serial_clock_on(struct tty_struct *tty)
{ {
/* TODO: Some chipset requires to enable UART clock on client /* TODO: Some chipset requires to enable UART clock on client
@ -402,10 +446,11 @@ static int qca_open(struct hci_uart *hu)
{ {
struct qca_serdev *qcadev; struct qca_serdev *qcadev;
struct qca_data *qca; struct qca_data *qca;
int ret;
BT_DBG("hu %p qca_open", hu); BT_DBG("hu %p qca_open", hu);
qca = kzalloc(sizeof(struct qca_data), GFP_ATOMIC); qca = kzalloc(sizeof(struct qca_data), GFP_KERNEL);
if (!qca) if (!qca)
return -ENOMEM; return -ENOMEM;
@ -453,19 +498,32 @@ static int qca_open(struct hci_uart *hu)
hu->priv = qca; hu->priv = qca;
if (hu->serdev) {
serdev_device_open(hu->serdev);
qcadev = serdev_device_get_drvdata(hu->serdev);
if (qcadev->btsoc_type != QCA_WCN3990) {
gpiod_set_value_cansleep(qcadev->bt_en, 1);
} else {
hu->init_speed = qcadev->init_speed;
hu->oper_speed = qcadev->oper_speed;
ret = qca_power_setup(hu, true);
if (ret) {
destroy_workqueue(qca->workqueue);
kfree_skb(qca->rx_skb);
hu->priv = NULL;
kfree(qca);
return ret;
}
}
}
timer_setup(&qca->wake_retrans_timer, hci_ibs_wake_retrans_timeout, 0); timer_setup(&qca->wake_retrans_timer, hci_ibs_wake_retrans_timeout, 0);
qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS; qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS;
timer_setup(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, 0); timer_setup(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, 0);
qca->tx_idle_delay = IBS_TX_IDLE_TIMEOUT_MS; qca->tx_idle_delay = IBS_TX_IDLE_TIMEOUT_MS;
if (hu->serdev) {
serdev_device_open(hu->serdev);
qcadev = serdev_device_get_drvdata(hu->serdev);
gpiod_set_value_cansleep(qcadev->bt_en, 1);
}
BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u", BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u",
qca->tx_idle_delay, qca->wake_retrans); qca->tx_idle_delay, qca->wake_retrans);
@ -549,10 +607,13 @@ static int qca_close(struct hci_uart *hu)
qca->hu = NULL; qca->hu = NULL;
if (hu->serdev) { if (hu->serdev) {
serdev_device_close(hu->serdev);
qcadev = serdev_device_get_drvdata(hu->serdev); qcadev = serdev_device_get_drvdata(hu->serdev);
gpiod_set_value_cansleep(qcadev->bt_en, 0); if (qcadev->btsoc_type == QCA_WCN3990)
qca_power_shutdown(hu->hdev);
else
gpiod_set_value_cansleep(qcadev->bt_en, 0);
serdev_device_close(hu->serdev);
} }
kfree_skb(qca->rx_skb); kfree_skb(qca->rx_skb);
@ -872,6 +933,8 @@ static uint8_t qca_get_baudrate_value(int speed)
return QCA_BAUDRATE_2000000; return QCA_BAUDRATE_2000000;
case 3000000: case 3000000:
return QCA_BAUDRATE_3000000; return QCA_BAUDRATE_3000000;
case 3200000:
return QCA_BAUDRATE_3200000;
case 3500000: case 3500000:
return QCA_BAUDRATE_3500000; return QCA_BAUDRATE_3500000;
default: default:
@ -884,19 +947,27 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
struct hci_uart *hu = hci_get_drvdata(hdev); struct hci_uart *hu = hci_get_drvdata(hdev);
struct qca_data *qca = hu->priv; struct qca_data *qca = hu->priv;
struct sk_buff *skb; struct sk_buff *skb;
struct qca_serdev *qcadev;
u8 cmd[] = { 0x01, 0x48, 0xFC, 0x01, 0x00 }; u8 cmd[] = { 0x01, 0x48, 0xFC, 0x01, 0x00 };
if (baudrate > QCA_BAUDRATE_3000000) if (baudrate > QCA_BAUDRATE_3200000)
return -EINVAL; return -EINVAL;
cmd[4] = baudrate; cmd[4] = baudrate;
skb = bt_skb_alloc(sizeof(cmd), GFP_ATOMIC); skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL);
if (!skb) { if (!skb) {
bt_dev_err(hdev, "Failed to allocate baudrate packet"); bt_dev_err(hdev, "Failed to allocate baudrate packet");
return -ENOMEM; return -ENOMEM;
} }
/* Disabling hardware flow control is mandatory while
* sending change baudrate request to wcn3990 SoC.
*/
qcadev = serdev_device_get_drvdata(hu->serdev);
if (qcadev->btsoc_type == QCA_WCN3990)
hci_uart_set_flow_control(hu, true);
/* Assign commands to change baudrate and packet type. */ /* Assign commands to change baudrate and packet type. */
skb_put_data(skb, cmd, sizeof(cmd)); skb_put_data(skb, cmd, sizeof(cmd));
hci_skb_pkt_type(skb) = HCI_COMMAND_PKT; hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
@ -912,6 +983,9 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
schedule_timeout(msecs_to_jiffies(BAUDRATE_SETTLE_TIMEOUT_MS)); schedule_timeout(msecs_to_jiffies(BAUDRATE_SETTLE_TIMEOUT_MS));
set_current_state(TASK_RUNNING); set_current_state(TASK_RUNNING);
if (qcadev->btsoc_type == QCA_WCN3990)
hci_uart_set_flow_control(hu, false);
return 0; return 0;
} }
@ -923,50 +997,195 @@ static inline void host_set_baudrate(struct hci_uart *hu, unsigned int speed)
hci_uart_set_baudrate(hu, speed); hci_uart_set_baudrate(hu, speed);
} }
static int qca_send_power_pulse(struct hci_dev *hdev, u8 cmd)
{
struct hci_uart *hu = hci_get_drvdata(hdev);
struct qca_data *qca = hu->priv;
struct sk_buff *skb;
/* These power pulses are single byte command which are sent
* at required baudrate to wcn3990. On wcn3990, we have an external
* circuit at Tx pin which decodes the pulse sent at specific baudrate.
* For example, wcn3990 supports RF COEX antenna for both Wi-Fi/BT
* and also we use the same power inputs to turn on and off for
* Wi-Fi/BT. Powering up the power sources will not enable BT, until
* we send a power on pulse at 115200 bps. This algorithm will help to
* save power. Disabling hardware flow control is mandatory while
* sending power pulses to SoC.
*/
bt_dev_dbg(hdev, "sending power pulse %02x to SoC", cmd);
skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL);
if (!skb)
return -ENOMEM;
hci_uart_set_flow_control(hu, true);
skb_put_u8(skb, cmd);
hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
skb_queue_tail(&qca->txq, skb);
hci_uart_tx_wakeup(hu);
/* Wait for 100 uS for SoC to settle down */
usleep_range(100, 200);
hci_uart_set_flow_control(hu, false);
return 0;
}
static unsigned int qca_get_speed(struct hci_uart *hu,
enum qca_speed_type speed_type)
{
unsigned int speed = 0;
if (speed_type == QCA_INIT_SPEED) {
if (hu->init_speed)
speed = hu->init_speed;
else if (hu->proto->init_speed)
speed = hu->proto->init_speed;
} else {
if (hu->oper_speed)
speed = hu->oper_speed;
else if (hu->proto->oper_speed)
speed = hu->proto->oper_speed;
}
return speed;
}
static int qca_check_speeds(struct hci_uart *hu)
{
struct qca_serdev *qcadev;
qcadev = serdev_device_get_drvdata(hu->serdev);
if (qcadev->btsoc_type == QCA_WCN3990) {
if (!qca_get_speed(hu, QCA_INIT_SPEED) &&
!qca_get_speed(hu, QCA_OPER_SPEED))
return -EINVAL;
} else {
if (!qca_get_speed(hu, QCA_INIT_SPEED) ||
!qca_get_speed(hu, QCA_OPER_SPEED))
return -EINVAL;
}
return 0;
}
static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type)
{
unsigned int speed, qca_baudrate;
int ret;
if (speed_type == QCA_INIT_SPEED) {
speed = qca_get_speed(hu, QCA_INIT_SPEED);
if (speed)
host_set_baudrate(hu, speed);
} else {
speed = qca_get_speed(hu, QCA_OPER_SPEED);
if (!speed)
return 0;
qca_baudrate = qca_get_baudrate_value(speed);
bt_dev_dbg(hu->hdev, "Set UART speed to %d", speed);
ret = qca_set_baudrate(hu->hdev, qca_baudrate);
if (ret)
return ret;
host_set_baudrate(hu, speed);
}
return 0;
}
static int qca_wcn3990_init(struct hci_uart *hu)
{
struct hci_dev *hdev = hu->hdev;
int ret;
/* Forcefully enable wcn3990 to enter in to boot mode. */
host_set_baudrate(hu, 2400);
ret = qca_send_power_pulse(hdev, QCA_WCN3990_POWEROFF_PULSE);
if (ret)
return ret;
qca_set_speed(hu, QCA_INIT_SPEED);
ret = qca_send_power_pulse(hdev, QCA_WCN3990_POWERON_PULSE);
if (ret)
return ret;
/* Wait for 100 ms for SoC to boot */
msleep(100);
/* Now the device is in ready state to communicate with host.
* To sync host with device we need to reopen port.
* Without this, we will have RTS and CTS synchronization
* issues.
*/
serdev_device_close(hu->serdev);
ret = serdev_device_open(hu->serdev);
if (ret) {
bt_dev_err(hu->hdev, "failed to open port");
return ret;
}
hci_uart_set_flow_control(hu, false);
return 0;
}
static int qca_setup(struct hci_uart *hu) static int qca_setup(struct hci_uart *hu)
{ {
struct hci_dev *hdev = hu->hdev; struct hci_dev *hdev = hu->hdev;
struct qca_data *qca = hu->priv; struct qca_data *qca = hu->priv;
unsigned int speed, qca_baudrate = QCA_BAUDRATE_115200; unsigned int speed, qca_baudrate = QCA_BAUDRATE_115200;
struct qca_serdev *qcadev;
int ret; int ret;
int soc_ver = 0;
bt_dev_info(hdev, "ROME setup"); qcadev = serdev_device_get_drvdata(hu->serdev);
ret = qca_check_speeds(hu);
if (ret)
return ret;
/* Patch downloading has to be done without IBS mode */ /* Patch downloading has to be done without IBS mode */
clear_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags); clear_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
/* Setup initial baudrate */ if (qcadev->btsoc_type == QCA_WCN3990) {
speed = 0; bt_dev_info(hdev, "setting up wcn3990");
if (hu->init_speed) ret = qca_wcn3990_init(hu);
speed = hu->init_speed; if (ret)
else if (hu->proto->init_speed)
speed = hu->proto->init_speed;
if (speed)
host_set_baudrate(hu, speed);
/* Setup user speed if needed */
speed = 0;
if (hu->oper_speed)
speed = hu->oper_speed;
else if (hu->proto->oper_speed)
speed = hu->proto->oper_speed;
if (speed) {
qca_baudrate = qca_get_baudrate_value(speed);
bt_dev_info(hdev, "Set UART speed to %d", speed);
ret = qca_set_baudrate(hdev, qca_baudrate);
if (ret) {
bt_dev_err(hdev, "Failed to change the baud rate (%d)",
ret);
return ret; return ret;
}
host_set_baudrate(hu, speed); ret = qca_read_soc_version(hdev, &soc_ver);
if (ret)
return ret;
} else {
bt_dev_info(hdev, "ROME setup");
qca_set_speed(hu, QCA_INIT_SPEED);
} }
/* Setup user speed if needed */
speed = qca_get_speed(hu, QCA_OPER_SPEED);
if (speed) {
ret = qca_set_speed(hu, QCA_OPER_SPEED);
if (ret)
return ret;
qca_baudrate = qca_get_baudrate_value(speed);
}
if (qcadev->btsoc_type != QCA_WCN3990) {
/* Get QCA version information */
ret = qca_read_soc_version(hdev, &soc_ver);
if (ret)
return ret;
}
bt_dev_info(hdev, "QCA controller version 0x%08x", soc_ver);
/* Setup patch / NVM configurations */ /* Setup patch / NVM configurations */
ret = qca_uart_setup_rome(hdev, qca_baudrate); ret = qca_uart_setup(hdev, qca_baudrate, qcadev->btsoc_type, soc_ver);
if (!ret) { if (!ret) {
set_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags); set_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
qca_debugfs_init(hdev); qca_debugfs_init(hdev);
@ -1002,9 +1221,123 @@ static struct hci_uart_proto qca_proto = {
.dequeue = qca_dequeue, .dequeue = qca_dequeue,
}; };
static const struct qca_vreg_data qca_soc_data = {
.soc_type = QCA_WCN3990,
.vregs = (struct qca_vreg []) {
{ "vddio", 1800000, 1900000, 15000 },
{ "vddxo", 1800000, 1900000, 80000 },
{ "vddrf", 1300000, 1350000, 300000 },
{ "vddch0", 3300000, 3400000, 450000 },
},
.num_vregs = 4,
};
static void qca_power_shutdown(struct hci_dev *hdev)
{
struct hci_uart *hu = hci_get_drvdata(hdev);
host_set_baudrate(hu, 2400);
qca_send_power_pulse(hdev, QCA_WCN3990_POWEROFF_PULSE);
qca_power_setup(hu, false);
}
static int qca_enable_regulator(struct qca_vreg vregs,
struct regulator *regulator)
{
int ret;
ret = regulator_set_voltage(regulator, vregs.min_uV,
vregs.max_uV);
if (ret)
return ret;
if (vregs.load_uA)
ret = regulator_set_load(regulator,
vregs.load_uA);
if (ret)
return ret;
return regulator_enable(regulator);
}
static void qca_disable_regulator(struct qca_vreg vregs,
struct regulator *regulator)
{
regulator_disable(regulator);
regulator_set_voltage(regulator, 0, vregs.max_uV);
if (vregs.load_uA)
regulator_set_load(regulator, 0);
}
static int qca_power_setup(struct hci_uart *hu, bool on)
{
struct qca_vreg *vregs;
struct regulator_bulk_data *vreg_bulk;
struct qca_serdev *qcadev;
int i, num_vregs, ret = 0;
qcadev = serdev_device_get_drvdata(hu->serdev);
if (!qcadev || !qcadev->bt_power || !qcadev->bt_power->vreg_data ||
!qcadev->bt_power->vreg_bulk)
return -EINVAL;
vregs = qcadev->bt_power->vreg_data->vregs;
vreg_bulk = qcadev->bt_power->vreg_bulk;
num_vregs = qcadev->bt_power->vreg_data->num_vregs;
BT_DBG("on: %d", on);
if (on && !qcadev->bt_power->vregs_on) {
for (i = 0; i < num_vregs; i++) {
ret = qca_enable_regulator(vregs[i],
vreg_bulk[i].consumer);
if (ret)
break;
}
if (ret) {
BT_ERR("failed to enable regulator:%s", vregs[i].name);
/* turn off regulators which are enabled */
for (i = i - 1; i >= 0; i--)
qca_disable_regulator(vregs[i],
vreg_bulk[i].consumer);
} else {
qcadev->bt_power->vregs_on = true;
}
} else if (!on && qcadev->bt_power->vregs_on) {
/* turn off regulator in reverse order */
i = qcadev->bt_power->vreg_data->num_vregs - 1;
for ( ; i >= 0; i--)
qca_disable_regulator(vregs[i], vreg_bulk[i].consumer);
qcadev->bt_power->vregs_on = false;
}
return ret;
}
static int qca_init_regulators(struct qca_power *qca,
const struct qca_vreg *vregs, size_t num_vregs)
{
int i;
qca->vreg_bulk = devm_kzalloc(qca->dev, num_vregs *
sizeof(struct regulator_bulk_data),
GFP_KERNEL);
if (!qca->vreg_bulk)
return -ENOMEM;
for (i = 0; i < num_vregs; i++)
qca->vreg_bulk[i].supply = vregs[i].name;
return devm_regulator_bulk_get(qca->dev, num_vregs, qca->vreg_bulk);
}
static int qca_serdev_probe(struct serdev_device *serdev) static int qca_serdev_probe(struct serdev_device *serdev)
{ {
struct qca_serdev *qcadev; struct qca_serdev *qcadev;
const struct qca_vreg_data *data;
int err; int err;
qcadev = devm_kzalloc(&serdev->dev, sizeof(*qcadev), GFP_KERNEL); qcadev = devm_kzalloc(&serdev->dev, sizeof(*qcadev), GFP_KERNEL);
@ -1012,47 +1345,84 @@ static int qca_serdev_probe(struct serdev_device *serdev)
return -ENOMEM; return -ENOMEM;
qcadev->serdev_hu.serdev = serdev; qcadev->serdev_hu.serdev = serdev;
data = of_device_get_match_data(&serdev->dev);
serdev_device_set_drvdata(serdev, qcadev); serdev_device_set_drvdata(serdev, qcadev);
if (data && data->soc_type == QCA_WCN3990) {
qcadev->btsoc_type = QCA_WCN3990;
qcadev->bt_power = devm_kzalloc(&serdev->dev,
sizeof(struct qca_power),
GFP_KERNEL);
if (!qcadev->bt_power)
return -ENOMEM;
qcadev->bt_en = devm_gpiod_get(&serdev->dev, "enable", qcadev->bt_power->dev = &serdev->dev;
GPIOD_OUT_LOW); qcadev->bt_power->vreg_data = data;
if (IS_ERR(qcadev->bt_en)) { err = qca_init_regulators(qcadev->bt_power, data->vregs,
dev_err(&serdev->dev, "failed to acquire enable gpio\n"); data->num_vregs);
return PTR_ERR(qcadev->bt_en); if (err) {
BT_ERR("Failed to init regulators:%d", err);
goto out;
}
qcadev->bt_power->vregs_on = false;
device_property_read_u32(&serdev->dev, "max-speed",
&qcadev->oper_speed);
if (!qcadev->oper_speed)
BT_DBG("UART will pick default operating speed");
err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
if (err) {
BT_ERR("wcn3990 serdev registration failed");
goto out;
}
} else {
qcadev->btsoc_type = QCA_ROME;
qcadev->bt_en = devm_gpiod_get(&serdev->dev, "enable",
GPIOD_OUT_LOW);
if (IS_ERR(qcadev->bt_en)) {
dev_err(&serdev->dev, "failed to acquire enable gpio\n");
return PTR_ERR(qcadev->bt_en);
}
qcadev->susclk = devm_clk_get(&serdev->dev, NULL);
if (IS_ERR(qcadev->susclk)) {
dev_err(&serdev->dev, "failed to acquire clk\n");
return PTR_ERR(qcadev->susclk);
}
err = clk_set_rate(qcadev->susclk, SUSCLK_RATE_32KHZ);
if (err)
return err;
err = clk_prepare_enable(qcadev->susclk);
if (err)
return err;
err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
if (err)
clk_disable_unprepare(qcadev->susclk);
} }
qcadev->susclk = devm_clk_get(&serdev->dev, NULL); out: return err;
if (IS_ERR(qcadev->susclk)) {
dev_err(&serdev->dev, "failed to acquire clk\n");
return PTR_ERR(qcadev->susclk);
}
err = clk_set_rate(qcadev->susclk, SUSCLK_RATE_32KHZ);
if (err)
return err;
err = clk_prepare_enable(qcadev->susclk);
if (err)
return err;
err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
if (err)
clk_disable_unprepare(qcadev->susclk);
return err;
} }
static void qca_serdev_remove(struct serdev_device *serdev) static void qca_serdev_remove(struct serdev_device *serdev)
{ {
struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev); struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
hci_uart_unregister_device(&qcadev->serdev_hu); if (qcadev->btsoc_type == QCA_WCN3990)
qca_power_shutdown(qcadev->serdev_hu.hdev);
else
clk_disable_unprepare(qcadev->susclk);
clk_disable_unprepare(qcadev->susclk); hci_uart_unregister_device(&qcadev->serdev_hu);
} }
static const struct of_device_id qca_bluetooth_of_match[] = { static const struct of_device_id qca_bluetooth_of_match[] = {
{ .compatible = "qcom,qca6174-bt" }, { .compatible = "qcom,qca6174-bt" },
{ .compatible = "qcom,wcn3990-bt", .data = &qca_soc_data},
{ /* sentinel */ } { /* sentinel */ }
}; };
MODULE_DEVICE_TABLE(of, qca_bluetooth_of_match); MODULE_DEVICE_TABLE(of, qca_bluetooth_of_match);
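
The of_device_id .data pointer is how the probe path above distinguishes the regulator-powered wcn3990 flow from the GPIO/clock-based ROME flow. Purely as an illustration (the compatible string, name and supply numbers below are invented), another regulator-powered part would only need its own qca_vreg_data table and match entry:

static const struct qca_vreg_data qca_example_data = {
	.soc_type = QCA_WCN3990,	/* or a new enum qca_btsoc_type value */
	.vregs = (struct qca_vreg []) {
		{ "vddio",  1800000, 1900000,  15000 },
		{ "vddch0", 3300000, 3400000, 450000 },
	},
	.num_vregs = 2,
};

/* ...plus one more line in qca_bluetooth_of_match:
 *	{ .compatible = "qcom,example-bt", .data = &qca_example_data },
 */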


@ -1,38 +0,0 @@
/*
* This file is part of Nokia H4P bluetooth driver
*
* Copyright (C) 2010 Nokia Corporation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*
*/
/**
* struct hci_h4p_platform data - hci_h4p Platform data structure
*/
struct hci_h4p_platform_data {
int chip_type;
int bt_sysclk;
unsigned int bt_wakeup_gpio;
unsigned int host_wakeup_gpio;
unsigned int reset_gpio;
int reset_gpio_shared;
unsigned int uart_irq;
phys_addr_t uart_base;
const char *uart_iclk;
const char *uart_fclk;
void (*set_pm_limits)(struct device *dev, bool set);
};


@ -183,6 +183,15 @@ enum {
* during the hdev->setup vendor callback. * during the hdev->setup vendor callback.
*/ */
HCI_QUIRK_NON_PERSISTENT_DIAG, HCI_QUIRK_NON_PERSISTENT_DIAG,
/* When this quirk is set, setup() would be run after every
* open() and not just after the first open().
*
* This quirk can be set before hci_register_dev is called or
* during the hdev->setup vendor callback.
*
*/
HCI_QUIRK_NON_PERSISTENT_SETUP,
}; };
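
As a minimal sketch of how the new quirk is meant to be used (example_register() is hypothetical and not part of this diff), a driver whose controller needs full re-initialisation after every power cycle would opt in before registering, or from its ->setup() callback:

static int example_register(struct hci_dev *hdev)
{
	/* Ask the core to re-run ->setup() on every power-on, not just the
	 * first one, e.g. because the controller loses its firmware and
	 * configuration whenever its supplies are switched off.
	 */
	set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);

	return hci_register_dev(hdev);
}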
/* HCI device flags */ /* HCI device flags */
@ -291,6 +300,14 @@ enum {
#define HCI_DH3 0x0800 #define HCI_DH3 0x0800
#define HCI_DH5 0x8000 #define HCI_DH5 0x8000
/* HCI packet types inverted masks */
#define HCI_2DH1 0x0002
#define HCI_3DH1 0x0004
#define HCI_2DH3 0x0100
#define HCI_3DH3 0x0200
#define HCI_2DH5 0x1000
#define HCI_3DH5 0x2000
#define HCI_HV1 0x0020 #define HCI_HV1 0x0020
#define HCI_HV2 0x0040 #define HCI_HV2 0x0040
#define HCI_HV3 0x0080 #define HCI_HV3 0x0080
@ -354,6 +371,8 @@ enum {
#define LMP_PCONTROL 0x04 #define LMP_PCONTROL 0x04
#define LMP_TRANSPARENT 0x08 #define LMP_TRANSPARENT 0x08
#define LMP_EDR_2M 0x02
#define LMP_EDR_3M 0x04
#define LMP_RSSI_INQ 0x40 #define LMP_RSSI_INQ 0x40
#define LMP_ESCO 0x80 #define LMP_ESCO 0x80
@ -361,7 +380,9 @@ enum {
#define LMP_EV5 0x02 #define LMP_EV5 0x02
#define LMP_NO_BREDR 0x20 #define LMP_NO_BREDR 0x20
#define LMP_LE 0x40 #define LMP_LE 0x40
#define LMP_EDR_3SLOT 0x80
#define LMP_EDR_5SLOT 0x01
#define LMP_SNIFF_SUBR 0x02 #define LMP_SNIFF_SUBR 0x02
#define LMP_PAUSE_ENC 0x04 #define LMP_PAUSE_ENC 0x04
#define LMP_EDR_ESCO_2M 0x20 #define LMP_EDR_ESCO_2M 0x20
@ -398,7 +419,12 @@ enum {
#define HCI_LE_SLAVE_FEATURES 0x08 #define HCI_LE_SLAVE_FEATURES 0x08
#define HCI_LE_PING 0x10 #define HCI_LE_PING 0x10
#define HCI_LE_DATA_LEN_EXT 0x20 #define HCI_LE_DATA_LEN_EXT 0x20
#define HCI_LE_PHY_2M 0x01
#define HCI_LE_PHY_CODED 0x08
#define HCI_LE_EXT_ADV 0x10
#define HCI_LE_EXT_SCAN_POLICY 0x80 #define HCI_LE_EXT_SCAN_POLICY 0x80
#define HCI_LE_PHY_2M 0x01
#define HCI_LE_PHY_CODED 0x08
#define HCI_LE_CHAN_SEL_ALG2 0x40 #define HCI_LE_CHAN_SEL_ALG2 0x40
/* Connection modes */ /* Connection modes */
@ -1490,6 +1516,14 @@ struct hci_cp_le_write_def_data_len {
__le16 tx_time; __le16 tx_time;
} __packed; } __packed;
#define HCI_OP_LE_CLEAR_RESOLV_LIST 0x2029
#define HCI_OP_LE_READ_RESOLV_LIST_SIZE 0x202a
struct hci_rp_le_read_resolv_list_size {
__u8 status;
__u8 size;
} __packed;
#define HCI_OP_LE_READ_MAX_DATA_LEN 0x202f #define HCI_OP_LE_READ_MAX_DATA_LEN 0x202f
struct hci_rp_le_read_max_data_len { struct hci_rp_le_read_max_data_len {
__u8 status; __u8 status;
@ -1506,6 +1540,134 @@ struct hci_cp_le_set_default_phy {
__u8 rx_phys; __u8 rx_phys;
} __packed; } __packed;
#define HCI_LE_SET_PHY_1M 0x01
#define HCI_LE_SET_PHY_2M 0x02
#define HCI_LE_SET_PHY_CODED 0x04
#define HCI_OP_LE_SET_EXT_SCAN_PARAMS 0x2041
struct hci_cp_le_set_ext_scan_params {
__u8 own_addr_type;
__u8 filter_policy;
__u8 scanning_phys;
__u8 data[0];
} __packed;
#define LE_SCAN_PHY_1M 0x01
#define LE_SCAN_PHY_2M 0x02
#define LE_SCAN_PHY_CODED 0x04
struct hci_cp_le_scan_phy_params {
__u8 type;
__le16 interval;
__le16 window;
} __packed;
#define HCI_OP_LE_SET_EXT_SCAN_ENABLE 0x2042
struct hci_cp_le_set_ext_scan_enable {
__u8 enable;
__u8 filter_dup;
__le16 duration;
__le16 period;
} __packed;
#define HCI_OP_LE_EXT_CREATE_CONN 0x2043
struct hci_cp_le_ext_create_conn {
__u8 filter_policy;
__u8 own_addr_type;
__u8 peer_addr_type;
bdaddr_t peer_addr;
__u8 phys;
__u8 data[0];
} __packed;
struct hci_cp_le_ext_conn_param {
__le16 scan_interval;
__le16 scan_window;
__le16 conn_interval_min;
__le16 conn_interval_max;
__le16 conn_latency;
__le16 supervision_timeout;
__le16 min_ce_len;
__le16 max_ce_len;
} __packed;
#define HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS 0x203b
struct hci_rp_le_read_num_supported_adv_sets {
__u8 status;
__u8 num_of_sets;
} __packed;
#define HCI_OP_LE_SET_EXT_ADV_PARAMS 0x2036
struct hci_cp_le_set_ext_adv_params {
__u8 handle;
__le16 evt_properties;
__u8 min_interval[3];
__u8 max_interval[3];
__u8 channel_map;
__u8 own_addr_type;
__u8 peer_addr_type;
bdaddr_t peer_addr;
__u8 filter_policy;
__u8 tx_power;
__u8 primary_phy;
__u8 secondary_max_skip;
__u8 secondary_phy;
__u8 sid;
__u8 notif_enable;
} __packed;
#define HCI_ADV_PHY_1M 0X01
#define HCI_ADV_PHY_2M 0x02
#define HCI_ADV_PHY_CODED 0x03
struct hci_rp_le_set_ext_adv_params {
__u8 status;
__u8 tx_power;
} __packed;
#define HCI_OP_LE_SET_EXT_ADV_ENABLE 0x2039
struct hci_cp_le_set_ext_adv_enable {
__u8 enable;
__u8 num_of_sets;
__u8 data[0];
} __packed;
struct hci_cp_ext_adv_set {
__u8 handle;
__le16 duration;
__u8 max_events;
} __packed;
#define HCI_OP_LE_SET_EXT_ADV_DATA 0x2037
struct hci_cp_le_set_ext_adv_data {
__u8 handle;
__u8 operation;
__u8 frag_pref;
__u8 length;
__u8 data[HCI_MAX_AD_LENGTH];
} __packed;
#define HCI_OP_LE_SET_EXT_SCAN_RSP_DATA 0x2038
struct hci_cp_le_set_ext_scan_rsp_data {
__u8 handle;
__u8 operation;
__u8 frag_pref;
__u8 length;
__u8 data[HCI_MAX_AD_LENGTH];
} __packed;
#define LE_SET_ADV_DATA_OP_COMPLETE 0x03
#define LE_SET_ADV_DATA_NO_FRAG 0x01
#define HCI_OP_LE_CLEAR_ADV_SETS 0x203d
#define HCI_OP_LE_SET_ADV_SET_RAND_ADDR 0x2035
struct hci_cp_le_set_adv_set_rand_addr {
__u8 handle;
bdaddr_t bdaddr;
} __packed;
/* ---- HCI Events ---- */ /* ---- HCI Events ---- */
#define HCI_EV_INQUIRY_COMPLETE 0x01 #define HCI_EV_INQUIRY_COMPLETE 0x01
@ -1893,6 +2055,23 @@ struct hci_ev_le_conn_complete {
#define LE_ADV_SCAN_IND 0x02 #define LE_ADV_SCAN_IND 0x02
#define LE_ADV_NONCONN_IND 0x03 #define LE_ADV_NONCONN_IND 0x03
#define LE_ADV_SCAN_RSP 0x04 #define LE_ADV_SCAN_RSP 0x04
#define LE_ADV_INVALID 0x05
/* Legacy event types in extended adv report */
#define LE_LEGACY_ADV_IND 0x0013
#define LE_LEGACY_ADV_DIRECT_IND 0x0015
#define LE_LEGACY_ADV_SCAN_IND 0x0012
#define LE_LEGACY_NONCONN_IND 0x0010
#define LE_LEGACY_SCAN_RSP_ADV 0x001b
#define LE_LEGACY_SCAN_RSP_ADV_SCAN 0x001a
/* Extended Advertising event types */
#define LE_EXT_ADV_NON_CONN_IND 0x0000
#define LE_EXT_ADV_CONN_IND 0x0001
#define LE_EXT_ADV_SCAN_IND 0x0002
#define LE_EXT_ADV_DIRECT_IND 0x0004
#define LE_EXT_ADV_SCAN_RSP 0x0008
#define LE_EXT_ADV_LEGACY_PDU 0x0010
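
These event-type values are bit patterns rather than an enumeration: LE_EXT_ADV_LEGACY_PDU flags a report that carries a legacy PDU, in which case the full 16-bit value matches one of the LE_LEGACY_* constants, while for genuinely extended PDUs the low bits describe individual properties. The sketch below shows one way to decode them; the function name is invented, and it only approximates the conversion that belongs in the event-handling code (net/bluetooth/hci_event.c), not in this header:

static u8 example_ext_adv_type(u16 evt_type)
{
	if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
		switch (evt_type) {
		case LE_LEGACY_ADV_IND:
			return LE_ADV_IND;
		case LE_LEGACY_ADV_DIRECT_IND:
			return LE_ADV_DIRECT_IND;
		case LE_LEGACY_ADV_SCAN_IND:
			return LE_ADV_SCAN_IND;
		case LE_LEGACY_NONCONN_IND:
			return LE_ADV_NONCONN_IND;
		case LE_LEGACY_SCAN_RSP_ADV:
		case LE_LEGACY_SCAN_RSP_ADV_SCAN:
			return LE_ADV_SCAN_RSP;
		default:
			return LE_ADV_INVALID;
		}
	}

	/* Truly extended PDU: interpret the property bits directly. */
	if (evt_type & LE_EXT_ADV_SCAN_RSP)
		return LE_ADV_SCAN_RSP;
	if (evt_type & LE_EXT_ADV_DIRECT_IND)
		return LE_ADV_DIRECT_IND;
	if (evt_type & LE_EXT_ADV_CONN_IND)
		return LE_ADV_IND;
	if (evt_type & LE_EXT_ADV_SCAN_IND)
		return LE_ADV_SCAN_IND;
	return LE_ADV_NONCONN_IND;
}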
#define ADDR_LE_DEV_PUBLIC 0x00 #define ADDR_LE_DEV_PUBLIC 0x00
#define ADDR_LE_DEV_RANDOM 0x01 #define ADDR_LE_DEV_RANDOM 0x01
@ -1957,6 +2136,46 @@ struct hci_ev_le_direct_adv_info {
__s8 rssi; __s8 rssi;
} __packed; } __packed;
#define HCI_EV_LE_EXT_ADV_REPORT 0x0d
struct hci_ev_le_ext_adv_report {
__le16 evt_type;
__u8 bdaddr_type;
bdaddr_t bdaddr;
__u8 primary_phy;
__u8 secondary_phy;
__u8 sid;
__u8 tx_power;
__s8 rssi;
__le16 interval;
__u8 direct_addr_type;
bdaddr_t direct_addr;
__u8 length;
__u8 data[0];
} __packed;
#define HCI_EV_LE_ENHANCED_CONN_COMPLETE 0x0a
struct hci_ev_le_enh_conn_complete {
__u8 status;
__le16 handle;
__u8 role;
__u8 bdaddr_type;
bdaddr_t bdaddr;
bdaddr_t local_rpa;
bdaddr_t peer_rpa;
__le16 interval;
__le16 latency;
__le16 supervision_timeout;
__u8 clk_accurancy;
} __packed;
#define HCI_EV_LE_EXT_ADV_SET_TERM 0x12
struct hci_evt_le_ext_adv_set_term {
__u8 status;
__u8 handle;
__le16 conn_handle;
__u8 num_evts;
} __packed;
/* Internal events generated by Bluetooth stack */ /* Internal events generated by Bluetooth stack */
#define HCI_EV_STACK_INTERNAL 0xfd #define HCI_EV_STACK_INTERNAL 0xfd
struct hci_ev_stack_internal { struct hci_ev_stack_internal {


@ -171,6 +171,10 @@ struct adv_info {
__u8 adv_data[HCI_MAX_AD_LENGTH]; __u8 adv_data[HCI_MAX_AD_LENGTH];
__u16 scan_rsp_len; __u16 scan_rsp_len;
__u8 scan_rsp_data[HCI_MAX_AD_LENGTH]; __u8 scan_rsp_data[HCI_MAX_AD_LENGTH];
__s8 tx_power;
bdaddr_t random_addr;
bool rpa_expired;
struct delayed_work rpa_expired_cb;
}; };
#define HCI_MAX_ADV_INSTANCES 5 #define HCI_MAX_ADV_INSTANCES 5
@ -221,6 +225,8 @@ struct hci_dev {
__u8 features[HCI_MAX_PAGES][8]; __u8 features[HCI_MAX_PAGES][8];
__u8 le_features[8]; __u8 le_features[8];
__u8 le_white_list_size; __u8 le_white_list_size;
__u8 le_resolv_list_size;
__u8 le_num_of_adv_sets;
__u8 le_states[8]; __u8 le_states[8];
__u8 commands[64]; __u8 commands[64];
__u8 hci_ver; __u8 hci_ver;
@ -314,6 +320,9 @@ struct hci_dev {
unsigned long sco_last_tx; unsigned long sco_last_tx;
unsigned long le_last_tx; unsigned long le_last_tx;
__u8 le_tx_def_phys;
__u8 le_rx_def_phys;
struct workqueue_struct *workqueue; struct workqueue_struct *workqueue;
struct workqueue_struct *req_workqueue; struct workqueue_struct *req_workqueue;
@ -367,6 +376,7 @@ struct hci_dev {
struct list_head identity_resolving_keys; struct list_head identity_resolving_keys;
struct list_head remote_oob_data; struct list_head remote_oob_data;
struct list_head le_white_list; struct list_head le_white_list;
struct list_head le_resolv_list;
struct list_head le_conn_params; struct list_head le_conn_params;
struct list_head pend_le_conns; struct list_head pend_le_conns;
struct list_head pend_le_reports; struct list_head pend_le_reports;
@ -1106,6 +1116,7 @@ int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
u16 scan_rsp_len, u8 *scan_rsp_data, u16 scan_rsp_len, u8 *scan_rsp_data,
u16 timeout, u16 duration); u16 timeout, u16 duration);
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance); int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance);
void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired);
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb); void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
@ -1136,6 +1147,10 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
#define lmp_inq_tx_pwr_capable(dev) ((dev)->features[0][7] & LMP_INQ_TX_PWR) #define lmp_inq_tx_pwr_capable(dev) ((dev)->features[0][7] & LMP_INQ_TX_PWR)
#define lmp_ext_feat_capable(dev) ((dev)->features[0][7] & LMP_EXTFEATURES) #define lmp_ext_feat_capable(dev) ((dev)->features[0][7] & LMP_EXTFEATURES)
#define lmp_transp_capable(dev) ((dev)->features[0][2] & LMP_TRANSPARENT) #define lmp_transp_capable(dev) ((dev)->features[0][2] & LMP_TRANSPARENT)
#define lmp_edr_2m_capable(dev) ((dev)->features[0][3] & LMP_EDR_2M)
#define lmp_edr_3m_capable(dev) ((dev)->features[0][3] & LMP_EDR_3M)
#define lmp_edr_3slot_capable(dev) ((dev)->features[0][4] & LMP_EDR_3SLOT)
#define lmp_edr_5slot_capable(dev) ((dev)->features[0][5] & LMP_EDR_5SLOT)
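
The four new lmp_edr_*_capable() macros read the standard LMP feature bits (page 0, bytes 3 to 5). A rough sketch of how they can be combined into a supported-PHY bitmap using the MGMT_PHY_* constants from mgmt.h follows; the helper name is invented, basic-rate multi-slot handling is left out for brevity, and it only loosely follows what the management-interface code is expected to do with these macros:

static u32 example_supported_bredr_phys(struct hci_dev *hdev)
{
	/* 1M single-slot packets are always available; multi-slot basic
	 * rate support is feature-gated as well, but omitted here.
	 */
	u32 phys = MGMT_PHY_BR_1M_1SLOT;

	if (lmp_edr_2m_capable(hdev)) {
		phys |= MGMT_PHY_EDR_2M_1SLOT;
		if (lmp_edr_3slot_capable(hdev))
			phys |= MGMT_PHY_EDR_2M_3SLOT;
		if (lmp_edr_5slot_capable(hdev))
			phys |= MGMT_PHY_EDR_2M_5SLOT;

		if (lmp_edr_3m_capable(hdev)) {
			phys |= MGMT_PHY_EDR_3M_1SLOT;
			if (lmp_edr_3slot_capable(hdev))
				phys |= MGMT_PHY_EDR_3M_3SLOT;
			if (lmp_edr_5slot_capable(hdev))
				phys |= MGMT_PHY_EDR_3M_5SLOT;
		}
	}

	return phys;
}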
/* ----- Extended LMP capabilities ----- */ /* ----- Extended LMP capabilities ----- */
#define lmp_csb_master_capable(dev) ((dev)->features[2][0] & LMP_CSB_MASTER) #define lmp_csb_master_capable(dev) ((dev)->features[2][0] & LMP_CSB_MASTER)
@ -1156,6 +1171,24 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
#define bredr_sc_enabled(dev) (lmp_sc_capable(dev) && \ #define bredr_sc_enabled(dev) (lmp_sc_capable(dev) && \
hci_dev_test_flag(dev, HCI_SC_ENABLED)) hci_dev_test_flag(dev, HCI_SC_ENABLED))
#define scan_1m(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_1M) || \
((dev)->le_rx_def_phys & HCI_LE_SET_PHY_1M))
#define scan_2m(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_2M) || \
((dev)->le_rx_def_phys & HCI_LE_SET_PHY_2M))
#define scan_coded(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_CODED) || \
((dev)->le_rx_def_phys & HCI_LE_SET_PHY_CODED))
/* Use ext scanning if set ext scan param and ext scan enable is supported */
#define use_ext_scan(dev) (((dev)->commands[37] & 0x20) && \
((dev)->commands[37] & 0x40))
/* Use ext create connection if command is supported */
#define use_ext_conn(dev) ((dev)->commands[37] & 0x80)
/* Extended advertising support */
#define ext_adv_capable(dev) (((dev)->le_features[1] & HCI_LE_EXT_ADV))
/* ----- HCI protocols ----- */ /* ----- HCI protocols ----- */
#define HCI_PROTO_DEFER 0x01 #define HCI_PROTO_DEFER 0x01
@ -1529,6 +1562,7 @@ void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev,
u8 instance); u8 instance);
void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev, void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
u8 instance); u8 instance);
int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip);
u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency, u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
u16 to_multiplier); u16 to_multiplier);


@ -101,6 +101,7 @@ struct mgmt_rp_read_index_list {
#define MGMT_SETTING_PRIVACY 0x00002000 #define MGMT_SETTING_PRIVACY 0x00002000
#define MGMT_SETTING_CONFIGURATION 0x00004000 #define MGMT_SETTING_CONFIGURATION 0x00004000
#define MGMT_SETTING_STATIC_ADDRESS 0x00008000 #define MGMT_SETTING_STATIC_ADDRESS 0x00008000
#define MGMT_SETTING_PHY_CONFIGURATION 0x00010000
#define MGMT_OP_READ_INFO 0x0004 #define MGMT_OP_READ_INFO 0x0004
#define MGMT_READ_INFO_SIZE 0 #define MGMT_READ_INFO_SIZE 0
@ -561,6 +562,12 @@ struct mgmt_rp_add_advertising {
#define MGMT_ADV_FLAG_TX_POWER BIT(4) #define MGMT_ADV_FLAG_TX_POWER BIT(4)
#define MGMT_ADV_FLAG_APPEARANCE BIT(5) #define MGMT_ADV_FLAG_APPEARANCE BIT(5)
#define MGMT_ADV_FLAG_LOCAL_NAME BIT(6) #define MGMT_ADV_FLAG_LOCAL_NAME BIT(6)
#define MGMT_ADV_FLAG_SEC_1M BIT(7)
#define MGMT_ADV_FLAG_SEC_2M BIT(8)
#define MGMT_ADV_FLAG_SEC_CODED BIT(9)
#define MGMT_ADV_FLAG_SEC_MASK (MGMT_ADV_FLAG_SEC_1M | MGMT_ADV_FLAG_SEC_2M | \
MGMT_ADV_FLAG_SEC_CODED)
#define MGMT_OP_REMOVE_ADVERTISING 0x003F #define MGMT_OP_REMOVE_ADVERTISING 0x003F
struct mgmt_cp_remove_advertising { struct mgmt_cp_remove_advertising {
@ -604,6 +611,49 @@ struct mgmt_cp_set_appearance {
} __packed; } __packed;
#define MGMT_SET_APPEARANCE_SIZE 2 #define MGMT_SET_APPEARANCE_SIZE 2
#define MGMT_OP_GET_PHY_CONFIGURATION 0x0044
struct mgmt_rp_get_phy_confguration {
__le32 supported_phys;
__le32 configurable_phys;
__le32 selected_phys;
} __packed;
#define MGMT_GET_PHY_CONFIGURATION_SIZE 0
#define MGMT_PHY_BR_1M_1SLOT 0x00000001
#define MGMT_PHY_BR_1M_3SLOT 0x00000002
#define MGMT_PHY_BR_1M_5SLOT 0x00000004
#define MGMT_PHY_EDR_2M_1SLOT 0x00000008
#define MGMT_PHY_EDR_2M_3SLOT 0x00000010
#define MGMT_PHY_EDR_2M_5SLOT 0x00000020
#define MGMT_PHY_EDR_3M_1SLOT 0x00000040
#define MGMT_PHY_EDR_3M_3SLOT 0x00000080
#define MGMT_PHY_EDR_3M_5SLOT 0x00000100
#define MGMT_PHY_LE_1M_TX 0x00000200
#define MGMT_PHY_LE_1M_RX 0x00000400
#define MGMT_PHY_LE_2M_TX 0x00000800
#define MGMT_PHY_LE_2M_RX 0x00001000
#define MGMT_PHY_LE_CODED_TX 0x00002000
#define MGMT_PHY_LE_CODED_RX 0x00004000
#define MGMT_PHY_BREDR_MASK (MGMT_PHY_BR_1M_1SLOT | MGMT_PHY_BR_1M_3SLOT | \
MGMT_PHY_BR_1M_5SLOT | MGMT_PHY_EDR_2M_1SLOT | \
MGMT_PHY_EDR_2M_3SLOT | MGMT_PHY_EDR_2M_5SLOT | \
MGMT_PHY_EDR_3M_1SLOT | MGMT_PHY_EDR_3M_3SLOT | \
MGMT_PHY_EDR_3M_5SLOT)
#define MGMT_PHY_LE_MASK (MGMT_PHY_LE_1M_TX | MGMT_PHY_LE_1M_RX | \
MGMT_PHY_LE_2M_TX | MGMT_PHY_LE_2M_RX | \
MGMT_PHY_LE_CODED_TX | MGMT_PHY_LE_CODED_RX)
#define MGMT_PHY_LE_TX_MASK (MGMT_PHY_LE_1M_TX | MGMT_PHY_LE_2M_TX | \
MGMT_PHY_LE_CODED_TX)
#define MGMT_PHY_LE_RX_MASK (MGMT_PHY_LE_1M_RX | MGMT_PHY_LE_2M_RX | \
MGMT_PHY_LE_CODED_RX)
#define MGMT_OP_SET_PHY_CONFIGURATION 0x0045
struct mgmt_cp_set_phy_confguration {
__le32 selected_phys;
} __packed;
#define MGMT_SET_PHY_CONFIGURATION_SIZE 4
#define MGMT_EV_CMD_COMPLETE 0x0001 #define MGMT_EV_CMD_COMPLETE 0x0001
struct mgmt_ev_cmd_complete { struct mgmt_ev_cmd_complete {
__le16 opcode; __le16 opcode;
@ -824,3 +874,8 @@ struct mgmt_ev_ext_info_changed {
__le16 eir_len; __le16 eir_len;
__u8 eir[0]; __u8 eir[0];
} __packed; } __packed;
#define MGMT_EV_PHY_CONFIGURATION_CHANGED 0x0026
struct mgmt_ev_phy_configuration_changed {
__le32 selected_phys;
} __packed;
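
For completeness, this is roughly what a Set PHY Configuration request built from the masks above looks like. The surrounding transport (an mgmt socket in userspace, or the command handling in the kernel) is omitted, and the helper is only a sketch:

static void example_fill_phy_request(struct mgmt_cp_set_phy_confguration *cp)
{
	/* Select LE 1M and LE 2M in both directions; a real caller would
	 * also OR in whichever BR/EDR bits it wants to keep enabled, and
	 * can use MGMT_PHY_LE_TX_MASK / MGMT_PHY_LE_RX_MASK to validate
	 * the value before sending it.
	 */
	cp->selected_phys = cpu_to_le32(MGMT_PHY_LE_1M_TX | MGMT_PHY_LE_1M_RX |
					MGMT_PHY_LE_2M_TX | MGMT_PHY_LE_2M_RX);
}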


@ -770,6 +770,7 @@ int lowpan_header_decompress(struct sk_buff *skb, const struct net_device *dev,
hdr.hop_limit, &hdr.daddr); hdr.hop_limit, &hdr.daddr);
skb_push(skb, sizeof(hdr)); skb_push(skb, sizeof(hdr));
skb_reset_mac_header(skb);
skb_reset_network_header(skb); skb_reset_network_header(skb);
skb_copy_to_linear_data(skb, &hdr, sizeof(hdr)); skb_copy_to_linear_data(skb, &hdr, sizeof(hdr));


@ -159,7 +159,7 @@ void bt_accept_enqueue(struct sock *parent, struct sock *sk)
BT_DBG("parent %p, sk %p", parent, sk); BT_DBG("parent %p, sk %p", parent, sk);
sock_hold(sk); sock_hold(sk);
lock_sock(sk); lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
list_add_tail(&bt_sk(sk)->accept_q, &bt_sk(parent)->accept_q); list_add_tail(&bt_sk(sk)->accept_q, &bt_sk(parent)->accept_q);
bt_sk(sk)->parent = parent; bt_sk(sk)->parent = parent;
release_sock(sk); release_sock(sk);


@ -748,11 +748,30 @@ static bool conn_use_rpa(struct hci_conn *conn)
return hci_dev_test_flag(hdev, HCI_PRIVACY); return hci_dev_test_flag(hdev, HCI_PRIVACY);
} }
static void set_ext_conn_params(struct hci_conn *conn,
struct hci_cp_le_ext_conn_param *p)
{
struct hci_dev *hdev = conn->hdev;
memset(p, 0, sizeof(*p));
/* Set window to be the same value as the interval to
* enable continuous scanning.
*/
p->scan_interval = cpu_to_le16(hdev->le_scan_interval);
p->scan_window = p->scan_interval;
p->conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
p->conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
p->conn_latency = cpu_to_le16(conn->le_conn_latency);
p->supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
p->min_ce_len = cpu_to_le16(0x0000);
p->max_ce_len = cpu_to_le16(0x0000);
}
static void hci_req_add_le_create_conn(struct hci_request *req, static void hci_req_add_le_create_conn(struct hci_request *req,
struct hci_conn *conn, struct hci_conn *conn,
bdaddr_t *direct_rpa) bdaddr_t *direct_rpa)
{ {
struct hci_cp_le_create_conn cp;
struct hci_dev *hdev = conn->hdev; struct hci_dev *hdev = conn->hdev;
u8 own_addr_type; u8 own_addr_type;
@ -775,25 +794,71 @@ static void hci_req_add_le_create_conn(struct hci_request *req,
return; return;
} }
memset(&cp, 0, sizeof(cp)); if (use_ext_conn(hdev)) {
struct hci_cp_le_ext_create_conn *cp;
struct hci_cp_le_ext_conn_param *p;
u8 data[sizeof(*cp) + sizeof(*p) * 3];
u32 plen;
/* Set window to be the same value as the interval to enable cp = (void *) data;
* continuous scanning. p = (void *) cp->data;
*/
cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
cp.scan_window = cp.scan_interval;
bacpy(&cp.peer_addr, &conn->dst); memset(cp, 0, sizeof(*cp));
cp.peer_addr_type = conn->dst_type;
cp.own_address_type = own_addr_type;
cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
cp.min_ce_len = cpu_to_le16(0x0000);
cp.max_ce_len = cpu_to_le16(0x0000);
hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp); bacpy(&cp->peer_addr, &conn->dst);
cp->peer_addr_type = conn->dst_type;
cp->own_addr_type = own_addr_type;
plen = sizeof(*cp);
if (scan_1m(hdev)) {
cp->phys |= LE_SCAN_PHY_1M;
set_ext_conn_params(conn, p);
p++;
plen += sizeof(*p);
}
if (scan_2m(hdev)) {
cp->phys |= LE_SCAN_PHY_2M;
set_ext_conn_params(conn, p);
p++;
plen += sizeof(*p);
}
if (scan_coded(hdev)) {
cp->phys |= LE_SCAN_PHY_CODED;
set_ext_conn_params(conn, p);
plen += sizeof(*p);
}
hci_req_add(req, HCI_OP_LE_EXT_CREATE_CONN, plen, data);
} else {
struct hci_cp_le_create_conn cp;
memset(&cp, 0, sizeof(cp));
/* Set window to be the same value as the interval to enable
* continuous scanning.
*/
cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
cp.scan_window = cp.scan_interval;
bacpy(&cp.peer_addr, &conn->dst);
cp.peer_addr_type = conn->dst_type;
cp.own_address_type = own_addr_type;
cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
cp.min_ce_len = cpu_to_le16(0x0000);
cp.max_ce_len = cpu_to_le16(0x0000);
hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
}
conn->state = BT_CONNECT; conn->state = BT_CONNECT;
clear_bit(HCI_CONN_SCANNING, &conn->flags); clear_bit(HCI_CONN_SCANNING, &conn->flags);
@ -803,35 +868,81 @@ static void hci_req_directed_advertising(struct hci_request *req,
struct hci_conn *conn) struct hci_conn *conn)
{ {
struct hci_dev *hdev = req->hdev; struct hci_dev *hdev = req->hdev;
struct hci_cp_le_set_adv_param cp;
u8 own_addr_type; u8 own_addr_type;
u8 enable; u8 enable;
/* Clear the HCI_LE_ADV bit temporarily so that the if (ext_adv_capable(hdev)) {
* hci_update_random_address knows that it's safe to go ahead struct hci_cp_le_set_ext_adv_params cp;
* and write a new random address. The flag will be set back on bdaddr_t random_addr;
* as soon as the SET_ADV_ENABLE HCI command completes.
*/
hci_dev_clear_flag(hdev, HCI_LE_ADV);
/* Set require_privacy to false so that the remote device has a /* Set require_privacy to false so that the remote device has a
* chance of identifying us. * chance of identifying us.
*/ */
if (hci_update_random_address(req, false, conn_use_rpa(conn), if (hci_get_random_address(hdev, false, conn_use_rpa(conn), NULL,
&own_addr_type) < 0) &own_addr_type, &random_addr) < 0)
return; return;
memset(&cp, 0, sizeof(cp)); memset(&cp, 0, sizeof(cp));
cp.type = LE_ADV_DIRECT_IND;
cp.own_address_type = own_addr_type;
cp.direct_addr_type = conn->dst_type;
bacpy(&cp.direct_addr, &conn->dst);
cp.channel_map = hdev->le_adv_channel_map;
hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp); cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_DIRECT_IND);
cp.own_addr_type = own_addr_type;
cp.channel_map = hdev->le_adv_channel_map;
cp.tx_power = HCI_TX_POWER_INVALID;
cp.primary_phy = HCI_ADV_PHY_1M;
cp.secondary_phy = HCI_ADV_PHY_1M;
cp.handle = 0; /* Use instance 0 for directed adv */
cp.own_addr_type = own_addr_type;
cp.peer_addr_type = conn->dst_type;
bacpy(&cp.peer_addr, &conn->dst);
enable = 0x01; hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
if (own_addr_type == ADDR_LE_DEV_RANDOM &&
bacmp(&random_addr, BDADDR_ANY) &&
bacmp(&random_addr, &hdev->random_addr)) {
struct hci_cp_le_set_adv_set_rand_addr cp;
memset(&cp, 0, sizeof(cp));
cp.handle = 0;
bacpy(&cp.bdaddr, &random_addr);
hci_req_add(req,
HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
sizeof(cp), &cp);
}
__hci_req_enable_ext_advertising(req);
} else {
struct hci_cp_le_set_adv_param cp;
/* Clear the HCI_LE_ADV bit temporarily so that the
* hci_update_random_address knows that it's safe to go ahead
* and write a new random address. The flag will be set back on
* as soon as the SET_ADV_ENABLE HCI command completes.
*/
hci_dev_clear_flag(hdev, HCI_LE_ADV);
/* Set require_privacy to false so that the remote device has a
* chance of identifying us.
*/
if (hci_update_random_address(req, false, conn_use_rpa(conn),
&own_addr_type) < 0)
return;
memset(&cp, 0, sizeof(cp));
cp.type = LE_ADV_DIRECT_IND;
cp.own_address_type = own_addr_type;
cp.direct_addr_type = conn->dst_type;
bacpy(&cp.direct_addr, &conn->dst);
cp.channel_map = hdev->le_adv_channel_map;
hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
enable = 0x01;
hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
&enable);
}
conn->state = BT_CONNECT; conn->state = BT_CONNECT;
} }


@ -695,11 +695,42 @@ static int hci_init3_req(struct hci_request *req, unsigned long opt)
if (hdev->commands[35] & (0x20 | 0x40)) if (hdev->commands[35] & (0x20 | 0x40))
events[1] |= 0x08; /* LE PHY Update Complete */ events[1] |= 0x08; /* LE PHY Update Complete */
/* If the controller supports LE Set Extended Scan Parameters
* and LE Set Extended Scan Enable commands, enable the
* corresponding event.
*/
if (use_ext_scan(hdev))
events[1] |= 0x10; /* LE Extended Advertising
* Report
*/
/* If the controller supports the LE Extended Create Connection
* command, enable the corresponding event.
*/
if (use_ext_conn(hdev))
events[1] |= 0x02; /* LE Enhanced Connection
* Complete
*/
/* If the controller supports the LE Extended Advertising
* command, enable the corresponding event.
*/
if (ext_adv_capable(hdev))
events[2] |= 0x02; /* LE Advertising Set
* Terminated
*/
hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events), hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
events); events);
if (hdev->commands[25] & 0x40) { /* Read LE Advertising Channel TX Power */
/* Read LE Advertising Channel TX Power */ if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
/* HCI TS spec forbids mixing of legacy and extended
* advertising commands wherein READ_ADV_TX_POWER is
* also included. So do not call it if extended adv
* is supported otherwise controller will return
* COMMAND_DISALLOWED for extended commands.
*/
hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL); hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
} }
@ -714,6 +745,17 @@ static int hci_init3_req(struct hci_request *req, unsigned long opt)
hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL); hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
} }
if (hdev->commands[34] & 0x40) {
/* Read LE Resolving List Size */
hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
0, NULL);
}
if (hdev->commands[34] & 0x20) {
/* Clear LE Resolving List */
hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
}
if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) { if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
/* Read LE Maximum Data Length */ /* Read LE Maximum Data Length */
hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL); hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
@ -722,6 +764,12 @@ static int hci_init3_req(struct hci_request *req, unsigned long opt)
hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL); hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
} }
if (ext_adv_capable(hdev)) {
/* Read LE Number of Supported Advertising Sets */
hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
0, NULL);
}
hci_set_le_support(req); hci_set_le_support(req);
} }
@ -802,10 +850,9 @@ static int hci_init4_req(struct hci_request *req, unsigned long opt)
if (hdev->commands[35] & 0x20) {
struct hci_cp_le_set_default_phy cp;
cp.all_phys = 0x00;
cp.tx_phys = hdev->le_tx_def_phys;
cp.rx_phys = hdev->le_rx_def_phys;
hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
}
@ -1368,7 +1415,8 @@ static int hci_dev_do_open(struct hci_dev *hdev)
atomic_set(&hdev->cmd_cnt, 1);
set_bit(HCI_INIT, &hdev->flags);
if (hci_dev_test_flag(hdev, HCI_SETUP) ||
test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
hci_sock_dev_event(hdev, HCI_DEV_SETUP);
if (hdev->setup)
@ -1432,6 +1480,7 @@ static int hci_dev_do_open(struct hci_dev *hdev)
if (!ret) {
hci_dev_hold(hdev);
hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
hci_adv_instances_set_rpa_expired(hdev, true);
set_bit(HCI_UP, &hdev->flags);
hci_sock_dev_event(hdev, HCI_DEV_UP);
hci_leds_update_powered(hdev, true);
@ -1587,9 +1636,15 @@ int hci_dev_do_close(struct hci_dev *hdev)
if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
cancel_delayed_work(&hdev->service_cache);
if (hci_dev_test_flag(hdev, HCI_MGMT)) {
struct adv_info *adv_instance;
cancel_delayed_work_sync(&hdev->rpa_expired);
list_for_each_entry(adv_instance, &hdev->adv_instances, list)
cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
}
/* Avoid potential lockdep warnings from the *_flush() calls by
* ensuring the workqueue is empty up front.
*/
@ -1897,7 +1952,11 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
break;
case HCISETPTYPE:
if (hdev->pkt_type == (__u16) dr.dev_opt)
break;
hdev->pkt_type = (__u16) dr.dev_opt;
mgmt_phy_configuration_changed(hdev, NULL);
break;
case HCISETACLMTU:
@ -2661,6 +2720,8 @@ int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
hdev->cur_adv_instance = 0x00;
}
cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
list_del(&adv_instance->list);
kfree(adv_instance);
@ -2669,6 +2730,14 @@ int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
return 0;
}
void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
{
struct adv_info *adv_instance, *n;
list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
adv_instance->rpa_expired = rpa_expired;
}
/* This function requires the caller holds hdev->lock */ /* This function requires the caller holds hdev->lock */
void hci_adv_instances_clear(struct hci_dev *hdev) void hci_adv_instances_clear(struct hci_dev *hdev)
{ {
@ -2680,6 +2749,7 @@ void hci_adv_instances_clear(struct hci_dev *hdev)
} }
list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) { list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
list_del(&adv_instance->list); list_del(&adv_instance->list);
kfree(adv_instance); kfree(adv_instance);
} }
@ -2688,6 +2758,16 @@ void hci_adv_instances_clear(struct hci_dev *hdev)
hdev->cur_adv_instance = 0x00; hdev->cur_adv_instance = 0x00;
} }
static void adv_instance_rpa_expired(struct work_struct *work)
{
struct adv_info *adv_instance = container_of(work, struct adv_info,
rpa_expired_cb.work);
BT_DBG("");
adv_instance->rpa_expired = true;
}
/* This function requires the caller holds hdev->lock */ /* This function requires the caller holds hdev->lock */
int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags, int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
u16 adv_data_len, u8 *adv_data, u16 adv_data_len, u8 *adv_data,
@ -2736,6 +2816,11 @@ int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
else else
adv_instance->duration = duration; adv_instance->duration = duration;
adv_instance->tx_power = HCI_TX_POWER_INVALID;
INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
adv_instance_rpa_expired);
BT_DBG("%s for %dMR", hdev->name, instance); BT_DBG("%s for %dMR", hdev->name, instance);
return 0; return 0;
@ -2999,6 +3084,8 @@ struct hci_dev *hci_alloc_dev(void)
hdev->le_max_tx_time = 0x0148; hdev->le_max_tx_time = 0x0148;
hdev->le_max_rx_len = 0x001b; hdev->le_max_rx_len = 0x001b;
hdev->le_max_rx_time = 0x0148; hdev->le_max_rx_time = 0x0148;
hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT; hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT; hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
@ -3017,6 +3104,7 @@ struct hci_dev *hci_alloc_dev(void)
INIT_LIST_HEAD(&hdev->identity_resolving_keys); INIT_LIST_HEAD(&hdev->identity_resolving_keys);
INIT_LIST_HEAD(&hdev->remote_oob_data); INIT_LIST_HEAD(&hdev->remote_oob_data);
INIT_LIST_HEAD(&hdev->le_white_list); INIT_LIST_HEAD(&hdev->le_white_list);
INIT_LIST_HEAD(&hdev->le_resolv_list);
INIT_LIST_HEAD(&hdev->le_conn_params); INIT_LIST_HEAD(&hdev->le_conn_params);
INIT_LIST_HEAD(&hdev->pend_le_conns); INIT_LIST_HEAD(&hdev->pend_le_conns);
INIT_LIST_HEAD(&hdev->pend_le_reports); INIT_LIST_HEAD(&hdev->pend_le_reports);
@ -3218,6 +3306,7 @@ void hci_unregister_dev(struct hci_dev *hdev)
hci_remote_oob_data_clear(hdev); hci_remote_oob_data_clear(hdev);
hci_adv_instances_clear(hdev); hci_adv_instances_clear(hdev);
hci_bdaddr_list_clear(&hdev->le_white_list); hci_bdaddr_list_clear(&hdev->le_white_list);
hci_bdaddr_list_clear(&hdev->le_resolv_list);
hci_conn_params_clear_all(hdev); hci_conn_params_clear_all(hdev);
hci_discovery_filter_clear(hdev); hci_discovery_filter_clear(hdev);
hci_dev_unlock(hdev); hci_dev_unlock(hdev);

View File

@ -694,6 +694,21 @@ static int white_list_show(struct seq_file *f, void *ptr)
DEFINE_SHOW_ATTRIBUTE(white_list);
static int resolv_list_show(struct seq_file *f, void *ptr)
{
struct hci_dev *hdev = f->private;
struct bdaddr_list *b;
hci_dev_lock(hdev);
list_for_each_entry(b, &hdev->le_resolv_list, list)
seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
hci_dev_unlock(hdev);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(resolv_list);
static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
struct hci_dev *hdev = f->private;
@ -955,6 +970,10 @@ void hci_debugfs_create_le(struct hci_dev *hdev)
&hdev->le_white_list_size);
debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
&white_list_fops);
debugfs_create_u8("resolv_list_size", 0444, hdev->debugfs,
&hdev->le_resolv_list_size);
debugfs_create_file("resolv_list", 0444, hdev->debugfs, hdev,
&resolv_list_fops);
debugfs_create_file("identity_resolving_keys", 0400, hdev->debugfs, debugfs_create_file("identity_resolving_keys", 0400, hdev->debugfs,
hdev, &identity_resolving_keys_fops); hdev, &identity_resolving_keys_fops);
debugfs_create_file("long_term_keys", 0400, hdev->debugfs, hdev, debugfs_create_file("long_term_keys", 0400, hdev->debugfs, hdev,

View File

@ -221,6 +221,7 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
hdev->ssp_debug_mode = 0; hdev->ssp_debug_mode = 0;
hci_bdaddr_list_clear(&hdev->le_white_list); hci_bdaddr_list_clear(&hdev->le_white_list);
hci_bdaddr_list_clear(&hdev->le_resolv_list);
} }
static void hci_cc_read_stored_link_key(struct hci_dev *hdev, static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
@ -1041,6 +1042,57 @@ static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
hci_dev_unlock(hdev); hci_dev_unlock(hdev);
} }
static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
struct hci_cp_le_set_default_phy *cp;
BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (status)
return;
cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
if (!cp)
return;
hci_dev_lock(hdev);
hdev->le_tx_def_phys = cp->tx_phys;
hdev->le_rx_def_phys = cp->rx_phys;
hci_dev_unlock(hdev);
}
static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
struct hci_cp_le_set_adv_set_rand_addr *cp;
struct adv_info *adv_instance;
if (status)
return;
cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
if (!cp)
return;
hci_dev_lock(hdev);
if (!hdev->cur_adv_instance) {
/* Store in hdev for instance 0 (Set adv and Directed advs) */
bacpy(&hdev->random_addr, &cp->bdaddr);
} else {
adv_instance = hci_find_adv_instance(hdev,
hdev->cur_adv_instance);
if (adv_instance)
bacpy(&adv_instance->random_addr, &cp->bdaddr);
}
hci_dev_unlock(hdev);
}
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb) static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{ {
__u8 *sent, status = *((__u8 *) skb->data); __u8 *sent, status = *((__u8 *) skb->data);
@ -1076,6 +1128,43 @@ static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
hci_dev_unlock(hdev); hci_dev_unlock(hdev);
} }
static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
struct sk_buff *skb)
{
struct hci_cp_le_set_ext_adv_enable *cp;
struct hci_cp_ext_adv_set *adv_set;
__u8 status = *((__u8 *) skb->data);
BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (status)
return;
cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
if (!cp)
return;
adv_set = (void *) cp->data;
hci_dev_lock(hdev);
if (cp->enable) {
struct hci_conn *conn;
hci_dev_set_flag(hdev, HCI_LE_ADV);
conn = hci_lookup_le_connect(hdev);
if (conn)
queue_delayed_work(hdev->workqueue,
&conn->le_conn_timeout,
conn->conn_timeout);
} else {
hci_dev_clear_flag(hdev, HCI_LE_ADV);
}
hci_dev_unlock(hdev);
}
static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb) static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
{ {
struct hci_cp_le_set_scan_param *cp; struct hci_cp_le_set_scan_param *cp;
@ -1097,6 +1186,31 @@ static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
hci_dev_unlock(hdev); hci_dev_unlock(hdev);
} }
static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev,
struct sk_buff *skb)
{
struct hci_cp_le_set_ext_scan_params *cp;
__u8 status = *((__u8 *) skb->data);
struct hci_cp_le_scan_phy_params *phy_param;
BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (status)
return;
cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
if (!cp)
return;
phy_param = (void *)cp->data;
hci_dev_lock(hdev);
hdev->le_scan_type = phy_param->type;
hci_dev_unlock(hdev);
}
static bool has_pending_adv_report(struct hci_dev *hdev) static bool has_pending_adv_report(struct hci_dev *hdev)
{ {
struct discovery_state *d = &hdev->discovery; struct discovery_state *d = &hdev->discovery;
@ -1126,24 +1240,11 @@ static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
d->last_adv_data_len = len;
}
static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
{
hci_dev_lock(hdev);
switch (enable) {
case LE_SCAN_ENABLE:
hci_dev_set_flag(hdev, HCI_LE_SCAN);
if (hdev->le_scan_type == LE_SCAN_ACTIVE)
@ -1189,13 +1290,63 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
default:
bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
enable);
break;
}
hci_dev_unlock(hdev);
}
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
struct sk_buff *skb)
{
struct hci_cp_le_set_scan_enable *cp;
__u8 status = *((__u8 *) skb->data);
BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (status)
return;
cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
if (!cp)
return;
le_set_scan_enable_complete(hdev, cp->enable);
}
static void hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev,
struct sk_buff *skb)
{
struct hci_cp_le_set_ext_scan_enable *cp;
__u8 status = *((__u8 *) skb->data);
BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (status)
return;
cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
if (!cp)
return;
le_set_scan_enable_complete(hdev, cp->enable);
}
static void hci_cc_le_read_num_adv_sets(struct hci_dev *hdev,
struct sk_buff *skb)
{
struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data;
BT_DBG("%s status 0x%2.2x No of Adv sets %u", hdev->name, rp->status,
rp->num_of_sets);
if (rp->status)
return;
hdev->le_num_of_adv_sets = rp->num_of_sets;
}
static void hci_cc_le_read_white_list_size(struct hci_dev *hdev, static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
struct sk_buff *skb) struct sk_buff *skb)
{ {
@ -1306,6 +1457,32 @@ static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
hdev->le_def_tx_time = le16_to_cpu(sent->tx_time); hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
} }
static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev,
struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (status)
return;
hci_bdaddr_list_clear(&hdev->le_resolv_list);
}
static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev,
struct sk_buff *skb)
{
struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data;
BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
if (rp->status)
return;
hdev->le_resolv_list_size = rp->size;
}
static void hci_cc_le_read_max_data_len(struct hci_dev *hdev, static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
struct sk_buff *skb) struct sk_buff *skb)
{ {
@ -1375,6 +1552,37 @@ static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
hci_dev_unlock(hdev); hci_dev_unlock(hdev);
} }
static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data;
struct hci_cp_le_set_ext_adv_params *cp;
struct adv_info *adv_instance;
BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
if (!cp)
return;
hci_dev_lock(hdev);
hdev->adv_addr_type = cp->own_addr_type;
if (!hdev->cur_adv_instance) {
/* Store in hdev for instance 0 */
hdev->adv_tx_power = rp->tx_power;
} else {
adv_instance = hci_find_adv_instance(hdev,
hdev->cur_adv_instance);
if (adv_instance)
adv_instance->tx_power = rp->tx_power;
}
/* Update adv data as tx power is known now */
hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
hci_dev_unlock(hdev);
}
static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb) static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
{ {
struct hci_rp_read_rssi *rp = (void *) skb->data; struct hci_rp_read_rssi *rp = (void *) skb->data;
@ -1896,10 +2104,44 @@ static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
hci_dev_unlock(hdev); hci_dev_unlock(hdev);
} }
static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
u8 peer_addr_type, u8 own_address_type,
u8 filter_policy)
{
struct hci_conn *conn;
conn = hci_conn_hash_lookup_le(hdev, peer_addr,
peer_addr_type);
if (!conn)
return;
/* Store the initiator and responder address information which
* is needed for SMP. These values will not change during the
* lifetime of the connection.
*/
conn->init_addr_type = own_address_type;
if (own_address_type == ADDR_LE_DEV_RANDOM)
bacpy(&conn->init_addr, &hdev->random_addr);
else
bacpy(&conn->init_addr, &hdev->bdaddr);
conn->resp_addr_type = peer_addr_type;
bacpy(&conn->resp_addr, peer_addr);
/* We don't want the connection attempt to stick around
* indefinitely since LE doesn't have a page timeout concept
* like BR/EDR. Set a timer for any connection that doesn't use
* the white list for connecting.
*/
if (filter_policy == HCI_LE_USE_PEER_ADDR)
queue_delayed_work(conn->hdev->workqueue,
&conn->le_conn_timeout,
conn->conn_timeout);
}
static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
struct hci_cp_le_create_conn *cp;
BT_DBG("%s status 0x%2.2x", hdev->name, status);
@ -1916,35 +2158,34 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
hci_dev_lock(hdev);
cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
cp->own_address_type, cp->filter_policy);
hci_dev_unlock(hdev);
}
static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
{
struct hci_cp_le_ext_create_conn *cp;
BT_DBG("%s status 0x%2.2x", hdev->name, status);
/* All connection failure handling is taken care of by the
* hci_le_conn_failed function which is triggered by the HCI
* request completion callbacks used for connecting.
*/
if (status)
return;
cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
if (!cp)
return;
hci_dev_lock(hdev);
cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
cp->own_addr_type, cp->filter_policy);
hci_dev_unlock(hdev);
}
@ -2618,8 +2859,10 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
/* We should disregard the current RPA and generate a new one
* whenever the encryption procedure fails.
*/
if (ev->status && conn->type == LE_LINK) {
hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
hci_adv_instances_set_rpa_expired(hdev, true);
}
clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
@ -3015,6 +3258,14 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
hci_cc_le_write_def_data_len(hdev, skb); hci_cc_le_write_def_data_len(hdev, skb);
break; break;
case HCI_OP_LE_CLEAR_RESOLV_LIST:
hci_cc_le_clear_resolv_list(hdev, skb);
break;
case HCI_OP_LE_READ_RESOLV_LIST_SIZE:
hci_cc_le_read_resolv_list_size(hdev, skb);
break;
case HCI_OP_LE_READ_MAX_DATA_LEN: case HCI_OP_LE_READ_MAX_DATA_LEN:
hci_cc_le_read_max_data_len(hdev, skb); hci_cc_le_read_max_data_len(hdev, skb);
break; break;
@ -3039,6 +3290,34 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
hci_cc_write_ssp_debug_mode(hdev, skb); hci_cc_write_ssp_debug_mode(hdev, skb);
break; break;
case HCI_OP_LE_SET_EXT_SCAN_PARAMS:
hci_cc_le_set_ext_scan_param(hdev, skb);
break;
case HCI_OP_LE_SET_EXT_SCAN_ENABLE:
hci_cc_le_set_ext_scan_enable(hdev, skb);
break;
case HCI_OP_LE_SET_DEFAULT_PHY:
hci_cc_le_set_default_phy(hdev, skb);
break;
case HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS:
hci_cc_le_read_num_adv_sets(hdev, skb);
break;
case HCI_OP_LE_SET_EXT_ADV_PARAMS:
hci_cc_set_ext_adv_param(hdev, skb);
break;
case HCI_OP_LE_SET_EXT_ADV_ENABLE:
hci_cc_le_set_ext_adv_enable(hdev, skb);
break;
case HCI_OP_LE_SET_ADV_SET_RAND_ADDR:
hci_cc_le_set_adv_set_random_addr(hdev, skb);
break;
default: default:
BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode); BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
break; break;
@ -3134,6 +3413,10 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
hci_cs_le_start_enc(hdev, ev->status); hci_cs_le_start_enc(hdev, ev->status);
break; break;
case HCI_OP_LE_EXT_CREATE_CONN:
hci_cs_le_ext_create_conn(hdev, ev->status);
break;
default: default:
BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode); BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
break; break;
@ -4460,16 +4743,15 @@ static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
} }
#endif #endif
static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
bdaddr_t *bdaddr, u8 bdaddr_type, u8 role, u16 handle,
u16 interval, u16 latency, u16 supervision_timeout)
{
struct hci_conn_params *params;
struct hci_conn *conn;
struct smp_irk *irk;
u8 addr_type;
hci_dev_lock(hdev);
/* All controllers implicitly stop advertising in the event of a
@ -4479,13 +4761,13 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
conn = hci_lookup_le_connect(hdev);
if (!conn) {
conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
if (!conn) {
bt_dev_err(hdev, "no memory for new connection");
goto unlock;
}
conn->dst_type = bdaddr_type;
/* If we didn't have a hci_conn object previously
* but we're in master role this must be something
@ -4496,8 +4778,8 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
* initiator address based on the HCI_PRIVACY flag.
*/
if (conn->out) {
conn->resp_addr_type = bdaddr_type;
bacpy(&conn->resp_addr, bdaddr);
if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
conn->init_addr_type = ADDR_LE_DEV_RANDOM;
bacpy(&conn->init_addr, &hdev->rpa);
@ -4516,13 +4798,18 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
* the advertising address type.
*/
conn->resp_addr_type = hdev->adv_addr_type;
if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
/* In case of ext adv, resp_addr will be updated in
* Adv Terminated event.
*/
if (!ext_adv_capable(hdev))
bacpy(&conn->resp_addr, &hdev->random_addr);
} else {
bacpy(&conn->resp_addr, &hdev->bdaddr);
}
conn->init_addr_type = bdaddr_type;
bacpy(&conn->init_addr, bdaddr);
/* For incoming connections, set the default minimum
* and maximum connection interval. They will be used
@ -4548,8 +4835,8 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
conn->dst_type = irk->addr_type;
}
if (status) {
hci_le_conn_failed(conn, status);
goto unlock;
}
@ -4568,17 +4855,17 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
mgmt_device_connected(hdev, conn, 0, NULL, 0);
conn->sec_level = BT_SECURITY_LOW;
conn->handle = handle;
conn->state = BT_CONFIG;
conn->le_conn_interval = interval;
conn->le_conn_latency = latency;
conn->le_supv_timeout = supervision_timeout;
hci_debugfs_create_conn(conn);
hci_conn_add_sysfs(conn);
if (!status) {
/* The remote features procedure is defined for master
* role only. So only in case of an initiated connection
* request the remote features.
@ -4600,10 +4887,10 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_conn_hold(conn);
} else {
conn->state = BT_CONNECTED;
hci_connect_cfm(conn, status);
}
} else {
hci_connect_cfm(conn, status);
}
params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
@ -4622,6 +4909,61 @@ unlock:
hci_dev_unlock(hdev);
}
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_le_conn_complete *ev = (void *) skb->data;
BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
ev->role, le16_to_cpu(ev->handle),
le16_to_cpu(ev->interval),
le16_to_cpu(ev->latency),
le16_to_cpu(ev->supervision_timeout));
}
static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
struct sk_buff *skb)
{
struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data;
BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
ev->role, le16_to_cpu(ev->handle),
le16_to_cpu(ev->interval),
le16_to_cpu(ev->latency),
le16_to_cpu(ev->supervision_timeout));
}
static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data;
struct hci_conn *conn;
BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
if (ev->status)
return;
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
if (conn) {
struct adv_info *adv_instance;
if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM)
return;
if (!hdev->cur_adv_instance) {
bacpy(&conn->resp_addr, &hdev->random_addr);
return;
}
adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
if (adv_instance)
bacpy(&conn->resp_addr, &adv_instance->random_addr);
}
}
static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
struct sk_buff *skb) struct sk_buff *skb)
{ {
@ -4957,6 +5299,78 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_dev_unlock(hdev); hci_dev_unlock(hdev);
} }
static u8 ext_evt_type_to_legacy(u16 evt_type)
{
if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
switch (evt_type) {
case LE_LEGACY_ADV_IND:
return LE_ADV_IND;
case LE_LEGACY_ADV_DIRECT_IND:
return LE_ADV_DIRECT_IND;
case LE_LEGACY_ADV_SCAN_IND:
return LE_ADV_SCAN_IND;
case LE_LEGACY_NONCONN_IND:
return LE_ADV_NONCONN_IND;
case LE_LEGACY_SCAN_RSP_ADV:
case LE_LEGACY_SCAN_RSP_ADV_SCAN:
return LE_ADV_SCAN_RSP;
}
BT_ERR_RATELIMITED("Unknown advertising packet type: 0x%02x",
evt_type);
return LE_ADV_INVALID;
}
if (evt_type & LE_EXT_ADV_CONN_IND) {
if (evt_type & LE_EXT_ADV_DIRECT_IND)
return LE_ADV_DIRECT_IND;
return LE_ADV_IND;
}
if (evt_type & LE_EXT_ADV_SCAN_RSP)
return LE_ADV_SCAN_RSP;
if (evt_type & LE_EXT_ADV_SCAN_IND)
return LE_ADV_SCAN_IND;
if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
evt_type & LE_EXT_ADV_DIRECT_IND)
return LE_ADV_NONCONN_IND;
BT_ERR_RATELIMITED("Unknown advertising packet type: 0x%02x",
evt_type);
return LE_ADV_INVALID;
}
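For readers who want to poke at this classification outside the kernel, here is a hedged userspace sketch of the same idea: the event-type word is a bit field (connectable, scannable, directed, scan response, legacy PDU), and a legacy PDU name is derived from whichever bits are set. The macro values below follow the Core specification's extended advertising report layout and are assumptions of this example, not definitions copied from the kernel headers; it also flattens the separate legacy/extended handling done above.

#include <stdint.h>
#include <stdio.h>

/* Assumed event-type bits (Core spec extended advertising report). */
#define EVT_CONNECTABLE 0x0001
#define EVT_SCANNABLE   0x0002
#define EVT_DIRECTED    0x0004
#define EVT_SCAN_RSP    0x0008
#define EVT_LEGACY_PDU  0x0010

/* Derive a legacy PDU name from an extended event-type bit field. */
static const char *classify(uint16_t evt)
{
	if (evt & EVT_CONNECTABLE)
		return (evt & EVT_DIRECTED) ? "ADV_DIRECT_IND" : "ADV_IND";
	if (evt & EVT_SCAN_RSP)
		return "SCAN_RSP";
	if (evt & EVT_SCANNABLE)
		return "ADV_SCAN_IND";
	return "ADV_NONCONN_IND";
}

int main(void)
{
	const uint16_t samples[] = { 0x0013, 0x0001, 0x0000 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("0x%04x -> %s\n", samples[i], classify(samples[i]));
	return 0;
}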
static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
u8 num_reports = skb->data[0];
void *ptr = &skb->data[1];
hci_dev_lock(hdev);
while (num_reports--) {
struct hci_ev_le_ext_adv_report *ev = ptr;
u8 legacy_evt_type;
u16 evt_type;
evt_type = __le16_to_cpu(ev->evt_type);
legacy_evt_type = ext_evt_type_to_legacy(evt_type);
if (legacy_evt_type != LE_ADV_INVALID) {
process_adv_report(hdev, legacy_evt_type, &ev->bdaddr,
ev->bdaddr_type, NULL, 0, ev->rssi,
ev->data, ev->length);
}
ptr += sizeof(*ev) + ev->length + 1;
}
hci_dev_unlock(hdev);
}
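The loop above steps through num_reports variable-length records packed back to back, advancing by the fixed record size plus each record's data length. A self-contained userspace sketch of that walking pattern (the record layout here is a simplified assumption, not the exact hci_ev_le_ext_adv_report layout):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct report_hdr {			/* simplified, assumed layout */
	uint8_t addr[6];
	int8_t  rssi;
	uint8_t length;			/* bytes of data that follow */
} __attribute__((packed));

static void walk_reports(const uint8_t *buf, size_t buf_len)
{
	uint8_t num = buf[0];
	const uint8_t *ptr = buf + 1;

	while (num-- && (size_t)(ptr - buf) + sizeof(struct report_hdr) <= buf_len) {
		const struct report_hdr *hdr = (const void *)ptr;

		printf("rssi %d, %u data bytes\n", hdr->rssi,
		       (unsigned int)hdr->length);
		ptr += sizeof(*hdr) + hdr->length;	/* next record */
	}
}

int main(void)
{
	/* one report: zeroed address, rssi -60, three bytes of AD data */
	const uint8_t buf[] = { 1, 0, 0, 0, 0, 0, 0, (uint8_t)-60, 3,
				0x02, 0x01, 0x06 };

	walk_reports(buf, sizeof(buf));
	return 0;
}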
static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
struct sk_buff *skb) struct sk_buff *skb)
{ {
@ -5189,6 +5603,18 @@ static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_le_direct_adv_report_evt(hdev, skb); hci_le_direct_adv_report_evt(hdev, skb);
break; break;
case HCI_EV_LE_EXT_ADV_REPORT:
hci_le_ext_adv_report_evt(hdev, skb);
break;
case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
hci_le_enh_conn_complete_evt(hdev, skb);
break;
case HCI_EV_LE_EXT_ADV_SET_TERM:
hci_le_ext_adv_term_evt(hdev, skb);
break;
default: default:
break; break;
} }

View File

@ -647,11 +647,22 @@ void __hci_req_update_eir(struct hci_request *req)
void hci_req_add_le_scan_disable(struct hci_request *req)
{
struct hci_dev *hdev = req->hdev;
if (use_ext_scan(hdev)) {
struct hci_cp_le_set_ext_scan_enable cp;
memset(&cp, 0, sizeof(cp));
cp.enable = LE_SCAN_DISABLE;
hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
&cp);
} else {
struct hci_cp_le_set_scan_enable cp;
memset(&cp, 0, sizeof(cp));
cp.enable = LE_SCAN_DISABLE;
hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
}
static void add_to_white_list(struct hci_request *req, static void add_to_white_list(struct hci_request *req,
@ -767,10 +778,86 @@ static bool scan_use_rpa(struct hci_dev *hdev)
return hci_dev_test_flag(hdev, HCI_PRIVACY); return hci_dev_test_flag(hdev, HCI_PRIVACY);
} }
static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
u16 window, u8 own_addr_type, u8 filter_policy)
{
struct hci_dev *hdev = req->hdev;
/* Use ext scanning if set ext scan param and ext scan enable is
* supported
*/
if (use_ext_scan(hdev)) {
struct hci_cp_le_set_ext_scan_params *ext_param_cp;
struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
struct hci_cp_le_scan_phy_params *phy_params;
u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
u32 plen;
ext_param_cp = (void *)data;
phy_params = (void *)ext_param_cp->data;
memset(ext_param_cp, 0, sizeof(*ext_param_cp));
ext_param_cp->own_addr_type = own_addr_type;
ext_param_cp->filter_policy = filter_policy;
plen = sizeof(*ext_param_cp);
if (scan_1m(hdev) || scan_2m(hdev)) {
ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;
memset(phy_params, 0, sizeof(*phy_params));
phy_params->type = type;
phy_params->interval = cpu_to_le16(interval);
phy_params->window = cpu_to_le16(window);
plen += sizeof(*phy_params);
phy_params++;
}
if (scan_coded(hdev)) {
ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;
memset(phy_params, 0, sizeof(*phy_params));
phy_params->type = type;
phy_params->interval = cpu_to_le16(interval);
phy_params->window = cpu_to_le16(window);
plen += sizeof(*phy_params);
phy_params++;
}
hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
plen, ext_param_cp);
memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
ext_enable_cp.enable = LE_SCAN_ENABLE;
ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
sizeof(ext_enable_cp), &ext_enable_cp);
} else {
struct hci_cp_le_set_scan_param param_cp;
struct hci_cp_le_set_scan_enable enable_cp;
memset(&param_cp, 0, sizeof(param_cp));
param_cp.type = type;
param_cp.interval = cpu_to_le16(interval);
param_cp.window = cpu_to_le16(window);
param_cp.own_address_type = own_addr_type;
param_cp.filter_policy = filter_policy;
hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
&param_cp);
memset(&enable_cp, 0, sizeof(enable_cp));
enable_cp.enable = LE_SCAN_ENABLE;
enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
&enable_cp);
}
}
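The extended branch above emits one hci_cp_le_scan_phy_params block per PHY bit it sets in scanning_phys, so the command length grows with the number of PHYs. A hedged userspace sketch of that variable-length layout (struct names and PHY bit values are local assumptions, and a real HCI command would also need little-endian conversion of the 16-bit fields):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PHY_1M    0x01			/* assumed bit values */
#define PHY_CODED 0x04

struct scan_phy {			/* per-PHY block */
	uint8_t  type;
	uint16_t interval;
	uint16_t window;
} __attribute__((packed));

struct scan_params {			/* fixed header */
	uint8_t own_addr_type;
	uint8_t filter_policy;
	uint8_t scanning_phys;
	uint8_t data[];			/* one scan_phy per set PHY bit */
} __attribute__((packed));

int main(void)
{
	uint8_t buf[sizeof(struct scan_params) + 2 * sizeof(struct scan_phy)];
	struct scan_params *p = (void *)buf;
	struct scan_phy *phy = (void *)p->data;
	size_t len = sizeof(*p);

	memset(buf, 0, sizeof(buf));
	p->scanning_phys = PHY_1M | PHY_CODED;

	for (uint8_t bit = PHY_1M; bit <= PHY_CODED; bit <<= 1) {
		if (!(p->scanning_phys & bit))
			continue;
		phy->type = 0x00;		/* passive scan */
		phy->interval = 0x0060;		/* 60 ms in 0.625 ms units */
		phy->window = 0x0030;
		len += sizeof(*phy);
		phy++;
	}

	printf("command length: %zu bytes\n", len);
	return 0;
}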
void hci_req_add_le_passive_scan(struct hci_request *req)
{
struct hci_dev *hdev = req->hdev;
u8 own_addr_type;
u8 filter_policy;
@ -804,20 +891,26 @@ void hci_req_add_le_passive_scan(struct hci_request *req)
(hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
filter_policy |= 0x02;
hci_req_start_scan(req, LE_SCAN_PASSIVE, hdev->le_scan_interval,
hdev->le_scan_window, own_addr_type, filter_policy);
}
static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
{
struct adv_info *adv_instance;
/* Ignore instance 0 */
if (instance == 0x00)
return 0;
adv_instance = hci_find_adv_instance(hdev, instance);
if (!adv_instance)
return 0;
/* TODO: Take into account the "appearance" and "local-name" flags here.
* These are currently being ignored as they are not supported.
*/
return adv_instance->scan_rsp_len;
} }
static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev) static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
@ -841,9 +934,19 @@ static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
void __hci_req_disable_advertising(struct hci_request *req)
{
if (ext_adv_capable(req->hdev)) {
struct hci_cp_le_set_ext_adv_enable cp;
cp.enable = 0x00;
/* Disable all sets since we only support one set at the moment */
cp.num_of_sets = 0x00;
hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp), &cp);
} else {
u8 enable = 0x00;
hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
}
static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance) static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
@ -1081,29 +1184,58 @@ static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
struct hci_dev *hdev = req->hdev;
u8 len;
if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
return;
if (ext_adv_capable(hdev)) {
struct hci_cp_le_set_ext_scan_rsp_data cp;
memset(&cp, 0, sizeof(cp));
if (instance)
len = create_instance_scan_rsp_data(hdev, instance,
cp.data);
else
len = create_default_scan_rsp_data(hdev, cp.data);
if (hdev->scan_rsp_data_len == len &&
!memcmp(cp.data, hdev->scan_rsp_data, len))
return;
memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
hdev->scan_rsp_data_len = len;
cp.handle = 0;
cp.length = len;
cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
&cp);
} else {
struct hci_cp_le_set_scan_rsp_data cp;
memset(&cp, 0, sizeof(cp));
if (instance)
len = create_instance_scan_rsp_data(hdev, instance,
cp.data);
else
len = create_default_scan_rsp_data(hdev, cp.data);
if (hdev->scan_rsp_data_len == len &&
!memcmp(cp.data, hdev->scan_rsp_data, len))
return;
memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
hdev->scan_rsp_data_len = len;
cp.length = len;
hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}
}
static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr) static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
@ -1160,15 +1292,27 @@ static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
ptr += adv_instance->adv_data_len;
}
if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
s8 adv_tx_power;
if (ext_adv_capable(hdev)) {
if (adv_instance)
adv_tx_power = adv_instance->tx_power;
else
adv_tx_power = hdev->adv_tx_power;
} else {
adv_tx_power = hdev->adv_tx_power;
}
/* Provide Tx Power only if we can provide a valid value for it */
if (adv_tx_power != HCI_TX_POWER_INVALID) {
ptr[0] = 0x02;
ptr[1] = EIR_TX_POWER;
ptr[2] = (u8)adv_tx_power;
ad_len += 3;
ptr += 3;
}
}
return ad_len;
@ -1177,27 +1321,51 @@ static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
struct hci_dev *hdev = req->hdev;
u8 len;
if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
return;
if (ext_adv_capable(hdev)) {
struct hci_cp_le_set_ext_adv_data cp;
memset(&cp, 0, sizeof(cp));
len = create_instance_adv_data(hdev, instance, cp.data);
/* There's nothing to do if the data hasn't changed */
if (hdev->adv_data_len == len &&
memcmp(cp.data, hdev->adv_data, len) == 0)
return;
memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
hdev->adv_data_len = len;
cp.length = len;
cp.handle = 0;
cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
} else {
struct hci_cp_le_set_adv_data cp;
memset(&cp, 0, sizeof(cp));
len = create_instance_adv_data(hdev, instance, cp.data);
/* There's nothing to do if the data hasn't changed */
if (hdev->adv_data_len == len &&
memcmp(cp.data, hdev->adv_data, len) == 0)
return;
memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
hdev->adv_data_len = len;
cp.length = len;
hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
}
int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance) int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
@ -1229,9 +1397,13 @@ void hci_req_reenable_advertising(struct hci_dev *hdev)
__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
true);
} else {
if (ext_adv_capable(hdev)) {
__hci_req_start_ext_adv(&req, 0x00);
} else {
__hci_req_update_adv_data(&req, 0x00);
__hci_req_update_scan_rsp_data(&req, 0x00);
__hci_req_enable_advertising(&req);
}
} }
hci_req_run(&req, adv_enable_complete); hci_req_run(&req, adv_enable_complete);
@ -1268,6 +1440,245 @@ unlock:
hci_dev_unlock(hdev); hci_dev_unlock(hdev);
} }
int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
bool use_rpa, struct adv_info *adv_instance,
u8 *own_addr_type, bdaddr_t *rand_addr)
{
int err;
bacpy(rand_addr, BDADDR_ANY);
/* If privacy is enabled use a resolvable private address. If
* current RPA has expired then generate a new one.
*/
if (use_rpa) {
int to;
*own_addr_type = ADDR_LE_DEV_RANDOM;
if (adv_instance) {
if (!adv_instance->rpa_expired &&
!bacmp(&adv_instance->random_addr, &hdev->rpa))
return 0;
adv_instance->rpa_expired = false;
} else {
if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
!bacmp(&hdev->random_addr, &hdev->rpa))
return 0;
}
err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
if (err < 0) {
BT_ERR("%s failed to generate new RPA", hdev->name);
return err;
}
bacpy(rand_addr, &hdev->rpa);
to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
if (adv_instance)
queue_delayed_work(hdev->workqueue,
&adv_instance->rpa_expired_cb, to);
else
queue_delayed_work(hdev->workqueue,
&hdev->rpa_expired, to);
return 0;
}
/* In case of required privacy without resolvable private address,
* use an non-resolvable private address. This is useful for
* non-connectable advertising.
*/
if (require_privacy) {
bdaddr_t nrpa;
while (true) {
/* The non-resolvable private address is generated
* from random six bytes with the two most significant
* bits cleared.
*/
get_random_bytes(&nrpa, 6);
nrpa.b[5] &= 0x3f;
/* The non-resolvable private address shall not be
* equal to the public address.
*/
if (bacmp(&hdev->bdaddr, &nrpa))
break;
}
*own_addr_type = ADDR_LE_DEV_RANDOM;
bacpy(rand_addr, &nrpa);
return 0;
}
/* No privacy so use a public address. */
*own_addr_type = ADDR_LE_DEV_PUBLIC;
return 0;
}
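The non-resolvable fallback above is just six random bytes with the two most significant bits of the address cleared (the address is stored little-endian, so b[5] carries those bits), re-rolled if it collides with the public address. A small userspace sketch of the same derivation; rand() stands in for get_random_bytes() and is not a cryptographic source:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int main(void)
{
	uint8_t nrpa[6];

	srand((unsigned int)time(NULL));	/* illustrative randomness only */
	for (int i = 0; i < 6; i++)
		nrpa[i] = (uint8_t)rand();

	nrpa[5] &= 0x3f;	/* clear the two MSBs: non-resolvable private */

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       nrpa[5], nrpa[4], nrpa[3], nrpa[2], nrpa[1], nrpa[0]);
	return 0;
}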
void __hci_req_clear_ext_adv_sets(struct hci_request *req)
{
hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
}
int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
{
struct hci_cp_le_set_ext_adv_params cp;
struct hci_dev *hdev = req->hdev;
bool connectable;
u32 flags;
bdaddr_t random_addr;
u8 own_addr_type;
int err;
struct adv_info *adv_instance;
bool secondary_adv;
/* In ext adv set param interval is 3 octets */
const u8 adv_interval[3] = { 0x00, 0x08, 0x00 };
if (instance > 0) {
adv_instance = hci_find_adv_instance(hdev, instance);
if (!adv_instance)
return -EINVAL;
} else {
adv_instance = NULL;
}
flags = get_adv_instance_flags(hdev, instance);
/* If the "connectable" instance flag was not set, then choose between
* ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
*/
connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
mgmt_get_connectable(hdev);
if (!is_advertising_allowed(hdev, connectable))
return -EPERM;
/* Set require_privacy to true only when non-connectable
* advertising is used. In that case it is fine to use a
* non-resolvable private address.
*/
err = hci_get_random_address(hdev, !connectable,
adv_use_rpa(hdev, flags), adv_instance,
&own_addr_type, &random_addr);
if (err < 0)
return err;
memset(&cp, 0, sizeof(cp));
memcpy(cp.min_interval, adv_interval, sizeof(cp.min_interval));
memcpy(cp.max_interval, adv_interval, sizeof(cp.max_interval));
secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
if (connectable) {
if (secondary_adv)
cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
else
cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
} else if (get_adv_instance_scan_rsp_len(hdev, instance)) {
if (secondary_adv)
cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
else
cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
} else {
if (secondary_adv)
cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
else
cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
}
cp.own_addr_type = own_addr_type;
cp.channel_map = hdev->le_adv_channel_map;
cp.tx_power = 127;
cp.handle = 0;
if (flags & MGMT_ADV_FLAG_SEC_2M) {
cp.primary_phy = HCI_ADV_PHY_1M;
cp.secondary_phy = HCI_ADV_PHY_2M;
} else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
cp.primary_phy = HCI_ADV_PHY_CODED;
cp.secondary_phy = HCI_ADV_PHY_CODED;
} else {
/* In all other cases use 1M */
cp.primary_phy = HCI_ADV_PHY_1M;
cp.secondary_phy = HCI_ADV_PHY_1M;
}
hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
if (own_addr_type == ADDR_LE_DEV_RANDOM &&
bacmp(&random_addr, BDADDR_ANY)) {
struct hci_cp_le_set_adv_set_rand_addr cp;
/* Check if random address need to be updated */
if (adv_instance) {
if (!bacmp(&random_addr, &adv_instance->random_addr))
return 0;
} else {
if (!bacmp(&random_addr, &hdev->random_addr))
return 0;
}
memset(&cp, 0, sizeof(cp));
cp.handle = 0;
bacpy(&cp.bdaddr, &random_addr);
hci_req_add(req,
HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
sizeof(cp), &cp);
}
return 0;
}
void __hci_req_enable_ext_advertising(struct hci_request *req)
{
struct hci_cp_le_set_ext_adv_enable *cp;
struct hci_cp_ext_adv_set *adv_set;
u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
cp = (void *) data;
adv_set = (void *) cp->data;
memset(cp, 0, sizeof(*cp));
cp->enable = 0x01;
cp->num_of_sets = 0x01;
memset(adv_set, 0, sizeof(*adv_set));
adv_set->handle = 0;
hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
data);
}
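The enable command built above is a short header followed by one per-set entry for each advertising set being enabled; only set 0 is used here, with duration and max events left at zero (no limit). A hedged userspace sketch of that buffer layout (field names and widths are simplified assumptions):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct enable_hdr {
	uint8_t enable;
	uint8_t num_of_sets;
	uint8_t data[];			/* num_of_sets entries follow */
} __attribute__((packed));

struct adv_set {
	uint8_t  handle;
	uint16_t duration;		/* 0 = advertise until disabled */
	uint8_t  max_events;		/* 0 = no event limit */
} __attribute__((packed));

int main(void)
{
	uint8_t buf[sizeof(struct enable_hdr) + sizeof(struct adv_set)];
	struct enable_hdr *hdr = (void *)buf;
	struct adv_set *set = (void *)hdr->data;

	memset(buf, 0, sizeof(buf));
	hdr->enable = 0x01;
	hdr->num_of_sets = 0x01;	/* a single advertising set */
	set->handle = 0x00;

	printf("payload: %zu bytes\n", sizeof(buf));
	return 0;
}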
int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
{
struct hci_dev *hdev = req->hdev;
int err;
if (hci_dev_test_flag(hdev, HCI_LE_ADV))
__hci_req_disable_advertising(req);
err = __hci_req_setup_ext_adv_instance(req, instance);
if (err < 0)
return err;
__hci_req_update_scan_rsp_data(req, instance);
__hci_req_enable_ext_advertising(req);
return 0;
}
int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance, int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
bool force) bool force)
{ {
@ -1321,9 +1732,13 @@ int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
return 0; return 0;
hdev->cur_adv_instance = instance; hdev->cur_adv_instance = instance;
__hci_req_update_adv_data(req, instance); if (ext_adv_capable(hdev)) {
__hci_req_update_scan_rsp_data(req, instance); __hci_req_start_ext_adv(req, instance);
__hci_req_enable_advertising(req); } else {
__hci_req_update_adv_data(req, instance);
__hci_req_update_scan_rsp_data(req, instance);
__hci_req_enable_advertising(req);
}
return 0; return 0;
} }
@ -1594,8 +2009,12 @@ static int connectable_update(struct hci_request *req, unsigned long opt)
/* Update the advertising parameters if necessary */ /* Update the advertising parameters if necessary */
if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
!list_empty(&hdev->adv_instances)) !list_empty(&hdev->adv_instances)) {
__hci_req_enable_advertising(req); if (ext_adv_capable(hdev))
__hci_req_start_ext_adv(req, hdev->cur_adv_instance);
else
__hci_req_enable_advertising(req);
}
__hci_update_background_scan(req); __hci_update_background_scan(req);
@ -1704,8 +2123,12 @@ static int discoverable_update(struct hci_request *req, unsigned long opt)
/* Discoverable mode affects the local advertising /* Discoverable mode affects the local advertising
* address in limited privacy mode. * address in limited privacy mode.
*/ */
if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
__hci_req_enable_advertising(req); if (ext_adv_capable(hdev))
__hci_req_start_ext_adv(req, 0x00);
else
__hci_req_enable_advertising(req);
}
} }
hci_dev_unlock(hdev); hci_dev_unlock(hdev);
@ -1940,7 +2363,6 @@ discov_stopped:
static int le_scan_restart(struct hci_request *req, unsigned long opt)
{
struct hci_dev *hdev = req->hdev;
/* If controller is not scanning we are done. */
if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
@ -1948,10 +2370,23 @@ static int le_scan_restart(struct hci_request *req, unsigned long opt)
hci_req_add_le_scan_disable(req);
if (use_ext_scan(hdev)) {
struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
ext_enable_cp.enable = LE_SCAN_ENABLE;
ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
sizeof(ext_enable_cp), &ext_enable_cp);
} else {
struct hci_cp_le_set_scan_enable cp;
memset(&cp, 0, sizeof(cp));
cp.enable = LE_SCAN_ENABLE;
cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
return 0; return 0;
} }
@ -2010,8 +2445,6 @@ static int active_scan(struct hci_request *req, unsigned long opt)
{
uint16_t interval = opt;
struct hci_dev *hdev = req->hdev;
u8 own_addr_type;
int err;
@ -2050,22 +2483,8 @@ static int active_scan(struct hci_request *req, unsigned long opt)
if (err < 0)
own_addr_type = ADDR_LE_DEV_PUBLIC;
hci_req_start_scan(req, LE_SCAN_ACTIVE, interval, DISCOV_LE_SCAN_WIN,
own_addr_type, 0);
return 0;
}
@ -2302,11 +2721,26 @@ static int powered_update_hci(struct hci_request *req, unsigned long opt)
*/ */
if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
list_empty(&hdev->adv_instances)) { list_empty(&hdev->adv_instances)) {
__hci_req_update_adv_data(req, 0x00); int err;
__hci_req_update_scan_rsp_data(req, 0x00);
if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) if (ext_adv_capable(hdev)) {
__hci_req_enable_advertising(req); err = __hci_req_setup_ext_adv_instance(req,
0x00);
if (!err)
__hci_req_update_scan_rsp_data(req,
0x00);
} else {
err = 0;
__hci_req_update_adv_data(req, 0x00);
__hci_req_update_scan_rsp_data(req, 0x00);
}
if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
if (!ext_adv_capable(hdev))
__hci_req_enable_advertising(req);
else if (!err)
__hci_req_enable_ext_advertising(req);
}
} else if (!list_empty(&hdev->adv_instances)) { } else if (!list_empty(&hdev->adv_instances)) {
struct adv_info *adv_instance; struct adv_info *adv_instance;

View File

@ -80,6 +80,14 @@ void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
struct hci_request *req, u8 instance, struct hci_request *req, u8 instance,
bool force); bool force);
int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance);
int __hci_req_start_ext_adv(struct hci_request *req, u8 instance);
void __hci_req_enable_ext_advertising(struct hci_request *req);
void __hci_req_clear_ext_adv_sets(struct hci_request *req);
int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
bool use_rpa, struct adv_info *adv_instance,
u8 *own_addr_type, bdaddr_t *rand_addr);
void __hci_req_update_class(struct hci_request *req); void __hci_req_update_class(struct hci_request *req);
/* Returns true if HCI commands were queued */ /* Returns true if HCI commands were queued */

View File

@ -431,8 +431,8 @@ static void hidp_del_timer(struct hidp_session *session)
del_timer(&session->timer);
}
static void hidp_process_report(struct hidp_session *session, int type,
const u8 *data, unsigned int len, int intr)
{
if (len > HID_MAX_BUFFER_SIZE)
len = HID_MAX_BUFFER_SIZE;
@ -775,7 +775,7 @@ static int hidp_setup_hid(struct hidp_session *session,
hid->version = req->version;
hid->country = req->country;
strncpy(hid->name, req->name, sizeof(hid->name));
snprintf(hid->phys, sizeof(hid->phys), "%pMR",
&l2cap_pi(session->ctrl_sock->sk)->chan->src);

View File

@ -617,6 +617,127 @@ static int read_config_info(struct sock *sk, struct hci_dev *hdev,
&rp, sizeof(rp)); &rp, sizeof(rp));
} }
static u32 get_supported_phys(struct hci_dev *hdev)
{
u32 supported_phys = 0;
if (lmp_bredr_capable(hdev)) {
supported_phys |= MGMT_PHY_BR_1M_1SLOT;
if (hdev->features[0][0] & LMP_3SLOT)
supported_phys |= MGMT_PHY_BR_1M_3SLOT;
if (hdev->features[0][0] & LMP_5SLOT)
supported_phys |= MGMT_PHY_BR_1M_5SLOT;
if (lmp_edr_2m_capable(hdev)) {
supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
if (lmp_edr_3slot_capable(hdev))
supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
if (lmp_edr_5slot_capable(hdev))
supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
if (lmp_edr_3m_capable(hdev)) {
supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
if (lmp_edr_3slot_capable(hdev))
supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
if (lmp_edr_5slot_capable(hdev))
supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
}
}
}
if (lmp_le_capable(hdev)) {
supported_phys |= MGMT_PHY_LE_1M_TX;
supported_phys |= MGMT_PHY_LE_1M_RX;
if (hdev->le_features[1] & HCI_LE_PHY_2M) {
supported_phys |= MGMT_PHY_LE_2M_TX;
supported_phys |= MGMT_PHY_LE_2M_RX;
}
if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
supported_phys |= MGMT_PHY_LE_CODED_TX;
supported_phys |= MGMT_PHY_LE_CODED_RX;
}
}
return supported_phys;
}
static u32 get_selected_phys(struct hci_dev *hdev)
{
u32 selected_phys = 0;
if (lmp_bredr_capable(hdev)) {
selected_phys |= MGMT_PHY_BR_1M_1SLOT;
if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
selected_phys |= MGMT_PHY_BR_1M_3SLOT;
if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
selected_phys |= MGMT_PHY_BR_1M_5SLOT;
if (lmp_edr_2m_capable(hdev)) {
if (!(hdev->pkt_type & HCI_2DH1))
selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
if (lmp_edr_3slot_capable(hdev) &&
!(hdev->pkt_type & HCI_2DH3))
selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
if (lmp_edr_5slot_capable(hdev) &&
!(hdev->pkt_type & HCI_2DH5))
selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
if (lmp_edr_3m_capable(hdev)) {
if (!(hdev->pkt_type & HCI_3DH1))
selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
if (lmp_edr_3slot_capable(hdev) &&
!(hdev->pkt_type & HCI_3DH3))
selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
if (lmp_edr_5slot_capable(hdev) &&
!(hdev->pkt_type & HCI_3DH5))
selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
}
}
}
if (lmp_le_capable(hdev)) {
if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
selected_phys |= MGMT_PHY_LE_1M_TX;
if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
selected_phys |= MGMT_PHY_LE_1M_RX;
if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
selected_phys |= MGMT_PHY_LE_2M_TX;
if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
selected_phys |= MGMT_PHY_LE_2M_RX;
if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
selected_phys |= MGMT_PHY_LE_CODED_TX;
if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
selected_phys |= MGMT_PHY_LE_CODED_RX;
}
return selected_phys;
}
static u32 get_configurable_phys(struct hci_dev *hdev)
{
return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
}
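Editor's note: the three helpers above partition the PHY space. get_supported_phys() reports everything the controller can do, get_selected_phys() reports what is currently in effect, and get_configurable_phys() is the supported set minus the PHYs that can never be switched off (BR/EDR 1M 1-slot and LE 1M TX/RX). set_phy_configuration() later rejects any request that strays outside the supported set or tries to drop a non-configurable PHY. The following standalone sketch illustrates that relationship; the bit values are placeholders for illustration only, not the MGMT_PHY_* constants from mgmt.h.

/* Standalone sketch; placeholder bit values, not the mgmt.h definitions. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PHY_BR_1M_1SLOT 0x0001   /* mandatory, never configurable */
#define PHY_BR_1M_3SLOT 0x0002
#define PHY_LE_1M_TX    0x0200   /* mandatory */
#define PHY_LE_1M_RX    0x0400
#define PHY_LE_2M_TX    0x0800

static bool phy_selection_valid(uint32_t supported, uint32_t configurable,
                                uint32_t selected)
{
	uint32_t mandatory = supported & ~configurable;

	/* Same two rules as the checks in set_phy_configuration(): nothing
	 * outside the supported set, and every non-configurable PHY must
	 * remain selected.
	 */
	if (selected & ~supported)
		return false;
	return (selected & mandatory) == mandatory;
}

int main(void)
{
	uint32_t supported = PHY_BR_1M_1SLOT | PHY_BR_1M_3SLOT |
	                     PHY_LE_1M_TX | PHY_LE_1M_RX | PHY_LE_2M_TX;
	uint32_t configurable = supported & ~PHY_BR_1M_1SLOT &
	                        ~PHY_LE_1M_TX & ~PHY_LE_1M_RX;

	/* Dropping LE 1M RX is rejected (prints 0); dropping LE 2M TX is
	 * allowed (prints 1).
	 */
	printf("%d\n", phy_selection_valid(supported, configurable,
	                                   supported & ~PHY_LE_1M_RX));
	printf("%d\n", phy_selection_valid(supported, configurable,
	                                   supported & ~PHY_LE_2M_TX));
	return 0;
}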
static u32 get_supported_settings(struct hci_dev *hdev)
{
u32 settings = 0;
@ -654,6 +775,8 @@ static u32 get_supported_settings(struct hci_dev *hdev)
hdev->set_bdaddr)
settings |= MGMT_SETTING_CONFIGURATION;
settings |= MGMT_SETTING_PHY_CONFIGURATION;
return settings;
}
@ -817,7 +940,10 @@ static void rpa_expired(struct work_struct *work)
* function.
*/
hci_req_init(&req, hdev);
__hci_req_enable_advertising(&req);
if (ext_adv_capable(hdev))
__hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
else
__hci_req_enable_advertising(&req);
hci_req_run(&req, NULL);
}
@ -1721,10 +1847,17 @@ static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
*/
if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
struct hci_request req;
hci_req_init(&req, hdev);
__hci_req_update_adv_data(&req, 0x00);
__hci_req_update_scan_rsp_data(&req, 0x00);
if (ext_adv_capable(hdev)) {
int err;
err = __hci_req_setup_ext_adv_instance(&req, 0x00);
if (!err)
__hci_req_update_scan_rsp_data(&req, 0x00);
} else {
__hci_req_update_adv_data(&req, 0x00);
__hci_req_update_scan_rsp_data(&req, 0x00);
}
hci_req_run(&req, NULL);
hci_update_background_scan(hdev);
}
@ -1823,6 +1956,9 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
} else {
if (hci_dev_test_flag(hdev, HCI_LE_ADV))
__hci_req_disable_advertising(&req);
if (ext_adv_capable(hdev))
__hci_req_clear_ext_adv_sets(&req);
}
hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
@ -3184,6 +3320,228 @@ static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
return err;
}
static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
{
struct mgmt_rp_get_phy_confguration rp;
BT_DBG("sock %p %s", sk, hdev->name);
hci_dev_lock(hdev);
memset(&rp, 0, sizeof(rp));
rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
hci_dev_unlock(hdev);
return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
&rp, sizeof(rp));
}
int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
{
struct mgmt_ev_phy_configuration_changed ev;
memset(&ev, 0, sizeof(ev));
ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
sizeof(ev), skip);
}
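Editor's note: the PHY Configuration Changed event above carries a single little-endian 32-bit field, the currently selected PHY mask. A minimal sketch of decoding that payload on the receiving side follows; the byte values are made up for illustration.

/* Sketch: converting the 4-byte selected_phys payload to host order. */
#include <stdint.h>
#include <stdio.h>

static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	/* Example payload: 0x000005ff stored little-endian */
	const uint8_t payload[4] = { 0xff, 0x05, 0x00, 0x00 };

	printf("selected_phys = 0x%08x\n", get_le32(payload));
	return 0;
}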
static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
u16 opcode, struct sk_buff *skb)
{
struct mgmt_cp_set_phy_confguration *cp;
struct mgmt_pending_cmd *cmd;
BT_DBG("status 0x%02x", status);
hci_dev_lock(hdev);
cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
if (!cmd)
goto unlock;
cp = cmd->param;
if (status) {
mgmt_cmd_status(cmd->sk, hdev->id,
MGMT_OP_SET_PHY_CONFIGURATION,
mgmt_status(status));
} else {
mgmt_cmd_complete(cmd->sk, hdev->id,
MGMT_OP_SET_PHY_CONFIGURATION, 0,
NULL, 0);
mgmt_phy_configuration_changed(hdev, cmd->sk);
}
mgmt_pending_remove(cmd);
unlock:
hci_dev_unlock(hdev);
}
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
{
struct mgmt_cp_set_phy_confguration *cp = data;
struct hci_cp_le_set_default_phy cp_phy;
struct mgmt_pending_cmd *cmd;
struct hci_request req;
u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
u16 pkt_type = (HCI_DH1 | HCI_DM1);
bool changed = false;
int err;
BT_DBG("sock %p %s", sk, hdev->name);
configurable_phys = get_configurable_phys(hdev);
supported_phys = get_supported_phys(hdev);
selected_phys = __le32_to_cpu(cp->selected_phys);
if (selected_phys & ~supported_phys)
return mgmt_cmd_status(sk, hdev->id,
MGMT_OP_SET_PHY_CONFIGURATION,
MGMT_STATUS_INVALID_PARAMS);
unconfigure_phys = supported_phys & ~configurable_phys;
if ((selected_phys & unconfigure_phys) != unconfigure_phys)
return mgmt_cmd_status(sk, hdev->id,
MGMT_OP_SET_PHY_CONFIGURATION,
MGMT_STATUS_INVALID_PARAMS);
if (selected_phys == get_selected_phys(hdev))
return mgmt_cmd_complete(sk, hdev->id,
MGMT_OP_SET_PHY_CONFIGURATION,
0, NULL, 0);
hci_dev_lock(hdev);
if (!hdev_is_powered(hdev)) {
err = mgmt_cmd_status(sk, hdev->id,
MGMT_OP_SET_PHY_CONFIGURATION,
MGMT_STATUS_REJECTED);
goto unlock;
}
if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
err = mgmt_cmd_status(sk, hdev->id,
MGMT_OP_SET_PHY_CONFIGURATION,
MGMT_STATUS_BUSY);
goto unlock;
}
if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
pkt_type |= (HCI_DH3 | HCI_DM3);
else
pkt_type &= ~(HCI_DH3 | HCI_DM3);
if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
pkt_type |= (HCI_DH5 | HCI_DM5);
else
pkt_type &= ~(HCI_DH5 | HCI_DM5);
if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
pkt_type &= ~HCI_2DH1;
else
pkt_type |= HCI_2DH1;
if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
pkt_type &= ~HCI_2DH3;
else
pkt_type |= HCI_2DH3;
if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
pkt_type &= ~HCI_2DH5;
else
pkt_type |= HCI_2DH5;
if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
pkt_type &= ~HCI_3DH1;
else
pkt_type |= HCI_3DH1;
if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
pkt_type &= ~HCI_3DH3;
else
pkt_type |= HCI_3DH3;
if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
pkt_type &= ~HCI_3DH5;
else
pkt_type |= HCI_3DH5;
if (pkt_type != hdev->pkt_type) {
hdev->pkt_type = pkt_type;
changed = true;
}
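Editor's note: the BR/EDR mapping above inverts its sense halfway through. The DM/DH bits in the HCI packet-type mask enable a packet type, while the EDR bits (2-DHx/3-DHx) mean "shall not be used", so selecting an EDR PHY clears the corresponding bit and deselecting it sets the bit. A standalone sketch of that translation for the 2M 1-slot case is shown below; the constants mirror the hci.h/mgmt.h ones but should be treated as assumptions.

/* Sketch of the PHY -> packet-type translation; constants are assumptions. */
#include <stdint.h>
#include <stdio.h>

#define PKT_DM1            0x0008
#define PKT_DH1            0x0010
#define PKT_2DH1           0x0002   /* "shall not be used" bit */
#define PHY_EDR_2M_1SLOT   0x0008

static uint16_t map_phys_to_pkt_type(uint32_t selected_phys)
{
	uint16_t pkt_type = PKT_DM1 | PKT_DH1;   /* 1-slot BR always allowed */

	if (selected_phys & PHY_EDR_2M_1SLOT)
		pkt_type &= ~PKT_2DH1;   /* allow 2-DH1 by clearing the bit */
	else
		pkt_type |= PKT_2DH1;    /* forbid 2-DH1 by setting the bit */

	return pkt_type;
}

int main(void)
{
	printf("with 2M 1-slot:    0x%04x\n",
	       map_phys_to_pkt_type(PHY_EDR_2M_1SLOT));
	printf("without 2M 1-slot: 0x%04x\n", map_phys_to_pkt_type(0));
	return 0;
}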
if ((selected_phys & MGMT_PHY_LE_MASK) ==
(get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
if (changed)
mgmt_phy_configuration_changed(hdev, sk);
err = mgmt_cmd_complete(sk, hdev->id,
MGMT_OP_SET_PHY_CONFIGURATION,
0, NULL, 0);
goto unlock;
}
cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
len);
if (!cmd) {
err = -ENOMEM;
goto unlock;
}
hci_req_init(&req, hdev);
memset(&cp_phy, 0, sizeof(cp_phy));
if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
cp_phy.all_phys |= 0x01;
if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
cp_phy.all_phys |= 0x02;
if (selected_phys & MGMT_PHY_LE_1M_TX)
cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
if (selected_phys & MGMT_PHY_LE_2M_TX)
cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
if (selected_phys & MGMT_PHY_LE_CODED_TX)
cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
if (selected_phys & MGMT_PHY_LE_1M_RX)
cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
if (selected_phys & MGMT_PHY_LE_2M_RX)
cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
if (selected_phys & MGMT_PHY_LE_CODED_RX)
cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);
err = hci_req_run_skb(&req, set_default_phy_complete);
if (err < 0)
mgmt_pending_remove(cmd);
unlock:
hci_dev_unlock(hdev);
return err;
}
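Editor's note: for the LE half, set_phy_configuration() translates the request into an HCI LE Set Default PHY command. When no LE TX (or RX) PHY is selected at all, the corresponding all_phys bit tells the controller the host has no preference in that direction; otherwise tx_phys/rx_phys carry the explicit preferences. A standalone sketch of that mapping follows, with placeholder constants standing in for the MGMT_PHY_LE_* and HCI_LE_SET_PHY_* definitions.

/* Sketch of the selected-PHY -> LE Set Default PHY mapping; placeholder
 * constants, not the hci.h/mgmt.h values.
 */
#include <stdint.h>
#include <stdio.h>

#define PHY_LE_1M_TX   0x0200
#define PHY_LE_1M_RX   0x0400
#define PHY_LE_2M_TX   0x0800
#define PHY_LE_2M_RX   0x1000
#define PHY_LE_TX_MASK (PHY_LE_1M_TX | PHY_LE_2M_TX)
#define PHY_LE_RX_MASK (PHY_LE_1M_RX | PHY_LE_2M_RX)

#define LE_PHY_1M 0x01
#define LE_PHY_2M 0x02

struct le_set_default_phy {
	uint8_t all_phys;   /* bit 0: no TX preference, bit 1: no RX preference */
	uint8_t tx_phys;
	uint8_t rx_phys;
};

static struct le_set_default_phy build_cmd(uint32_t selected)
{
	struct le_set_default_phy cp = { 0, 0, 0 };

	if (!(selected & PHY_LE_TX_MASK))
		cp.all_phys |= 0x01;
	if (!(selected & PHY_LE_RX_MASK))
		cp.all_phys |= 0x02;

	if (selected & PHY_LE_1M_TX)
		cp.tx_phys |= LE_PHY_1M;
	if (selected & PHY_LE_2M_TX)
		cp.tx_phys |= LE_PHY_2M;
	if (selected & PHY_LE_1M_RX)
		cp.rx_phys |= LE_PHY_1M;
	if (selected & PHY_LE_2M_RX)
		cp.rx_phys |= LE_PHY_2M;

	return cp;
}

int main(void)
{
	struct le_set_default_phy cp =
		build_cmd(PHY_LE_2M_TX | PHY_LE_1M_RX | PHY_LE_2M_RX);

	printf("all_phys 0x%02x tx 0x%02x rx 0x%02x\n",
	       cp.all_phys, cp.tx_phys, cp.rx_phys);
	return 0;
}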
static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
u16 opcode, struct sk_buff *skb)
{
@ -4037,9 +4395,14 @@ static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
* HCI_ADVERTISING flag is not yet set.
*/
hdev->cur_adv_instance = 0x00;
__hci_req_update_adv_data(&req, 0x00);
__hci_req_update_scan_rsp_data(&req, 0x00);
__hci_req_enable_advertising(&req);
if (ext_adv_capable(hdev)) {
__hci_req_start_ext_adv(&req, 0x00);
} else {
__hci_req_update_adv_data(&req, 0x00);
__hci_req_update_scan_rsp_data(&req, 0x00);
__hci_req_enable_advertising(&req);
}
} else {
__hci_req_disable_advertising(&req);
}
@ -4609,6 +4972,7 @@ static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
hci_adv_instances_set_rpa_expired(hdev, true);
if (cp->privacy == 0x02)
hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
else
@ -4617,6 +4981,7 @@ static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
memset(hdev->irk, 0, sizeof(hdev->irk));
hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
hci_adv_instances_set_rpa_expired(hdev, false);
hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
}
@ -5967,9 +6332,23 @@ static u32 get_supported_adv_flags(struct hci_dev *hdev)
flags |= MGMT_ADV_FLAG_APPEARANCE;
flags |= MGMT_ADV_FLAG_LOCAL_NAME;
if (hdev->adv_tx_power != HCI_TX_POWER_INVALID)
/* In extended adv TX_POWER returned from Set Adv Param
* will be always valid.
*/
if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
ext_adv_capable(hdev))
flags |= MGMT_ADV_FLAG_TX_POWER;
if (ext_adv_capable(hdev)) {
flags |= MGMT_ADV_FLAG_SEC_1M;
if (hdev->le_features[1] & HCI_LE_PHY_2M)
flags |= MGMT_ADV_FLAG_SEC_2M;
if (hdev->le_features[1] & HCI_LE_PHY_CODED)
flags |= MGMT_ADV_FLAG_SEC_CODED;
}
return flags;
}
@ -6175,7 +6554,7 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev,
struct mgmt_cp_add_advertising *cp = data;
struct mgmt_rp_add_advertising rp;
u32 flags;
u32 supported_flags;
u32 supported_flags, phy_flags;
u8 status;
u16 timeout, duration;
unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
@ -6205,10 +6584,12 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev,
duration = __le16_to_cpu(cp->duration);
/* The current implementation only supports a subset of the specified
* flags.
* flags. Also need to check mutual exclusiveness of sec flags.
*/
supported_flags = get_supported_adv_flags(hdev);
if (flags & ~supported_flags)
phy_flags = flags & MGMT_ADV_FLAG_SEC_MASK;
if (flags & ~supported_flags ||
((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
MGMT_STATUS_INVALID_PARAMS);
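Editor's note: the mutual-exclusiveness test above relies on a bit trick: phy_flags & -phy_flags isolates the lowest set bit, so phy_flags ^ (phy_flags & -phy_flags) is non-zero exactly when more than one secondary-PHY flag is set. A standalone sketch of the same test:

/* "At most one bit set" check, shown in isolation. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool more_than_one_bit(uint32_t x)
{
	return x && (x ^ (x & -x));
}

int main(void)
{
	printf("%d %d %d\n",
	       more_than_one_bit(0x0),   /* 0: no bits set   */
	       more_than_one_bit(0x4),   /* 0: single bit    */
	       more_than_one_bit(0x6));  /* 1: two bits set  */
	return 0;
}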
@ -6544,6 +6925,8 @@ static const struct hci_mgmt_handler mgmt_handlers[] = {
{ read_ext_controller_info, MGMT_READ_EXT_INFO_SIZE,
HCI_MGMT_UNTRUSTED },
{ set_appearance, MGMT_SET_APPEARANCE_SIZE },
{ get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
{ set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
};
void mgmt_index_added(struct hci_dev *hdev)

View File

@ -393,7 +393,8 @@ static void sco_sock_cleanup_listen(struct sock *parent)
*/
static void sco_sock_kill(struct sock *sk)
{
if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket ||
sock_flag(sk, SOCK_DEAD))
return;
BT_DBG("sk %p state %d", sk, sk->sk_state);