/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

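/* States of a synchronous request, tracked in hdev->req_status: the
 * submitter sets HCI_REQ_PEND before running the request, the
 * completion callback moves it to HCI_REQ_DONE, and
 * hci_req_sync_cancel() moves it to HCI_REQ_CANCELED. If the wait
 * ends while the status is still HCI_REQ_PEND, the submitter treats
 * it as a timeout.
 */
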
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
        skb_queue_head_init(&req->cmd_q);
        req->hdev = hdev;
        req->err = 0;
}

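/* A minimal usage sketch of the asynchronous request API (illustrative
 * only; the opcodes are real defines from hci.h, but this call site and
 * the my_complete_cb callback are placeholders, not code from this
 * file):
 *
 *      struct hci_request req;
 *      int err;
 *
 *      hci_req_init(&req, hdev);
 *      hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
 *      hci_req_add(&req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 *      err = hci_req_run(&req, my_complete_cb);
 *
 * The queued commands are sent to the controller in order and
 * my_complete_cb() (an hci_req_complete_t) runs once, after the last
 * command completes.
 */
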
static int req_run(struct hci_request *req, hci_req_complete_t complete,
                   hci_req_complete_skb_t complete_skb)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;
        unsigned long flags;

        BT_DBG("length %u", skb_queue_len(&req->cmd_q));

        /* If an error occurred during request building, remove all HCI
         * commands queued on the HCI request queue.
         */
        if (req->err) {
                skb_queue_purge(&req->cmd_q);
                return req->err;
        }

        /* Do not allow empty requests */
        if (skb_queue_empty(&req->cmd_q))
                return -ENODATA;

        /* The completion callback is attached to the last command only,
         * since the request as a whole completes when its final command
         * does.
         */
        skb = skb_peek_tail(&req->cmd_q);
        if (complete) {
                bt_cb(skb)->hci.req_complete = complete;
        } else if (complete_skb) {
                bt_cb(skb)->hci.req_complete_skb = complete_skb;
                bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
        }

        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

        queue_work(hdev->workqueue, &hdev->cmd_work);

        return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
        return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
        return req_run(req, NULL, complete);
}

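/* Both helpers above funnel into req_run(); they differ only in the
 * shape of the completion callback. hci_req_run() reports just the
 * status and opcode of the final command, while hci_req_run_skb()
 * additionally hands the command-complete skb to the callback so the
 * response parameters can be parsed.
 */
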
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
                                  struct sk_buff *skb)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                if (skb)
                        hdev->req_skb = skb_get(skb);
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        struct sk_buff *skb;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);
                return ERR_PTR(err);
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;
        skb = hdev->req_skb;
        hdev->req_skb = NULL;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0) {
                kfree_skb(skb);
                return ERR_PTR(err);
        }

        if (!skb)
                return ERR_PTR(-ENODATA);

        return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

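/* Illustrative synchronous usage (the opcode, reply struct and timeout
 * are real definitions from hci.h, but this exact call site is an
 * example rather than code from this file):
 *
 *      struct hci_rp_read_local_version *rp;
 *      struct sk_buff *skb;
 *
 *      skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *                           HCI_CMD_TIMEOUT);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *      rp = (struct hci_rp_read_local_version *)skb->data;
 *      ...
 *      kfree_skb(skb);
 *
 * On success the caller owns the returned skb and must release it with
 * kfree_skb() once the response parameters have been parsed.
 */
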
/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, void (*func)(struct hci_request *req,
                                                      unsigned long opt),
                   unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

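/* Note that __hci_req_sync() manipulates hdev->req_status and
 * hdev->req_wait_q without taking any lock of its own, so concurrent
 * callers must be serialized externally; hci_req_sync() below does
 * exactly that via hci_req_sync_lock().
 */
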
int hci_req_sync(struct hci_dev *hdev, void (*req)(struct hci_request *req,
                                                   unsigned long opt),
                 unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_sync_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_sync_unlock(hdev);

        return ret;
}

struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
                                const void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return NULL;

        hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen = plen;

        if (plen)
                memcpy(skb_put(skb, plen), param, plen);

        BT_DBG("skb len %d", skb->len);

        hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
        hci_skb_opcode(skb) = opcode;

        return skb;
}

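/* The skb built by hci_prepare_cmd() follows the HCI command packet
 * layout from the Bluetooth Core specification: a 3-byte header
 * (16-bit opcode in little endian followed by a one-byte parameter
 * length) and then up to 255 bytes of command parameters.
 */
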
/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
                    const void *param, u8 event)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;

        BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

        /* If an error occurred during request building, there is no point in
         * queueing the HCI command. We can simply return.
         */
        if (req->err)
                return;

        skb = hci_prepare_cmd(hdev, opcode, plen, param);
        if (!skb) {
                BT_ERR("%s no memory for command (opcode 0x%4.4x)",
                       hdev->name, opcode);
                req->err = -ENOMEM;
                return;
        }

        if (skb_queue_empty(&req->cmd_q))
                bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

        bt_cb(skb)->hci.req_event = event;

        skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
                 const void *param)
{
        hci_req_add_ev(req, opcode, plen, param, 0);
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
        struct hci_cp_le_set_scan_enable cp;

        memset(&cp, 0, sizeof(cp));
        cp.enable = LE_SCAN_DISABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void add_to_white_list(struct hci_request *req,
                              struct hci_conn_params *params)
{
        struct hci_cp_le_add_to_white_list cp;

        cp.bdaddr_type = params->addr_type;
        bacpy(&cp.bdaddr, &params->addr);

        hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

static u8 update_white_list(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_conn_params *params;
        struct bdaddr_list *b;
        uint8_t white_list_entries = 0;

        /* Go through the current white list programmed into the
         * controller one by one and check if that address is still
         * in the list of pending connections or list of devices to
         * report. If not present in either list, then queue the
         * command to remove it from the controller.
         */
        list_for_each_entry(b, &hdev->le_white_list, list) {
                struct hci_cp_le_del_from_white_list cp;

                if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
                                              &b->bdaddr, b->bdaddr_type) ||
                    hci_pend_le_action_lookup(&hdev->pend_le_reports,
                                              &b->bdaddr, b->bdaddr_type)) {
                        white_list_entries++;
                        continue;
                }

                cp.bdaddr_type = b->bdaddr_type;
                bacpy(&cp.bdaddr, &b->bdaddr);

                hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
                            sizeof(cp), &cp);
        }

        /* Since all white list entries that are no longer valid have
         * been removed, walk through the list of pending connections
         * and ensure that any new device gets programmed into
         * the controller.
         *
         * If the list of devices is larger than the list of
         * available white list entries in the controller, then
         * just abort and return a filter policy value to not use the
         * white list.
         */
        list_for_each_entry(params, &hdev->pend_le_conns, action) {
                if (hci_bdaddr_list_lookup(&hdev->le_white_list,
                                           &params->addr, params->addr_type))
                        continue;

                if (white_list_entries >= hdev->le_white_list_size) {
                        /* Select filter policy to accept all advertising */
                        return 0x00;
                }

                if (hci_find_irk_by_addr(hdev, &params->addr,
                                         params->addr_type)) {
                        /* White list can not be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
                add_to_white_list(req, params);
        }

        /* After adding all new pending connections, walk through
         * the list of pending reports and also add these to the
         * white list if there is still space.
         */
        list_for_each_entry(params, &hdev->pend_le_reports, action) {
                if (hci_bdaddr_list_lookup(&hdev->le_white_list,
                                           &params->addr, params->addr_type))
                        continue;

                if (white_list_entries >= hdev->le_white_list_size) {
                        /* Select filter policy to accept all advertising */
                        return 0x00;
                }

                if (hci_find_irk_by_addr(hdev, &params->addr,
                                         params->addr_type)) {
                        /* White list can not be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
                add_to_white_list(req, params);
        }

        /* Select filter policy to use white list */
        return 0x01;
}

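/* The value returned by update_white_list() is used as the
 * Filter_Policy of the subsequent LE Set Scan Parameters command:
 * 0x00 accepts all advertising packets, 0x01 accepts only packets from
 * white list devices. hci_req_add_le_passive_scan() below may
 * additionally OR in 0x02 when the controller supports extended
 * scanner filter policies.
 */
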
void hci_req_add_le_passive_scan(struct hci_request *req)
{
        struct hci_cp_le_set_scan_param param_cp;
        struct hci_cp_le_set_scan_enable enable_cp;
        struct hci_dev *hdev = req->hdev;
        u8 own_addr_type;
        u8 filter_policy;

        /* Set require_privacy to false since no SCAN_REQ are sent
         * during passive scanning. Not using a non-resolvable address
         * here is important so that peer devices using direct
         * advertising with our address will be correctly reported
         * by the controller.
         */
        if (hci_update_random_address(req, false, &own_addr_type))
                return;

        /* Adding or removing entries from the white list must
         * happen before enabling scanning. The controller does
         * not allow white list modification while scanning.
         */
        filter_policy = update_white_list(req);

        /* When the controller is using resolvable random addresses
         * with LE privacy enabled, controllers that support Extended
         * Scanner Filter Policies can also enable support for handling
         * directed advertising.
         *
         * So instead of using filter policies 0x00 (no whitelist)
         * and 0x01 (whitelist enabled) use the new filter policies
         * 0x02 (no whitelist) and 0x03 (whitelist enabled).
         */
        if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
            (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
                filter_policy |= 0x02;

        memset(&param_cp, 0, sizeof(param_cp));
        param_cp.type = LE_SCAN_PASSIVE;
        param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
        param_cp.window = cpu_to_le16(hdev->le_scan_window);
        param_cp.own_address_type = own_addr_type;
        param_cp.filter_policy = filter_policy;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
                    &param_cp);

        memset(&enable_cp, 0, sizeof(enable_cp));
        enable_cp.enable = LE_SCAN_ENABLE;
        enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
                    &enable_cp);
}

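/* Note the command ordering above: scan parameters (and the white
 * list) can only be modified while scanning is disabled, so the
 * request programs them first and enables scanning only as its final
 * command.
 */
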
static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
        struct hci_dev *hdev = req->hdev;

        /* If we're advertising or initiating an LE connection we can't
         * go ahead and change the random address at this time. This is
         * because the eventual initiator address used for the
         * subsequently created connection will be undefined (some
         * controllers use the new address and others the one we had
         * when the operation started).
         *
         * In this kind of scenario skip the update and let the random
         * address be updated at the next cycle.
         */
        if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
            hci_lookup_le_connect(hdev)) {
                BT_DBG("Deferring random address update");
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                return;
        }

        hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

int hci_update_random_address(struct hci_request *req, bool require_privacy,
                              u8 *own_addr_type)
{
        struct hci_dev *hdev = req->hdev;
        int err;

        /* If privacy is enabled use a resolvable private address. If
         * the current RPA has expired or there is something other than
         * the current RPA in use, then generate a new one.
         */
        if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
                int to;

                *own_addr_type = ADDR_LE_DEV_RANDOM;

                if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
                    !bacmp(&hdev->random_addr, &hdev->rpa))
                        return 0;

                err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
                if (err < 0) {
                        BT_ERR("%s failed to generate new RPA", hdev->name);
                        return err;
                }

                set_random_addr(req, &hdev->rpa);

                to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
                queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

                return 0;
        }

        /* In case of required privacy without resolvable private address,
         * use a non-resolvable private address. This is useful for active
         * scanning and non-connectable advertising.
         */
        if (require_privacy) {
                bdaddr_t nrpa;

                while (true) {
                        /* The non-resolvable private address is generated
                         * from random six bytes with the two most significant
                         * bits cleared.
                         */
                        get_random_bytes(&nrpa, 6);
                        nrpa.b[5] &= 0x3f;

                        /* The non-resolvable private address shall not be
                         * equal to the public address.
                         */
                        if (bacmp(&hdev->bdaddr, &nrpa))
                                break;
                }

                *own_addr_type = ADDR_LE_DEV_RANDOM;
                set_random_addr(req, &nrpa);
                return 0;
        }

        /* If forcing static address is in use or there is no public
         * address use the static address as random address (but skip
         * the HCI command if the current random address is already the
         * static one).
         *
         * In case BR/EDR has been disabled on a dual-mode controller
         * and a static address has been configured, then use that
         * address instead of the public BR/EDR address.
         */
        if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
            !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
            (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
             bacmp(&hdev->static_addr, BDADDR_ANY))) {
                *own_addr_type = ADDR_LE_DEV_RANDOM;
                if (bacmp(&hdev->static_addr, &hdev->random_addr))
                        hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
                                    &hdev->static_addr);
                return 0;
        }

        /* Neither privacy nor static address is being used so use a
         * public address.
         */
        *own_addr_type = ADDR_LE_DEV_PUBLIC;

        return 0;
}

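/* In summary, the address selection above applies this precedence:
 * privacy enabled -> resolvable private address; privacy required but
 * no RPA available -> freshly generated non-resolvable private
 * address; forced or implied static address -> static random address;
 * otherwise -> public address.
 */
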
static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
        struct bdaddr_list *b;

        list_for_each_entry(b, &hdev->whitelist, list) {
                struct hci_conn *conn;

                conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
                if (!conn)
                        return true;

                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
                        return true;
        }

        return false;
}

void __hci_update_page_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 scan;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

        if (!hdev_is_powered(hdev))
                return;

        if (mgmt_powering_down(hdev))
                return;

        if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
            disconnected_whitelist_entries(hdev))
                scan = SCAN_PAGE;
        else
                scan = SCAN_DISABLED;

        if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
                return;

        if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
                scan |= SCAN_INQUIRY;

        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

void hci_update_page_scan(struct hci_dev *hdev)
{
        struct hci_request req;

        hci_req_init(&req, hdev);
        __hci_update_page_scan(&req);
        hci_req_run(&req, NULL);
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
void __hci_update_background_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        if (!test_bit(HCI_UP, &hdev->flags) ||
            test_bit(HCI_INIT, &hdev->flags) ||
            hci_dev_test_flag(hdev, HCI_SETUP) ||
            hci_dev_test_flag(hdev, HCI_CONFIG) ||
            hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
            hci_dev_test_flag(hdev, HCI_UNREGISTER))
                return;

        /* No point in doing scanning if LE support hasn't been enabled */
        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        /* If discovery is active don't interfere with it */
        if (hdev->discovery.state != DISCOVERY_STOPPED)
                return;

        /* Reset RSSI and UUID filters when starting background scanning
         * since these filters are meant for service discovery only.
         *
         * The Start Discovery and Start Service Discovery operations
         * ensure to set proper values for RSSI threshold and UUID
         * filter list. So it is safe to just reset them here.
         */
        hci_discovery_filter_clear(hdev);

        if (list_empty(&hdev->pend_le_conns) &&
            list_empty(&hdev->pend_le_reports)) {
                /* If there are no pending LE connections or devices
                 * to be scanned for, we should stop the background
                 * scanning.
                 */

                /* If controller is not scanning we are done. */
                if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        return;

                hci_req_add_le_scan_disable(req);

                BT_DBG("%s stopping background scanning", hdev->name);
        } else {
                /* If there is at least one pending LE connection, we should
                 * keep the background scan running.
                 */

                /* If controller is connecting, we should not start scanning
                 * since some controllers are not able to scan and connect at
                 * the same time.
                 */
                if (hci_lookup_le_connect(hdev))
                        return;

                /* If controller is currently scanning, we stop it to ensure we
                 * don't miss any advertising (due to duplicates filter).
                 */
                if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        hci_req_add_le_scan_disable(req);

                hci_req_add_le_passive_scan(req);

                BT_DBG("%s starting background scanning", hdev->name);
        }
}

void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
                      u8 reason)
{
        switch (conn->state) {
        case BT_CONNECTED:
        case BT_CONFIG:
                if (conn->type == AMP_LINK) {
                        struct hci_cp_disconn_phy_link cp;

                        cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
                        cp.reason = reason;
                        hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
                                    &cp);
                } else {
                        struct hci_cp_disconnect dc;

                        dc.handle = cpu_to_le16(conn->handle);
                        dc.reason = reason;
                        hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
                }

                conn->state = BT_DISCONN;

                break;
        case BT_CONNECT:
                if (conn->type == LE_LINK) {
                        if (test_bit(HCI_CONN_SCANNING, &conn->flags))
                                break;
                        hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
                                    0, NULL);
                } else if (conn->type == ACL_LINK) {
                        if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
                                break;
                        hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
                                    6, &conn->dst);
                }
                break;
        case BT_CONNECT2:
                if (conn->type == ACL_LINK) {
                        struct hci_cp_reject_conn_req rej;

                        bacpy(&rej.bdaddr, &conn->dst);
                        rej.reason = reason;

                        hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
                                    sizeof(rej), &rej);
                } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
                        struct hci_cp_reject_sync_conn_req rej;

                        bacpy(&rej.bdaddr, &conn->dst);

                        /* SCO rejection has its own limited set of
                         * allowed error values (0x0D-0x0F) which isn't
                         * compatible with most values passed to this
                         * function. To be safe hard-code one of the
                         * values that's suitable for SCO.
                         */
                        rej.reason = HCI_ERROR_REMOTE_LOW_RESOURCES;

                        hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
                                    sizeof(rej), &rej);
                }
                break;
        default:
                conn->state = BT_CLOSED;
                break;
        }
}

static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        if (status)
                BT_DBG("Failed to abort connection: status 0x%2.2x", status);
}

int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
        struct hci_request req;
        int err;

        hci_req_init(&req, conn->hdev);

        __hci_abort_conn(&req, conn, reason);

        err = hci_req_run(&req, abort_conn_complete);
        if (err && err != -ENODATA) {
                BT_ERR("Failed to run HCI request: err %d", err);
                return err;
        }

        return 0;
}

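/* A -ENODATA result from hci_req_run() is deliberately tolerated
 * above: __hci_abort_conn() legitimately queues no commands in some
 * states (for example an LE connection that is still only being
 * scanned for), and running an empty request is not a failure here.
 */
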
static void update_bg_scan(struct hci_request *req, unsigned long opt)
{
        hci_dev_lock(req->hdev);
        __hci_update_background_scan(req);
        hci_dev_unlock(req->hdev);
}

static void bg_scan_update(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            bg_scan_update);

        hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT);
}

void hci_request_setup(struct hci_dev *hdev)
{
        INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
}

void hci_request_cancel_all(struct hci_dev *hdev)
{
        cancel_work_sync(&hdev->bg_scan_update);
}