/* hci_core.c - HCI core Bluetooth handling */
/*
* Copyright (c) 2017-2021 Nordic Semiconductor ASA
* Copyright (c) 2015-2016 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr/kernel.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>
#include <zephyr/net_buf.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/sys/check.h>
#include <zephyr/sys/util_macro.h>
#include <zephyr/sys/util.h>
#include <zephyr/sys/slist.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/debug/stack.h>
#include <zephyr/sys/__assert.h>
#include <soc.h>
#include <zephyr/settings/settings.h>
#include <zephyr/bluetooth/bluetooth.h>
#include <zephyr/bluetooth/conn.h>
#include <zephyr/bluetooth/l2cap.h>
#include <zephyr/bluetooth/hci.h>
#include <zephyr/bluetooth/hci_vs.h>
#include <zephyr/bluetooth/testing.h>
#if DT_HAS_CHOSEN(zephyr_bt_hci)
#include <zephyr/drivers/bluetooth.h>
#else
#include <zephyr/drivers/bluetooth/hci_driver.h>
#endif
#include "common/bt_str.h"
#include "common/assert.h"
#include "common/rpa.h"
#include "keys.h"
#include "monitor.h"
#include "hci_core.h"
#include "hci_ecc.h"
#include "ecc.h"
#include "id.h"
#include "adv.h"
#include "scan.h"
#include "addr_internal.h"
#include "conn_internal.h"
#include "iso_internal.h"
#include "l2cap_internal.h"
#include "gatt_internal.h"
#include "smp.h"
#include "crypto.h"
#include "settings.h"
#if defined(CONFIG_BT_CLASSIC)
#include "classic/br.h"
#endif
#if defined(CONFIG_BT_DF)
#include "direction_internal.h"
#endif /* CONFIG_BT_DF */
#define LOG_LEVEL CONFIG_BT_HCI_CORE_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(bt_hci_core);
#define BT_HCI_DEV DT_CHOSEN(zephyr_bt_hci)
#define BT_HCI_BUS BT_DT_HCI_BUS_GET(BT_HCI_DEV)
#define BT_HCI_NAME BT_DT_HCI_NAME_GET(BT_HCI_DEV)
void bt_tx_irq_raise(void);
#define HCI_CMD_TIMEOUT K_SECONDS(10)
/* Stacks for the threads */
static void rx_work_handler(struct k_work *work);
static K_WORK_DEFINE(rx_work, rx_work_handler);
#if defined(CONFIG_BT_RECV_WORKQ_BT)
static struct k_work_q bt_workq;
static K_KERNEL_STACK_DEFINE(rx_thread_stack, CONFIG_BT_RX_STACK_SIZE);
#endif /* CONFIG_BT_RECV_WORKQ_BT */
static void init_work(struct k_work *work);
/* The single global Bluetooth host/device state instance. */
struct bt_dev bt_dev = {
	.init = Z_WORK_INITIALIZER(init_work),
#if defined(CONFIG_BT_PRIVACY)
	.rpa_timeout = CONFIG_BT_RPA_TIMEOUT,
#endif
#if defined(CONFIG_BT_DEVICE_APPEARANCE_DYNAMIC)
	.appearance = CONFIG_BT_DEVICE_APPEARANCE,
#endif
#if DT_HAS_CHOSEN(zephyr_bt_hci)
	.hci = DEVICE_DT_GET(BT_HCI_DEV),
#endif
};
static bt_ready_cb_t ready_cb;
#if defined(CONFIG_BT_HCI_VS_EVT_USER)
static bt_hci_vnd_evt_cb_t *hci_vnd_evt_cb;
#endif /* CONFIG_BT_HCI_VS_EVT_USER */
/* Per-command metadata, looked up via the net_buf id of the command buffer
 * taken from hci_cmd_pool (see the cmd() macro).
 */
struct cmd_data {
	/** HCI status of the command completion */
	uint8_t status;

	/** The command OpCode that the buffer contains */
	uint16_t opcode;

	/** The state to update when command completes with success. */
	struct bt_hci_cmd_state_set *state;

	/** Used by bt_hci_cmd_send_sync. */
	struct k_sem *sync;
};
static struct cmd_data cmd_data[CONFIG_BT_BUF_CMD_TX_COUNT];
#define cmd(buf) (&cmd_data[net_buf_id(buf)])
#define acl(buf) ((struct acl_data *)net_buf_user_data(buf))
#if DT_HAS_CHOSEN(zephyr_bt_hci)
/* True when the chosen HCI devicetree node declares the "no reset" quirk. */
static bool drv_quirk_no_reset(void)
{
	return ((BT_DT_HCI_QUIRKS_GET(DT_CHOSEN(zephyr_bt_hci)) & BT_HCI_QUIRK_NO_RESET) != 0);
}
/* True when the chosen HCI devicetree node declares the "no auto DLE" quirk. */
bool bt_drv_quirk_no_auto_dle(void)
{
	return ((BT_DT_HCI_QUIRKS_GET(DT_CHOSEN(zephyr_bt_hci)) & BT_HCI_QUIRK_NO_AUTO_DLE) != 0);
}
#else
/* Legacy HCI driver API variant: read the "no reset" quirk from the
 * registered driver's quirks bitmask.
 */
static bool drv_quirk_no_reset(void)
{
	return ((bt_dev.drv->quirks & BT_QUIRK_NO_RESET) != 0);
}
/* Legacy HCI driver API variant: read the "no auto DLE" quirk from the
 * registered driver's quirks bitmask.
 */
bool bt_drv_quirk_no_auto_dle(void)
{
	return ((bt_dev.drv->quirks & BT_QUIRK_NO_AUTO_DLE) != 0);
}
#endif
/* Attach a deferred flag update to an HCI command buffer: when the command
 * carried by @p buf completes successfully, @p bit in @p target is set to
 * @p val by the command-completion path.
 */
void bt_hci_cmd_state_set_init(struct net_buf *buf,
			       struct bt_hci_cmd_state_set *state,
			       atomic_t *target, int bit, bool val)
{
	state->val = val;
	state->bit = bit;
	state->target = target;

	cmd(buf)->state = state;
}
/* HCI command buffers. Derive the needed size from both Command and Event
* buffer length since the buffer is also used for the response event i.e
* command complete or command status.
*/
#define CMD_BUF_SIZE MAX(BT_BUF_EVT_RX_SIZE, BT_BUF_CMD_TX_SIZE)
NET_BUF_POOL_FIXED_DEFINE(hci_cmd_pool, CONFIG_BT_BUF_CMD_TX_COUNT,
CMD_BUF_SIZE, sizeof(struct bt_buf_data), NULL);
/* Table entry binding an HCI event code to its handler together with the
 * minimum payload length the dispatcher validates before calling it.
 */
struct event_handler {
	uint8_t event;
	uint8_t min_len;
	void (*handler)(struct net_buf *buf);
};

/* Designated-initializer helper for building event_handler tables. */
#define EVENT_HANDLER(_evt, _handler, _min_len) \
{ \
	.event = _evt, \
	.handler = _handler, \
	.min_len = _min_len, \
}
/* Look up @p event in @p handlers and invoke the matching handler.
 *
 * @return 0 when a handler ran, -EINVAL when the event payload was shorter
 *         than the handler's declared minimum, -EOPNOTSUPP when no table
 *         entry matched.
 */
static int handle_event_common(uint8_t event, struct net_buf *buf,
			       const struct event_handler *handlers, size_t num_handlers)
{
	for (size_t idx = 0; idx < num_handlers; idx++) {
		const struct event_handler *entry = &handlers[idx];

		if (entry->event != event) {
			continue;
		}

		if (buf->len < entry->min_len) {
			LOG_ERR("Too small (%u bytes) event 0x%02x", buf->len, event);
			return -EINVAL;
		}

		entry->handler(buf);
		return 0;
	}

	return -EOPNOTSUPP;
}
/* Dispatch a standard HCI event, warning on events nobody handles.
 * Length errors are already logged inside handle_event_common().
 */
static void handle_event(uint8_t event, struct net_buf *buf, const struct event_handler *handlers,
			 size_t num_handlers)
{
	if (handle_event_common(event, buf, handlers, num_handlers) == -EOPNOTSUPP) {
		LOG_WRN("Unhandled event 0x%02x len %u: %s", event, buf->len,
			bt_hex(buf->data, buf->len));
	}
}
/* Dispatch a vendor-specific HCI event, warning on events nobody handles.
 * Length errors are already logged inside handle_event_common().
 */
static void handle_vs_event(uint8_t event, struct net_buf *buf,
			    const struct event_handler *handlers, size_t num_handlers)
{
	if (handle_event_common(event, buf, handlers, num_handlers) == -EOPNOTSUPP) {
		LOG_WRN("Unhandled vendor-specific event 0x%02x len %u: %s", event, buf->len,
			bt_hex(buf->data, buf->len));
	}
}
/* Record whether a Host Number Of Completed Packets has already been sent
 * for this ACL buffer (consumed by bt_hci_host_num_completed_packets()).
 */
void bt_acl_set_ncp_sent(struct net_buf *packet, bool value)
{
	acl(packet)->host_ncp_sent = value;
}
/* Report one completed ACL packet for @p handle to the controller via the
 * HCI Host Number Of Completed Packets command.
 *
 * No-op unless controller-to-host ACL flow control is enabled. Allocation
 * and send failures are fatal (asserted) since losing a credit would
 * eventually stall the controller-to-host data path.
 */
void bt_send_one_host_num_completed_packets(uint16_t handle)
{
	if (!IS_ENABLED(CONFIG_BT_HCI_ACL_FLOW_CONTROL)) {
		ARG_UNUSED(handle);
		return;
	}

	struct bt_hci_cp_host_num_completed_packets *cp;
	struct bt_hci_handle_count *hc;
	struct net_buf *buf;
	int err;

	LOG_DBG("Reporting completed packet for handle %u", handle);

	buf = bt_hci_cmd_create(BT_HCI_OP_HOST_NUM_COMPLETED_PACKETS,
				sizeof(*cp) + sizeof(*hc));
	BT_ASSERT_MSG(buf, "Unable to alloc for Host NCP");

	/* Single handle entry: one credit for `handle`. */
	cp = net_buf_add(buf, sizeof(*cp));
	cp->num_handles = sys_cpu_to_le16(1);

	hc = net_buf_add(buf, sizeof(*hc));
	hc->handle = sys_cpu_to_le16(handle);
	hc->count = sys_cpu_to_le16(1);

	err = bt_hci_cmd_send(BT_HCI_OP_HOST_NUM_COMPLETED_PACKETS, buf);
	BT_ASSERT_MSG(err == 0, "Unable to send Host NCP (err %d)", err);
}
#if defined(CONFIG_BT_TESTING)
/* Weak hook invoked just before an ACL buffer is returned to its pool;
 * test builds may override it to observe buffer lifetime events.
 */
__weak void bt_testing_trace_event_acl_pool_destroy(struct net_buf *buf)
{
}
#endif
#if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
/* net_buf destroy callback for ACL RX buffers: returns the buffer to its
 * pool and, when controller-to-host flow control is active, hands one
 * credit back to the controller for the owning connection.
 */
void bt_hci_host_num_completed_packets(struct net_buf *buf)
{
	uint16_t handle = acl(buf)->handle;
	uint8_t index = acl(buf)->index;
	/* Snapshot the flag BEFORE destroying the buffer: once it is back in
	 * the pool it can be re-allocated from another context and its user
	 * data overwritten. The previous code read acl(buf)->host_ncp_sent
	 * after net_buf_destroy(), which is a use-after-free of user data.
	 */
	bool host_ncp_sent = acl(buf)->host_ncp_sent;
	struct bt_conn *conn;

	if (IS_ENABLED(CONFIG_BT_TESTING)) {
		bt_testing_trace_event_acl_pool_destroy(buf);
	}

	net_buf_destroy(buf);

	if (host_ncp_sent) {
		/* Credit was already returned explicitly. */
		return;
	}

	/* Do nothing if controller to host flow control is not supported */
	if (!BT_CMD_TEST(bt_dev.supported_commands, 10, 5)) {
		return;
	}

	conn = bt_conn_lookup_index(index);
	if (!conn) {
		LOG_WRN("Unable to look up conn with index 0x%02x", index);
		return;
	}

	if (conn->state != BT_CONN_CONNECTED &&
	    conn->state != BT_CONN_DISCONNECTING) {
		LOG_WRN("Not reporting packet for non-connected conn");
		bt_conn_unref(conn);
		return;
	}

	bt_conn_unref(conn);

	bt_send_one_host_num_completed_packets(handle);
}
#endif /* defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL) */
/* Allocate and pre-fill an HCI command buffer.
 *
 * @param opcode    HCI opcode placed in the header (little-endian on wire).
 * @param param_len Declared parameter length for the header; the caller is
 *                  responsible for net_buf_add()ing exactly that much data.
 *
 * @return buffer with header written and cmd() metadata reset, or NULL.
 */
struct net_buf *bt_hci_cmd_create(uint16_t opcode, uint8_t param_len)
{
	struct bt_hci_cmd_hdr *hdr;
	struct net_buf *buf;

	LOG_DBG("opcode 0x%04x param_len %u", opcode, param_len);

	/* net_buf_alloc(K_FOREVER) can fail when run from the syswq */
	buf = net_buf_alloc(&hci_cmd_pool, K_FOREVER);
	if (!buf) {
		LOG_DBG("Unable to allocate a command buffer");
		return NULL;
	}

	LOG_DBG("buf %p", buf);

	net_buf_reserve(buf, BT_BUF_RESERVE);

	bt_buf_set_type(buf, BT_BUF_CMD);

	/* Reset the per-buffer command metadata (slot is reused). */
	cmd(buf)->opcode = opcode;
	cmd(buf)->sync = NULL;
	cmd(buf)->state = NULL;

	hdr = net_buf_add(buf, sizeof(*hdr));
	hdr->opcode = sys_cpu_to_le16(opcode);
	hdr->param_len = param_len;

	return buf;
}
/* Queue an HCI command for asynchronous transmission.
 *
 * Takes ownership of @p buf (allocates one when NULL). Host NCP is sent to
 * the driver immediately since it consumes no controller command credits
 * and produces no completion event.
 *
 * @return 0 on success (queued or sent), negative errno otherwise.
 */
int bt_hci_cmd_send(uint16_t opcode, struct net_buf *buf)
{
	if (!buf) {
		buf = bt_hci_cmd_create(opcode, 0);
		if (!buf) {
			return -ENOBUFS;
		}
	}

	LOG_DBG("opcode 0x%04x len %u", opcode, buf->len);

	/* Host Number of Completed Packets can ignore the ncmd value
	 * and does not generate any cmd complete/status events.
	 */
	if (opcode == BT_HCI_OP_HOST_NUM_COMPLETED_PACKETS) {
		int err;

		err = bt_send(buf);
		if (err) {
			LOG_ERR("Unable to send to driver (err %d)", err);
			net_buf_unref(buf);
		}

		return err;
	}

	k_fifo_put(&bt_dev.cmd_tx_queue, buf);
	bt_tx_irq_raise();

	return 0;
}
static bool process_pending_cmd(k_timeout_t timeout);
/* Send an HCI command and block until the controller answers with the
 * matching Command Complete/Status (asserts on controller timeout).
 *
 * @param opcode HCI opcode; used to allocate a buffer when @p buf is NULL.
 * @param buf    Command buffer; must originate from hci_cmd_pool so that
 *               the cmd() metadata lookup is valid.
 * @param rsp    When non-NULL, receives the response buffer (caller must
 *               net_buf_unref() it); otherwise the buffer is released here.
 *
 * @return 0 on success, or a negative errno mapped from the HCI status.
 */
int bt_hci_cmd_send_sync(uint16_t opcode, struct net_buf *buf,
			 struct net_buf **rsp)
{
	struct k_sem sync_sem;
	uint8_t status;
	int err;

	if (!buf) {
		buf = bt_hci_cmd_create(opcode, 0);
		if (!buf) {
			return -ENOBUFS;
		}
	} else {
		/* `cmd(buf)` depends on this */
		if (net_buf_pool_get(buf->pool_id) != &hci_cmd_pool) {
			__ASSERT_NO_MSG(false);
			return -EINVAL;
		}
	}

	LOG_DBG("buf %p opcode 0x%04x len %u", buf, opcode, buf->len);

	/* This local sem is just for suspending the current thread until the
	 * command is processed by the LL. It is given (and we are awaken) by
	 * the cmd_complete/status handlers.
	 */
	k_sem_init(&sync_sem, 0, 1);
	cmd(buf)->sync = &sync_sem;

	/* Extra reference: the TX path drops one, and we still need the
	 * buffer afterwards to read the status / return the response.
	 */
	k_fifo_put(&bt_dev.cmd_tx_queue, net_buf_ref(buf));
	bt_tx_irq_raise();

	/* TODO: disallow sending sync commands from syswq altogether */

	/* Since the commands are now processed in the syswq, we cannot suspend
	 * and wait. We have to send the command from the current context.
	 */
	if (k_current_get() == &k_sys_work_q.thread) {
		/* drain the command queue until we get to send the command of interest. */
		struct net_buf *cmd = NULL;

		do {
			cmd = k_fifo_peek_head(&bt_dev.cmd_tx_queue);
			LOG_DBG("process cmd %p want %p", cmd, buf);

			/* Wait for a response from the Bluetooth Controller.
			 * The Controller may fail to respond if:
			 * - It was never programmed or connected.
			 * - There was a fatal error.
			 *
			 * See the `BT_HCI_OP_` macros in hci_types.h or
			 * Core_v5.4, Vol 4, Part E, Section 5.4.1 and Section 7
			 * to map the opcode to the HCI command documentation.
			 * Example: 0x0c03 represents HCI_Reset command.
			 */
			__maybe_unused bool success = process_pending_cmd(HCI_CMD_TIMEOUT);

			BT_ASSERT_MSG(success, "command opcode 0x%04x timeout", opcode);
		} while (buf != cmd);
	}

	/* Now that we have sent the command, suspend until the LL replies */
	err = k_sem_take(&sync_sem, HCI_CMD_TIMEOUT);
	BT_ASSERT_MSG(err == 0,
		      "Controller unresponsive, command opcode 0x%04x timeout with err %d",
		      opcode, err);

	status = cmd(buf)->status;
	if (status) {
		LOG_WRN("opcode 0x%04x status 0x%02x %s", opcode,
			status, bt_hci_err_to_str(status));
		net_buf_unref(buf);

		/* Map common HCI error codes onto errno values. */
		switch (status) {
		case BT_HCI_ERR_CONN_LIMIT_EXCEEDED:
			return -ECONNREFUSED;
		case BT_HCI_ERR_INSUFFICIENT_RESOURCES:
			return -ENOMEM;
		case BT_HCI_ERR_INVALID_PARAM:
			return -EINVAL;
		case BT_HCI_ERR_CMD_DISALLOWED:
			return -EACCES;
		default:
			return -EIO;
		}
	}

	LOG_DBG("rsp %p opcode 0x%04x len %u", buf, opcode, buf->len);

	if (rsp) {
		*rsp = buf;
	} else {
		net_buf_unref(buf);
	}

	return 0;
}
/* Fill @p buffer with @p len random bytes fetched from the controller via
 * HCI_LE_Rand (8 bytes per round trip).
 *
 * @return 0 on success, -ENOTSUP when the controller lacks the command,
 *         or a negative errno from the command exchange.
 */
int bt_hci_le_rand(void *buffer, size_t len)
{
	struct bt_hci_rp_le_rand *rp;
	uint8_t *dst = buffer;
	struct net_buf *rsp;
	int err;

	/* HCI_LE_Rand support bit: supported-commands octet 27, bit 7. */
	if (!BT_CMD_TEST(bt_dev.supported_commands, 27, 7)) {
		return -ENOTSUP;
	}

	while (len > 0U) {
		size_t chunk = MIN(len, sizeof(rp->rand));

		err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_RAND, NULL, &rsp);
		if (err) {
			return err;
		}

		rp = (void *)rsp->data;
		memcpy(dst, rp->rand, chunk);
		net_buf_unref(rsp);

		dst += chunk;
		len -= chunk;
	}

	return 0;
}
int bt_hci_le_read_max_data_len(uint16_t *tx_octets, uint16_t *tx_time)
{
struct bt_hci_rp_le_read_max_data_len *rp;
struct net_buf *rsp;
int err;
err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_READ_MAX_DATA_LEN, NULL, &rsp);
if (err) {
LOG_ERR("Failed to read DLE max data len");
return err;
}
rp = (void *)rsp->data;
*tx_octets = sys_le16_to_cpu(rp->max_tx_octets);
*tx_time = sys_le16_to_cpu(rp->max_tx_time);
net_buf_unref(rsp);
if (!IN_RANGE(*tx_octets, BT_HCI_LE_MAX_TX_OCTETS_MIN, BT_HCI_LE_MAX_TX_OCTETS_MAX)) {
LOG_WRN("tx_octets exceeds the valid range %u", *tx_octets);
}
if (!IN_RANGE(*tx_time, BT_HCI_LE_MAX_TX_TIME_MIN, BT_HCI_LE_MAX_TX_TIME_MAX)) {
LOG_WRN("tx_time exceeds the valid range %u", *tx_time);
}
return 0;
}
/* Translate an HCI PHY value into the corresponding BT_GAP_LE_PHY_* value;
 * unrecognized inputs map to 0.
 */
uint8_t bt_get_phy(uint8_t hci_phy)
{
	if (hci_phy == BT_HCI_LE_PHY_1M) {
		return BT_GAP_LE_PHY_1M;
	}

	if (hci_phy == BT_HCI_LE_PHY_2M) {
		return BT_GAP_LE_PHY_2M;
	}

	if (hci_phy == BT_HCI_LE_PHY_CODED) {
		return BT_GAP_LE_PHY_CODED;
	}

	return 0;
}
/* Map an HCI CTE type code onto the direction-finding API value.
 * "No CTE" and any unrecognized code both map to BT_DF_CTE_TYPE_NONE,
 * exactly as in the original switch (NO_CTE and default were identical).
 */
int bt_get_df_cte_type(uint8_t hci_cte_type)
{
	if (hci_cte_type == BT_HCI_LE_AOA_CTE) {
		return BT_DF_CTE_TYPE_AOA;
	}

	if (hci_cte_type == BT_HCI_LE_AOD_CTE_1US) {
		return BT_DF_CTE_TYPE_AOD_1US;
	}

	if (hci_cte_type == BT_HCI_LE_AOD_CTE_2US) {
		return BT_DF_CTE_TYPE_AOD_2US;
	}

	return BT_DF_CTE_TYPE_NONE;
}
#if defined(CONFIG_BT_CONN_TX)
/* Handle the HCI Number Of Completed Packets event: for each handle/count
 * pair, release TX credits and move completed TX contexts from the
 * connection's `pending` list to its `complete` list.
 */
static void hci_num_completed_packets(struct net_buf *buf)
{
	struct bt_hci_evt_num_completed_packets *evt = (void *)buf->data;
	/* Required event size for the advertised number of handle entries.
	 * Computed once as size_t and logged with %zu: the old code passed
	 * this size_t expression to a %u conversion, which is a format
	 * mismatch on targets where size_t is not unsigned int.
	 */
	size_t min_len = sizeof(*evt) + sizeof(evt->h[0]) * evt->num_handles;
	int i;

	if (min_len > buf->len) {
		LOG_ERR("evt num_handles (=%u) too large (%zu > %u)",
			evt->num_handles, min_len, buf->len);
		return;
	}

	LOG_DBG("num_handles %u", evt->num_handles);

	for (i = 0; i < evt->num_handles; i++) {
		uint16_t handle, count;
		struct bt_conn *conn;

		handle = sys_le16_to_cpu(evt->h[i].handle);
		count = sys_le16_to_cpu(evt->h[i].count);

		LOG_DBG("handle %u count %u", handle, count);

		conn = bt_conn_lookup_handle(handle, BT_CONN_TYPE_ALL);
		if (!conn) {
			LOG_ERR("No connection for handle %u", handle);
			continue;
		}

		while (count--) {
			sys_snode_t *node;

			k_sem_give(bt_conn_get_pkts(conn));

			/* move the next TX context from the `pending` list to
			 * the `complete` list.
			 */
			node = sys_slist_get(&conn->tx_pending);
			if (!node) {
				LOG_ERR("packets count mismatch");
				__ASSERT_NO_MSG(0);
				break;
			}

			sys_slist_append(&conn->tx_complete, node);

			/* align the `pending` value */
			__ASSERT_NO_MSG(atomic_get(&conn->in_ll));
			atomic_dec(&conn->in_ll);

			/* TX context free + callback happens in there */
			bt_conn_tx_notify(conn, false);
		}

		bt_conn_unref(conn);
	}
}
#endif /* CONFIG_BT_CONN_TX */
#if defined(CONFIG_BT_CONN)
/* Route an incoming ACL data packet to its connection object.
 * Takes ownership of @p buf; unrefs it on every error path.
 */
static void hci_acl(struct net_buf *buf)
{
	struct bt_hci_acl_hdr *hdr;
	uint16_t handle, len;
	struct bt_conn *conn;
	uint8_t flags;

	LOG_DBG("buf %p", buf);

	if (buf->len < sizeof(*hdr)) {
		LOG_ERR("Invalid HCI ACL packet size (%u)", buf->len);
		net_buf_unref(buf);
		return;
	}

	hdr = net_buf_pull_mem(buf, sizeof(*hdr));
	len = sys_le16_to_cpu(hdr->len);
	handle = sys_le16_to_cpu(hdr->handle);

	/* The wire handle field also carries the packet boundary/broadcast
	 * flag bits; split them from the actual connection handle.
	 */
	flags = bt_acl_flags(handle);

	acl(buf)->handle = bt_acl_handle(handle);
	acl(buf)->index = BT_CONN_INDEX_INVALID;

	LOG_DBG("handle %u len %u flags %u", acl(buf)->handle, len, flags);

	if (buf->len != len) {
		LOG_ERR("ACL data length mismatch (%u != %u)", buf->len, len);
		net_buf_unref(buf);
		return;
	}

	conn = bt_conn_lookup_handle(acl(buf)->handle, BT_CONN_TYPE_ALL);
	if (!conn) {
		LOG_ERR("Unable to find conn for handle %u", acl(buf)->handle);
		net_buf_unref(buf);
		return;
	}

	acl(buf)->index = bt_conn_index(conn);

	bt_conn_recv(conn, buf, flags);
	bt_conn_unref(conn);
}
/* Log the HCI Data Buffer Overflow event; no recovery is attempted here. */
static void hci_data_buf_overflow(struct net_buf *buf)
{
	struct bt_hci_evt_data_buf_overflow *evt = (void *)buf->data;

	LOG_WRN("Data buffer overflow (link type 0x%02x)", evt->link_type);
}
#if defined(CONFIG_BT_CENTRAL)
/* Populate one per-PHY parameter entry of a create-connection command from
 * the connection's requested parameters. CE length hints stay zero (no
 * preference expressed to the controller).
 */
static void set_phy_conn_param(const struct bt_conn *conn,
			       struct bt_hci_ext_conn_phy *phy)
{
	phy->min_ce_len = 0;
	phy->max_ce_len = 0;

	phy->conn_interval_min = sys_cpu_to_le16(conn->le.interval_min);
	phy->conn_interval_max = sys_cpu_to_le16(conn->le.interval_max);
	phy->conn_latency = sys_cpu_to_le16(conn->le.latency);
	phy->supervision_timeout = sys_cpu_to_le16(conn->le.timeout);
}
/* Initiate a connection using HCI LE Extended Create Connection.
 *
 * Uses the filter accept list when the connection is flagged for auto
 * connect (and the feature is enabled); otherwise targets the peer address
 * directly. Includes one PHY parameter block per enabled initiating PHY.
 *
 * @return result of the command exchange, or negative errno on setup error.
 */
int bt_le_create_conn_ext(const struct bt_conn *conn)
{
	struct bt_hci_cp_le_ext_create_conn *cp;
	struct bt_hci_ext_conn_phy *phy;
	struct bt_hci_cmd_state_set state;
	bool use_filter = false;
	struct net_buf *buf;
	uint8_t own_addr_type;
	uint8_t num_phys;
	int err;

	if (IS_ENABLED(CONFIG_BT_FILTER_ACCEPT_LIST)) {
		use_filter = atomic_test_bit(conn->flags, BT_CONN_AUTO_CONNECT);
	}

	err = bt_id_set_create_conn_own_addr(use_filter, &own_addr_type);
	if (err) {
		return err;
	}

	/* One PHY entry for 1M (unless disabled) plus one for Coded. */
	num_phys = (!(bt_dev.create_param.options &
		      BT_CONN_LE_OPT_NO_1M) ? 1 : 0) +
		   ((bt_dev.create_param.options &
		     BT_CONN_LE_OPT_CODED) ? 1 : 0);

	buf = bt_hci_cmd_create(BT_HCI_OP_LE_EXT_CREATE_CONN, sizeof(*cp) +
				num_phys * sizeof(*phy));
	if (!buf) {
		return -ENOBUFS;
	}

	cp = net_buf_add(buf, sizeof(*cp));
	(void)memset(cp, 0, sizeof(*cp));

	if (use_filter) {
		/* User Initiated procedure use fast scan parameters. */
		bt_addr_le_copy(&cp->peer_addr, BT_ADDR_LE_ANY);
		cp->filter_policy = BT_HCI_LE_CREATE_CONN_FP_FILTER;
	} else {
		const bt_addr_le_t *peer_addr = &conn->le.dst;

#if defined(CONFIG_BT_SMP)
		if (bt_dev.le.rl_entries > bt_dev.le.rl_size) {
			/* Host resolving is used, use the RPA directly. */
			peer_addr = &conn->le.resp_addr;
		}
#endif
		bt_addr_le_copy(&cp->peer_addr, peer_addr);
		cp->filter_policy = BT_HCI_LE_CREATE_CONN_FP_NO_FILTER;
	}

	cp->own_addr_type = own_addr_type;
	cp->phys = 0;

	if (!(bt_dev.create_param.options & BT_CONN_LE_OPT_NO_1M)) {
		cp->phys |= BT_HCI_LE_EXT_SCAN_PHY_1M;
		phy = net_buf_add(buf, sizeof(*phy));
		phy->scan_interval = sys_cpu_to_le16(
			bt_dev.create_param.interval);
		phy->scan_window = sys_cpu_to_le16(
			bt_dev.create_param.window);
		set_phy_conn_param(conn, phy);
	}

	if (bt_dev.create_param.options & BT_CONN_LE_OPT_CODED) {
		cp->phys |= BT_HCI_LE_EXT_SCAN_PHY_CODED;
		phy = net_buf_add(buf, sizeof(*phy));
		phy->scan_interval = sys_cpu_to_le16(
			bt_dev.create_param.interval_coded);
		phy->scan_window = sys_cpu_to_le16(
			bt_dev.create_param.window_coded);
		set_phy_conn_param(conn, phy);
	}

	/* BT_DEV_INITIATING is set when the command completes successfully. */
	bt_hci_cmd_state_set_init(buf, &state, bt_dev.flags,
				  BT_DEV_INITIATING, true);

	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_EXT_CREATE_CONN, buf, NULL);
}
/* Initiate a connection synced to a PAwR subevent using HCI LE Extended
 * Create Connection v2 (single initiating PHY derived from the given
 * advertising set's secondary PHY options).
 */
int bt_le_create_conn_synced(const struct bt_conn *conn, const struct bt_le_ext_adv *adv,
			     uint8_t subevent)
{
	struct bt_hci_cp_le_ext_create_conn_v2 *cp;
	struct bt_hci_ext_conn_phy *phy;
	struct bt_hci_cmd_state_set state;
	struct net_buf *buf;
	uint8_t own_addr_type;
	int err;

	err = bt_id_set_create_conn_own_addr(false, &own_addr_type);
	if (err) {
		return err;
	}

	/* There shall only be one Initiating_PHYs */
	buf = bt_hci_cmd_create(BT_HCI_OP_LE_EXT_CREATE_CONN_V2, sizeof(*cp) + sizeof(*phy));
	if (!buf) {
		return -ENOBUFS;
	}

	cp = net_buf_add(buf, sizeof(*cp));
	(void)memset(cp, 0, sizeof(*cp));

	cp->subevent = subevent;
	cp->adv_handle = adv->handle;
	bt_addr_le_copy(&cp->peer_addr, &conn->le.dst);
	cp->filter_policy = BT_HCI_LE_CREATE_CONN_FP_NO_FILTER;
	cp->own_addr_type = own_addr_type;

	/* The Initiating_PHY is the secondary phy of the corresponding ext adv set */
	if (adv->options & BT_LE_ADV_OPT_CODED) {
		cp->phys = BT_HCI_LE_EXT_SCAN_PHY_CODED;
	} else if (adv->options & BT_LE_ADV_OPT_NO_2M) {
		cp->phys = BT_HCI_LE_EXT_SCAN_PHY_1M;
	} else {
		cp->phys = BT_HCI_LE_EXT_SCAN_PHY_2M;
	}

	phy = net_buf_add(buf, sizeof(*phy));
	(void)memset(phy, 0, sizeof(*phy));
	set_phy_conn_param(conn, phy);

	/* BT_DEV_INITIATING is set when the command completes successfully. */
	bt_hci_cmd_state_set_init(buf, &state, bt_dev.flags, BT_DEV_INITIATING, true);

	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_EXT_CREATE_CONN_V2, buf, NULL);
}
/* Initiate a connection using the legacy HCI LE Create Connection command
 * (controllers without extended advertising support).
 */
static int bt_le_create_conn_legacy(const struct bt_conn *conn)
{
	struct bt_hci_cp_le_create_conn *cp;
	struct bt_hci_cmd_state_set state;
	bool use_filter = false;
	struct net_buf *buf;
	uint8_t own_addr_type;
	int err;

	if (IS_ENABLED(CONFIG_BT_FILTER_ACCEPT_LIST)) {
		use_filter = atomic_test_bit(conn->flags, BT_CONN_AUTO_CONNECT);
	}

	err = bt_id_set_create_conn_own_addr(use_filter, &own_addr_type);
	if (err) {
		return err;
	}

	buf = bt_hci_cmd_create(BT_HCI_OP_LE_CREATE_CONN, sizeof(*cp));
	if (!buf) {
		return -ENOBUFS;
	}

	cp = net_buf_add(buf, sizeof(*cp));
	memset(cp, 0, sizeof(*cp));
	cp->own_addr_type = own_addr_type;

	if (use_filter) {
		/* User Initiated procedure use fast scan parameters. */
		bt_addr_le_copy(&cp->peer_addr, BT_ADDR_LE_ANY);
		cp->filter_policy = BT_HCI_LE_CREATE_CONN_FP_FILTER;
	} else {
		const bt_addr_le_t *peer_addr = &conn->le.dst;

#if defined(CONFIG_BT_SMP)
		if (bt_dev.le.rl_entries > bt_dev.le.rl_size) {
			/* Host resolving is used, use the RPA directly. */
			peer_addr = &conn->le.resp_addr;
		}
#endif
		bt_addr_le_copy(&cp->peer_addr, peer_addr);
		cp->filter_policy = BT_HCI_LE_CREATE_CONN_FP_NO_FILTER;
	}

	cp->scan_interval = sys_cpu_to_le16(bt_dev.create_param.interval);
	cp->scan_window = sys_cpu_to_le16(bt_dev.create_param.window);

	cp->conn_interval_min = sys_cpu_to_le16(conn->le.interval_min);
	cp->conn_interval_max = sys_cpu_to_le16(conn->le.interval_max);
	cp->conn_latency = sys_cpu_to_le16(conn->le.latency);
	cp->supervision_timeout = sys_cpu_to_le16(conn->le.timeout);

	/* BT_DEV_INITIATING is set when the command completes successfully. */
	bt_hci_cmd_state_set_init(buf, &state, bt_dev.flags,
				  BT_DEV_INITIATING, true);

	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_CREATE_CONN, buf, NULL);
}
/* Initiate a connection, preferring the extended create-connection command
 * whenever the controller supports extended advertising.
 */
int bt_le_create_conn(const struct bt_conn *conn)
{
	const bool use_ext = IS_ENABLED(CONFIG_BT_EXT_ADV) &&
			     BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features);

	return use_ext ? bt_le_create_conn_ext(conn) : bt_le_create_conn_legacy(conn);
}
/* Cancel an ongoing connection-creation procedure. Clears the
 * BT_DEV_INITIATING flag when the cancel command completes successfully.
 */
int bt_le_create_conn_cancel(void)
{
	struct net_buf *buf;
	struct bt_hci_cmd_state_set state;

	buf = bt_hci_cmd_create(BT_HCI_OP_LE_CREATE_CONN_CANCEL, 0);
	if (!buf) {
		/* Previously the unchecked NULL was handed straight to
		 * bt_hci_cmd_state_set_init(), dereferencing it. Every
		 * sibling caller of bt_hci_cmd_create() performs this check.
		 */
		return -ENOBUFS;
	}

	bt_hci_cmd_state_set_init(buf, &state, bt_dev.flags,
				  BT_DEV_INITIATING, false);

	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_CREATE_CONN_CANCEL, buf, NULL);
}
#endif /* CONFIG_BT_CENTRAL */
/* Issue HCI_Disconnect for @p handle with the given HCI reason code. */
int bt_hci_disconnect(uint16_t handle, uint8_t reason)
{
	struct bt_hci_cp_disconnect *disconn;
	struct net_buf *buf;

	buf = bt_hci_cmd_create(BT_HCI_OP_DISCONNECT, sizeof(*disconn));
	if (buf == NULL) {
		return -ENOBUFS;
	}

	disconn = net_buf_add(buf, sizeof(*disconn));
	disconn->handle = sys_cpu_to_le16(handle);
	disconn->reason = reason;

	return bt_hci_cmd_send_sync(BT_HCI_OP_DISCONNECT, buf, NULL);
}
/* Handles (and their disconnect reasons) for which a priority Disconnection
 * Complete event arrived before the matching connection object existed.
 * A slot is free when its handle entry is 0.
 */
static uint16_t disconnected_handles[CONFIG_BT_MAX_CONN];
static uint8_t disconnected_handles_reason[CONFIG_BT_MAX_CONN];

/* Forget all recorded early-disconnected handles. Reasons are only read
 * for a matching handle, so clearing the handle array is sufficient.
 */
static void disconnected_handles_reset(void)
{
	(void)memset(disconnected_handles, 0, sizeof(disconnected_handles));
}
/* Record that @p handle disconnected (with @p disconnect_reason) before its
 * connection object was available; conn_handle_is_disconnected() consumes
 * the record later.
 */
static void conn_handle_disconnected(uint16_t handle, uint8_t disconnect_reason)
{
	for (int i = 0; i < ARRAY_SIZE(disconnected_handles); i++) {
		if (!disconnected_handles[i]) {
			/* Use invalid connection handle bits so that connection
			 * handle 0 can be used as a valid non-zero handle.
			 */
			disconnected_handles[i] = ~BT_ACL_HANDLE_MASK | handle;
			disconnected_handles_reason[i] = disconnect_reason;
			/* Stop after the first free slot. Without this return
			 * the loop filled EVERY free slot with the same
			 * handle; the lookup clears only one, so stale copies
			 * lingered and could falsely match a future
			 * connection reusing this handle.
			 */
			return;
		}
	}
}
/** Check (and consume) an early-disconnect record for @p handle.
 *
 * @returns the recorded disconnect reason, or 0 when no record exists.
 */
static uint8_t conn_handle_is_disconnected(uint16_t handle)
{
	/* Stored entries carry the invalid-handle marker bits (see
	 * conn_handle_disconnected()); apply the same marking to compare.
	 */
	const uint16_t marked = handle | ~BT_ACL_HANDLE_MASK;

	for (int i = 0; i < ARRAY_SIZE(disconnected_handles); i++) {
		if (disconnected_handles[i] != marked) {
			continue;
		}

		disconnected_handles[i] = 0;
		return disconnected_handles_reason[i];
	}

	return 0;
}
/* Early-dispatch handler for Disconnection Complete (runs ahead of the
 * regular handler, per its _prio naming). Moves the connection into
 * DISCONNECT_COMPLETE, or records the handle when no connection object
 * exists yet.
 */
static void hci_disconn_complete_prio(struct net_buf *buf)
{
	struct bt_hci_evt_disconn_complete *evt = (void *)buf->data;
	uint16_t handle = sys_le16_to_cpu(evt->handle);
	struct bt_conn *conn;

	LOG_DBG("status 0x%02x %s handle %u reason 0x%02x",
		evt->status, bt_hci_err_to_str(evt->status), handle, evt->reason);

	if (evt->status) {
		return;
	}

	conn = bt_conn_lookup_handle(handle, BT_CONN_TYPE_ALL);
	if (!conn) {
		/* Priority disconnect complete event received before normal
		 * connection complete event.
		 */
		conn_handle_disconnected(handle, evt->reason);
		return;
	}

	conn->err = evt->reason;

	bt_conn_set_state(conn, BT_CONN_DISCONNECT_COMPLETE);
	bt_conn_unref(conn);
}
/* Regular handler for Disconnection Complete: finalizes the connection
 * state, performs BR/EDR-specific cleanup, and (central, without filter
 * accept list) restarts scanning for auto-connect peers.
 */
static void hci_disconn_complete(struct net_buf *buf)
{
	struct bt_hci_evt_disconn_complete *evt = (void *)buf->data;
	uint16_t handle = sys_le16_to_cpu(evt->handle);
	struct bt_conn *conn;

	LOG_DBG("status 0x%02x %s handle %u reason 0x%02x",
		evt->status, bt_hci_err_to_str(evt->status), handle, evt->reason);

	if (evt->status) {
		return;
	}

	conn = bt_conn_lookup_handle(handle, BT_CONN_TYPE_ALL);
	if (!conn) {
		LOG_ERR("Unable to look up conn with handle %u", handle);
		return;
	}

	bt_conn_set_state(conn, BT_CONN_DISCONNECTED);

	if (conn->type != BT_CONN_TYPE_LE) {
#if defined(CONFIG_BT_CLASSIC)
		if (conn->type == BT_CONN_TYPE_SCO) {
			bt_sco_cleanup(conn);
			return;
		}
		/*
		 * If only for one connection session bond was set, clear keys
		 * database row for this connection.
		 */
		if (conn->type == BT_CONN_TYPE_BR &&
		    atomic_test_and_clear_bit(conn->flags, BT_CONN_BR_NOBOND)) {
			bt_keys_link_key_clear(conn->br.link_key);
		}
#endif
		bt_conn_unref(conn);
		return;
	}

#if defined(CONFIG_BT_CENTRAL) && !defined(CONFIG_BT_FILTER_ACCEPT_LIST)
	if (atomic_test_bit(conn->flags, BT_CONN_AUTO_CONNECT)) {
		bt_conn_set_state(conn, BT_CONN_SCAN_BEFORE_INITIATING);

		/* Just a best-effort check if the scanner should be started. */
		int err = bt_le_scan_user_remove(BT_LE_SCAN_USER_NONE);

		if (err) {
			LOG_WRN("Error while updating the scanner (%d)", err);
		}
	}
#endif /* defined(CONFIG_BT_CENTRAL) && !defined(CONFIG_BT_FILTER_ACCEPT_LIST) */

	bt_conn_unref(conn);
}
/* Request the remote's LE feature set for @p conn.
 * Completion is delivered via BT_HCI_EVT_LE_REMOTE_FEAT_COMPLETE.
 */
int bt_hci_le_read_remote_features(struct bt_conn *conn)
{
	struct bt_hci_cp_le_read_remote_features *cp;
	struct net_buf *buf;

	buf = bt_hci_cmd_create(BT_HCI_OP_LE_READ_REMOTE_FEATURES, sizeof(*cp));
	if (buf == NULL) {
		return -ENOBUFS;
	}

	cp = net_buf_add(buf, sizeof(*cp));
	cp->handle = sys_cpu_to_le16(conn->handle);

	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_READ_REMOTE_FEATURES, buf, NULL);
}
/* Request the remote's version information for @p conn.
 *
 * @return 0 when the info was already fetched (it cannot change),
 *         -ENOTCONN when not connected, otherwise the command result.
 */
int bt_hci_read_remote_version(struct bt_conn *conn)
{
	struct bt_hci_cp_read_remote_version_info *cp;
	struct net_buf *buf;

	if (conn->state != BT_CONN_CONNECTED) {
		return -ENOTCONN;
	}

	/* Remote version cannot change, so skip the round trip once known. */
	if (atomic_test_bit(conn->flags, BT_CONN_AUTO_VERSION_INFO)) {
		return 0;
	}

	buf = bt_hci_cmd_create(BT_HCI_OP_READ_REMOTE_VERSION_INFO, sizeof(*cp));
	if (buf == NULL) {
		return -ENOBUFS;
	}

	cp = net_buf_add(buf, sizeof(*cp));
	cp->handle = sys_cpu_to_le16(conn->handle);

	return bt_hci_cmd_send_sync(BT_HCI_OP_READ_REMOTE_VERSION_INFO, buf, NULL);
}
/* LE Data Length Change Event is optional so this function just ignore
 * error and stack will continue to use default values.
 */
int bt_le_set_data_len(struct bt_conn *conn, uint16_t tx_octets, uint16_t tx_time)
{
	struct bt_hci_cp_le_set_data_len *cp;
	struct net_buf *buf;

	buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_DATA_LEN, sizeof(*cp));
	if (buf == NULL) {
		return -ENOBUFS;
	}

	cp = net_buf_add(buf, sizeof(*cp));
	cp->handle = sys_cpu_to_le16(conn->handle);
	cp->tx_octets = sys_cpu_to_le16(tx_octets);
	cp->tx_time = sys_cpu_to_le16(tx_time);

	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_DATA_LEN, buf, NULL);
}
#if defined(CONFIG_BT_USER_PHY_UPDATE)
static int hci_le_read_phy(struct bt_conn *conn)
{
struct bt_hci_cp_le_read_phy *cp;
struct bt_hci_rp_le_read_phy *rp;
struct net_buf *buf, *rsp;
int err;
buf = bt_hci_cmd_create(BT_HCI_OP_LE_READ_PHY, sizeof(*cp));
if (!buf) {
return -ENOBUFS;
}
cp = net_buf_add(buf, sizeof(*cp));
cp->handle = sys_cpu_to_le16(conn->handle);
err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_READ_PHY, buf, &rsp);
if (err) {
return err;
}
rp = (void *)rsp->data;
conn->le.phy.tx_phy = bt_get_phy(rp->tx_phy);
conn->le.phy.rx_phy = bt_get_phy(rp->rx_phy);
net_buf_unref(rsp);
return 0;
}
#endif /* defined(CONFIG_BT_USER_PHY_UPDATE) */
/* Request a PHY update for @p conn via HCI LE Set PHY; the result arrives
 * asynchronously as an LE PHY Update Complete event.
 */
int bt_le_set_phy(struct bt_conn *conn, uint8_t all_phys,
		  uint8_t pref_tx_phy, uint8_t pref_rx_phy, uint8_t phy_opts)
{
	struct bt_hci_cp_le_set_phy *cp;
	struct net_buf *buf;

	buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_PHY, sizeof(*cp));
	if (buf == NULL) {
		return -ENOBUFS;
	}

	cp = net_buf_add(buf, sizeof(*cp));
	cp->handle = sys_cpu_to_le16(conn->handle);
	cp->all_phys = all_phys;
	cp->tx_phys = pref_tx_phy;
	cp->rx_phys = pref_rx_phy;
	cp->phy_opts = phy_opts;

	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_PHY, buf, NULL);
}
/* Find the connection object whose establishment is pending for @p role.
 *
 * @param role      BT_HCI_ROLE_CENTRAL or BT_HCI_ROLE_PERIPHERAL.
 * @param peer_addr Peer identity address to match; filter-list/undirected
 *                  lookups match BT_ADDR_LE_NONE instead.
 *
 * @return referenced connection object, or NULL (caller must unref).
 */
static struct bt_conn *find_pending_connect(uint8_t role, bt_addr_le_t *peer_addr)
{
	struct bt_conn *conn;

	/*
	 * Make lookup to check if there's a connection object in
	 * CONNECT or CONNECT_AUTO state associated with passed peer LE address.
	 */
	if (IS_ENABLED(CONFIG_BT_CENTRAL) && role == BT_HCI_ROLE_CENTRAL) {
		conn = bt_conn_lookup_state_le(BT_ID_DEFAULT, peer_addr,
					       BT_CONN_INITIATING);
		if (IS_ENABLED(CONFIG_BT_FILTER_ACCEPT_LIST) && !conn) {
			conn = bt_conn_lookup_state_le(BT_ID_DEFAULT,
						       BT_ADDR_LE_NONE,
						       BT_CONN_INITIATING_FILTER_LIST);
		}

		return conn;
	}

	if (IS_ENABLED(CONFIG_BT_PERIPHERAL) && role == BT_HCI_ROLE_PERIPHERAL) {
		conn = bt_conn_lookup_state_le(bt_dev.adv_conn_id, peer_addr,
					       BT_CONN_ADV_DIR_CONNECTABLE);
		if (!conn) {
			conn = bt_conn_lookup_state_le(bt_dev.adv_conn_id,
						       BT_ADDR_LE_NONE,
						       BT_CONN_ADV_CONNECTABLE);
		}

		return conn;
	}

	return NULL;
}
/* Handle a connection-complete event reporting a cancelled create-connection
 * procedure: either give up (propagating @p err to the connection) or
 * restart scanning/initiating depending on auto-connect configuration.
 */
static void le_conn_complete_cancel(uint8_t err)
{
	int ret;
	struct bt_conn *conn;

	/* Handle create connection cancel.
	 *
	 * There is no need to check ID address as only one
	 * connection in central role can be in pending state.
	 */
	conn = find_pending_connect(BT_HCI_ROLE_CENTRAL, NULL);
	if (!conn) {
		LOG_ERR("No pending central connection");
		return;
	}

	if (atomic_test_bit(conn->flags, BT_CONN_AUTO_CONNECT)) {
		if (!IS_ENABLED(CONFIG_BT_FILTER_ACCEPT_LIST)) {
			/* Restart passive scanner for device */
			bt_conn_set_state(conn, BT_CONN_SCAN_BEFORE_INITIATING);
		} else {
			/* Restart FAL initiator after RPA timeout. */
			ret = bt_le_create_conn(conn);
			if (ret) {
				LOG_ERR("Failed to restart initiator");
			}
		}
	} else {
		int busy_status = k_work_delayable_busy_get(&conn->deferred_work);

		/* A queued/delayed deferred work item indicates the cancel
		 * came from the RPA-timeout path rather than the creation
		 * timeout, so the initiator is restarted instead of failing.
		 */
		if (!(busy_status & (K_WORK_QUEUED | K_WORK_DELAYED))) {
			LOG_WRN("Connection creation timeout triggered");
			conn->err = err;
			bt_conn_set_state(conn, BT_CONN_DISCONNECTED);
		} else {
			/* Restart initiator after RPA timeout. */
			ret = bt_le_create_conn(conn);
			if (ret) {
				LOG_ERR("Failed to restart initiator");
			}
		}
	}

	bt_conn_unref(conn);
}
/* Handle a connection-complete event reporting an advertising timeout
 * (high duty cycle directed advertising, legacy advertiser only).
 */
static void le_conn_complete_adv_timeout(void)
{
	if (!(IS_ENABLED(CONFIG_BT_EXT_ADV) &&
	      BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features))) {
		struct bt_le_ext_adv *adv = bt_le_adv_lookup_legacy();
		struct bt_conn *conn;

		/* Handle advertising timeout after high duty cycle directed
		 * advertising.
		 */

		atomic_clear_bit(adv->flags, BT_ADV_ENABLED);

		if (IS_ENABLED(CONFIG_BT_EXT_ADV) &&
		    !BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features)) {
			/* No advertising set terminated event, must be a
			 * legacy advertiser set.
			 */
			bt_le_adv_delete_legacy();
		}

		/* There is no need to check ID address as only one
		 * connection in peripheral role can be in pending state.
		 */
		conn = find_pending_connect(BT_HCI_ROLE_PERIPHERAL, NULL);
		if (!conn) {
			LOG_ERR("No pending peripheral connection");
			return;
		}

		conn->err = BT_HCI_ERR_ADV_TIMEOUT;
		bt_conn_set_state(conn, BT_CONN_DISCONNECTED);

		bt_conn_unref(conn);
	}
}
/* Entry point for the LE Enhanced Connection Complete event.
 *
 * With multiple extended advertising sets, a successful peripheral
 * connection event is cached for deferred processing (see
 * bt_dev.cached_conn_complete); everything else is handled immediately.
 */
static void enh_conn_complete(struct bt_hci_evt_le_enh_conn_complete *evt)
{
#if defined(CONFIG_BT_CONN) && (CONFIG_BT_EXT_ADV_MAX_ADV_SET > 1)
	if (IS_ENABLED(CONFIG_BT_PERIPHERAL) &&
	    evt->role == BT_HCI_ROLE_PERIPHERAL &&
	    evt->status == BT_HCI_ERR_SUCCESS &&
	    (IS_ENABLED(CONFIG_BT_EXT_ADV) &&
	     BT_FEAT_LE_EXT_ADV(bt_dev.le.features))) {

		/* Cache the connection complete event. Process it later.
		 * See bt_dev.cached_conn_complete.
		 */
		for (int i = 0; i < ARRAY_SIZE(bt_dev.cached_conn_complete); i++) {
			if (!bt_dev.cached_conn_complete[i].valid) {
				(void)memcpy(&bt_dev.cached_conn_complete[i].evt,
					     evt,
					     sizeof(struct bt_hci_evt_le_enh_conn_complete));
				bt_dev.cached_conn_complete[i].valid = true;
				return;
			}
		}

		__ASSERT(false, "No more cache entries available."
				"This should not happen by design");

		return;
	}
#endif
	bt_hci_le_enh_conn_complete(evt);
}
/* Derive the peer's identity address (@p id_addr) and on-air address
 * (@p peer_addr) from an enhanced connection complete event.
 */
static void translate_addrs(bt_addr_le_t *peer_addr, bt_addr_le_t *id_addr,
			    const struct bt_hci_evt_le_enh_conn_complete *evt, uint8_t id)
{
	if (bt_addr_le_is_resolved(&evt->peer_addr)) {
		/* Controller resolved the peer: the event's peer_addr is the
		 * identity, the actual RPA used on air is in peer_rpa.
		 */
		bt_addr_le_copy_resolved(id_addr, &evt->peer_addr);

		bt_addr_copy(&peer_addr->a, &evt->peer_rpa);
		peer_addr->type = BT_ADDR_LE_RANDOM;
	} else {
		/* Fall back to host identity lookup for unresolved peers. */
		bt_addr_le_copy(id_addr, bt_lookup_id_addr(id, &evt->peer_addr));
		bt_addr_le_copy(peer_addr, &evt->peer_addr);
	}
}
/* Populate a connection object from a (successful) enhanced connection
 * complete event: handle, destination identity address, role and the
 * negotiated connection parameters.  Optional per-feature fields are reset
 * to their specification defaults.
 */
static void update_conn(struct bt_conn *conn, const bt_addr_le_t *id_addr,
			const struct bt_hci_evt_le_enh_conn_complete *evt)
{
	bt_addr_le_copy(&conn->le.dst, id_addr);

	conn->handle = sys_le16_to_cpu(evt->handle);
	conn->role = evt->role;
	conn->err = 0U;

	/* Connection parameters as chosen by the central. */
	conn->le.interval = sys_le16_to_cpu(evt->interval);
	conn->le.latency = sys_le16_to_cpu(evt->latency);
	conn->le.timeout = sys_le16_to_cpu(evt->supv_timeout);

#if defined(CONFIG_BT_USER_DATA_LEN_UPDATE)
	/* Data length starts at the spec defaults until a DLE update occurs. */
	conn->le.data_len.tx_max_len = BT_GAP_DATA_LEN_DEFAULT;
	conn->le.data_len.tx_max_time = BT_GAP_DATA_TIME_DEFAULT;
	conn->le.data_len.rx_max_len = BT_GAP_DATA_LEN_DEFAULT;
	conn->le.data_len.rx_max_time = BT_GAP_DATA_TIME_DEFAULT;
#endif

#if defined(CONFIG_BT_SUBRATING)
	conn->le.subrate.factor = 1; /* No subrating. */
	conn->le.subrate.continuation_number = 0;
#endif
}
/* Process a successful LE Enhanced Connection Complete event.
 *
 * Matches the event against a pending connection object (pending advertiser
 * for peripheral role, pending initiator for central role), fills in the
 * connection's addresses and parameters, transitions it to the connected
 * state and notifies the application.  If the controller already reported a
 * disconnect for this handle (see conn_handle_is_disconnected()), the
 * connection is immediately marked disconnected again after the connected
 * callback ordering is satisfied.
 *
 * Must only be called with evt->status == BT_HCI_ERR_SUCCESS; error statuses
 * are handled by enh_conn_complete_error_handle().
 */
void bt_hci_le_enh_conn_complete(struct bt_hci_evt_le_enh_conn_complete *evt)
{
	__ASSERT_NO_MSG(evt->status == BT_HCI_ERR_SUCCESS);

	uint16_t handle = sys_le16_to_cpu(evt->handle);
	/* Non-zero if a disconnect for this handle was already queued. */
	uint8_t disconnect_reason = conn_handle_is_disconnected(handle);
	bt_addr_le_t peer_addr, id_addr;
	struct bt_conn *conn;
	uint8_t id;

	LOG_DBG("status 0x%02x %s handle %u role %u peer %s peer RPA %s",
		evt->status, bt_hci_err_to_str(evt->status), handle,
		evt->role, bt_addr_le_str(&evt->peer_addr), bt_addr_str(&evt->peer_rpa));
	LOG_DBG("local RPA %s", bt_addr_str(&evt->local_rpa));

#if defined(CONFIG_BT_SMP)
	bt_id_pending_keys_update();
#endif

	/* Peripheral connections are attributed to the identity used by the
	 * connectable advertiser; central connections use the default identity.
	 */
	id = evt->role == BT_HCI_ROLE_PERIPHERAL ? bt_dev.adv_conn_id : BT_ID_DEFAULT;
	translate_addrs(&peer_addr, &id_addr, evt, id);

	conn = find_pending_connect(evt->role, &id_addr);

	if (IS_ENABLED(CONFIG_BT_PERIPHERAL) &&
	    evt->role == BT_HCI_ROLE_PERIPHERAL &&
	    !(IS_ENABLED(CONFIG_BT_EXT_ADV) &&
	      BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features))) {
		struct bt_le_ext_adv *adv = bt_le_adv_lookup_legacy();
		/* Clear advertising even if we are not able to add connection
		 * object to keep host in sync with controller state.
		 */
		atomic_clear_bit(adv->flags, BT_ADV_ENABLED);
		(void)bt_le_lim_adv_cancel_timeout(adv);
	}

	if (IS_ENABLED(CONFIG_BT_CENTRAL) &&
	    evt->role == BT_HCI_ROLE_CENTRAL) {
		/* Clear initiating even if we are not able to add connection
		 * object to keep the host in sync with controller state.
		 */
		atomic_clear_bit(bt_dev.flags, BT_DEV_INITIATING);
	}

	if (!conn) {
		/* No host-side object to represent this link: terminate it so
		 * the controller does not keep a connection the host ignores.
		 */
		LOG_ERR("No pending conn for peer %s", bt_addr_le_str(&evt->peer_addr));
		bt_hci_disconnect(handle, BT_HCI_ERR_REMOTE_USER_TERM_CONN);
		return;
	}

	update_conn(conn, &id_addr, evt);

#if defined(CONFIG_BT_USER_PHY_UPDATE)
	conn->le.phy.tx_phy = BT_GAP_LE_PHY_1M;
	conn->le.phy.rx_phy = BT_GAP_LE_PHY_1M;
#endif
	/*
	 * Use connection address (instead of identity address) as initiator
	 * or responder address. Only peripheral needs to be updated. For central all
	 * was set during outgoing connection creation.
	 */
	if (IS_ENABLED(CONFIG_BT_PERIPHERAL) &&
	    conn->role == BT_HCI_ROLE_PERIPHERAL) {
		bt_addr_le_copy(&conn->le.init_addr, &peer_addr);

		if (!(IS_ENABLED(CONFIG_BT_EXT_ADV) &&
		      BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features))) {
			struct bt_le_ext_adv *adv = bt_le_adv_lookup_legacy();

			if (IS_ENABLED(CONFIG_BT_PRIVACY) &&
			    !atomic_test_bit(adv->flags, BT_ADV_USE_IDENTITY)) {
				conn->le.resp_addr.type = BT_ADDR_LE_RANDOM;
				/* An all-zero local RPA means the controller
				 * did not generate one; fall back to the
				 * random address set by the host.
				 */
				if (!bt_addr_eq(&evt->local_rpa, BT_ADDR_ANY)) {
					bt_addr_copy(&conn->le.resp_addr.a,
						     &evt->local_rpa);
				} else {
					bt_addr_copy(&conn->le.resp_addr.a,
						     &bt_dev.random_addr.a);
				}
			} else {
				bt_addr_le_copy(&conn->le.resp_addr,
						&bt_dev.id_addr[conn->id]);
			}
		} else {
			/* Copy the local RPA and handle this in advertising set
			 * terminated event.
			 */
			bt_addr_copy(&conn->le.resp_addr.a, &evt->local_rpa);
		}

		/* if the controller supports, lets advertise for another
		 * peripheral connection.
		 * check for connectable advertising state is sufficient as
		 * this is how this le connection complete for peripheral occurred.
		 */
		if (BT_LE_STATES_PER_CONN_ADV(bt_dev.le.states)) {
			bt_le_adv_resume();
		}

		if (IS_ENABLED(CONFIG_BT_EXT_ADV) &&
		    !BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features)) {
			struct bt_le_ext_adv *adv = bt_le_adv_lookup_legacy();
			/* No advertising set terminated event, must be a
			 * legacy advertiser set.
			 */
			if (!atomic_test_bit(adv->flags, BT_ADV_PERSIST)) {
				bt_le_adv_delete_legacy();
			}
		}
	}

	if (IS_ENABLED(CONFIG_BT_CENTRAL) &&
	    conn->role == BT_HCI_ROLE_CENTRAL) {
		bt_addr_le_copy(&conn->le.resp_addr, &peer_addr);

		if (IS_ENABLED(CONFIG_BT_PRIVACY)) {
			conn->le.init_addr.type = BT_ADDR_LE_RANDOM;
			/* Same fallback as for the peripheral role above. */
			if (!bt_addr_eq(&evt->local_rpa, BT_ADDR_ANY)) {
				bt_addr_copy(&conn->le.init_addr.a,
					     &evt->local_rpa);
			} else {
				bt_addr_copy(&conn->le.init_addr.a,
					     &bt_dev.random_addr.a);
			}
		} else {
			bt_addr_le_copy(&conn->le.init_addr,
					&bt_dev.id_addr[conn->id]);
		}
	}

#if defined(CONFIG_BT_USER_PHY_UPDATE)
	if (IS_ENABLED(CONFIG_BT_EXT_ADV) &&
	    BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features)) {
		int err;

		/* Query the actual PHY instead of assuming 1M. */
		err = hci_le_read_phy(conn);
		if (err) {
			LOG_WRN("Failed to read PHY (%d)", err);
		}
	}
#endif /* defined(CONFIG_BT_USER_PHY_UPDATE) */

	bt_conn_set_state(conn, BT_CONN_CONNECTED);

	if (disconnect_reason) {
		/* Mark the connection as already disconnected before calling
		 * the connected callback, so that the application cannot
		 * start sending packets
		 */
		conn->err = disconnect_reason;
		bt_conn_set_state(conn, BT_CONN_DISCONNECT_COMPLETE);
	}

	bt_conn_connected(conn);

	bt_conn_unref(conn);

	if (IS_ENABLED(CONFIG_BT_CENTRAL) && conn->role == BT_HCI_ROLE_CENTRAL) {
		int err;

		/* Just a best-effort check if the scanner should be started. */
		err = bt_le_scan_user_remove(BT_LE_SCAN_USER_NONE);
		if (err) {
			LOG_WRN("Error while updating the scanner (%d)", err);
		}
	}
}
#if defined(CONFIG_BT_PER_ADV_SYNC_RSP)
/* Process a connection complete (v2) event for a connection created through
 * a Periodic Advertising with Responses (PAwR) sync.
 *
 * Such connections have no pending connection object and no advertising set
 * terminated event, so a fresh connection object is allocated here, filled
 * from the event and moved straight to the connected state.
 *
 * Must only be called with evt->status == BT_HCI_ERR_SUCCESS.
 *
 * Fix: validate the role *before* allocating the connection object.  The
 * previous order allocated via bt_conn_add_le() and then returned on a
 * non-peripheral role without releasing the object, leaking the connection
 * slot and its reference.
 */
void bt_hci_le_enh_conn_complete_sync(struct bt_hci_evt_le_enh_conn_complete_v2 *evt,
				      struct bt_le_per_adv_sync *sync)
{
	__ASSERT_NO_MSG(evt->status == BT_HCI_ERR_SUCCESS);

	uint16_t handle = sys_le16_to_cpu(evt->handle);
	/* Non-zero if a disconnect for this handle was already queued. */
	uint8_t disconnect_reason = conn_handle_is_disconnected(handle);
	bt_addr_le_t peer_addr, id_addr;
	struct bt_conn *conn;

	if (!sync->num_subevents) {
		LOG_ERR("Unexpected connection complete event");

		return;
	}

	/* A connection established via a PAwR sync is always peripheral role.
	 * Check this before allocating a connection object so the error path
	 * does not leak it.
	 */
	if (evt->role != BT_HCI_ROLE_PERIPHERAL) {
		LOG_ERR("PAwR sync always becomes peripheral");

		return;
	}

	conn = bt_conn_add_le(BT_ID_DEFAULT, BT_ADDR_LE_ANY);
	if (!conn) {
		LOG_ERR("Unable to allocate connection");
		/* Tell the controller to disconnect to keep it in sync with
		 * the host state and avoid a "rogue" connection.
		 */
		bt_hci_disconnect(handle, BT_HCI_ERR_REMOTE_USER_TERM_CONN);

		return;
	}

	LOG_DBG("status 0x%02x %s handle %u role %u peer %s peer RPA %s",
		evt->status, bt_hci_err_to_str(evt->status), handle,
		evt->role, bt_addr_le_str(&evt->peer_addr), bt_addr_str(&evt->peer_rpa));
	LOG_DBG("local RPA %s", bt_addr_str(&evt->local_rpa));

#if defined(CONFIG_BT_SMP)
	bt_id_pending_keys_update();
#endif

	/* The v2 event is a prefix-compatible superset of the v1 event. */
	translate_addrs(&peer_addr, &id_addr, (const struct bt_hci_evt_le_enh_conn_complete *)evt,
			BT_ID_DEFAULT);
	update_conn(conn, &id_addr, (const struct bt_hci_evt_le_enh_conn_complete *)evt);

#if defined(CONFIG_BT_USER_PHY_UPDATE)
	/* The connection is always initiated on the same phy as the PAwR advertiser */
	conn->le.phy.tx_phy = sync->phy;
	conn->le.phy.rx_phy = sync->phy;
#endif

	bt_addr_le_copy(&conn->le.init_addr, &peer_addr);

	if (IS_ENABLED(CONFIG_BT_PRIVACY)) {
		conn->le.resp_addr.type = BT_ADDR_LE_RANDOM;
		bt_addr_copy(&conn->le.resp_addr.a, &evt->local_rpa);
	} else {
		bt_addr_le_copy(&conn->le.resp_addr, &bt_dev.id_addr[conn->id]);
	}

	bt_conn_set_state(conn, BT_CONN_CONNECTED);

	if (disconnect_reason) {
		/* Mark the connection as already disconnected before calling
		 * the connected callback, so that the application cannot
		 * start sending packets
		 */
		conn->err = disconnect_reason;
		bt_conn_set_state(conn, BT_CONN_DISCONNECT_COMPLETE);
	}

	bt_conn_connected(conn);

	/* Since we don't give the application a reference to manage
	 * for peripheral connections, we need to release this reference here.
	 */
	bt_conn_unref(conn);
}
#endif /* CONFIG_BT_PER_ADV_SYNC_RSP */
/* Dispatch a non-success LE connection complete status to the matching
 * recovery path: advertising timeout (peripheral), cancelled initiation
 * (central), or failed PAwR connection establishment.  Anything else is
 * only logged.
 */
static void enh_conn_complete_error_handle(uint8_t status)
{
	const bool is_peripheral = IS_ENABLED(CONFIG_BT_PERIPHERAL);
	const bool is_central = IS_ENABLED(CONFIG_BT_CENTRAL);

	if (is_peripheral && (status == BT_HCI_ERR_ADV_TIMEOUT)) {
		/* High duty cycle directed advertising ran out of time. */
		le_conn_complete_adv_timeout();
	} else if (is_central && (status == BT_HCI_ERR_UNKNOWN_CONN_ID)) {
		/* Connection creation was cancelled by the host. */
		le_conn_complete_cancel(status);

		int err = bt_le_scan_user_remove(BT_LE_SCAN_USER_NONE);

		if (err) {
			LOG_WRN("Error while updating the scanner (%d)", err);
		}
	} else if (is_central && IS_ENABLED(CONFIG_BT_PER_ADV_RSP) &&
		   (status == BT_HCI_ERR_CONN_FAIL_TO_ESTAB)) {
		/* Connection over PAwR failed to establish. */
		le_conn_complete_cancel(status);
		atomic_clear_bit(bt_dev.flags, BT_DEV_INITIATING);
	} else {
		LOG_WRN("Unexpected status 0x%02x %s", status, bt_hci_err_to_str(status));
	}
}
/* HCI event handler: LE Enhanced Connection Complete (v1). */
static void le_enh_conn_complete(struct net_buf *buf)
{
	struct bt_hci_evt_le_enh_conn_complete *evt =
		(struct bt_hci_evt_le_enh_conn_complete *)buf->data;

	if (evt->status == BT_HCI_ERR_SUCCESS) {
		enh_conn_complete(evt);
	} else {
		enh_conn_complete_error_handle(evt->status);
	}
}
#if defined(CONFIG_BT_PER_ADV_RSP) || defined(CONFIG_BT_PER_ADV_SYNC_RSP)
/* HCI event handler: LE Enhanced Connection Complete (v2).
 *
 * The v2 event adds adv_handle and sync_handle fields; their combination
 * tells how the connection was created:
 *  - both invalid:            regular connection, handled like v1;
 *  - adv_handle valid only:   created via a PAwR advertiser, also like v1;
 *  - sync_handle valid only:  created via a PAwR sync, needs the dedicated
 *                             bt_hci_le_enh_conn_complete_sync() path;
 *  - both valid:              not defined by the spec, rejected.
 */
static void le_enh_conn_complete_v2(struct net_buf *buf)
{
	struct bt_hci_evt_le_enh_conn_complete_v2 *evt =
		(struct bt_hci_evt_le_enh_conn_complete_v2 *)buf->data;

	if (evt->status != BT_HCI_ERR_SUCCESS) {
		enh_conn_complete_error_handle(evt->status);
		return;
	}

	if (evt->adv_handle == BT_HCI_ADV_HANDLE_INVALID &&
	    evt->sync_handle == BT_HCI_SYNC_HANDLE_INVALID) {
		/* The connection was not created via PAwR, handle the event like v1 */
		enh_conn_complete((struct bt_hci_evt_le_enh_conn_complete *)evt);
	}
#if defined(CONFIG_BT_PER_ADV_RSP)
	else if (evt->adv_handle != BT_HCI_ADV_HANDLE_INVALID &&
		 evt->sync_handle == BT_HCI_SYNC_HANDLE_INVALID) {
		/* The connection was created via PAwR advertiser, it can be handled like v1 */
		enh_conn_complete((struct bt_hci_evt_le_enh_conn_complete *)evt);
	}
#endif /* CONFIG_BT_PER_ADV_RSP */
#if defined(CONFIG_BT_PER_ADV_SYNC_RSP)
	else if (evt->adv_handle == BT_HCI_ADV_HANDLE_INVALID &&
		 evt->sync_handle != BT_HCI_SYNC_HANDLE_INVALID) {
		/* Created via PAwR sync, no adv set terminated event, needs separate handling */
		struct bt_le_per_adv_sync *sync;

		sync = bt_hci_per_adv_sync_lookup_handle(evt->sync_handle);
		if (!sync) {
			LOG_ERR("Unknown sync handle %d", evt->sync_handle);
			return;
		}

		bt_hci_le_enh_conn_complete_sync(evt, sync);
	}
#endif /* CONFIG_BT_PER_ADV_SYNC_RSP */
	else {
		LOG_ERR("Invalid connection complete event");
	}
}
#endif /* CONFIG_BT_PER_ADV_RSP || CONFIG_BT_PER_ADV_SYNC_RSP */
/* HCI event handler: legacy LE Connection Complete.
 *
 * Converts the legacy event into an enhanced (v1) event so the rest of the
 * host only deals with one event layout.  The legacy event carries no RPA
 * information, so the peer RPA is set to the all-zero address, and the local
 * RPA is either the host's current random address (privacy enabled) or
 * all-zero as well.
 */
static void le_legacy_conn_complete(struct net_buf *buf)
{
	struct bt_hci_evt_le_conn_complete *evt = (void *)buf->data;
	struct bt_hci_evt_le_enh_conn_complete enh;

	if (evt->status != BT_HCI_ERR_SUCCESS) {
		enh_conn_complete_error_handle(evt->status);
		return;
	}

	LOG_DBG("status 0x%02x %s role %u %s",
		evt->status, bt_hci_err_to_str(evt->status), evt->role,
		bt_addr_le_str(&evt->peer_addr));

	/* Fields shared by both event layouts are copied verbatim. */
	enh.status = evt->status;
	enh.handle = evt->handle;
	enh.role = evt->role;
	enh.interval = evt->interval;
	enh.latency = evt->latency;
	enh.supv_timeout = evt->supv_timeout;
	enh.clock_accuracy = evt->clock_accuracy;
	bt_addr_le_copy(&enh.peer_addr, &evt->peer_addr);

	/* RPA fields do not exist in the legacy event; synthesize them. */
	bt_addr_copy(&enh.local_rpa, IS_ENABLED(CONFIG_BT_PRIVACY) ?
		     &bt_dev.random_addr.a : BT_ADDR_ANY);
	bt_addr_copy(&enh.peer_rpa, BT_ADDR_ANY);

	enh_conn_complete(&enh);
}
static void le_remote_feat_complete(struct net_buf *buf)
{
struct bt_hci_evt_le_remote_feat_complete *evt = (void *)buf->data;
uint16_t handle = sys_le16_to_cpu(evt->handle);
struct bt_conn *conn;
conn = bt_conn_lookup_handle(handle, BT_CONN_TYPE_LE);
if (!conn) {
LOG_ERR("Unable to lookup conn for handle %u", handle);
return;
}
if (!evt->status) {
memcpy(conn->le.features, evt->features,
sizeof(conn->le.features));
}
atomic_set_bit(conn->flags, BT_CONN_LE_FEATURES_EXCHANGED);
if (IS_ENABLED(CONFIG_BT_REMOTE_INFO) &&
!IS_ENABLED(CONFIG_BT_REMOTE_VERSION)) {
notify_remote_info(conn);
}
bt_conn_unref(conn);
}
#if defined(CONFIG_BT_DATA_LEN_UPDATE)
/* HCI event handler: LE Data Length Change.
 *
 * Records the controller-reported maximum TX/RX payload sizes and times on
 * the connection and notifies the application.  Out-of-range values are
 * only warned about, not rejected, since the controller is authoritative.
 */
static void le_data_len_change(struct net_buf *buf)
{
	struct bt_hci_evt_le_data_len_change *evt = (void *)buf->data;
	uint16_t handle = sys_le16_to_cpu(evt->handle);
	struct bt_conn *conn;

	conn = bt_conn_lookup_handle(handle, BT_CONN_TYPE_LE);
	if (!conn) {
		LOG_ERR("Unable to lookup conn for handle %u", handle);
		return;
	}

#if defined(CONFIG_BT_USER_DATA_LEN_UPDATE)
	uint16_t max_tx_octets = sys_le16_to_cpu(evt->max_tx_octets);
	uint16_t max_rx_octets = sys_le16_to_cpu(evt->max_rx_octets);
	uint16_t max_tx_time = sys_le16_to_cpu(evt->max_tx_time);
	uint16_t max_rx_time = sys_le16_to_cpu(evt->max_rx_time);

	/* Sanity-check against the spec-defined valid ranges; log-only. */
	if (!IN_RANGE(max_tx_octets, BT_HCI_LE_MAX_TX_OCTETS_MIN, BT_HCI_LE_MAX_TX_OCTETS_MAX)) {
		LOG_WRN("max_tx_octets exceeds the valid range %u", max_tx_octets);
	}
	if (!IN_RANGE(max_rx_octets, BT_HCI_LE_MAX_RX_OCTETS_MIN, BT_HCI_LE_MAX_RX_OCTETS_MAX)) {
		LOG_WRN("max_rx_octets exceeds the valid range %u", max_rx_octets);
	}
	if (!IN_RANGE(max_tx_time, BT_HCI_LE_MAX_TX_TIME_MIN, BT_HCI_LE_MAX_TX_TIME_MAX)) {
		LOG_WRN("max_tx_time exceeds the valid range %u", max_tx_time);
	}
	if (!IN_RANGE(max_rx_time, BT_HCI_LE_MAX_RX_TIME_MIN, BT_HCI_LE_MAX_RX_TIME_MAX)) {
		LOG_WRN("max_rx_time exceeds the valid range %u", max_rx_time);
	}

	LOG_DBG("max. tx: %u (%uus), max. rx: %u (%uus)", max_tx_octets, max_tx_time, max_rx_octets,
		max_rx_time);

	conn->le.data_len.tx_max_len = max_tx_octets;
	conn->le.data_len.tx_max_time = max_tx_time;
	conn->le.data_len.rx_max_len = max_rx_octets;
	conn->le.data_len.rx_max_time = max_rx_time;
	notify_le_data_len_updated(conn);
#endif

	bt_conn_unref(conn);
}
#endif /* CONFIG_BT_DATA_LEN_UPDATE */
#if defined(CONFIG_BT_PHY_UPDATE)
static void le_phy_update_complete(struct net_buf *buf)
{
struct bt_hci_evt_le_phy_update_complete *evt = (void *)buf->data;
uint16_t handle = sys_le16_to_cpu(evt->handle);
struct bt_conn *conn;
conn = bt_conn_lookup_handle(handle, BT_CONN_TYPE_LE);
if (!conn) {
LOG_ERR("Unable to lookup conn for handle %u", handle);
return;
}
LOG_DBG("PHY updated: status: 0x%02x %s, tx: %u, rx: %u",
evt->status, bt_hci_err_to_str(evt->status), evt->tx_phy,
evt->rx_phy);
#if defined(CONFIG_BT_USER_PHY_UPDATE)
conn->le.phy.tx_phy = bt_get_phy(evt->tx_phy);
conn->le.phy.rx_phy = bt_get_phy(evt->rx_phy);
notify_le_phy_updated(conn);
#endif
bt_conn_unref(conn);
}
#endif /* CONFIG_BT_PHY_UPDATE */
/* Validate connection parameters against the HCI-defined ranges.
 *
 * All limits according to BT Core spec 5.0 [Vol 2, Part E, 7.8.12]:
 * interval 0x0006..0x0C80 with min <= max, latency <= 499, and supervision
 * timeout 0x000A..0x0C80 and strictly larger than the effective connection
 * interval including latency (timeout is in 10 ms units, interval in
 * 1.25 ms units, hence the factor of 4).
 *
 * Returns true unconditionally when CONFIG_BT_CONN_PARAM_ANY is enabled.
 */
bool bt_le_conn_params_valid(const struct bt_le_conn_param *param)
{
	if (IS_ENABLED(CONFIG_BT_CONN_PARAM_ANY)) {
		return true;
	}

	const bool interval_ok = (param->interval_min <= param->interval_max) &&
				 (param->interval_min >= 6) &&
				 (param->interval_max <= 3200);
	const bool latency_ok = (param->latency <= 499);
	const bool timeout_ok = (param->timeout >= 10) && (param->timeout <= 3200) &&
				((param->timeout * 4U) >
				 ((1U + param->latency) * param->interval_max));

	return interval_ok && latency_ok && timeout_ok;
}
/* Send an LE Remote Connection Parameter Request Negative Reply for
 * @p handle with the given HCI @p reason.  Best effort: allocation failure
 * is only logged since there is no caller to propagate an error to.
 *
 * Fix: the Reason field of this command is a single octet (Core spec,
 * Vol 4, Part E, 7.8.32).  The previous code passed it through
 * sys_cpu_to_le16(), which on big-endian hosts byte-swaps the value and
 * truncates the reply reason to 0 when stored in the 8-bit field.  A single
 * byte needs no endianness conversion.
 */
static void le_conn_param_neg_reply(uint16_t handle, uint8_t reason)
{
	struct bt_hci_cp_le_conn_param_req_neg_reply *cp;
	struct net_buf *buf;

	buf = bt_hci_cmd_create(BT_HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY,
				sizeof(*cp));
	if (!buf) {
		LOG_ERR("Unable to allocate buffer");
		return;
	}

	cp = net_buf_add(buf, sizeof(*cp));
	cp->handle = sys_cpu_to_le16(handle);
	cp->reason = reason;

	bt_hci_cmd_send(BT_HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, buf);
}
/* Accept a remote connection parameter request by sending the positive
 * reply with the (possibly adjusted) parameters chosen by the host.
 *
 * Returns 0 on success, -ENOBUFS if no command buffer is available, or the
 * error from bt_hci_cmd_send().
 */
static int le_conn_param_req_reply(uint16_t handle,
				   const struct bt_le_conn_param *param)
{
	struct bt_hci_cp_le_conn_param_req_reply *reply;
	struct net_buf *cmd_buf;

	cmd_buf = bt_hci_cmd_create(BT_HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(*reply));
	if (!cmd_buf) {
		return -ENOBUFS;
	}

	reply = net_buf_add(cmd_buf, sizeof(*reply));
	(void)memset(reply, 0, sizeof(*reply));

	/* All multi-octet fields are little-endian on the wire. */
	reply->handle = sys_cpu_to_le16(handle);
	reply->interval_min = sys_cpu_to_le16(param->interval_min);
	reply->interval_max = sys_cpu_to_le16(param->interval_max);
	reply->latency = sys_cpu_to_le16(param->latency);
	reply->timeout = sys_cpu_to_le16(param->timeout);

	return bt_hci_cmd_send(BT_HCI_OP_LE_CONN_PARAM_REQ_REPLY, cmd_buf);
}
static void le_conn_param_req(struct net_buf *buf)
{
struct bt_hci_evt_le_conn_param_req *evt = (void *)buf->data;
struct bt_le_conn_param param;
struct bt_conn *conn;
uint16_t handle;
handle = sys_le16_to_cpu(evt->handle);
param.interval_min = sys_le16_to_cpu(evt->interval_min);
param.interval_max = sys_le16_to_cpu(evt->interval_max);
param.latency = sys_le16_to_cpu(evt->latency);
param.timeout = sys_le16_to_cpu(evt->timeout);
conn = bt_conn_lookup_handle(handle, BT_CONN_TYPE_LE);
if (!conn) {
LOG_ERR("Unable to lookup conn for handle %u", handle);
le_conn_param_neg_reply(handle, BT_HCI_ERR_UNKNOWN_CONN_ID);
return;
}
if (!le_param_req(conn, ¶m)) {
le_conn_param_neg_reply(handle, BT_HCI_ERR_INVALID_LL_PARAM);
} else {
le_conn_param_req_reply(handle, ¶m);
}
bt_conn_unref(conn);
}
/* HCI event handler: LE Connection Update Complete.
 *
 * On success, stores the new connection parameters (with log-only range
 * checks).  If the peer rejected the link-layer procedure as unsupported
 * and we are peripheral, falls back to the L2CAP connection parameter
 * update procedure.  With CONFIG_BT_GAP_AUTO_UPDATE_CONN_PARAMS, a
 * rejection of unacceptable values is retried a bounded number of times
 * via the connection's deferred work item.
 *
 * NOTE(review): the braces of this function intentionally span the
 * CONFIG_BT_GAP_AUTO_UPDATE_CONN_PARAMS preprocessor region — with the
 * option disabled, the "} else if"/"} else" arms at the bottom disappear
 * and the first "}" closes the if-statement directly.
 */
static void le_conn_update_complete(struct net_buf *buf)
{
	struct bt_hci_evt_le_conn_update_complete *evt = (void *)buf->data;
	struct bt_conn *conn;
	uint16_t handle;

	handle = sys_le16_to_cpu(evt->handle);

	LOG_DBG("status 0x%02x %s, handle %u",
		evt->status, bt_hci_err_to_str(evt->status), handle);

	conn = bt_conn_lookup_handle(handle, BT_CONN_TYPE_LE);
	if (!conn) {
		LOG_ERR("Unable to lookup conn for handle %u", handle);
		return;
	}

	if (evt->status == BT_HCI_ERR_UNSUPP_REMOTE_FEATURE &&
	    conn->role == BT_HCI_ROLE_PERIPHERAL &&
	    !atomic_test_and_set_bit(conn->flags,
				     BT_CONN_PERIPHERAL_PARAM_L2CAP)) {
		/* CPR not supported, let's try L2CAP CPUP instead */
		struct bt_le_conn_param param;

		param.interval_min = conn->le.interval_min;
		param.interval_max = conn->le.interval_max;
		param.latency = conn->le.pending_latency;
		param.timeout = conn->le.pending_timeout;

		bt_l2cap_update_conn_param(conn, &param);
	} else {
		if (!evt->status) {
			conn->le.interval = sys_le16_to_cpu(evt->interval);
			conn->le.latency = sys_le16_to_cpu(evt->latency);
			conn->le.timeout = sys_le16_to_cpu(evt->supv_timeout);

			/* Range checks are log-only: the controller's values
			 * are accepted regardless.
			 */
			if (!IS_ENABLED(CONFIG_BT_CONN_PARAM_ANY)) {
				if (!IN_RANGE(conn->le.interval, BT_HCI_LE_INTERVAL_MIN,
					      BT_HCI_LE_INTERVAL_MAX)) {
					LOG_WRN("interval exceeds the valid range 0x%04x",
						conn->le.interval);
				}
				if (conn->le.latency > BT_HCI_LE_PERIPHERAL_LATENCY_MAX) {
					LOG_WRN("latency exceeds the valid range 0x%04x",
						conn->le.latency);
				}
				if (!IN_RANGE(conn->le.timeout, BT_HCI_LE_SUPERVISON_TIMEOUT_MIN,
					      BT_HCI_LE_SUPERVISON_TIMEOUT_MAX)) {
					LOG_WRN("supv_timeout exceeds the valid range 0x%04x",
						conn->le.timeout);
				}
			}

#if defined(CONFIG_BT_GAP_AUTO_UPDATE_CONN_PARAMS)
			atomic_clear_bit(conn->flags,
					 BT_CONN_PERIPHERAL_PARAM_AUTO_UPDATE);
		} else if (atomic_test_bit(conn->flags,
					   BT_CONN_PERIPHERAL_PARAM_AUTO_UPDATE) &&
			   evt->status == BT_HCI_ERR_UNSUPP_LL_PARAM_VAL &&
			   conn->le.conn_param_retry_countdown) {
			/* Retry the automatic update with a delay. */
			conn->le.conn_param_retry_countdown--;
			k_work_schedule(&conn->deferred_work,
					K_MSEC(CONFIG_BT_CONN_PARAM_RETRY_TIMEOUT));
		} else {
			atomic_clear_bit(conn->flags,
					 BT_CONN_PERIPHERAL_PARAM_AUTO_UPDATE);
#endif /* CONFIG_BT_GAP_AUTO_UPDATE_CONN_PARAMS */
		}

		notify_le_param_updated(conn);
	}

	bt_conn_unref(conn);
}
#if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
/* Enable controller-to-host ACL flow control, if the controller supports
 * it: configure the host's buffer size/count and then turn the flow
 * control on.
 *
 * Returns 0 on success (including the unsupported case, which is only
 * warned about), -ENOBUFS on command buffer exhaustion, or the error from
 * the HCI command exchange.
 */
static int set_flow_control(void)
{
	struct bt_hci_cp_host_buffer_size *params;
	struct net_buf *cmd_buf;
	int err;

	/* Check if host flow control is actually supported */
	if (!BT_CMD_TEST(bt_dev.supported_commands, 10, 5)) {
		LOG_WRN("Controller to host flow control not supported");
		return 0;
	}

	/* Step 1: tell the controller how many ACL buffers the host has. */
	cmd_buf = bt_hci_cmd_create(BT_HCI_OP_HOST_BUFFER_SIZE, sizeof(*params));
	if (!cmd_buf) {
		return -ENOBUFS;
	}

	params = net_buf_add(cmd_buf, sizeof(*params));
	(void)memset(params, 0, sizeof(*params));
	params->acl_mtu = sys_cpu_to_le16(CONFIG_BT_BUF_ACL_RX_SIZE);
	params->acl_pkts = sys_cpu_to_le16(CONFIG_BT_BUF_ACL_RX_COUNT);

	err = bt_hci_cmd_send_sync(BT_HCI_OP_HOST_BUFFER_SIZE, cmd_buf, NULL);
	if (err) {
		return err;
	}

	/* Step 2: switch controller-to-host flow control on. */
	cmd_buf = bt_hci_cmd_create(BT_HCI_OP_SET_CTL_TO_HOST_FLOW, 1);
	if (!cmd_buf) {
		return -ENOBUFS;
	}

	net_buf_add_u8(cmd_buf, BT_HCI_CTL_TO_HOST_FLOW_ENABLE);
	return bt_hci_cmd_send_sync(BT_HCI_OP_SET_CTL_TO_HOST_FLOW, cmd_buf, NULL);
}
#endif /* CONFIG_BT_HCI_ACL_FLOW_CONTROL */
/* Remove all pairing state for (id, addr): disconnect any live connection,
 * clear BR/EDR link keys (a public LE address may identify the same BR/EDR
 * device), clear SMP keys and GATT data, and finally notify registered
 * bond_deleted listeners.
 *
 * NOTE: the conn->le.keys pointer is stolen *before* the disconnect so that
 * disconnect callbacks cannot observe keys that are about to be cleared —
 * the ordering here is deliberate.
 */
static void unpair(uint8_t id, const bt_addr_le_t *addr)
{
	struct bt_keys *keys = NULL;
	struct bt_conn *conn = bt_conn_lookup_addr_le(id, addr);

	if (conn) {
		/* Clear the conn->le.keys pointer since we'll invalidate it,
		 * and don't want any subsequent code (like disconnected
		 * callbacks) accessing it.
		 */
		if (conn->type == BT_CONN_TYPE_LE) {
			keys = conn->le.keys;
			conn->le.keys = NULL;
		}

		bt_conn_disconnect(conn, BT_HCI_ERR_REMOTE_USER_TERM_CONN);
		bt_conn_unref(conn);
	}

	if (IS_ENABLED(CONFIG_BT_CLASSIC)) {
		/* LE Public may indicate BR/EDR as well */
		if (addr->type == BT_ADDR_LE_PUBLIC) {
			bt_keys_link_key_clear_addr(&addr->a);
		}
	}

	if (IS_ENABLED(CONFIG_BT_SMP)) {
		/* If no keys were stolen from a live connection, look them
		 * up from persistent key storage.
		 */
		if (!keys) {
			keys = bt_keys_find_addr(id, addr);
		}

		if (keys) {
			bt_keys_clear(keys);
		}
	}

	bt_gatt_clear(id, addr);

#if defined(CONFIG_BT_SMP) || defined(CONFIG_BT_CLASSIC)
	struct bt_conn_auth_info_cb *listener, *next;

	/* Safe iteration: a listener may unregister itself in the callback. */
	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&bt_auth_info_cbs, listener,
					  next, node) {
		if (listener->bond_deleted) {
			listener->bond_deleted(id, addr);
		}
	}
#endif /* defined(CONFIG_BT_SMP) || defined(CONFIG_BT_CLASSIC) */
}
/* bt_foreach_bond() callback: unpair a single bonded peer.  @p data points
 * at the identity handle (uint8_t) passed by bt_unpair().
 */
static void unpair_remote(const struct bt_bond_info *info, void *data)
{
	const uint8_t *id = data;

	unpair(*id, &info->addr);
}
/* Public API: remove the bond with @p addr on identity @p id.
 *
 * With SMP enabled, a NULL or BT_ADDR_LE_ANY address acts as a wildcard
 * and removes every bond on that identity.  Without SMP a NULL address is
 * rejected.  Returns 0 on success or -EINVAL on invalid arguments.
 */
int bt_unpair(uint8_t id, const bt_addr_le_t *addr)
{
	if (id >= CONFIG_BT_ID_MAX) {
		return -EINVAL;
	}

	if (IS_ENABLED(CONFIG_BT_SMP) &&
	    (!addr || bt_addr_le_eq(addr, BT_ADDR_LE_ANY))) {
		/* Wildcard: iterate over every bond on this identity. */
		bt_foreach_bond(id, unpair_remote, &id);
		return 0;
	}

	if (!IS_ENABLED(CONFIG_BT_SMP)) {
		CHECKIF(addr == NULL) {
			LOG_DBG("addr is NULL");
			return -EINVAL;
		}
	}

	unpair(id, addr);
	return 0;
}
#endif /* CONFIG_BT_CONN */
#if defined(CONFIG_BT_SMP) || defined(CONFIG_BT_CLASSIC)
enum bt_security_err bt_security_err_get(uint8_t hci_err)
{
switch (hci_err) {
case BT_HCI_ERR_SUCCESS:
return BT_SECURITY_ERR_SUCCESS;
case BT_HCI_ERR_AUTH_FAIL:
return BT_SECURITY_ERR_AUTH_FAIL;
case BT_HCI_ERR_PIN_OR_KEY_MISSING:
return BT_SECURITY_ERR_PIN_OR_KEY_MISSING;
case BT_HCI_ERR_PAIRING_NOT_SUPPORTED:
return BT_SECURITY_ERR_PAIR_NOT_SUPPORTED;
case BT_HCI_ERR_PAIRING_NOT_ALLOWED:
return BT_SECURITY_ERR_PAIR_NOT_ALLOWED;
case BT_HCI_ERR_INVALID_PARAM:
return BT_SECURITY_ERR_INVALID_PARAM;
default:
return BT_SECURITY_ERR_UNSPECIFIED;
}
}
#endif /* defined(CONFIG_BT_SMP) || defined(CONFIG_BT_CLASSIC) */
#if defined(CONFIG_BT_SMP)
static bool update_sec_level(struct bt_conn *conn)
{
if (conn->le.keys && (conn->le.keys->flags & BT_KEYS_AUTHENTICATED)) {
if (conn->le.keys->flags & BT_KEYS_SC &&
conn->le.keys->enc_size == BT_SMP_MAX_ENC_KEY_SIZE) {
conn->sec_level = BT_SECURITY_L4;
} else {
conn->sec_level = BT_SECURITY_L3;
}
} else {
conn->sec_level = BT_SECURITY_L2;
}
return !(conn->required_sec_level > conn->sec_level);
}
#endif /* CONFIG_BT_SMP */
#if defined(CONFIG_BT_SMP) || defined(CONFIG_BT_CLASSIC)
static void hci_encrypt_change(struct net_buf *buf)
{
struct bt_hci_evt_encrypt_change *evt = (void *)buf->data;
uint16_t handle = sys_le16_to_cpu(evt->handle);
uint8_t status = evt->status;
struct bt_conn *conn;
LOG_DBG("status 0x%02x %s handle %u encrypt 0x%02x",
evt->status, bt_hci_err_to_str(evt->status), handle, evt->encrypt);
conn = bt_conn_lookup_handle(handle, BT_CONN_TYPE_ALL);
if (!conn) {
LOG_ERR("Unable to look up conn with handle %u", handle);
return;
}
if (status) {
bt_conn_security_changed(conn, status,
bt_security_err_get(status));
bt_conn_unref(conn);
return;
}
if (conn->encrypt == evt->encrypt) {
LOG_WRN("No change to encryption state (encrypt 0x%02x)", evt->encrypt);
bt_conn_unref(conn);
return;
}
conn->encrypt = evt->encrypt;
#if defined(CONFIG_BT_SMP)
if (conn->type == BT_CONN_TYPE_LE) {
/*
* we update keys properties only on successful encryption to
* avoid losing valid keys if encryption was not successful.
*
* Update keys with last pairing info for proper sec level
* update. This is done only for LE transport, for BR/EDR keys