/*
* Copyright (c) 2018-2021 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stddef.h>
#include <zephyr/zephyr.h>
#include <soc.h>
#include <zephyr/bluetooth/bluetooth.h>
#include <zephyr/bluetooth/hci.h>
#include <zephyr/sys/byteorder.h>
#include "hal/cpu.h"
#include "hal/ecb.h"
#include "hal/ccm.h"
#include "hal/ticker.h"
#include "util/util.h"
#include "util/mem.h"
#include "util/memq.h"
#include "util/mfifo.h"
#include "util/mayfly.h"
#include "util/dbuf.h"
#include "ticker/ticker.h"
#include "pdu.h"
#include "lll.h"
#include "lll_clock.h"
#include "lll/lll_df_types.h"
#include "lll_conn.h"
#include "lll_conn_iso.h"
#if !defined(CONFIG_BT_LL_SW_LLCP_LEGACY)
#include "ull_tx_queue.h"
#endif /* CONFIG_BT_LL_SW_LLCP_LEGACY */
#include "isoal.h"
#include "ull_iso_types.h"
#include "ull_conn_types.h"
#include "ull_conn_iso_types.h"
#if defined(CONFIG_BT_CTLR_USER_EXT)
#include "ull_vendor.h"
#endif /* CONFIG_BT_CTLR_USER_EXT */
#include "ull_internal.h"
#include "ull_sched_internal.h"
#include "ull_chan_internal.h"
#include "ull_conn_internal.h"
#include "ull_periph_internal.h"
#include "ull_central_internal.h"
#include "ull_iso_internal.h"
#include "ull_conn_iso_internal.h"
#include "ull_peripheral_iso_internal.h"
#include "ll.h"
#include "ll_feat.h"
#include "ll_settings.h"
#if !defined(CONFIG_BT_LL_SW_LLCP_LEGACY)
#include "ull_llcp.h"
#include "ull_llcp_features.h"
#endif /* CONFIG_BT_LL_SW_LLCP_LEGACY */
#define BT_DBG_ENABLED IS_ENABLED(CONFIG_BT_DEBUG_HCI_DRIVER)
#define LOG_MODULE_NAME bt_ctlr_ull_conn
#include "common/log.h"
#include "hal/debug.h"
static int init_reset(void);
#if defined(CONFIG_BT_CTLR_RX_ENQUEUE_HOLD)
static bool rx_hold_is_done(struct ll_conn *conn);
static void rx_hold_flush(struct ll_conn *conn);
#endif /* CONFIG_BT_CTLR_RX_ENQUEUE_HOLD */
#if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
static void tx_demux_sched(struct ll_conn *conn);
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
static void tx_demux(void *param);
static struct node_tx *tx_ull_dequeue(struct ll_conn *conn, struct node_tx *tx);
static void ticker_update_conn_op_cb(uint32_t status, void *param);
static void ticker_stop_conn_op_cb(uint32_t status, void *param);
static void ticker_start_conn_op_cb(uint32_t status, void *param);
static void conn_setup_adv_scan_disabled_cb(void *param);
static inline void disable(uint16_t handle);
static void conn_cleanup(struct ll_conn *conn, uint8_t reason);
static void conn_cleanup_finalize(struct ll_conn *conn);
static void tx_ull_flush(struct ll_conn *conn);
static void ticker_stop_op_cb(uint32_t status, void *param);
static void conn_disable(void *param);
static void disabled_cb(void *param);
static void tx_lll_flush(void *param);
#if defined(CONFIG_BT_CTLR_LLID_DATA_START_EMPTY)
static int empty_data_start_release(struct ll_conn *conn, struct node_tx *tx);
#endif /* CONFIG_BT_CTLR_LLID_DATA_START_EMPTY */
#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
/* Connection context pointer used as CPR mutex to serialize connection
 * parameter request procedures across simultaneous connections so that
 * offsets exchanged with the peer do not get changed.
 */
struct ll_conn *conn_upd_curr;
#endif /* defined(CONFIG_BT_CTLR_CONN_PARAM_REQ) */
#if defined(CONFIG_BT_LL_SW_LLCP_LEGACY)
static inline void ctrl_tx_enqueue(struct ll_conn *conn, struct node_tx *tx);
static inline void event_fex_prep(struct ll_conn *conn);
static inline void event_vex_prep(struct ll_conn *conn);
static inline int event_conn_upd_prep(struct ll_conn *conn, uint16_t lazy,
uint32_t ticks_at_expire);
static inline void event_ch_map_prep(struct ll_conn *conn,
uint16_t event_counter);
#if defined(CONFIG_BT_CTLR_LE_ENC)
static inline void ctrl_tx_check_and_resume(struct ll_conn *conn);
static bool is_enc_req_pause_tx(struct ll_conn *conn);
static inline void event_enc_prep(struct ll_conn *conn);
#if defined(CONFIG_BT_PERIPHERAL)
static int enc_rsp_send(struct ll_conn *conn);
#endif /* CONFIG_BT_PERIPHERAL */
static int start_enc_rsp_send(struct ll_conn *conn,
struct pdu_data *pdu_ctrl_tx);
static inline bool ctrl_is_unexpected(struct ll_conn *conn, uint8_t opcode);
#endif /* CONFIG_BT_CTLR_LE_ENC */
#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
static inline void event_conn_param_prep(struct ll_conn *conn,
uint16_t event_counter,
uint32_t ticks_at_expire);
#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
#if defined(CONFIG_BT_CTLR_LE_PING)
static inline void event_ping_prep(struct ll_conn *conn);
#endif /* CONFIG_BT_CTLR_LE_PING */
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
static inline void event_len_prep(struct ll_conn *conn);
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
#if defined(CONFIG_BT_CTLR_PHY)
static inline void event_phy_req_prep(struct ll_conn *conn);
static inline void event_phy_upd_ind_prep(struct ll_conn *conn,
uint16_t event_counter);
#endif /* CONFIG_BT_CTLR_PHY */
#if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
static inline void event_send_cis_rsp(struct ll_conn *conn,
uint16_t event_counter);
static inline void event_peripheral_iso_prep(struct ll_conn *conn,
uint16_t event_counter,
uint32_t ticks_at_expire);
#endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
static inline void ctrl_tx_pre_ack(struct ll_conn *conn,
struct pdu_data *pdu_tx);
static inline void ctrl_tx_ack(struct ll_conn *conn, struct node_tx **tx,
struct pdu_data *pdu_tx);
static inline int ctrl_rx(memq_link_t *link, struct node_rx_pdu **rx,
struct pdu_data *pdu_rx, struct ll_conn *conn);
#endif /* CONFIG_BT_LL_SW_LLCP_LEGACY */
#if defined(CONFIG_BT_CTLR_FORCE_MD_AUTO)
static uint8_t force_md_cnt_calc(struct lll_conn *lll_conn, uint32_t tx_rate);
#endif /* CONFIG_BT_CTLR_FORCE_MD_AUTO */
#if !defined(BT_CTLR_USER_TX_BUFFER_OVERHEAD)
#define BT_CTLR_USER_TX_BUFFER_OVERHEAD 0
#endif /* BT_CTLR_USER_TX_BUFFER_OVERHEAD */
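/* Size of one Tx buffer: the node_tx header, the data PDU header up to
 * the lldata payload, the maximum Tx payload length plus any vendor
 * specific overhead, rounded up by MROUND for memory alignment.
 */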
#define CONN_TX_BUF_SIZE MROUND(offsetof(struct node_tx, pdu) + \
offsetof(struct pdu_data, lldata) + \
(LL_LENGTH_OCTETS_TX_MAX + \
BT_CTLR_USER_TX_BUFFER_OVERHEAD))
/**
 * One connection may take up to 4 TX buffers for procedures
 * simultaneously, for example 2 for encryption, 1 for termination,
 * and 1 that is in flight and has not been returned to the pool.
 */
#define CONN_TX_CTRL_BUFFERS (4 * CONFIG_BT_CTLR_LLCP_CONN)
#define CONN_TX_CTRL_BUF_SIZE MROUND(offsetof(struct node_tx, pdu) + \
offsetof(struct pdu_data, llctrl) + \
sizeof(struct pdu_data_llctrl))
/* Terminate procedure state values */
#define TERM_REQ 1
#define TERM_ACKED 3
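/* NOTE: The terminate procedure state is the 8-bit difference
 * (llcp_terminate.req - llcp_terminate.ack): TERM_REQ (1) while the
 * request is pending, 2 while the LL_TERMINATE_IND PDU is in flight,
 * and TERM_ACKED (3) once the peer has acknowledged it (see the
 * Terminate Procedure Request handling in ull_conn_llcp()).
 */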
/* CIS Establishment procedure state values */
#define CIS_REQUEST_AWAIT_HOST 2
static MFIFO_DEFINE(conn_tx, sizeof(struct lll_tx), CONFIG_BT_BUF_ACL_TX_COUNT);
static MFIFO_DEFINE(conn_ack, sizeof(struct lll_tx),
(CONFIG_BT_BUF_ACL_TX_COUNT + CONN_TX_CTRL_BUFFERS));
static struct {
void *free;
uint8_t pool[CONN_TX_BUF_SIZE * CONFIG_BT_BUF_ACL_TX_COUNT];
} mem_conn_tx;
static struct {
void *free;
uint8_t pool[CONN_TX_CTRL_BUF_SIZE * CONN_TX_CTRL_BUFFERS];
} mem_conn_tx_ctrl;
static struct {
void *free;
uint8_t pool[sizeof(memq_link_t) *
(CONFIG_BT_BUF_ACL_TX_COUNT + CONN_TX_CTRL_BUFFERS)];
} mem_link_tx;
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
static uint16_t default_tx_octets;
static uint16_t default_tx_time;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
#if defined(CONFIG_BT_CTLR_PHY)
static uint8_t default_phy_tx;
static uint8_t default_phy_rx;
#endif /* CONFIG_BT_CTLR_PHY */
static struct ll_conn conn_pool[CONFIG_BT_MAX_CONN];
static void *conn_free;
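/* Connection context pool accessors: contexts are allocated from
 * conn_pool, and a connection handle is simply the context's index
 * within that pool.
 */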
struct ll_conn *ll_conn_acquire(void)
{
return mem_acquire(&conn_free);
}
void ll_conn_release(struct ll_conn *conn)
{
mem_release(conn, &conn_free);
}
uint16_t ll_conn_handle_get(struct ll_conn *conn)
{
return mem_index_get(conn, conn_pool, sizeof(struct ll_conn));
}
struct ll_conn *ll_conn_get(uint16_t handle)
{
return mem_get(conn_pool, sizeof(struct ll_conn), handle);
}
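/* Return the connection context only for an established connection:
 * lll.handle equals the pool index only while the connection is
 * active, hence the comparison below filters out free or not yet
 * connected contexts.
 */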
struct ll_conn *ll_connected_get(uint16_t handle)
{
struct ll_conn *conn;
if (handle >= CONFIG_BT_MAX_CONN) {
return NULL;
}
conn = ll_conn_get(handle);
if (conn->lll.handle != handle) {
return NULL;
}
return conn;
}
uint16_t ll_conn_free_count_get(void)
{
return mem_free_count_get(conn_free);
}
void *ll_tx_mem_acquire(void)
{
return mem_acquire(&mem_conn_tx.free);
}
void ll_tx_mem_release(void *tx)
{
mem_release(tx, &mem_conn_tx.free);
}
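/* Enqueue an ACL Tx node towards the connection's Tx demultiplexer.
 * Returns -EINVAL for an unknown handle, -ENOBUFS when the conn_tx
 * mfifo is full, and 0 on success. When the connection event is
 * already active (non-zero ULL reference count), the Tx demux is
 * scheduled immediately instead of waiting for the event done.
 */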
int ll_tx_mem_enqueue(uint16_t handle, void *tx)
{
#if defined(CONFIG_BT_CTLR_THROUGHPUT)
#define BT_CTLR_THROUGHPUT_PERIOD 1000000000UL
static uint32_t tx_rate;
static uint32_t tx_cnt;
#endif /* CONFIG_BT_CTLR_THROUGHPUT */
struct lll_tx *lll_tx;
struct ll_conn *conn;
uint8_t idx;
conn = ll_connected_get(handle);
if (!conn) {
return -EINVAL;
}
idx = MFIFO_ENQUEUE_GET(conn_tx, (void **) &lll_tx);
if (!lll_tx) {
return -ENOBUFS;
}
lll_tx->handle = handle;
lll_tx->node = tx;
MFIFO_ENQUEUE(conn_tx, idx);
#if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
if (ull_ref_get(&conn->ull)) {
#if defined(CONFIG_BT_CTLR_FORCE_MD_AUTO)
if (tx_cnt >= CONFIG_BT_BUF_ACL_TX_COUNT) {
uint8_t previous, force_md_cnt;
force_md_cnt = force_md_cnt_calc(&conn->lll, tx_rate);
previous = lll_conn_force_md_cnt_set(force_md_cnt);
if (previous != force_md_cnt) {
BT_INFO("force_md_cnt: old= %u, new= %u.",
previous, force_md_cnt);
}
}
#endif /* CONFIG_BT_CTLR_FORCE_MD_AUTO */
tx_demux_sched(conn);
#if defined(CONFIG_BT_CTLR_FORCE_MD_AUTO)
} else {
lll_conn_force_md_cnt_set(0U);
#endif /* CONFIG_BT_CTLR_FORCE_MD_AUTO */
}
#endif /* !CONFIG_BT_CTLR_LOW_LAT_ULL */
if (IS_ENABLED(CONFIG_BT_PERIPHERAL) && conn->lll.role) {
ull_periph_latency_cancel(conn, handle);
}
#if defined(CONFIG_BT_CTLR_THROUGHPUT)
static uint32_t last_cycle_stamp;
static uint32_t tx_len;
struct pdu_data *pdu;
uint32_t cycle_stamp;
uint64_t delta;
cycle_stamp = k_cycle_get_32();
delta = k_cyc_to_ns_floor64(cycle_stamp - last_cycle_stamp);
if (delta > BT_CTLR_THROUGHPUT_PERIOD) {
BT_INFO("incoming Tx: count= %u, len= %u, rate= %u bps.",
tx_cnt, tx_len, tx_rate);
last_cycle_stamp = cycle_stamp;
tx_cnt = 0U;
tx_len = 0U;
}
pdu = (void *)((struct node_tx *)tx)->pdu;
tx_len += pdu->len;
tx_rate = ((uint64_t)tx_len << 3) * BT_CTLR_THROUGHPUT_PERIOD / delta;
tx_cnt++;
#endif /* CONFIG_BT_CTLR_THROUGHPUT */
return 0;
}
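/* HCI connection update entry point: cmd 0 initiates a connection
 * update (upgraded to a Connection Parameter Request procedure when
 * the peer supports it), cmd 2 is the host's reply to a remote CPR,
 * with status 0 accepting and any other value rejecting the request.
 */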
uint8_t ll_conn_update(uint16_t handle, uint8_t cmd, uint8_t status, uint16_t interval_min,
uint16_t interval_max, uint16_t latency, uint16_t timeout)
{
struct ll_conn *conn;
conn = ll_connected_get(handle);
if (!conn) {
return BT_HCI_ERR_UNKNOWN_CONN_ID;
}
#if defined(CONFIG_BT_LL_SW_LLCP_LEGACY)
if (!cmd) {
#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
if (!conn->llcp_conn_param.disabled &&
(!conn->common.fex_valid ||
(conn->llcp_feature.features_conn &
BIT64(BT_LE_FEAT_BIT_CONN_PARAM_REQ)))) {
cmd++;
} else if (conn->lll.role) {
return BT_HCI_ERR_UNSUPP_REMOTE_FEATURE;
}
#else /* !CONFIG_BT_CTLR_CONN_PARAM_REQ */
if (conn->lll.role) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
#endif /* !CONFIG_BT_CTLR_CONN_PARAM_REQ */
}
if (!cmd) {
if (conn->llcp_cu.req != conn->llcp_cu.ack) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
conn->llcp_cu.win_size = 1U;
conn->llcp_cu.win_offset_us = 0U;
conn->llcp_cu.interval = interval_max;
conn->llcp_cu.latency = latency;
conn->llcp_cu.timeout = timeout;
conn->llcp_cu.state = LLCP_CUI_STATE_USE;
conn->llcp_cu.cmd = 1U;
conn->llcp_cu.req++;
} else {
#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
cmd--;
if (cmd) {
if ((conn->llcp_conn_param.req ==
conn->llcp_conn_param.ack) ||
(conn->llcp_conn_param.state !=
LLCP_CPR_STATE_APP_WAIT)) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
conn->llcp_conn_param.status = status;
conn->llcp_conn_param.state = cmd;
conn->llcp_conn_param.cmd = 1U;
} else {
if (conn->llcp_conn_param.req !=
conn->llcp_conn_param.ack) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
conn->llcp_conn_param.status = 0U;
conn->llcp_conn_param.interval_min = interval_min;
conn->llcp_conn_param.interval_max = interval_max;
conn->llcp_conn_param.latency = latency;
conn->llcp_conn_param.timeout = timeout;
conn->llcp_conn_param.state = cmd;
conn->llcp_conn_param.cmd = 1U;
conn->llcp_conn_param.req++;
if (IS_ENABLED(CONFIG_BT_PERIPHERAL) &&
conn->lll.role) {
ull_periph_latency_cancel(conn, handle);
}
}
#else /* !CONFIG_BT_CTLR_CONN_PARAM_REQ */
/* CPR feature not supported */
return BT_HCI_ERR_CMD_DISALLOWED;
#endif /* !CONFIG_BT_CTLR_CONN_PARAM_REQ */
}
#else /* CONFIG_BT_LL_SW_LLCP_LEGACY */
if (cmd == 0U) {
uint8_t err;
err = ull_cp_conn_update(conn, interval_min, interval_max, latency, timeout);
if (err) {
return err;
}
if (IS_ENABLED(CONFIG_BT_PERIPHERAL) &&
conn->lll.role) {
ull_periph_latency_cancel(conn, handle);
}
} else if (cmd == 2U) {
#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
if (status == 0U) {
ull_cp_conn_param_req_reply(conn);
} else {
ull_cp_conn_param_req_neg_reply(conn, status);
}
return BT_HCI_ERR_SUCCESS;
#else /* !CONFIG_BT_CTLR_CONN_PARAM_REQ */
/* CPR feature not supported */
return BT_HCI_ERR_CMD_DISALLOWED;
#endif /* !CONFIG_BT_CTLR_CONN_PARAM_REQ */
} else {
return BT_HCI_ERR_UNKNOWN_CMD;
}
#endif /* CONFIG_BT_LL_SW_LLCP_LEGACY */
return 0;
}
uint8_t ll_chm_get(uint16_t handle, uint8_t *chm)
{
struct ll_conn *conn;
conn = ll_connected_get(handle);
if (!conn) {
return BT_HCI_ERR_UNKNOWN_CONN_ID;
}
#if defined(CONFIG_BT_LL_SW_LLCP_LEGACY)
/* Iterate until we are sure the ISR did not modify the value while
* we were reading it from memory.
*/
do {
conn->chm_updated = 0U;
memcpy(chm, conn->lll.data_chan_map,
sizeof(conn->lll.data_chan_map));
} while (conn->chm_updated);
#else /* CONFIG_BT_LL_SW_LLCP_LEGACY */
/*
 * Core Spec 5.2, Vol 4, Part E, 7.8.20:
 * The HCI_LE_Read_Channel_Map command returns the current Channel_Map
 * for the specified Connection_Handle. The returned value indicates the state of
 * the Channel_Map specified by the last transmitted or received Channel_Map
 * (in a CONNECT_IND or LL_CHANNEL_MAP_IND message) for the specified
 * Connection_Handle, regardless of whether the Central has received an
 * acknowledgment.
 */
const uint8_t *pending_chm;
pending_chm = ull_cp_chan_map_update_pending(conn);
if (pending_chm) {
memcpy(chm, pending_chm, sizeof(conn->lll.data_chan_map));
} else {
memcpy(chm, conn->lll.data_chan_map, sizeof(conn->lll.data_chan_map));
}
#endif /* CONFIG_BT_LL_SW_LLCP_LEGACY */
return 0;
}
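/* Only the disconnect reasons accepted for the HCI_Disconnect command
 * are valid; any other error code is rejected with Invalid Parameters.
 */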
static bool is_valid_disconnect_reason(uint8_t reason)
{
switch (reason) {
case BT_HCI_ERR_AUTH_FAIL:
case BT_HCI_ERR_REMOTE_USER_TERM_CONN:
case BT_HCI_ERR_REMOTE_LOW_RESOURCES:
case BT_HCI_ERR_REMOTE_POWER_OFF:
case BT_HCI_ERR_UNSUPP_REMOTE_FEATURE:
case BT_HCI_ERR_PAIRING_NOT_SUPPORTED:
case BT_HCI_ERR_UNACCEPT_CONN_PARAM:
return true;
default:
return false;
}
}
uint8_t ll_terminate_ind_send(uint16_t handle, uint8_t reason)
{
struct ll_conn *conn;
if (!IS_ACL_HANDLE(handle)) {
return BT_HCI_ERR_UNKNOWN_CONN_ID;
}
conn = ll_connected_get(handle);
if (!conn) {
return BT_HCI_ERR_UNKNOWN_CONN_ID;
}
#if defined(CONFIG_BT_LL_SW_LLCP_LEGACY)
if (conn->llcp_terminate.req != conn->llcp_terminate.ack) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
#endif /* CONFIG_BT_LL_SW_LLCP_LEGACY */
if (!is_valid_disconnect_reason(reason)) {
return BT_HCI_ERR_INVALID_PARAM;
}
#if defined(CONFIG_BT_LL_SW_LLCP_LEGACY)
conn->llcp_terminate.reason_own = reason;
conn->llcp_terminate.req++; /* (req - ack) == 1, TERM_REQ */
#else /* CONFIG_BT_LL_SW_LLCP_LEGACY */
uint8_t err;
err = ull_cp_terminate(conn, reason);
if (err) {
return err;
}
#endif /* CONFIG_BT_LL_SW_LLCP_LEGACY */
if (IS_ENABLED(CONFIG_BT_PERIPHERAL) && conn->lll.role) {
ull_periph_latency_cancel(conn, handle);
}
return 0;
}
#if defined(CONFIG_BT_CENTRAL) || defined(CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG)
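/* Feature exchange may be initiated by the central, or by the
 * peripheral when the optional peripheral-initiated feature exchange
 * (CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG) is enabled.
 */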
uint8_t ll_feature_req_send(uint16_t handle)
{
struct ll_conn *conn;
conn = ll_connected_get(handle);
if (!conn) {
return BT_HCI_ERR_UNKNOWN_CONN_ID;
}
#if defined(CONFIG_BT_LL_SW_LLCP_LEGACY)
if (conn->llcp_feature.req != conn->llcp_feature.ack) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
conn->llcp_feature.req++;
#else /* CONFIG_BT_LL_SW_LLCP_LEGACY */
uint8_t err;
err = ull_cp_feature_exchange(conn);
if (err) {
return err;
}
#endif
if (IS_ENABLED(CONFIG_BT_PERIPHERAL) &&
IS_ENABLED(CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG) &&
conn->lll.role) {
ull_periph_latency_cancel(conn, handle);
}
return 0;
}
#endif /* CONFIG_BT_CENTRAL || CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG */
uint8_t ll_version_ind_send(uint16_t handle)
{
struct ll_conn *conn;
conn = ll_connected_get(handle);
if (!conn) {
return BT_HCI_ERR_UNKNOWN_CONN_ID;
}
#if defined(CONFIG_BT_LL_SW_LLCP_LEGACY)
if (conn->llcp_version.req != conn->llcp_version.ack) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
conn->llcp_version.req++;
#else /* CONFIG_BT_LL_SW_LLCP_LEGACY */
uint8_t err;
err = ull_cp_version_exchange(conn);
if (err) {
return err;
}
#endif
if (IS_ENABLED(CONFIG_BT_PERIPHERAL) && conn->lll.role) {
ull_periph_latency_cancel(conn, handle);
}
return 0;
}
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
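/* Initiate the Data Length Update procedure. The requested tx_time is
 * clamped to the maximum for the supported PHYs, and, in the legacy
 * LLCP, a request arriving while a peer-initiated procedure is
 * completing is cached and resumed once that procedure is done.
 */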
uint32_t ll_length_req_send(uint16_t handle, uint16_t tx_octets,
uint16_t tx_time)
{
struct ll_conn *conn;
if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK) &&
((tx_octets > LL_LENGTH_OCTETS_TX_MAX) ||
(tx_time > PDU_DC_PAYLOAD_TIME_MAX_CODED))) {
return BT_HCI_ERR_INVALID_PARAM;
}
conn = ll_connected_get(handle);
if (!conn) {
return BT_HCI_ERR_UNKNOWN_CONN_ID;
}
#if defined(CONFIG_BT_LL_SW_LLCP_LEGACY)
if (conn->llcp_length.disabled ||
(conn->common.fex_valid &&
!(conn->llcp_feature.features_conn & BIT64(BT_LE_FEAT_BIT_DLE)))) {
return BT_HCI_ERR_UNSUPP_REMOTE_FEATURE;
}
#if defined(CONFIG_BT_CTLR_PHY)
#if defined(CONFIG_BT_CTLR_PHY_CODED)
const uint16_t tx_time_max =
PDU_DC_MAX_US(LL_LENGTH_OCTETS_TX_MAX, PHY_CODED);
#else /* !CONFIG_BT_CTLR_PHY_CODED */
const uint16_t tx_time_max =
PDU_DC_MAX_US(LL_LENGTH_OCTETS_TX_MAX, PHY_1M);
#endif /* !CONFIG_BT_CTLR_PHY_CODED */
if (tx_time > tx_time_max) {
tx_time = tx_time_max;
}
#endif /* CONFIG_BT_CTLR_PHY */
if (conn->llcp_length.req != conn->llcp_length.ack) {
switch (conn->llcp_length.state) {
case LLCP_LENGTH_STATE_RSP_ACK_WAIT:
case LLCP_LENGTH_STATE_RESIZE_RSP:
case LLCP_LENGTH_STATE_RESIZE_RSP_ACK_WAIT:
/* cached until peer procedure completes */
if (!conn->llcp_length.cache.tx_octets) {
conn->llcp_length.cache.tx_octets = tx_octets;
#if defined(CONFIG_BT_CTLR_PHY)
conn->llcp_length.cache.tx_time = tx_time;
#endif /* CONFIG_BT_CTLR_PHY */
return 0;
}
__fallthrough;
default:
return BT_HCI_ERR_CMD_DISALLOWED;
}
}
/* TODO: parameter check tx_octets and tx_time */
conn->llcp_length.state = LLCP_LENGTH_STATE_REQ;
conn->llcp_length.tx_octets = tx_octets;
#if defined(CONFIG_BT_CTLR_PHY)
conn->llcp_length.tx_time = tx_time;
#endif /* CONFIG_BT_CTLR_PHY */
conn->llcp_length.req++;
#else /* CONFIG_BT_LL_SW_LLCP_LEGACY */
if (!feature_dle(conn)) {
return BT_HCI_ERR_UNSUPP_REMOTE_FEATURE;
}
uint8_t err;
err = ull_cp_data_length_update(conn, tx_octets, tx_time);
if (err) {
return err;
}
#endif /* CONFIG_BT_LL_SW_LLCP_LEGACY */
if (IS_ENABLED(CONFIG_BT_PERIPHERAL) && conn->lll.role) {
ull_periph_latency_cancel(conn, handle);
}
return 0;
}
void ll_length_default_get(uint16_t *max_tx_octets, uint16_t *max_tx_time)
{
*max_tx_octets = default_tx_octets;
*max_tx_time = default_tx_time;
}
uint32_t ll_length_default_set(uint16_t max_tx_octets, uint16_t max_tx_time)
{
/* TODO: parameter check (for BT 5.0 compliance) */
default_tx_octets = max_tx_octets;
default_tx_time = max_tx_time;
return 0;
}
void ll_length_max_get(uint16_t *max_tx_octets, uint16_t *max_tx_time,
uint16_t *max_rx_octets, uint16_t *max_rx_time)
{
#if defined(CONFIG_BT_CTLR_PHY) && defined(CONFIG_BT_CTLR_PHY_CODED)
#define PHY (PHY_CODED)
#else /* CONFIG_BT_CTLR_PHY && CONFIG_BT_CTLR_PHY_CODED */
#define PHY (PHY_1M)
#endif /* CONFIG_BT_CTLR_PHY && CONFIG_BT_CTLR_PHY_CODED */
*max_tx_octets = LL_LENGTH_OCTETS_RX_MAX;
*max_rx_octets = LL_LENGTH_OCTETS_RX_MAX;
*max_tx_time = PDU_DC_MAX_US(LL_LENGTH_OCTETS_RX_MAX, PHY);
*max_rx_time = PDU_DC_MAX_US(LL_LENGTH_OCTETS_RX_MAX, PHY);
#undef PHY
}
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
#if defined(CONFIG_BT_CTLR_PHY)
uint8_t ll_phy_get(uint16_t handle, uint8_t *tx, uint8_t *rx)
{
struct ll_conn *conn;
conn = ll_connected_get(handle);
if (!conn) {
return BT_HCI_ERR_UNKNOWN_CONN_ID;
}
/* TODO: context safe read */
*tx = conn->lll.phy_tx;
*rx = conn->lll.phy_rx;
return 0;
}
uint8_t ll_phy_default_set(uint8_t tx, uint8_t rx)
{
/* TODO: validate against supported phy */
default_phy_tx = tx;
default_phy_rx = rx;
return 0;
}
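/* Initiate the PHY Update procedure; rejected while a previous request
 * is still pending or when the peer supports neither the 2M nor the
 * Coded PHY.
 */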
uint8_t ll_phy_req_send(uint16_t handle, uint8_t tx, uint8_t flags, uint8_t rx)
{
struct ll_conn *conn;
conn = ll_connected_get(handle);
if (!conn) {
return BT_HCI_ERR_UNKNOWN_CONN_ID;
}
#if defined(CONFIG_BT_LL_SW_LLCP_LEGACY)
if (conn->llcp_phy.disabled ||
(conn->common.fex_valid &&
!(conn->llcp_feature.features_conn & BIT64(BT_LE_FEAT_BIT_PHY_2M)) &&
!(conn->llcp_feature.features_conn &
BIT64(BT_LE_FEAT_BIT_PHY_CODED)))) {
return BT_HCI_ERR_UNSUPP_REMOTE_FEATURE;
}
if (conn->llcp_phy.req != conn->llcp_phy.ack) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
conn->llcp_phy.state = LLCP_PHY_STATE_REQ;
conn->llcp_phy.cmd = 1U;
conn->llcp_phy.tx = tx;
conn->llcp_phy.flags = flags;
conn->llcp_phy.rx = rx;
conn->llcp_phy.req++;
#else /* CONFIG_BT_LL_SW_LLCP_LEGACY */
if (!feature_phy_2m(conn) && !feature_phy_coded(conn)) {
return BT_HCI_ERR_UNSUPP_REMOTE_FEATURE;
}
uint8_t err;
err = ull_cp_phy_update(conn, tx, flags, rx, 1U);
if (err) {
return err;
}
#endif /* CONFIG_BT_LL_SW_LLCP_LEGACY */
if (IS_ENABLED(CONFIG_BT_PERIPHERAL) && conn->lll.role) {
ull_periph_latency_cancel(conn, handle);
}
return 0;
}
#endif /* CONFIG_BT_CTLR_PHY */
#if defined(CONFIG_BT_CTLR_CONN_RSSI)
uint8_t ll_rssi_get(uint16_t handle, uint8_t *rssi)
{
struct ll_conn *conn;
conn = ll_connected_get(handle);
if (!conn) {
return BT_HCI_ERR_UNKNOWN_CONN_ID;
}
*rssi = conn->lll.rssi_latest;
return 0;
}
#endif /* CONFIG_BT_CTLR_CONN_RSSI */
#if defined(CONFIG_BT_CTLR_LE_PING)
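/* The authenticated payload timeout is stored as a number of
 * connection events (apto_reload). lll.interval is in 1.25 ms units
 * and the HCI value is in 10 ms units, hence the get conversion
 * multiplies by 125/1000 (i.e. 1.25/10) and the set conversion divides
 * the 10 ms based timeout by the connection interval in microseconds.
 */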
uint8_t ll_apto_get(uint16_t handle, uint16_t *apto)
{
struct ll_conn *conn;
conn = ll_connected_get(handle);
if (!conn) {
return BT_HCI_ERR_UNKNOWN_CONN_ID;
}
*apto = conn->apto_reload * conn->lll.interval * 125U / 1000;
return 0;
}
uint8_t ll_apto_set(uint16_t handle, uint16_t apto)
{
struct ll_conn *conn;
conn = ll_connected_get(handle);
if (!conn) {
return BT_HCI_ERR_UNKNOWN_CONN_ID;
}
conn->apto_reload = RADIO_CONN_EVENTS(apto * 10U * 1000U,
conn->lll.interval *
CONN_INT_UNIT_US);
return 0;
}
#endif /* CONFIG_BT_CTLR_LE_PING */
int ull_conn_init(void)
{
int err;
err = init_reset();
if (err) {
return err;
}
return 0;
}
int ull_conn_reset(void)
{
uint16_t handle;
int err;
#if defined(CONFIG_BT_CENTRAL)
/* Reset initiator */
(void)ull_central_reset();
#endif /* CONFIG_BT_CENTRAL */
for (handle = 0U; handle < CONFIG_BT_MAX_CONN; handle++) {
disable(handle);
}
/* Re-initialize the Tx mfifo */
MFIFO_INIT(conn_tx);
/* Re-initialize the Tx Ack mfifo */
MFIFO_INIT(conn_ack);
err = init_reset();
if (err) {
return err;
}
return 0;
}
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
uint16_t ull_conn_default_tx_octets_get(void)
{
return default_tx_octets;
}
#if defined(CONFIG_BT_CTLR_PHY)
uint16_t ull_conn_default_tx_time_get(void)
{
return default_tx_time;
}
#endif /* CONFIG_BT_CTLR_PHY */
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
#if defined(CONFIG_BT_CTLR_PHY)
uint8_t ull_conn_default_phy_tx_get(void)
{
return default_phy_tx;
}
uint8_t ull_conn_default_phy_rx_get(void)
{
return default_phy_rx;
}
#endif /* CONFIG_BT_CTLR_PHY */
#if defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN)
bool ull_conn_peer_connected(uint8_t const own_id_addr_type,
uint8_t const *const own_id_addr,
uint8_t const peer_id_addr_type,
uint8_t const *const peer_id_addr)
{
uint16_t handle;
for (handle = 0U; handle < CONFIG_BT_MAX_CONN; handle++) {
struct ll_conn *conn = ll_connected_get(handle);
if (conn &&
conn->peer_id_addr_type == peer_id_addr_type &&
!memcmp(conn->peer_id_addr, peer_id_addr, BDADDR_SIZE) &&
conn->own_id_addr_type == own_id_addr_type &&
!memcmp(conn->own_id_addr, own_id_addr, BDADDR_SIZE)) {
return true;
}
}
return false;
}
#endif /* CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN */
void ull_conn_setup(memq_link_t *rx_link, struct node_rx_hdr *rx)
{
struct node_rx_ftr *ftr;
struct ull_hdr *hdr;
/* Store the link in the node rx so that when the done event is
 * processed, the link can be used to enqueue the node rx towards
 * the LL context.
 */
rx->link = rx_link;
/* NOTE: LLL conn context SHALL be after lll_hdr in
* struct lll_adv and struct lll_scan.
*/
ftr = &(rx->rx_ftr);
/* Check the reference count and decide whether to set up the
 * connection here or when the done event arrives.
 */
hdr = HDR_LLL2ULL(ftr->param);
if (ull_ref_get(hdr)) {
/* Setup connection in ULL disabled callback,
* pass the node rx as disabled callback parameter.
*/
LL_ASSERT(!hdr->disabled_cb);
hdr->disabled_param = rx;
hdr->disabled_cb = conn_setup_adv_scan_disabled_cb;
} else {
conn_setup_adv_scan_disabled_cb(rx);
}
}
int ull_conn_rx(memq_link_t *link, struct node_rx_pdu **rx)
{
struct pdu_data *pdu_rx;
struct ll_conn *conn;
conn = ll_connected_get((*rx)->hdr.handle);
if (!conn) {
/* Mark buffer for release */
(*rx)->hdr.type = NODE_RX_TYPE_RELEASE;
return 0;
}
#if defined(CONFIG_BT_CTLR_RX_ENQUEUE_HOLD)
if (conn->llcp_rx_hold && rx_hold_is_done(conn)) {
rx_hold_flush(conn);
}
#endif /* CONFIG_BT_CTLR_RX_ENQUEUE_HOLD */
pdu_rx = (void *)(*rx)->pdu;
switch (pdu_rx->ll_id) {
case PDU_DATA_LLID_CTRL:
{
#if defined(CONFIG_BT_LL_SW_LLCP_LEGACY)
int nack;
nack = ctrl_rx(link, rx, pdu_rx, conn);
return nack;
#else /* CONFIG_BT_LL_SW_LLCP_LEGACY */
ARG_UNUSED(link);
ARG_UNUSED(pdu_rx);
ull_cp_rx(conn, *rx);
/* Mark buffer for release */
(*rx)->hdr.type = NODE_RX_TYPE_RELEASE;
return 0;
#endif /* CONFIG_BT_LL_SW_LLCP_LEGACY */
}
case PDU_DATA_LLID_DATA_CONTINUE:
case PDU_DATA_LLID_DATA_START:
#if defined(CONFIG_BT_CTLR_LE_ENC)
#if defined(CONFIG_BT_LL_SW_LLCP_LEGACY)
if (conn->llcp_enc.pause_rx) {
#else /* CONFIG_BT_LL_SW_LLCP_LEGACY */
if (conn->pause_rx_data) {
#endif /* CONFIG_BT_LL_SW_LLCP_LEGACY */
conn->llcp_terminate.reason_final =
BT_HCI_ERR_TERM_DUE_TO_MIC_FAIL;
/* Mark buffer for release */
(*rx)->hdr.type = NODE_RX_TYPE_RELEASE;
}
#endif /* CONFIG_BT_CTLR_LE_ENC */
break;
case PDU_DATA_LLID_RESV:
default:
#if defined(CONFIG_BT_CTLR_LE_ENC)
#if defined(CONFIG_BT_LL_SW_LLCP_LEGACY)
if (conn->llcp_enc.pause_rx) {
#else /* CONFIG_BT_LL_SW_LLCP_LEGACY */
if (conn->pause_rx_data) {
#endif /* CONFIG_BT_LL_SW_LLCP_LEGACY */
conn->llcp_terminate.reason_final =
BT_HCI_ERR_TERM_DUE_TO_MIC_FAIL;
}
#endif /* CONFIG_BT_CTLR_LE_ENC */
/* Invalid LL id, drop it. */
/* Mark buffer for release */
(*rx)->hdr.type = NODE_RX_TYPE_RELEASE;
break;
}
return 0;
}
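/* Run pending LLCP procedures for this connection event. Returns 0 to
 * continue with the prepare, or -ECANCELED when the event prepare
 * shall be cancelled, e.g. by a connection update at its instant.
 */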
int ull_conn_llcp(struct ll_conn *conn, uint32_t ticks_at_expire, uint16_t lazy)
{
#if defined(CONFIG_BT_LL_SW_LLCP_LEGACY)
/* Check that no other procedure with an instant is requested and
 * that we are not in encryption setup.
 */
if ((conn->llcp_ack == conn->llcp_req) &&
#if defined(CONFIG_BT_CTLR_LE_ENC)
#if defined(CONFIG_BT_PERIPHERAL)
(!conn->lll.role || (conn->periph.llcp_type == LLCP_NONE)) &&
#endif /* CONFIG_BT_PERIPHERAL */
!conn->llcp_enc.pause_rx) {
#else /* !CONFIG_BT_CTLR_LE_ENC */
1) {
#endif /* !CONFIG_BT_CTLR_LE_ENC */
/* TODO: Optimize the checks below, maybe have common flag */
/* check if connection update procedure is requested */
if (conn->llcp_cu.ack != conn->llcp_cu.req) {
/* switch to LLCP_CONN_UPD state machine */
conn->llcp_type = LLCP_CONN_UPD;
conn->llcp_ack -= 2U;
/* check if feature exchange procedure is requested */
} else if (conn->llcp_feature.ack != conn->llcp_feature.req) {
/* handle feature exchange state machine */
event_fex_prep(conn);
/* check if version info procedure is requested */
} else if (conn->llcp_version.ack != conn->llcp_version.req) {
/* handle version info state machine */
event_vex_prep(conn);
#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
/* check if CPR procedure is requested */
} else if (conn->llcp_conn_param.ack !=
conn->llcp_conn_param.req) {
struct lll_conn *lll = &conn->lll;
uint16_t event_counter;
/* Calculate current event counter */
event_counter = lll->event_counter +
lll->latency_prepare + lazy;
/* handle CPR state machine */
event_conn_param_prep(conn, event_counter,
ticks_at_expire);
#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
/* check if DLE procedure is requested */
} else if (conn->llcp_length.ack != conn->llcp_length.req) {
/* handle DLU state machine */
event_len_prep(conn);
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
#if defined(CONFIG_BT_CTLR_PHY)
/* check if PHY Req procedure is requested */
} else if (conn->llcp_phy.ack != conn->llcp_phy.req) {
/* handle PHY Upd state machine */
event_phy_req_prep(conn);
#endif /* CONFIG_BT_CTLR_PHY */
#if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
} else if (conn->llcp_cis.req != conn->llcp_cis.ack) {
struct lll_conn *lll = &conn->lll;
uint16_t event_counter;
/* Calculate current event counter */
event_counter = lll->event_counter +
lll->latency_prepare + lazy;
if (conn->llcp_cis.state == LLCP_CIS_STATE_RSP_WAIT) {
/* Handle CIS response */
event_send_cis_rsp(conn, event_counter);
} else if (conn->llcp_cis.state ==
LLCP_CIS_STATE_INST_WAIT) {
/* Start CIS peripheral */
event_peripheral_iso_prep(conn,
event_counter,
ticks_at_expire);
}
#endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
}
}
/* Check if a procedure with an instant, or encryption setup, is
 * requested or active.
 */
if (((conn->llcp_req - conn->llcp_ack) & 0x03) == 0x02) {
/* Process parallel procedures that are active */
if (0) {
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
/* Check if DLE in progress */
} else if (conn->llcp_length.ack != conn->llcp_length.req) {
if ((conn->llcp_length.state ==
LLCP_LENGTH_STATE_RESIZE) ||
(conn->llcp_length.state ==
LLCP_LENGTH_STATE_RESIZE_RSP)) {
/* handle DLU state machine */
event_len_prep(conn);
}
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
}
/* Process procedures with instants or encryption setup */
/* FIXME: Make LE Ping cacheable */
switch (conn->llcp_type) {
case LLCP_CONN_UPD:
{
if (event_conn_upd_prep(conn, lazy,
ticks_at_expire) == 0) {
return -ECANCELED;
}
}
break;
case LLCP_CHAN_MAP:
{
struct lll_conn *lll = &conn->lll;
uint16_t event_counter;
/* Calculate current event counter */
event_counter = lll->event_counter +
lll->latency_prepare + lazy;
event_ch_map_prep(conn, event_counter);
}
break;
#if defined(CONFIG_BT_CTLR_LE_ENC)
case LLCP_ENCRYPTION:
event_enc_prep(conn);
break;
#endif /* CONFIG_BT_CTLR_LE_ENC */
#if defined(CONFIG_BT_CTLR_LE_PING)
case LLCP_PING:
event_ping_prep(conn);
break;
#endif /* CONFIG_BT_CTLR_LE_PING */
#if defined(CONFIG_BT_CTLR_PHY)
case LLCP_PHY_UPD:
{
struct lll_conn *lll = &conn->lll;
uint16_t event_counter;
/* Calculate current event counter */
event_counter = lll->event_counter +
lll->latency_prepare + lazy;
event_phy_upd_ind_prep(conn, event_counter);
}
break;
#endif /* CONFIG_BT_CTLR_PHY */
default:
LL_ASSERT(0);
break;
}
}
#if defined(CONFIG_BT_PERIPHERAL) && defined(CONFIG_BT_CTLR_LE_ENC)
/* Run any pending local peripheral role initiated procedure that was
 * stored while the peer central initiated an encryption procedure.
 */
if (conn->lll.role && (conn->periph.llcp_type != LLCP_NONE)) {
switch (conn->periph.llcp_type) {
case LLCP_CONN_UPD:
{
if (event_conn_upd_prep(conn, lazy,
ticks_at_expire) == 0) {
return -ECANCELED;
}
}
break;
case LLCP_CHAN_MAP:
{
struct lll_conn *lll = &conn->lll;
uint16_t event_counter;
/* Calculate current event counter */
event_counter = lll->event_counter +
lll->latency_prepare + lazy;
event_ch_map_prep(conn, event_counter);
}
break;
#if defined(CONFIG_BT_CTLR_PHY)
case LLCP_PHY_UPD:
{
struct lll_conn *lll = &conn->lll;
uint16_t event_counter;
/* Calculate current event counter */
event_counter = lll->event_counter +
lll->latency_prepare + lazy;
event_phy_upd_ind_prep(conn, event_counter);
}
break;
#endif /* CONFIG_BT_CTLR_PHY */
default:
LL_ASSERT(0);
break;
}
}
#endif /* CONFIG_BT_PERIPHERAL && CONFIG_BT_CTLR_LE_ENC */
/* Terminate Procedure Request */
if (((conn->llcp_terminate.req - conn->llcp_terminate.ack) & 0xFF) ==
TERM_REQ) {
struct node_tx *tx;
tx = mem_acquire(&mem_conn_tx_ctrl.free);
if (tx) {
struct pdu_data *pdu_tx = (void *)tx->pdu;
ull_pdu_data_init(pdu_tx);
/* Terminate Procedure initiated,
* make (req - ack) == 2
*/
conn->llcp_terminate.ack--;
/* place the terminate ind packet in tx queue */
pdu_tx->ll_id = PDU_DATA_LLID_CTRL;
pdu_tx->len = offsetof(struct pdu_data_llctrl,
terminate_ind) +
sizeof(struct pdu_data_llctrl_terminate_ind);
pdu_tx->llctrl.opcode =
PDU_DATA_LLCTRL_TYPE_TERMINATE_IND;
pdu_tx->llctrl.terminate_ind.error_code =
conn->llcp_terminate.reason_own;
ctrl_tx_enqueue(conn, tx);
}
if (!conn->procedure_expire) {
/* Terminate Procedure timeout is started; it replaces
 * any other running timeout.
 */
conn->procedure_expire = conn->supervision_reload;
/* NOTE: if supervision timeout equals connection
* interval, don't timeout in the current event.
*/
if (conn->procedure_expire <= 1U) {
conn->procedure_expire++;
}
}
}
return 0;
#else /* CONFIG_BT_LL_SW_LLCP_LEGACY */
LL_ASSERT(conn->lll.handle != LLL_HANDLE_INVALID);
#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
conn->llcp.prep.ticks_at_expire = ticks_at_expire;
#else /* !CONFIG_BT_CTLR_CONN_PARAM_REQ */
ARG_UNUSED(ticks_at_expire);
#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
conn->llcp.prep.lazy = lazy;
ull_cp_run(conn);
if (conn->cancel_prepare) {
/* Reset signal */
conn->cancel_prepare = 0U;
/* Cancel prepare */
return -ECANCELED;
}
/* Continue prepare */
return 0;
#endif /* CONFIG_BT_LL_SW_LLCP_LEGACY */
}
void ull_conn_done(struct node_rx_event_done *done)
{
uint32_t ticks_drift_minus;