/*
* Copyright (c) 2018-2019 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stddef.h>
#include <zephyr.h>
#include <device.h>
#include <entropy.h>
#include <bluetooth/bluetooth.h>
#include <misc/byteorder.h>
#include "hal/ecb.h"
#include "hal/ccm.h"
#include "hal/ticker.h"
#include "util/util.h"
#include "util/mem.h"
#include "util/memq.h"
#include "util/mfifo.h"
#include "util/mayfly.h"
#include "ticker/ticker.h"
#include "pdu.h"
#include "lll.h"
#include "lll_tim_internal.h"
#include "lll_conn.h"
#include "ull_conn_types.h"
#include "ull_internal.h"
#include "ull_sched_internal.h"
#include "ull_conn_internal.h"
#include "ull_slave_internal.h"
#include "ull_master_internal.h"
#include "ll.h"
#include "ll_feat.h"
#define LOG_MODULE_NAME bt_ctlr_llsw_ull_conn
#include "common/log.h"
#include <soc.h>
#include "hal/debug.h"
/* Macro to return PDU time in microseconds */
#if defined(CONFIG_BT_CTLR_PHY_CODED)
#define PKT_US(octets, phy) \
(((phy) & BIT(2)) ? \
(80 + 256 + 16 + 24 + ((((2 + (octets) + 4) * 8) + 24 + 3) * 8)) : \
(((octets) + 14) * 8 / BIT(((phy) & 0x03) >> 1)))
#else /* !CONFIG_BT_CTLR_PHY_CODED */
#define PKT_US(octets, phy) \
(((octets) + 14) * 8 / BIT(((phy) & 0x03) >> 1))
#endif /* !CONFIG_BT_CTLR_PHY_CODED */
static int init_reset(void);
static void ticker_op_update_cb(u32_t status, void *param);
static inline void disable(u16_t handle);
static void conn_cleanup(struct ll_conn *conn);
static void ctrl_tx_enqueue(struct ll_conn *conn, struct node_tx *tx);
static inline void event_fex_prep(struct ll_conn *conn);
static inline void event_vex_prep(struct ll_conn *conn);
static inline int event_conn_upd_prep(struct ll_conn *conn,
u16_t event_counter,
u32_t ticks_at_expire);
static inline void event_ch_map_prep(struct ll_conn *conn,
u16_t event_counter);
static void terminate_ind_rx_enqueue(struct ll_conn *conn, u8_t reason);
#if defined(CONFIG_BT_CTLR_LE_ENC)
static inline void event_enc_prep(struct ll_conn *conn);
static int enc_rsp_send(struct ll_conn *conn);
static int start_enc_rsp_send(struct ll_conn *conn,
struct pdu_data *pdu_ctrl_tx);
static inline bool ctrl_is_unexpected(struct ll_conn *conn, u8_t opcode);
#endif /* CONFIG_BT_CTLR_LE_ENC */
#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
static inline void event_conn_param_prep(struct ll_conn *conn,
u16_t event_counter,
u32_t ticks_at_expire);
#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
#if defined(CONFIG_BT_CTLR_LE_PING)
static inline void event_ping_prep(struct ll_conn *conn);
#endif /* CONFIG_BT_CTLR_LE_PING */
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
static inline void event_len_prep(struct ll_conn *conn);
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
#if defined(CONFIG_BT_CTLR_PHY)
static inline void event_phy_req_prep(struct ll_conn *conn);
static inline void event_phy_upd_ind_prep(struct ll_conn *conn,
u16_t event_counter);
#endif /* CONFIG_BT_CTLR_PHY */
static inline void ctrl_tx_ack(struct ll_conn *conn, struct node_tx **tx,
struct pdu_data *pdu_tx);
static inline int ctrl_rx(memq_link_t *link, struct node_rx_pdu **rx,
struct pdu_data *pdu_rx, struct ll_conn *conn);
static void ticker_op_cb(u32_t status, void *params);
#define CONN_TX_BUF_SIZE MROUND(offsetof(struct node_tx, pdu) + \
offsetof(struct pdu_data, lldata) + \
CONFIG_BT_CTLR_TX_BUFFER_SIZE)
#define CONN_TX_CTRL_BUFFERS 2
#define CONN_TX_CTRL_BUF_SIZE (MROUND(offsetof(struct node_tx, pdu) + \
offsetof(struct pdu_data, llctrl) + \
sizeof(struct pdu_data_llctrl)) * \
CONN_TX_CTRL_BUFFERS)
static MFIFO_DEFINE(conn_tx, sizeof(struct lll_tx),
CONFIG_BT_CTLR_TX_BUFFERS);
static struct {
void *free;
u8_t pool[CONN_TX_BUF_SIZE * CONFIG_BT_CTLR_TX_BUFFERS];
} mem_conn_tx;
static struct {
void *free;
u8_t pool[CONN_TX_CTRL_BUF_SIZE * CONN_TX_CTRL_BUFFERS];
} mem_conn_tx_ctrl;
static struct {
void *free;
u8_t pool[sizeof(memq_link_t) *
(CONFIG_BT_CTLR_TX_BUFFERS + CONN_TX_CTRL_BUFFERS)];
} mem_link_tx;
static u8_t data_chan_map[5] = {0xFF, 0xFF, 0xFF, 0xFF, 0x1F};
static u8_t data_chan_count = 37U;
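#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
/* Default Data Length Extension values applied when initializing connections */
static u16_t default_tx_octets;
static u16_t default_tx_time;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */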
#if defined(CONFIG_BT_CTLR_PHY)
static u8_t default_phy_tx;
static u8_t default_phy_rx;
#endif /* CONFIG_BT_CTLR_PHY */
static struct ll_conn conn_pool[CONFIG_BT_MAX_CONN];
static struct ll_conn *conn_upd_curr;
static void *conn_free;
static struct device *entropy;
struct ll_conn *ll_conn_acquire(void)
{
return mem_acquire(&conn_free);
}
void ll_conn_release(struct ll_conn *conn)
{
mem_release(conn, &conn_free);
}
u16_t ll_conn_handle_get(struct ll_conn *conn)
{
return mem_index_get(conn, conn_pool, sizeof(struct ll_conn));
}
struct ll_conn *ll_conn_get(u16_t handle)
{
return mem_get(conn_pool, sizeof(struct ll_conn), handle);
}
struct ll_conn *ll_connected_get(u16_t handle)
{
struct ll_conn *conn;
if (handle >= CONFIG_BT_MAX_CONN) {
return NULL;
}
conn = ll_conn_get(handle);
if (conn->lll.handle != handle) {
return NULL;
}
return conn;
}
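/* Check that no LLCP control procedure is pending on the connection and
 * reserve the procedure request; the caller commits the request with a second
 * increment of llcp_req (a (llcp_req - llcp_ack) difference of 2 marks a
 * procedure as requested).
 */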
u8_t ull_conn_allowed_check(void *conn)
{
struct ll_conn * const conn_hdr = conn;
if (conn_hdr->llcp_req != conn_hdr->llcp_ack) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
conn_hdr->llcp_req++;
if (((conn_hdr->llcp_req - conn_hdr->llcp_ack) & 0x03) != 1U) {
conn_hdr->llcp_req--;
return BT_HCI_ERR_CMD_DISALLOWED;
}
return 0;
}
void *ll_tx_mem_acquire(void)
{
return mem_acquire(&mem_conn_tx.free);
}
void ll_tx_mem_release(void *tx)
{
mem_release(tx, &mem_conn_tx.free);
}
int ll_tx_mem_enqueue(u16_t handle, void *tx)
{
struct lll_tx *lll_tx;
struct ll_conn *conn;
u8_t idx;
conn = ll_connected_get(handle);
if (!conn) {
return -EINVAL;
}
idx = MFIFO_ENQUEUE_GET(conn_tx, (void **) &lll_tx);
if (!lll_tx) {
return -ENOBUFS;
}
lll_tx->handle = handle;
lll_tx->node = tx;
MFIFO_ENQUEUE(conn_tx, idx);
return 0;
}
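/* Initiate a Connection Update or Connection Parameter Request procedure, or
 * reply to a pending parameter request, depending on cmd. When cmd is zero,
 * the parameter request procedure is used if the peer supports it; otherwise
 * the legacy connection update is only allowed in the master role.
 */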
u8_t ll_conn_update(u16_t handle, u8_t cmd, u8_t status, u16_t interval_min,
u16_t interval_max, u16_t latency, u16_t timeout)
{
struct ll_conn *conn;
u8_t ret;
conn = ll_connected_get(handle);
if (!conn) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
if (!cmd) {
#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
if (!conn->llcp_conn_param.disabled &&
(!conn->common.fex_valid ||
(conn->llcp_features &
BIT(BT_LE_FEAT_BIT_CONN_PARAM_REQ)))) {
cmd++;
} else if (conn->lll.role) {
return BT_HCI_ERR_UNSUPP_REMOTE_FEATURE;
}
#else /* !CONFIG_BT_CTLR_CONN_PARAM_REQ */
if (conn->lll.role) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
#endif /* !CONFIG_BT_CTLR_CONN_PARAM_REQ */
}
if (!cmd) {
ret = ull_conn_allowed_check(conn);
if (ret) {
return ret;
}
conn->llcp.conn_upd.win_size = 1U;
conn->llcp.conn_upd.win_offset_us = 0U;
conn->llcp.conn_upd.interval = interval_max;
conn->llcp.conn_upd.latency = latency;
conn->llcp.conn_upd.timeout = timeout;
/* conn->llcp.conn_upd.instant = 0; */
conn->llcp.conn_upd.state = LLCP_CUI_STATE_USE;
conn->llcp.conn_upd.is_internal = 0U;
conn->llcp_type = LLCP_CONN_UPD;
conn->llcp_req++;
} else {
#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
cmd--;
if (cmd) {
if ((conn->llcp_conn_param.req ==
conn->llcp_conn_param.ack) ||
(conn->llcp_conn_param.state !=
LLCP_CPR_STATE_APP_WAIT)) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
conn->llcp_conn_param.status = status;
conn->llcp_conn_param.state = cmd;
conn->llcp_conn_param.cmd = 1U;
} else {
if (conn->llcp_conn_param.req !=
conn->llcp_conn_param.ack) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
conn->llcp_conn_param.status = 0U;
conn->llcp_conn_param.interval_min = interval_min;
conn->llcp_conn_param.interval_max = interval_max;
conn->llcp_conn_param.latency = latency;
conn->llcp_conn_param.timeout = timeout;
conn->llcp_conn_param.state = cmd;
conn->llcp_conn_param.cmd = 1U;
conn->llcp_conn_param.req++;
}
#else /* !CONFIG_BT_CTLR_CONN_PARAM_REQ */
/* CPR feature not supported */
return BT_HCI_ERR_CMD_DISALLOWED;
#endif /* !CONFIG_BT_CTLR_CONN_PARAM_REQ */
}
return 0;
}
u8_t ll_chm_get(u16_t handle, u8_t *chm)
{
struct ll_conn *conn;
conn = ll_connected_get(handle);
if (!conn) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
/* Iterate until we are sure the ISR did not modify the value while
* we were reading it from memory.
*/
do {
conn->chm_updated = 0U;
memcpy(chm, conn->lll.data_chan_map,
sizeof(conn->lll.data_chan_map));
} while (conn->chm_updated);
return 0;
}
u8_t ll_terminate_ind_send(u16_t handle, u8_t reason)
{
struct ll_conn *conn;
conn = ll_connected_get(handle);
if (!conn) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
conn->llcp_terminate.reason_own = reason;
conn->llcp_terminate.req++;
return 0;
}
u8_t ll_feature_req_send(u16_t handle)
{
struct ll_conn *conn;
u8_t ret;
conn = ll_connected_get(handle);
if (!conn) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
ret = ull_conn_allowed_check(conn);
if (ret) {
return ret;
}
conn->llcp_type = LLCP_FEATURE_EXCHANGE;
conn->llcp_req++;
return 0;
}
u8_t ll_version_ind_send(u16_t handle)
{
struct ll_conn *conn;
u8_t ret;
conn = ll_connected_get(handle);
if (!conn) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
ret = ull_conn_allowed_check(conn);
if (ret) {
return ret;
}
conn->llcp_type = LLCP_VERSION_EXCHANGE;
conn->llcp_req++;
return 0;
}
#if defined(CONFIG_BT_CTLR_PHY)
u8_t ll_phy_get(u16_t handle, u8_t *tx, u8_t *rx)
{
struct ll_conn *conn;
conn = ll_connected_get(handle);
if (!conn) {
return BT_HCI_ERR_UNKNOWN_CONN_ID;
}
/* TODO: context safe read */
*tx = conn->lll.phy_tx;
*rx = conn->lll.phy_rx;
return 0;
}
u8_t ll_phy_default_set(u8_t tx, u8_t rx)
{
/* TODO: validate against supported phy */
default_phy_tx = tx;
default_phy_rx = rx;
return 0;
}
u8_t ll_phy_req_send(u16_t handle, u8_t tx, u8_t flags, u8_t rx)
{
struct ll_conn *conn;
conn = ll_connected_get(handle);
if (!conn) {
return BT_HCI_ERR_UNKNOWN_CONN_ID;
}
if ((conn->llcp_req != conn->llcp_ack) ||
(conn->llcp_phy.req != conn->llcp_phy.ack)) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
conn->llcp_phy.state = LLCP_PHY_STATE_REQ;
conn->llcp_phy.cmd = 1U;
conn->llcp_phy.tx = tx;
conn->llcp_phy.flags = flags;
conn->llcp_phy.rx = rx;
conn->llcp_phy.req++;
return 0;
}
#endif /* CONFIG_BT_CTLR_PHY */
#if defined(CONFIG_BT_CTLR_CONN_RSSI)
u8_t ll_rssi_get(u16_t handle, u8_t *rssi)
{
struct ll_conn *conn;
conn = ll_connected_get(handle);
if (!conn) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
*rssi = conn->lll.rssi_latest;
return 0;
}
#endif /* CONFIG_BT_CTLR_CONN_RSSI */
#if defined(CONFIG_BT_CTLR_LE_PING)
u8_t ll_apto_get(u16_t handle, u16_t *apto)
{
struct ll_conn *conn;
conn = ll_connected_get(handle);
if (!conn) {
return BT_HCI_ERR_UNKNOWN_CONN_ID;
}
*apto = conn->apto_reload * conn->lll.interval * 125U / 1000;
return 0;
}
u8_t ll_apto_set(u16_t handle, u16_t apto)
{
struct ll_conn *conn;
conn = ll_connected_get(handle);
if (!conn) {
return BT_HCI_ERR_UNKNOWN_CONN_ID;
}
conn->apto_reload = RADIO_CONN_EVENTS(apto * 10U * 1000U,
conn->lll.interval * 1250);
return 0;
}
#endif /* CONFIG_BT_CTLR_LE_PING */
int ull_conn_init(void)
{
int err;
entropy = device_get_binding(CONFIG_ENTROPY_NAME);
if (!entropy) {
return -ENODEV;
}
err = init_reset();
if (err) {
return err;
}
return 0;
}
int ull_conn_reset(void)
{
u16_t handle;
int err;
for (handle = 0U; handle < CONFIG_BT_MAX_CONN; handle++) {
disable(handle);
}
/* initialise connection channel map */
data_chan_map[0] = 0xFF;
data_chan_map[1] = 0xFF;
data_chan_map[2] = 0xFF;
data_chan_map[3] = 0xFF;
data_chan_map[4] = 0x1F;
data_chan_count = 37U;
/* Re-initialize the Tx mfifo */
MFIFO_INIT(conn_tx);
/* Reset the current conn update conn context pointer */
conn_upd_curr = NULL;
err = init_reset();
if (err) {
return err;
}
return 0;
}
u8_t ull_conn_chan_map_cpy(u8_t *chan_map)
{
memcpy(chan_map, data_chan_map, sizeof(data_chan_map));
return data_chan_count;
}
void ull_conn_chan_map_set(u8_t *chan_map)
{
memcpy(data_chan_map, chan_map, sizeof(data_chan_map));
data_chan_count = util_ones_count_get(data_chan_map,
sizeof(data_chan_map));
}
#if defined(CONFIG_BT_CTLR_PHY)
u8_t ull_conn_default_phy_tx_get(void)
{
return default_phy_tx;
}
u8_t ull_conn_default_phy_rx_get(void)
{
return default_phy_rx;
}
#endif /* CONFIG_BT_CTLR_PHY */
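/* Dispatch a connection-complete rx node to the master or slave setup handler
 * based on the role stored in the LLL connection context.
 */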
void ull_conn_setup(memq_link_t *link, struct node_rx_hdr *rx)
{
struct node_rx_ftr *ftr;
struct lll_conn *lll;
ftr = (void *)((u8_t *)((struct node_rx_pdu *)rx)->pdu +
(offsetof(struct pdu_adv, connect_ind) +
sizeof(struct pdu_adv_connect_ind)));
lll = *((struct lll_conn **)((u8_t *)ftr->param +
sizeof(struct lll_hdr)));
switch (lll->role) {
#if defined(CONFIG_BT_CENTRAL)
case 0:
ull_master_setup(link, rx, ftr, lll);
break;
#endif /* CONFIG_BT_CENTRAL */
#if defined(CONFIG_BT_PERIPHERAL)
case 1:
ull_slave_setup(link, rx, ftr, lll);
break;
#endif /* CONFIG_BT_PERIPHERAL */
default:
LL_ASSERT(0);
break;
}
}
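/* Demultiplex a received data channel PDU: LL Control PDUs are processed by
 * ctrl_rx(); data (and reserved) PDUs received while rx is paused for
 * encryption setup terminate the connection with a MIC failure reason.
 */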
int ull_conn_rx(memq_link_t *link, struct node_rx_pdu **rx)
{
struct pdu_data *pdu_rx;
struct ll_conn *conn;
conn = ll_conn_get((*rx)->hdr.handle);
LL_ASSERT(conn);
pdu_rx = (void *)(*rx)->pdu;
switch (pdu_rx->ll_id) {
case PDU_DATA_LLID_CTRL:
{
int nack;
nack = ctrl_rx(link, rx, pdu_rx, conn);
return nack;
}
case PDU_DATA_LLID_DATA_CONTINUE:
case PDU_DATA_LLID_DATA_START:
#if defined(CONFIG_BT_CTLR_LE_ENC)
if (conn->pause_rx) {
conn->llcp_terminate.reason_peer =
BT_HCI_ERR_TERM_DUE_TO_MIC_FAIL;
			/* Mark the buffer for release */
(*rx)->hdr.type = NODE_RX_TYPE_DC_PDU_RELEASE;
}
#endif /* CONFIG_BT_CTLR_LE_ENC */
break;
case PDU_DATA_LLID_RESV:
default:
#if defined(CONFIG_BT_CTLR_LE_ENC)
if (conn->pause_rx) {
conn->llcp_terminate.reason_peer =
BT_HCI_ERR_TERM_DUE_TO_MIC_FAIL;
}
#endif /* CONFIG_BT_CTLR_LE_ENC */
/* Invalid LL id, drop it. */
		/* Mark the buffer for release */
(*rx)->hdr.type = NODE_RX_TYPE_DC_PDU_RELEASE;
break;
}
return 0;
}
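/* Run the pending LLCP state machines for this connection at the start of a
 * connection event, using the event's ticks_at_expire and lazy count to
 * compute instants and window offsets.
 */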
int ull_conn_llcp(struct ll_conn *conn, u32_t ticks_at_expire, u16_t lazy)
{
LL_ASSERT(conn->lll.handle != 0xFFFF);
#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ) || defined(CONFIG_BT_CTLR_PHY)
	/* Proceed only if no other procedure with an instant is requested and
	 * we are not in Encryption setup.
	 */
if ((conn->llcp_ack == conn->llcp_req) &&
#if defined(CONFIG_BT_CTLR_LE_ENC)
!conn->pause_rx) {
#else /* !CONFIG_BT_CTLR_LE_ENC */
1) {
#endif /* !CONFIG_BT_CTLR_LE_ENC */
if (0) {
#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
/* check if CPR procedure is requested */
} else if (conn->llcp_conn_param.ack !=
conn->llcp_conn_param.req) {
struct lll_conn *lll = &conn->lll;
u16_t event_counter;
/* Calculate current event counter */
event_counter = lll->event_counter +
lll->latency_prepare + lazy;
/* handle CPR state machine */
event_conn_param_prep(conn, event_counter,
ticks_at_expire);
#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
#if defined(CONFIG_BT_CTLR_PHY)
/* check if PHY Req procedure is requested */
} else if (conn->llcp_phy.ack != conn->llcp_phy.req) {
/* handle PHY Upd state machine */
event_phy_req_prep(conn);
#endif /* CONFIG_BT_CTLR_PHY */
}
}
#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ || CONFIG_BT_CTLR_PHY */
/* check if procedure is requested */
if (((conn->llcp_req - conn->llcp_ack) & 0x03) == 0x02) {
switch (conn->llcp_type) {
case LLCP_CONN_UPD:
{
struct lll_conn *lll = &conn->lll;
u16_t event_counter;
/* Calculate current event counter */
event_counter = lll->event_counter +
lll->latency_prepare + lazy;
if (event_conn_upd_prep(conn, event_counter,
ticks_at_expire) == 0) {
return -ECANCELED;
}
}
break;
case LLCP_CHAN_MAP:
{
struct lll_conn *lll = &conn->lll;
u16_t event_counter;
/* Calculate current event counter */
event_counter = lll->event_counter +
lll->latency_prepare + lazy;
event_ch_map_prep(conn, event_counter);
}
break;
#if defined(CONFIG_BT_CTLR_LE_ENC)
case LLCP_ENCRYPTION:
event_enc_prep(conn);
break;
#endif /* CONFIG_BT_CTLR_LE_ENC */
case LLCP_FEATURE_EXCHANGE:
event_fex_prep(conn);
break;
case LLCP_VERSION_EXCHANGE:
event_vex_prep(conn);
break;
#if defined(CONFIG_BT_CTLR_LE_PING)
case LLCP_PING:
event_ping_prep(conn);
break;
#endif /* CONFIG_BT_CTLR_LE_PING */
#if defined(CONFIG_BT_CTLR_PHY)
case LLCP_PHY_UPD:
{
struct lll_conn *lll = &conn->lll;
u16_t event_counter;
/* Calculate current event counter */
event_counter = lll->event_counter +
lll->latency_prepare + lazy;
event_phy_upd_ind_prep(conn, event_counter);
}
break;
#endif /* CONFIG_BT_CTLR_PHY */
default:
LL_ASSERT(0);
break;
}
}
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
/* check if procedure is requested */
if (conn->llcp_length.ack != conn->llcp_length.req) {
/* handle DLU state machine */
event_len_prep(conn);
}
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
/* Terminate Procedure Request */
if (conn->llcp_terminate.ack != conn->llcp_terminate.req) {
struct node_tx *tx;
tx = mem_acquire(&mem_conn_tx_ctrl.free);
if (tx) {
struct pdu_data *pdu_tx = (void *)tx->pdu;
/* Terminate Procedure acked */
conn->llcp_terminate.ack = conn->llcp_terminate.req;
/* place the terminate ind packet in tx queue */
pdu_tx->ll_id = PDU_DATA_LLID_CTRL;
pdu_tx->len = offsetof(struct pdu_data_llctrl,
terminate_ind) +
sizeof(struct pdu_data_llctrl_terminate_ind);
pdu_tx->llctrl.opcode =
PDU_DATA_LLCTRL_TYPE_TERMINATE_IND;
pdu_tx->llctrl.terminate_ind.error_code =
conn->llcp_terminate.reason_own;
ctrl_tx_enqueue(conn, tx);
}
if (!conn->procedure_expire) {
/* Terminate Procedure timeout is started, will
* replace any other timeout running
*/
conn->procedure_expire = conn->supervision_reload;
			/* NOTE: if the supervision timeout equals the
			 * connection interval, do not timeout in the current
			 * event.
			 */
if (conn->procedure_expire <= 1U) {
conn->procedure_expire++;
}
}
}
return 0;
}
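/* Connection event done processing: handle MIC failure and authenticated
 * payload timeouts, peer-initiated termination, connection establishment,
 * supervision and procedure timeouts, RSSI reporting, and any drift, latency
 * or force update of the connection ticker.
 */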
void ull_conn_done(struct node_rx_event_done *done)
{
struct lll_conn *lll = (void *)HDR_ULL2LLL(done->param);
struct ll_conn *conn = (void *)HDR_LLL2EVT(lll);
u32_t ticks_drift_minus;
u32_t ticks_drift_plus;
u16_t latency_event;
u16_t elapsed_event;
u8_t reason_peer;
u16_t lazy;
u8_t force;
/* Skip if connection terminated by local host */
if (lll->handle == 0xFFFF) {
return;
}
#if defined(CONFIG_BT_CTLR_LE_ENC)
/* Check authenticated payload expiry or MIC failure */
switch (done->extra.mic_state) {
case LLL_CONN_MIC_NONE:
#if defined(CONFIG_BT_CTLR_LE_PING)
if (lll->enc_rx || conn->pause_rx) {
u16_t appto_reload_new;
/* check for change in apto */
appto_reload_new = (conn->apto_reload >
(lll->latency + 6)) ?
(conn->apto_reload -
(lll->latency + 6)) :
conn->apto_reload;
if (conn->appto_reload != appto_reload_new) {
conn->appto_reload = appto_reload_new;
conn->apto_expire = 0U;
}
/* start authenticated payload (pre) timeout */
if (conn->apto_expire == 0U) {
conn->appto_expire = conn->appto_reload;
conn->apto_expire = conn->apto_reload;
}
}
#endif /* CONFIG_BT_CTLR_LE_PING */
break;
case LLL_CONN_MIC_PASS:
#if defined(CONFIG_BT_CTLR_LE_PING)
conn->appto_expire = conn->apto_expire = 0U;
#endif /* CONFIG_BT_CTLR_LE_PING */
break;
case LLL_CONN_MIC_FAIL:
conn->llcp_terminate.reason_peer =
BT_HCI_ERR_TERM_DUE_TO_MIC_FAIL;
break;
}
#endif /* CONFIG_BT_CTLR_LE_ENC */
/* Master transmitted ack for the received terminate ind or
* Slave received terminate ind or MIC failure
*/
reason_peer = conn->llcp_terminate.reason_peer;
if (reason_peer && (lll->role || lll->master.terminate_ack)) {
terminate_ind_rx_enqueue(conn, reason_peer);
conn_cleanup(conn);
return;
}
/* Slave drift compensation calc or master terminate acked */
ticks_drift_plus = 0U;
ticks_drift_minus = 0U;
if (done->extra.trx_cnt) {
if (IS_ENABLED(CONFIG_BT_PERIPHERAL) && lll->role) {
ull_slave_done(done, &ticks_drift_plus,
&ticks_drift_minus);
} else if (reason_peer) {
lll->master.terminate_ack = 1;
}
/* Reset connection failed to establish countdown */
conn->connect_expire = 0U;
}
/* Events elapsed used in timeout checks below */
latency_event = lll->latency_event;
elapsed_event = latency_event + 1;
/* Reset supervision countdown */
if (done->extra.crc_valid) {
conn->supervision_expire = 0U;
}
/* check connection failed to establish */
else if (conn->connect_expire) {
if (conn->connect_expire > elapsed_event) {
conn->connect_expire -= elapsed_event;
} else {
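			/* 0x3e: Connection Failed to be Established */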
terminate_ind_rx_enqueue(conn, 0x3e);
conn_cleanup(conn);
return;
}
}
	/* if the anchor point is not synchronized, start the supervision
	 * timeout, and break the latency if any.
	 */
else {
/* Start supervision timeout, if not started already */
if (!conn->supervision_expire) {
conn->supervision_expire = conn->supervision_reload;
}
}
/* check supervision timeout */
force = 0U;
if (conn->supervision_expire) {
if (conn->supervision_expire > elapsed_event) {
conn->supervision_expire -= elapsed_event;
/* break latency */
lll->latency_event = 0;
/* Force both master and slave when close to
* supervision timeout.
*/
if (conn->supervision_expire <= 6U) {
force = 1U;
}
/* use randomness to force slave role when anchor
* points are being missed.
*/
else if (lll->role) {
if (latency_event) {
force = 1U;
} else {
/* FIXME:*/
#if 0
force = lll->slave.force & 0x01;
/* rotate force bits */
lll->slave.force >>= 1;
if (force) {
lll->slave.force |= BIT(31);
}
#endif
}
}
} else {
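			/* 0x08: Connection Timeout (supervision timeout) */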
terminate_ind_rx_enqueue(conn, 0x08);
conn_cleanup(conn);
return;
}
}
/* check procedure timeout */
if (conn->procedure_expire != 0U) {
if (conn->procedure_expire > elapsed_event) {
conn->procedure_expire -= elapsed_event;
} else {
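			/* 0x22: LL Response Timeout */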
terminate_ind_rx_enqueue(conn, 0x22);
conn_cleanup(conn);
return;
}
}
#if defined(CONFIG_BT_CTLR_LE_PING)
/* check apto */
if (conn->apto_expire != 0U) {
if (conn->apto_expire > elapsed_event) {
conn->apto_expire -= elapsed_event;
} else {
struct node_rx_hdr *rx;
rx = ll_pdu_rx_alloc();
if (rx) {
conn->apto_expire = 0U;
rx->handle = lll->handle;
rx->type = NODE_RX_TYPE_APTO;
/* enqueue apto event into rx queue */
ll_rx_put(rx->link, rx);
ll_rx_sched();
} else {
conn->apto_expire = 1U;
}
}
}
/* check appto */
if (conn->appto_expire != 0U) {
if (conn->appto_expire > elapsed_event) {
conn->appto_expire -= elapsed_event;
} else {
conn->appto_expire = 0U;
if ((conn->procedure_expire == 0U) &&
(conn->llcp_req == conn->llcp_ack)) {
conn->llcp_type = LLCP_PING;
conn->llcp_ack -= 2U;
}
}
}
#endif /* CONFIG_BT_CTLR_LE_PING */
#if defined(CONFIG_BT_CTLR_CONN_RSSI)
/* generate RSSI event */
if (lll->rssi_sample_count == 0) {
struct node_rx_pdu *rx;
struct pdu_data *pdu_data_rx;
rx = ll_pdu_rx_alloc();
if (rx) {
lll->rssi_reported = lll->rssi_latest;
lll->rssi_sample_count = LLL_CONN_RSSI_SAMPLE_COUNT;
/* Prepare the rx packet structure */
rx->hdr.handle = lll->handle;
rx->hdr.type = NODE_RX_TYPE_RSSI;
/* prepare connection RSSI structure */
pdu_data_rx = (void *)rx->pdu;
pdu_data_rx->rssi = lll->rssi_reported;
/* enqueue connection RSSI structure into queue */
ll_rx_put(rx->hdr.link, rx);
ll_rx_sched();
}
}
#endif /* CONFIG_BT_CTLR_CONN_RSSI */
/* break latency based on ctrl procedure pending */
if ((((conn->llcp_req - conn->llcp_ack) & 0x03) == 0x02) &&
((conn->llcp_type == LLCP_CONN_UPD) ||
(conn->llcp_type == LLCP_CHAN_MAP))) {
lll->latency_event = 0;
}
/* check if latency needs update */
lazy = 0U;
if ((force) || (latency_event != lll->latency_event)) {
lazy = lll->latency_event + 1;
}
/* update conn ticker */
if ((ticks_drift_plus != 0U) || (ticks_drift_minus != 0U) ||
(lazy != 0U) || (force != 0U)) {
u8_t ticker_id = TICKER_ID_CONN_BASE + lll->handle;
struct ll_conn *conn = lll->hdr.parent;
u32_t ticker_status;
		/* The call to ticker_update can fail under a race condition
		 * wherein the Slave role is being stopped but, at the same
		 * time, is preempted by a Slave event that gets into the
		 * close state. Accept failure when the Slave role is being
		 * stopped.
		 */
ticker_status = ticker_update(TICKER_INSTANCE_ID_CTLR,
TICKER_USER_ID_ULL_HIGH,
ticker_id,
ticks_drift_plus,
ticks_drift_minus, 0, 0,
lazy, force,
ticker_op_update_cb,
conn);
LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
(ticker_status == TICKER_STATUS_BUSY) ||
((void *)conn == ull_disable_mark_get()));
}
}
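/* Move up to 'count' host-enqueued tx nodes from the common tx MFIFO onto the
 * per-connection data tx list; nodes destined for a no longer connected
 * handle are invalidated and acked back to the host.
 */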
void ull_conn_tx_demux(u8_t count)
{
do {
struct ll_conn *conn;
struct lll_tx *lll_tx;
lll_tx = MFIFO_DEQUEUE_GET(conn_tx);
if (!lll_tx) {
break;
}
conn = ll_conn_get(lll_tx->handle);
if (conn->lll.handle == lll_tx->handle) {
struct node_tx *tx = lll_tx->node;
tx->next = NULL;
if (!conn->tx_data) {
conn->tx_data = tx;
if (!conn->tx_head) {
conn->tx_head = tx;
conn->tx_data_last = NULL;
}
}
if (conn->tx_data_last) {
conn->tx_data_last->next = tx;
}
conn->tx_data_last = tx;
} else {
struct node_tx *tx = lll_tx->node;
struct pdu_data *p = (void *)tx->pdu;
p->ll_id = PDU_DATA_LLID_RESV;
ll_tx_ack_put(0xFFFF, tx);
}
MFIFO_DEQUEUE(conn_tx);
} while (--count);
}
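/* Push up to 'count' tx nodes from the connection tx list into the LLL memq;
 * control PDU nodes have their next pointer set to themselves so that the ack
 * path can recognise and release the control buffer allocation.
 */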
void ull_conn_tx_lll_enqueue(struct ll_conn *conn, u8_t count)
{
struct node_tx *tx;
tx = conn->tx_head;
#if defined(CONFIG_BT_CTLR_LE_ENC)
while (tx && (!conn->pause_tx || (tx == conn->tx_ctrl)) && count--) {
#else /* !CONFIG_BT_CTLR_LE_ENC */
while (tx && count--) {
#endif /* !CONFIG_BT_CTLR_LE_ENC */
struct node_tx *tx_lll;
memq_link_t *link;
tx_lll = tx;
if (tx == conn->tx_ctrl) {
tx = conn->tx_head = conn->tx_head->next;
if (conn->tx_ctrl == conn->tx_ctrl_last) {
conn->tx_ctrl = NULL;
conn->tx_ctrl_last = NULL;
} else {
conn->tx_ctrl = tx;
}
/* point to self to indicate a control PDU mem alloc */
tx_lll->next = tx_lll;
} else {
if (tx == conn->tx_data) {
conn->tx_data = conn->tx_data->next;
}
tx = conn->tx_head = conn->tx_head->next;
}
link = mem_acquire(&mem_link_tx.free);
LL_ASSERT(link);
memq_enqueue(link, tx_lll, &conn->lll.memq_tx.tail);
}
}
void ull_conn_link_tx_release(void *link)
{
mem_release(link, &mem_link_tx.free);
}
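/* Handle a transmit acknowledgement from LLL: control PDUs are processed by
 * ctrl_tx_ack() and, when allocated from the control pool, released here;
 * remaining PDUs are acked towards the host.
 */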
void ull_conn_tx_ack(struct ll_conn *conn, memq_link_t *link,
struct node_tx *tx)
{
struct pdu_data *pdu_tx;
pdu_tx = (void *)tx->pdu;
LL_ASSERT(pdu_tx->len);
if (pdu_tx->ll_id == PDU_DATA_LLID_CTRL) {
ctrl_tx_ack(conn, &tx, pdu_tx);
/* release mem if points to itself */
if (link->next == (void *)tx) {
mem_release(tx, &mem_conn_tx_ctrl.free);
return;
} else if (!tx) {
return;
}
}
ll_tx_ack_put(conn->lll.handle, tx);
}
static int init_reset(void)
{
/* Initialize conn pool. */
mem_init(conn_pool, sizeof(struct ll_conn),
sizeof(conn_pool) / sizeof(struct ll_conn), &conn_free);
/* Initialize tx pool. */
mem_init(mem_conn_tx.pool, CONN_TX_BUF_SIZE, CONFIG_BT_CTLR_TX_BUFFERS,
&mem_conn_tx.free);
/* Initialize tx ctrl pool. */
mem_init(mem_conn_tx_ctrl.pool, CONN_TX_CTRL_BUF_SIZE,
CONN_TX_CTRL_BUFFERS, &mem_conn_tx_ctrl.free);
/* Initialize tx link pool. */
mem_init(mem_link_tx.pool, sizeof(memq_link_t),
CONFIG_BT_CTLR_TX_BUFFERS + CONN_TX_CTRL_BUFFERS,
&mem_link_tx.free);
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
/* Initialize the DLE defaults */
	default_tx_octets = PDU_DC_PAYLOAD_SIZE_MIN;
	default_tx_time = PKT_US(PDU_DC_PAYLOAD_SIZE_MIN, 0);
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
#if defined(CONFIG_BT_CTLR_PHY)
/* Initialize the PHY defaults */
default_phy_tx = BIT(0);
default_phy_rx = BIT(0);
#if defined(CONFIG_BT_CTLR_PHY_2M)
default_phy_tx |= BIT(1);
default_phy_rx |= BIT(1);
#endif /* CONFIG_BT_CTLR_PHY_2M */
#if defined(CONFIG_BT_CTLR_PHY_CODED)
default_phy_tx |= BIT(2);
default_phy_rx |= BIT(2);
#endif /* CONFIG_BT_CTLR_PHY_CODED */
#endif /* CONFIG_BT_CTLR_PHY */
return 0;
}
static void ticker_op_update_cb(u32_t status, void *param)
{
LL_ASSERT(status == TICKER_STATUS_SUCCESS ||
param == ull_disable_mark_get());
}
static void ticker_op_stop_cb(u32_t status, void *param)
{
u32_t retval;
static memq_link_t link;
static struct mayfly mfy = {0, 0, &link, NULL, lll_conn_tx_flush};
LL_ASSERT(status == TICKER_STATUS_SUCCESS);
mfy.param = param;
/* Flush pending tx PDUs in LLL (using a mayfly) */
retval = mayfly_enqueue(TICKER_USER_ID_ULL_LOW, TICKER_USER_ID_LLL, 1,
&mfy);
LL_ASSERT(!retval);
}
static inline void disable(u16_t handle)
{
volatile u32_t ret_cb = TICKER_STATUS_BUSY;
struct ll_conn *conn;
void *mark;
u32_t ret;
conn = ll_conn_get(handle);
mark = ull_disable_mark(conn);
LL_ASSERT(mark == conn);
ret = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_THREAD,
TICKER_ID_CONN_BASE + handle,
ull_ticker_status_give, (void *)&ret_cb);
ret = ull_ticker_status_take(ret, &ret_cb);
if (!ret) {
ret = ull_disable(&conn->lll);
LL_ASSERT(!ret);
}
conn->lll.link_tx_free = NULL;
mark = ull_disable_unmark(conn);
LL_ASSERT(mark == conn);
}
static void conn_cleanup(struct ll_conn *conn)
{
struct lll_conn *lll = &conn->lll;
struct node_rx_pdu *rx;
u32_t ticker_status;
/* release any llcp reserved rx node */
rx = conn->llcp_rx;
if (rx) {
		/* Mark the buffer for release */
rx->hdr.type = NODE_RX_TYPE_DC_PDU_RELEASE;
/* enqueue rx node towards Thread */
ll_rx_put(rx->hdr.link, rx);
}
	/* Enable the Ticker Job; we are in a radio event which disabled it if
	 * worker0 and job0 priorities were the same.
	 */
mayfly_enable(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, 1);
/* Stop Master or Slave role ticker */
ticker_status = ticker_stop(TICKER_INSTANCE_ID_CTLR,
TICKER_USER_ID_ULL_HIGH,
TICKER_ID_CONN_BASE + lll->handle,
ticker_op_stop_cb, (void *)lll);
LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
(ticker_status == TICKER_STATUS_BUSY));
/* Invalidate the connection context */
lll->handle = 0xFFFF;
}
static void ctrl_tx_data_last_enqueue(struct ll_conn *conn,
struct node_tx *tx)
{
tx->next = conn->tx_ctrl_last->next;
conn->tx_ctrl_last->next = tx;
conn->tx_ctrl_last = tx;
}
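/* Enqueue a control PDU into the connection tx list: if the PDU at the head
 * may already have been transmitted but not yet acked, insert the control PDU
 * right after it; otherwise place it at the head. Queued control PDUs are
 * kept contiguous.
 */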
static void ctrl_tx_enqueue(struct ll_conn *conn, struct node_tx *tx)
{
/* check if a packet was tx-ed and not acked by peer */
if (
/* data/ctrl packet is in the head */
conn->tx_head &&
#if defined(CONFIG_BT_CTLR_LE_ENC)
/* data PDU tx is not paused */
!conn->pause_tx) {
#else /* !CONFIG_BT_CTLR_LE_ENC */
1) {
#endif /* !CONFIG_BT_CTLR_LE_ENC */
/* data or ctrl may have been transmitted once, but not acked
* by peer, hence place this new ctrl after head
*/
		/* if data was transmitted once, keep it at the head of the tx
		 * list, as we will insert a ctrl after it, hence advance the
		 * data pointer
		 */
if (conn->tx_head == conn->tx_data) {
conn->tx_data = conn->tx_data->next;
}
		/* if no ctrl packet is already queued, the newly added ctrl
		 * becomes the ctrl pointer and is inserted after the head.
		 */
if (!conn->tx_ctrl) {
tx->next = conn->tx_head->next;
conn->tx_head->next = tx;
conn->tx_ctrl = tx;
conn->tx_ctrl_last = tx;
} else {
ctrl_tx_data_last_enqueue(conn, tx);
}
} else {
/* No packet needing ACK. */
/* If first ctrl packet then add it as head else add it to the
* tail of the ctrl packets.
*/
if (!conn->tx_ctrl) {
tx->next = conn->tx_head;
conn->tx_head = tx;
conn->tx_ctrl = tx;
conn->tx_ctrl_last = tx;
} else {
ctrl_tx_data_last_enqueue(conn, tx);
}
}
/* Update last pointer if ctrl added at end of tx list */
	if (tx->next == NULL) {
conn->tx_data_last = tx;
}
}
static void ctrl_tx_sec_enqueue(struct ll_conn *conn, struct node_tx *tx)
{
#if defined(CONFIG_BT_CTLR_LE_ENC)
if (conn->pause_tx) {
if (!conn->tx_ctrl) {
tx->next = conn->tx_head;
conn->tx_head = tx;
} else {
tx->next = conn->tx_ctrl_last->next;
conn->tx_ctrl_last->next = tx;
}
} else
#endif /* CONFIG_BT_CTLR_LE_ENC */
{
ctrl_tx_enqueue(conn, tx);
}
}
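/* Build and queue the LL_CONNECTION_UPDATE_IND PDU, set the instant to the
 * current event counter plus latency plus 6, and, with advanced scheduling,
 * enqueue a mayfly to compute the transmit window offset.
 */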
static inline void event_conn_upd_init(struct ll_conn *conn,
u16_t event_counter,
u32_t ticks_at_expire,
struct pdu_data *pdu_ctrl_tx,
struct mayfly *mfy_sched_offset,
void (*fp_mfy_select_or_use)(void *))
{
/* move to in progress */
conn->llcp.conn_upd.state = LLCP_CUI_STATE_INPROG;
/* set instant */
conn->llcp.conn_upd.instant = event_counter + conn->lll.latency + 6;
/* place the conn update req packet as next in tx queue */
pdu_ctrl_tx->ll_id = PDU_DATA_LLID_CTRL;
pdu_ctrl_tx->len = offsetof(struct pdu_data_llctrl, conn_update_ind) +
sizeof(struct pdu_data_llctrl_conn_update_ind);
pdu_ctrl_tx->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_CONN_UPDATE_IND;
pdu_ctrl_tx->llctrl.conn_update_ind.win_size =
conn->llcp.conn_upd.win_size;
pdu_ctrl_tx->llctrl.conn_update_ind.win_offset =
sys_cpu_to_le16(conn->llcp.conn_upd.win_offset_us / 1250U);
pdu_ctrl_tx->llctrl.conn_update_ind.interval =
sys_cpu_to_le16(conn->llcp.conn_upd.interval);
pdu_ctrl_tx->llctrl.conn_update_ind.latency =
sys_cpu_to_le16(conn->llcp.conn_upd.latency);
pdu_ctrl_tx->llctrl.conn_update_ind.timeout =
sys_cpu_to_le16(conn->llcp.conn_upd.timeout);
pdu_ctrl_tx->llctrl.conn_update_ind.instant =
sys_cpu_to_le16(conn->llcp.conn_upd.instant);
#if defined(CONFIG_BT_CTLR_SCHED_ADVANCED)
{
u32_t retval;
/* calculate window offset that places the connection in the
* next available slot after existing masters.
*/
conn->llcp.conn_upd.ticks_anchor = ticks_at_expire;
#if defined(CONFIG_BT_CTLR_XTAL_ADVANCED)
if (conn->evt.ticks_xtal_to_start & XON_BITMASK) {
u32_t ticks_prepare_to_start =
MAX(conn->evt.ticks_active_to_start,
conn->evt.ticks_preempt_to_start);
conn->llcp.conn_upd.ticks_anchor -=
(conn->evt.ticks_xtal_to_start &
~XON_BITMASK) - ticks_prepare_to_start;
}
#endif /* CONFIG_BT_CTLR_XTAL_ADVANCED */
conn->llcp.conn_upd.pdu_win_offset = (u16_t *)
&pdu_ctrl_tx->llctrl.conn_update_ind.win_offset;
mfy_sched_offset->fp = fp_mfy_select_or_use;
mfy_sched_offset->param = (void *)conn;
retval = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
TICKER_USER_ID_ULL_LOW, 1,
mfy_sched_offset);
LL_ASSERT(!retval);
}
#else /* !CONFIG_BT_CTLR_SCHED_ADVANCED */
ARG_UNUSED(ticks_at_expire);
ARG_UNUSED(mfy_sched_offset);
ARG_UNUSED(fp_mfy_select_or_use);
#endif /* !CONFIG_BT_CTLR_SCHED_ADVANCED */
}
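/* Connection Update procedure: before the instant, allocate the rx and tx
 * nodes and send the update indication; at or after the instant, generate the
 * Connection Update Complete (or release the rx node), apply the new timings
 * and restart the connection ticker.
 */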
static inline int event_conn_upd_prep(struct ll_conn *conn,
u16_t event_counter,
u32_t ticks_at_expire)
{
struct ll_conn *conn_upd;
u16_t instant_latency;
conn_upd = conn_upd_curr;
/* set mutex */
if (!conn_upd) {
conn_upd_curr = conn;
}
instant_latency = (event_counter - conn->llcp.conn_upd.instant) &
0xffff;
if (conn->llcp.conn_upd.state != LLCP_CUI_STATE_INPROG) {
#if defined(CONFIG_BT_CTLR_SCHED_ADVANCED)
static memq_link_t s_link;
static struct mayfly s_mfy_sched_offset = {0, 0,
&s_link, 0, 0 };
void (*fp_mfy_select_or_use)(void *) = NULL;
#endif /* CONFIG_BT_CTLR_SCHED_ADVANCED */
struct pdu_data *pdu_ctrl_tx;
struct node_rx_pdu *rx;
struct node_tx *tx;
LL_ASSERT(!conn->llcp_rx);
rx = ll_pdu_rx_alloc_peek(1);
if (!rx) {
return -ENOBUFS;
}
tx = mem_acquire(&mem_conn_tx_ctrl.free);
if (!tx) {
return -ENOBUFS;
}
(void)ll_pdu_rx_alloc();
conn->llcp_rx = rx;
pdu_ctrl_tx = (void *)tx->pdu;
#if defined(CONFIG_BT_CTLR_SCHED_ADVANCED)
switch (conn->llcp.conn_upd.state) {
case LLCP_CUI_STATE_USE:
fp_mfy_select_or_use = ull_sched_mfy_win_offset_use;
break;
#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
case LLCP_CUI_STATE_SELECT:
fp_mfy_select_or_use = ull_sched_mfy_win_offset_select;
break;
#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
default:
LL_ASSERT(0);
break;
}
event_conn_upd_init(conn, event_counter, ticks_at_expire,
pdu_ctrl_tx, &s_mfy_sched_offset,
fp_mfy_select_or_use);
#else /* !CONFIG_BT_CTLR_SCHED_ADVANCED */
event_conn_upd_init(conn, event_counter, ticks_at_expire,
pdu_ctrl_tx, NULL, NULL);
#endif /* !CONFIG_BT_CTLR_SCHED_ADVANCED */
ctrl_tx_enqueue(conn, tx);
} else if (instant_latency <= 0x7FFF) {
u32_t mayfly_was_enabled;
u16_t conn_interval_old;
u16_t conn_interval_new;
u32_t ticks_slot_offset;
u32_t ticks_win_offset;
u32_t conn_interval_us;
struct node_rx_pdu *rx;
struct lll_conn *lll;
u8_t ticker_id_conn;
u32_t ticker_status;
u32_t periodic_us;
u16_t latency;
/* procedure request acked */
conn->llcp_ack = conn->llcp_req;
#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
if ((conn->llcp_conn_param.req != conn->llcp_conn_param.ack) &&
(conn->llcp_conn_param.state == LLCP_CPR_STATE_UPD)) {
conn->llcp_conn_param.ack = conn->llcp_conn_param.req;
/* Stop procedure timeout */
conn->procedure_expire = 0U;
}
#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
/* reset mutex */
if (conn_upd_curr == conn) {
conn_upd_curr = NULL;
}
lll = &conn->lll;
/* Acquire Rx node */
rx = conn->llcp_rx;
conn->llcp_rx = NULL;
LL_ASSERT(rx && rx->hdr.link);
/* Prepare the rx packet structure */
if ((conn->llcp.conn_upd.interval != lll->interval) ||
(conn->llcp.conn_upd.latency != lll->latency) ||
(RADIO_CONN_EVENTS(conn->llcp.conn_upd.timeout * 10000U,
lll->interval * 1250) !=
conn->supervision_reload)) {
struct node_rx_cu *cu;
rx->hdr.handle = lll->handle;
rx->hdr.type = NODE_RX_TYPE_CONN_UPDATE;
/* prepare connection update complete structure */
cu = (void *)rx->pdu;
cu->status = 0x00;
cu->interval = conn->llcp.conn_upd.interval;
cu->latency = conn->llcp.conn_upd.latency;
cu->timeout = conn->llcp.conn_upd.timeout;
} else {
			/* Mark the buffer for release */
rx->hdr.type = NODE_RX_TYPE_DC_PDU_RELEASE;
}
/* enqueue rx node towards Thread */
ll_rx_put(rx->hdr.link, rx);
ll_rx_sched();
#if defined(CONFIG_BT_CTLR_XTAL_ADVANCED)
/* restore to normal prepare */
if (conn->evt.ticks_xtal_to_start & XON_BITMASK) {
u32_t ticks_prepare_to_start =
MAX(conn->evt.ticks_active_to_start,
conn->evt.ticks_preempt_to_start);
conn->evt.ticks_xtal_to_start &= ~XON_BITMASK;
ticks_at_expire -= (conn->evt.ticks_xtal_to_start -
ticks_prepare_to_start);
}
#endif /* CONFIG_BT_CTLR_XTAL_ADVANCED */
/* compensate for instant_latency due to laziness */
conn_interval_old = instant_latency * lll->interval;
latency = conn_interval_old /
conn->llcp.conn_upd.interval;
conn_interval_new = latency *
conn->llcp.conn_upd.interval;
if (conn_interval_new > conn_interval_old) {
ticks_at_expire += HAL_TICKER_US_TO_TICKS(
(conn_interval_new - conn_interval_old) * 1250U);
} else {
ticks_at_expire -= HAL_TICKER_US_TO_TICKS(
(conn_interval_old - conn_interval_new) * 1250U);
}
lll->latency_prepare -= (instant_latency - latency);
/* calculate the offset, window widening and interval */
ticks_slot_offset = MAX(conn->evt.ticks_active_to_start,
conn->evt.ticks_xtal_to_start);
conn_interval_us = conn->llcp.conn_upd.interval * 1250U;
periodic_us = conn_interval_us;
if (lll->role) {
lll->slave.window_widening_prepare_us -=
lll->slave.window_widening_periodic_us *
instant_latency;
lll->slave.window_widening_periodic_us =
(((lll_conn_ppm_local_get() +
lll_conn_ppm_get(lll->slave.sca)) *
conn_interval_us) + (1000000 - 1)) / 1000000U;
lll->slave.window_widening_max_us =
(conn_interval_us >> 1) - TIFS_US;
lll->slave.window_size_prepare_us =
conn->llcp.conn_upd.win_size * 1250U;
conn->slave.ticks_to_offset = 0U;
lll->slave.window_widening_prepare_us +=
lll->slave.window_widening_periodic_us *
latency;
if (lll->slave.window_widening_prepare_us >
lll->slave.window_widening_max_us) {
lll->slave.window_widening_prepare_us =
lll->slave.window_widening_max_us;
}
ticks_at_expire -= HAL_TICKER_US_TO_TICKS(
lll->slave.window_widening_periodic_us *
latency);
ticks_win_offset = HAL_TICKER_US_TO_TICKS(
(conn->llcp.conn_upd.win_offset_us / 1250U) *
1250U);
periodic_us -= lll->slave.window_widening_periodic_us;
} else {
ticks_win_offset = HAL_TICKER_US_TO_TICKS(
conn->llcp.conn_upd.win_offset_us);
			/* Workaround: due to the missing remainder parameter
			 * in the ticker_start function for the first
			 * interval, add one tick so that the ceiled value is
			 * used.
			 */
ticks_win_offset += 1U;
}
lll->interval = conn->llcp.conn_upd.interval;
lll->latency = conn->llcp.conn_upd.latency;
conn->supervision_reload =
RADIO_CONN_EVENTS((conn->llcp.conn_upd.timeout
* 10U * 1000U), conn_interval_us);
conn->procedure_reload =
RADIO_CONN_EVENTS((40 * 1000 * 1000), conn_interval_us);
#if defined(CONFIG_BT_CTLR_LE_PING)
/* APTO in no. of connection events */
conn->apto_reload = RADIO_CONN_EVENTS((30 * 1000 * 1000),
conn_interval_us);
		/* Dispatch the LE Ping PDU 6 connection events (that the peer
		 * would listen to) before the 30 s timeout.
		 * TODO: "peer listens to" is greater than 30 s due to latency.
		 */
conn->appto_reload = (conn->apto_reload > (lll->latency + 6)) ?
(conn->apto_reload - (lll->latency + 6)) :
conn->apto_reload;
#endif /* CONFIG_BT_CTLR_LE_PING */
if (!conn->llcp.conn_upd.is_internal) {
conn->supervision_expire = 0U;
}
		/* disable the ticker job, in order to chain the stop and
		 * start operations and avoid the RTC being stopped if no
		 * tickers are active.
		 */
mayfly_was_enabled = mayfly_is_enabled(TICKER_USER_ID_ULL_HIGH,
TICKER_USER_ID_ULL_LOW);
mayfly_enable(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW,
0);
/* start slave/master with new timings */
ticker_id_conn = TICKER_ID_CONN_BASE + ll_conn_handle_get(conn);
ticker_status = ticker_stop(TICKER_INSTANCE_ID_CTLR,
TICKER_USER_ID_ULL_HIGH,
ticker_id_conn, ticker_op_cb,
(void *)__LINE__);
LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
(ticker_status == TICKER_STATUS_BUSY));
ticker_status =
ticker_start(TICKER_INSTANCE_ID_CTLR,
TICKER_USER_ID_ULL_HIGH,
ticker_id_conn,
ticks_at_expire, ticks_win_offset,
HAL_TICKER_US_TO_TICKS(periodic_us),
HAL_TICKER_REMAINDER(periodic_us),
TICKER_NULL_LAZY,
(ticks_slot_offset + conn->evt.ticks_slot),
#if defined(CONFIG_BT_PERIPHERAL) && defined(CONFIG_BT_CENTRAL)
lll->role ? ull_slave_ticker_cb :
ull_master_ticker_cb,
#elif defined(CONFIG_BT_PERIPHERAL)
ull_slave_ticker_cb,
#else
ull_master_ticker_cb,
#endif
conn, ticker_op_cb, (void *)__LINE__);
LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
(ticker_status == TICKER_STATUS_BUSY));
/* enable ticker job, if disabled in this function */
if (mayfly_was_enabled) {
mayfly_enable(TICKER_USER_ID_ULL_HIGH,
TICKER_USER_ID_ULL_LOW, 1);
}
return 0;
}
return -EINPROGRESS;
}
static inline void event_ch_map_prep(struct ll_conn *conn,
u16_t event_counter)
{
if (conn->llcp.chan_map.initiate) {
struct node_tx *tx;
tx = mem_acquire(&mem_conn_tx_ctrl.free);
if (tx) {
struct pdu_data *pdu_ctrl_tx = (void *)tx->pdu;
/* reset initiate flag */
conn->llcp.chan_map.initiate = 0U;
/* set instant */
conn->llcp.chan_map.instant = event_counter +
conn->lll.latency + 6;
/* place the channel map req packet as next in
* tx queue
*/
pdu_ctrl_tx->ll_id = PDU_DATA_LLID_CTRL;
pdu_ctrl_tx->len = offsetof(struct pdu_data_llctrl,
chan_map_ind) +
sizeof(struct pdu_data_llctrl_chan_map_ind);
pdu_ctrl_tx->llctrl.opcode =
PDU_DATA_LLCTRL_TYPE_CHAN_MAP_IND;
memcpy(&pdu_ctrl_tx->llctrl.chan_map_ind.chm[0],
&conn->llcp.chan_map.chm[0],
sizeof(pdu_ctrl_tx->llctrl.chan_map_ind.chm));
pdu_ctrl_tx->llctrl.chan_map_ind.instant =
sys_cpu_to_le16(conn->llcp.chan_map.instant);
ctrl_tx_enqueue(conn, tx);
}
} else if (((event_counter - conn->llcp.chan_map.instant) & 0xFFFF)
<= 0x7FFF) {
struct lll_conn *lll = &conn->lll;
/* procedure request acked */
conn->llcp_ack = conn->llcp_req;
/* copy to active channel map */
memcpy(&lll->data_chan_map[0],
&conn->llcp.chan_map.chm[0],
sizeof(lll->data_chan_map));
lll->data_chan_count =
util_ones_count_get(&lll->data_chan_map[0],
sizeof(lll->data_chan_map));
conn->chm_updated = 1U;
}
}
#if defined(CONFIG_BT_CTLR_LE_ENC)
static inline void event_enc_reject_prep(struct ll_conn *conn,
struct pdu_data *pdu)
{
pdu->ll_id = PDU_DATA_LLID_CTRL;
if (conn->common.fex_valid &&
(conn->llcp_features & BIT(BT_LE_FEAT_BIT_EXT_REJ_IND))) {
struct pdu_data_llctrl_reject_ext_ind *p;
pdu->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_REJECT_EXT_IND;
p = (void *)&pdu->llctrl.reject_ext_ind;
p->reject_opcode = PDU_DATA_LLCTRL_TYPE_ENC_REQ;
p->error_code = conn->llcp.encryption.error_code;
pdu->len = sizeof(struct pdu_data_llctrl_reject_ext_ind);
} else {
struct pdu_data_llctrl_reject_ind *p;
pdu->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_REJECT_IND;
p = (void *)&pdu->llctrl.reject_ind;
p->error_code = conn->llcp.encryption.error_code;
pdu->len = sizeof(struct pdu_data_llctrl_reject_ind);
}
pdu->len += offsetof(struct pdu_data_llctrl, reject_ind);
conn->llcp.encryption.error_code = 0U;
}
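/* Encryption procedure: the master derives the session key and sends the
 * start enc rsp; the slave sends a reject or the start enc req after deriving
 * the session key, enabling rx decryption until the master's start enc rsp
 * turns on tx encryption.
 */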
static inline void event_enc_prep(struct ll_conn *conn)
{
struct pdu_data *pdu_ctrl_tx;
struct node_tx *tx;
struct lll_conn *lll;
if (conn->llcp.encryption.initiate) {
return;
}
tx = mem_acquire(&mem_conn_tx_ctrl.free);
if (!tx) {
return;
}
lll = &conn->lll;
pdu_ctrl_tx = (void *)tx->pdu;
/* master sends encrypted enc start rsp in control priority */
if (!lll->role) {
/* calc the Session Key */
ecb_encrypt(&conn->llcp.encryption.ltk[0],
&conn->llcp.encryption.skd[0],
NULL, &lll->ccm_rx.key[0]);
/* copy the Session Key */
memcpy(&lll->ccm_tx.key[0], &lll->ccm_rx.key[0],
sizeof(lll->ccm_tx.key));
/* copy the IV */
memcpy(&lll->ccm_tx.iv[0], &lll->ccm_rx.iv[0],
sizeof(lll->ccm_tx.iv));
/* initialise counter */
lll->ccm_rx.counter = 0;
lll->ccm_tx.counter = 0;
/* set direction: slave to master = 0,
* master to slave = 1
*/
lll->ccm_rx.direction = 0;
lll->ccm_tx.direction = 1;
/* enable receive encryption */
lll->enc_rx = 1;
/* send enc start resp */
start_enc_rsp_send(conn, pdu_ctrl_tx);
}
	/* slave sends reject ind or start enc req at control priority */
#if defined(CONFIG_BT_CTLR_FAST_ENC)
else {
#else /* !CONFIG_BT_CTLR_FAST_ENC */
else if (!conn->pause_tx || conn->refresh) {
#endif /* !CONFIG_BT_CTLR_FAST_ENC */
/* place the reject ind packet as next in tx queue */
if (conn->llcp.encryption.error_code) {
event_enc_reject_prep(conn, pdu_ctrl_tx);
}
/* place the start enc req packet as next in tx queue */
else {
#if !defined(CONFIG_BT_CTLR_FAST_ENC)
u8_t err;
/* TODO BT Spec. text: may finalize the sending
* of additional data channel PDUs queued in the
* controller.
*/
err = enc_rsp_send(conn);
if (err) {
mem_release(tx, &mem_conn_tx_ctrl.free);
return;
}
#endif /* !CONFIG_BT_CTLR_FAST_ENC */
/* calc the Session Key */
ecb_encrypt(&conn->llcp.encryption.ltk[0],
&conn->llcp.encryption.skd[0], NULL,
&lll->ccm_rx.key[0]);
/* copy the Session Key */
memcpy(&lll->ccm_tx.key[0],
&lll->ccm_rx.key[0],
sizeof(lll->ccm_tx.key));
/* copy the IV */
memcpy(&lll->ccm_tx.iv[0], &lll->ccm_rx.iv[0],
sizeof(lll->ccm_tx.iv));
/* initialise counter */
lll->ccm_rx.counter = 0;
lll->ccm_tx.counter = 0;
/* set direction: slave to master = 0,
* master to slave = 1
*/
lll->ccm_rx.direction = 1;
lll->ccm_tx.direction = 0;
/* enable receive encryption (transmit turned
* on when start enc resp from master is
* received)
*/
lll->enc_rx = 1;
/* prepare the start enc req */
pdu_ctrl_tx->ll_id = PDU_DATA_LLID_CTRL;
pdu_ctrl_tx->len = offsetof(struct pdu_data_llctrl,
start_enc_req) +
sizeof(struct pdu_data_llctrl_start_enc_req);
pdu_ctrl_tx->llctrl.opcode =
PDU_DATA_LLCTRL_TYPE_START_ENC_REQ;
}
#if !defined(CONFIG_BT_CTLR_FAST_ENC)
} else {
start_enc_rsp_send(conn, pdu_ctrl_tx);
/* resume data packet rx and tx */
conn->pause_rx = 0;
conn->pause_tx = 0;
#endif /* !CONFIG_BT_CTLR_FAST_ENC */
}
ctrl_tx_enqueue(conn, tx);
/* procedure request acked */
conn->llcp_ack = conn->llcp_req;
}
#endif /* CONFIG_BT_CTLR_LE_ENC */
static inline void event_fex_prep(struct ll_conn *conn)
{
struct node_tx *tx;
tx = mem_acquire(&mem_conn_tx_ctrl.free);
if (tx) {
struct pdu_data *pdu = (void *)tx->pdu;
/* procedure request acked */
conn->llcp_ack = conn->llcp_req;
/* use initial feature bitmap */
conn->llcp_features = LL_FEAT;
/* place the feature exchange req packet as next in tx queue */
pdu->ll_id = PDU_DATA_LLID_CTRL;
pdu->len = offsetof(struct pdu_data_llctrl, feature_req) +
sizeof(struct pdu_data_llctrl_feature_req);
pdu->llctrl.opcode = !conn->lll.role ?
PDU_DATA_LLCTRL_TYPE_FEATURE_REQ :
PDU_DATA_LLCTRL_TYPE_SLAVE_FEATURE_REQ;
(void)memset(&pdu->llctrl.feature_req.features[0],
0x00,
sizeof(pdu->llctrl.feature_req.features));
pdu->llctrl.feature_req.features[0] =
conn->llcp_features & 0xFF;
pdu->llctrl.feature_req.features[1] =
(conn->llcp_features >> 8) & 0xFF;
pdu->llctrl.feature_req.features[2] =
(conn->llcp_features >> 16) & 0xFF;
ctrl_tx_enqueue(conn, tx);
/* Start Procedure Timeout (TODO: this shall not replace
* terminate procedure)
*/
conn->procedure_expire = conn->procedure_reload;
}
}
static inline void event_vex_prep(struct ll_conn *conn)
{
if (conn->llcp_version.tx == 0U) {
struct node_tx *tx;
tx = mem_acquire(&mem_conn_tx_ctrl.free);
if (tx) {
struct pdu_data *pdu = (void *)tx->pdu;
u16_t cid;
u16_t svn;
/* procedure request acked */
conn->llcp_ack = conn->llcp_req;
/* set version ind tx-ed flag */
conn->llcp_version.tx = 1U;
/* place the version ind packet as next in tx queue */
pdu->ll_id = PDU_DATA_LLID_CTRL;
pdu->len =
offsetof(struct pdu_data_llctrl, version_ind) +
sizeof(struct pdu_data_llctrl_version_ind);
pdu->llctrl.opcode =
PDU_DATA_LLCTRL_TYPE_VERSION_IND;
pdu->llctrl.version_ind.version_number =
LL_VERSION_NUMBER;
cid = sys_cpu_to_le16(CONFIG_BT_CTLR_COMPANY_ID);
svn = sys_cpu_to_le16(CONFIG_BT_CTLR_SUBVERSION_NUMBER);
pdu->llctrl.version_ind.company_id = cid;
pdu->llctrl.version_ind.sub_version_number = svn;
ctrl_tx_enqueue(conn, tx);
/* Start Procedure Timeout (TODO: this shall not
* replace terminate procedure)
*/
conn->procedure_expire = conn->procedure_reload;
}
} else if (conn->llcp_version.rx) {
struct node_rx_pdu *rx;
struct pdu_data *pdu;
/* get a rx node for ULL->LL */
rx = ll_pdu_rx_alloc();
if (!rx) {
return;
		}
/* procedure request acked */
conn->llcp_ack = conn->llcp_req;
rx->hdr.handle = conn->lll.handle;
rx->hdr.type = NODE_RX_TYPE_DC_PDU;
/* prepare version ind structure */
pdu = (void *)rx->pdu;
pdu->ll_id = PDU_DATA_LLID_CTRL;
pdu->len = offsetof(struct pdu_data_llctrl, version_ind) +
sizeof(struct pdu_data_llctrl_version_ind);
pdu->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_VERSION_IND;
pdu->llctrl.version_ind.version_number =
conn->llcp_version.version_number;
pdu->llctrl.version_ind.company_id =
sys_cpu_to_le16(conn->llcp_version.company_id);
pdu->llctrl.version_ind.sub_version_number =
sys_cpu_to_le16(conn->llcp_version.sub_version_number);
/* enqueue version ind structure into rx queue */
ll_rx_put(rx->hdr.link, rx);
ll_rx_sched();
} else {
/* tx-ed but no rx, and new request placed */
LL_ASSERT(0);
}
}
#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
static inline void event_conn_param_req(struct ll_conn *conn,
u16_t event_counter,
u32_t ticks_at_expire)
{
struct pdu_data_llctrl_conn_param_req *p;
struct pdu_data *pdu_ctrl_tx;
struct node_tx *tx;
tx = mem_acquire(&mem_conn_tx_ctrl.free);
if (!tx) {
return;
}
/* move to wait for conn_update/rsp/rej */
conn->llcp_conn_param.state = LLCP_CPR_STATE_RSP_WAIT;
/* place the conn param req packet as next in tx queue */
pdu_ctrl_tx = (void *)tx->pdu;
pdu_ctrl_tx->ll_id = PDU_DATA_LLID_CTRL;
pdu_ctrl_tx->len = offsetof(struct pdu_data_llctrl, conn_param_req) +
sizeof(struct pdu_data_llctrl_conn_param_req);
pdu_ctrl_tx->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_CONN_PARAM_REQ;
p = (void *)&pdu_ctrl_tx->llctrl.conn_param_req;
p->interval_min = sys_cpu_to_le16(conn->llcp_conn_param.interval_min);
p->interval_max = sys_cpu_to_le16(conn->llcp_conn_param.interval_max);
p->latency = sys_cpu_to_le16(conn->llcp_conn_param.latency);
p->timeout = sys_cpu_to_le16(conn->llcp_conn_param.timeout);
p->preferred_periodicity = 0U;
p->reference_conn_event_count = sys_cpu_to_le16(event_counter);
p->offset0 = sys_cpu_to_le16(0x0000);
p->offset1 = sys_cpu_to_le16(0xffff);
p->offset2 = sys_cpu_to_le16(0xffff);
p->offset3 = sys_cpu_to_le16(0xffff);
p->offset4 = sys_cpu_to_le16(0xffff);
p->offset5 = sys_cpu_to_le16(0xffff);
ctrl_tx_enqueue(conn, tx);
/* set CUI/CPR mutex */
conn_upd_curr = conn;
/* Start Procedure Timeout (TODO: this shall not replace
* terminate procedure).
*/
conn->procedure_expire = conn->procedure_reload;
#if defined(CONFIG_BT_CTLR_SCHED_ADVANCED)
{
static memq_link_t s_link;
static struct mayfly s_mfy_sched_offset = {0, 0, &s_link, NULL,
ull_sched_mfy_free_win_offset_calc};
u32_t retval;
conn->llcp_conn_param.ticks_ref = ticks_at_expire;
#if defined(CONFIG_BT_CTLR_XTAL_ADVANCED)
if (conn->evt.ticks_xtal_to_start & XON_BITMASK) {
u32_t ticks_prepare_to_start =
MAX(conn->evt.ticks_active_to_start,
conn->evt.ticks_preempt_to_start);
conn->llcp_conn_param.ticks_ref -=
(conn->evt.ticks_xtal_to_start &
~XON_BITMASK) - ticks_prepare_to_start;
}
#endif /* CONFIG_BT_CTLR_XTAL_ADVANCED */
conn->llcp_conn_param.pdu_win_offset0 = (u16_t *)&p->offset0;
s_mfy_sched_offset.param = (void *)conn;
retval = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
TICKER_USER_ID_ULL_LOW, 1,
&s_mfy_sched_offset);
LL_ASSERT(!retval);
}
#else /* !CONFIG_BT_CTLR_SCHED_ADVANCED */
ARG_UNUSED(ticks_at_expire);
#endif /* !CONFIG_BT_CTLR_SCHED_ADVANCED */
}
static inline void event_conn_param_rsp(struct ll_conn *conn)
{
struct pdu_data_llctrl_conn_param_rsp *rsp;
struct node_tx *tx;
struct pdu_data *pdu;
/* handle rejects */
if (conn->llcp_conn_param.status) {
struct pdu_data_llctrl_reject_ext_ind *rej;
tx = mem_acquire(&mem_conn_tx_ctrl.free);
if (!tx) {
return;
}
/* master/slave response with reject ext ind */
pdu = (void *)tx->pdu;
pdu->ll_id = PDU_DATA_LLID_CTRL;
pdu->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_REJECT_EXT_IND;
pdu->len = offsetof(struct pdu_data_llctrl, reject_ext_ind) +
sizeof(struct pdu_data_llctrl_reject_ext_ind);
rej = (void *)&pdu->llctrl.reject_ext_ind;
rej->reject_opcode = PDU_DATA_LLCTRL_TYPE_CONN_PARAM_REQ;
rej->error_code = conn->llcp_conn_param.status;
ctrl_tx_enqueue(conn, tx);
/* procedure request acked */
conn->llcp_conn_param.ack = conn->llcp_conn_param.req;
/* reset mutex */
conn_upd_curr = NULL;
return;
}
	/* master responds with connection update */
if (!conn