/*
* Copyright (c) 2016 Intel Corporation.
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <logging/log.h>
LOG_MODULE_REGISTER(net_if, CONFIG_NET_IF_LOG_LEVEL);
#include <init.h>
#include <kernel.h>
#include <linker/sections.h>
#include <syscall_handler.h>
#include <stdlib.h>
#include <string.h>
#include <net/net_core.h>
#include <net/net_pkt.h>
#include <net/net_if.h>
#include <net/net_mgmt.h>
#include <net/ethernet.h>
#include "net_private.h"
#include "ipv6.h"
#include "ipv4_autoconf_internal.h"
#include "net_stats.h"
#define REACHABLE_TIME K_SECONDS(30) /* in ms */
/*
* split the min/max random reachable factors into numerator/denominator
* so that integer-based math works better
*/
#define MIN_RANDOM_NUMER (1)
#define MIN_RANDOM_DENOM (2)
#define MAX_RANDOM_NUMER (3)
#define MAX_RANDOM_DENOM (2)
/* net_if dedicated section limiters */
extern struct net_if __net_if_start[];
extern struct net_if __net_if_end[];
extern struct net_if_dev __net_if_dev_start[];
extern struct net_if_dev __net_if_dev_end[];
#if defined(CONFIG_NET_IPV4) || defined(CONFIG_NET_IPV6)
static struct net_if_router routers[CONFIG_NET_MAX_ROUTERS];
static struct k_delayed_work router_timer;
static sys_slist_t active_router_timers;
#endif
#if defined(CONFIG_NET_IPV6)
/* Timer that triggers network address renewal */
static struct k_delayed_work address_lifetime_timer;
/* Track currently active address lifetime timers */
static sys_slist_t active_address_lifetime_timers;
/* Timer that triggers IPv6 prefix lifetime */
static struct k_delayed_work prefix_lifetime_timer;
/* Track currently active IPv6 prefix lifetime timers */
static sys_slist_t active_prefix_lifetime_timers;
#if defined(CONFIG_NET_IPV6_DAD)
/** Duplicate address detection (DAD) timer */
static struct k_delayed_work dad_timer;
static sys_slist_t active_dad_timers;
#endif
#if defined(CONFIG_NET_IPV6_ND)
static struct k_delayed_work rs_timer;
static sys_slist_t active_rs_timers;
#endif
static struct {
struct net_if_ipv6 ipv6;
struct net_if *iface;
} ipv6_addresses[CONFIG_NET_IF_MAX_IPV6_COUNT];
#endif /* CONFIG_NET_IPV6 */
#if defined(CONFIG_NET_IPV4)
static struct {
struct net_if_ipv4 ipv4;
struct net_if *iface;
} ipv4_addresses[CONFIG_NET_IF_MAX_IPV4_COUNT];
#endif /* CONFIG_NET_IPV4 */
/* We keep track of the link callbacks in this list.
*/
static sys_slist_t link_callbacks;
#if defined(CONFIG_NET_IPV6)
/* Multicast join/leave tracking.
*/
static sys_slist_t mcast_monitor_callbacks;
#endif
#if defined(CONFIG_NET_PKT_TIMESTAMP_THREAD)
#if !defined(CONFIG_NET_PKT_TIMESTAMP_STACK_SIZE)
#define CONFIG_NET_PKT_TIMESTAMP_STACK_SIZE 1024
#endif
NET_STACK_DEFINE(TIMESTAMP, tx_ts_stack,
CONFIG_NET_PKT_TIMESTAMP_STACK_SIZE,
CONFIG_NET_PKT_TIMESTAMP_STACK_SIZE);
K_FIFO_DEFINE(tx_ts_queue);
static struct k_thread tx_thread_ts;
/* We keep track of the timestamp callbacks in this list.
*/
static sys_slist_t timestamp_callbacks;
#endif /* CONFIG_NET_PKT_TIMESTAMP_THREAD */
#if CONFIG_NET_IF_LOG_LEVEL >= LOG_LEVEL_DBG
#define debug_check_packet(pkt) \
do { \
NET_DBG("Processing (pkt %p, prio %d) network packet", \
pkt, net_pkt_priority(pkt)); \
\
NET_ASSERT(pkt->frags); \
} while (0)
#else
#define debug_check_packet(...)
#endif /* CONFIG_NET_IF_LOG_LEVEL >= LOG_LEVEL_DBG */
static inline void net_context_send_cb(struct net_context *context,
int status)
{
if (!context) {
return;
}
if (context->send_cb) {
context->send_cb(context, status, context->user_data);
}
if (IS_ENABLED(CONFIG_NET_UDP) &&
net_context_get_ip_proto(context) == IPPROTO_UDP) {
net_stats_update_udp_sent(net_context_get_iface(context));
} else if (IS_ENABLED(CONFIG_NET_TCP) &&
net_context_get_ip_proto(context) == IPPROTO_TCP) {
net_stats_update_tcp_seg_sent(net_context_get_iface(context));
}
}
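/* Deliver one queued packet to the interface driver. Called from the
 * TX traffic class work handler via process_tx_packet() below. The
 * packet is handed to the L2 send() function, statistics are updated
 * and the net_context send callback plus any registered link callbacks
 * are invoked with the send status.
 */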
static bool net_if_tx(struct net_if *iface, struct net_pkt *pkt)
{
struct net_linkaddr *dst;
struct net_context *context;
int status;
#if defined(CONFIG_NET_CONTEXT_TIMESTAMP)
/* Timestamp of the current network packet sent */
struct net_ptp_time start_timestamp;
u32_t curr_time = 0;
/* We collect send statistics for each socket priority */
u8_t pkt_priority;
#endif
if (!pkt) {
return false;
}
debug_check_packet(pkt);
dst = net_pkt_lladdr_dst(pkt);
context = net_pkt_context(pkt);
if (net_if_flag_is_set(iface, NET_IF_UP)) {
if (IS_ENABLED(CONFIG_NET_TCP) &&
net_pkt_family(pkt) != AF_UNSPEC) {
net_pkt_set_sent(pkt, true);
net_pkt_set_queued(pkt, false);
}
#if defined(CONFIG_NET_CONTEXT_TIMESTAMP)
if (context) {
if (net_context_get_timestamp(context, pkt,
&start_timestamp) < 0) {
start_timestamp.nanosecond = 0;
} else {
pkt_priority = net_pkt_priority(pkt);
}
}
#endif
status = net_if_l2(iface)->send(iface, pkt);
#if defined(CONFIG_NET_CONTEXT_TIMESTAMP)
if (status >= 0 && context) {
if (start_timestamp.nanosecond > 0) {
curr_time = k_cycle_get_32();
}
}
#endif
} else {
/* Drop packet if interface is not up */
NET_WARN("iface %p is down", iface);
status = -ENETDOWN;
}
if (status < 0) {
net_pkt_unref(pkt);
} else {
net_stats_update_bytes_sent(iface, status);
}
if (context) {
NET_DBG("Calling context send cb %p status %d",
context, status);
net_context_send_cb(context, status);
#if defined(CONFIG_NET_CONTEXT_TIMESTAMP)
if (status >= 0 && start_timestamp.nanosecond &&
curr_time > 0) {
/* So we know now how long the network packet was in
* transit from when it was allocated to when we
* got information that it was sent successfully.
*/
net_stats_update_tc_tx_time(iface,
pkt_priority,
start_timestamp.nanosecond,
curr_time);
}
#endif
}
if (dst->addr) {
net_if_call_link_cb(iface, dst, status);
}
return true;
}
static void process_tx_packet(struct k_work *work)
{
struct net_pkt *pkt;
pkt = CONTAINER_OF(work, struct net_pkt, work);
net_if_tx(net_pkt_iface(pkt), pkt);
}
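/* Map the packet priority to a TX traffic class, update the per-TC
 * statistics and submit the packet to the corresponding TX queue.
 * The queued work item runs process_tx_packet(), which in turn calls
 * net_if_tx().
 */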
void net_if_queue_tx(struct net_if *iface, struct net_pkt *pkt)
{
u8_t prio = net_pkt_priority(pkt);
u8_t tc = net_tx_priority2tc(prio);
k_work_init(net_pkt_work(pkt), process_tx_packet);
net_stats_update_tc_sent_pkt(iface, tc);
net_stats_update_tc_sent_bytes(iface, tc, net_pkt_get_len(pkt));
net_stats_update_tc_sent_priority(iface, tc, prio);
#if NET_TC_TX_COUNT > 1
NET_DBG("TC %d with prio %d pkt %p", tc, prio, pkt);
#endif
net_tc_submit_to_tx_queue(tc, pkt);
}
static inline void init_iface(struct net_if *iface)
{
const struct net_if_api *api = net_if_get_device(iface)->driver_api;
if (!api || !api->init) {
NET_ERR("Iface %p driver API init NULL", iface);
return;
}
NET_DBG("On iface %p", iface);
api->init(iface);
}
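/* Entry point used by the upper layers to send a packet through this
 * interface. The source link layer address is filled in if it is not
 * set, and for IPv6 the packet goes through net_ipv6_prepare_for_send()
 * which may delay sending until neighbor discovery has resolved the
 * destination link layer address. See the verdict description below.
 */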
enum net_verdict net_if_send_data(struct net_if *iface, struct net_pkt *pkt)
{
struct net_context *context = net_pkt_context(pkt);
struct net_linkaddr *dst = net_pkt_lladdr_dst(pkt);
enum net_verdict verdict = NET_OK;
int status = -EIO;
if (!net_if_flag_is_set(iface, NET_IF_UP)) {
/* Drop packet if interface is not up */
NET_WARN("iface %p is down", iface);
verdict = NET_DROP;
status = -ENETDOWN;
goto done;
}
/* If the ll address is not set at all, then we must set
 * it here.
 * Workaround for a Linux bug, see:
 * https://github.com/zephyrproject-rtos/zephyr/issues/3111
 */
if (!net_if_flag_is_set(iface, NET_IF_POINTOPOINT) &&
!net_pkt_lladdr_src(pkt)->addr) {
net_pkt_lladdr_src(pkt)->addr = net_pkt_lladdr_if(pkt)->addr;
net_pkt_lladdr_src(pkt)->len = net_pkt_lladdr_if(pkt)->len;
}
#if defined(CONFIG_NET_LOOPBACK)
/* If the packet is destined back to us, then there is no need to do
* additional checks, so let the packet through.
*/
if (net_if_l2(iface) == &NET_L2_GET_NAME(DUMMY)) {
goto done;
}
#endif
/* If the ll dst address is not set check if it is present in the nbr
* cache.
*/
if (IS_ENABLED(CONFIG_NET_IPV6) && net_pkt_family(pkt) == AF_INET6) {
verdict = net_ipv6_prepare_for_send(pkt);
}
done:
/* NET_OK, in which case the packet was checked successfully. In this
 * case the net_context callback is called after successful delivery by
 * net_if_tx(), which runs in the TX thread.
 *
 * NET_DROP, in which case we call the net_context callback that will
 * pass the status to the user application.
 *
 * NET_CONTINUE, in which case the sending of the packet is delayed.
 * This can happen for example if we need to do IPv6 ND to figure
 * out the link layer address.
 */
if (verdict == NET_DROP) {
if (context) {
NET_DBG("Calling ctx send cb %p verdict %d",
context, verdict);
net_context_send_cb(context, status);
}
if (dst->addr) {
net_if_call_link_cb(iface, dst, status);
}
} else if (verdict == NET_OK) {
/* Packet is ready to be sent by L2, let's queue */
net_if_queue_tx(iface, pkt);
}
return verdict;
}
struct net_if *net_if_get_by_link_addr(struct net_linkaddr *ll_addr)
{
struct net_if *iface;
for (iface = __net_if_start; iface != __net_if_end; iface++) {
if (!memcmp(net_if_get_link_addr(iface)->addr, ll_addr->addr,
ll_addr->len)) {
return iface;
}
}
return NULL;
}
struct net_if *net_if_lookup_by_dev(struct device *dev)
{
struct net_if *iface;
for (iface = __net_if_start; iface != __net_if_end; iface++) {
if (net_if_get_device(iface) == dev) {
return iface;
}
}
return NULL;
}
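/* Return the default network interface. The CONFIG_NET_DEFAULT_IF_*
 * options select the preferred L2 type; if none is configured, or no
 * interface of that type exists, the first interface in the net_if
 * section is returned.
 */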
struct net_if *net_if_get_default(void)
{
struct net_if *iface = NULL;
if (__net_if_start == __net_if_end) {
return NULL;
}
#if defined(CONFIG_NET_DEFAULT_IF_ETHERNET)
iface = net_if_get_first_by_type(&NET_L2_GET_NAME(ETHERNET));
#endif
#if defined(CONFIG_NET_DEFAULT_IF_IEEE802154)
iface = net_if_get_first_by_type(&NET_L2_GET_NAME(IEEE802154));
#endif
#if defined(CONFIG_NET_DEFAULT_IF_BLUETOOTH)
iface = net_if_get_first_by_type(&NET_L2_GET_NAME(BLUETOOTH));
#endif
#if defined(CONFIG_NET_DEFAULT_IF_DUMMY)
iface = net_if_get_first_by_type(&NET_L2_GET_NAME(DUMMY));
#endif
#if defined(CONFIG_NET_DEFAULT_IF_OFFLOAD)
iface = net_if_get_first_by_type(NULL);
#endif
#if defined(CONFIG_NET_DEFAULT_IF_CANBUS_RAW)
iface = net_if_get_first_by_type(&NET_L2_GET_NAME(CANBUS_RAW));
#endif
#if defined(CONFIG_NET_DEFAULT_IF_CANBUS)
iface = net_if_get_first_by_type(&NET_L2_GET_NAME(CANBUS));
#endif
return iface ? iface : __net_if_start;
}
struct net_if *net_if_get_first_by_type(const struct net_l2 *l2)
{
struct net_if *iface;
for (iface = __net_if_start; iface != __net_if_end; iface++) {
if (IS_ENABLED(CONFIG_NET_OFFLOAD) &&
!l2 && net_if_offload(iface)) {
return iface;
}
if (net_if_l2(iface) == l2) {
return iface;
}
}
return NULL;
}
static enum net_l2_flags l2_flags_get(struct net_if *iface)
{
enum net_l2_flags flags = 0;
if (net_if_l2(iface)->get_flags) {
flags = net_if_l2(iface)->get_flags(iface);
}
return flags;
}
#if defined(CONFIG_NET_IPV4) || defined(CONFIG_NET_IPV6)
/* Return how many bits are shared between two IP addresses */
static u8_t get_ipaddr_diff(const u8_t *src, const u8_t *dst, int addr_len)
{
u8_t j, k, xor;
u8_t len = 0U;
for (j = 0U; j < addr_len; j++) {
if (src[j] == dst[j]) {
len += 8U;
} else {
xor = src[j] ^ dst[j];
for (k = 0U; k < 8; k++) {
if (!(xor & 0x80)) {
len++;
xor <<= 1;
} else {
break;
}
}
break;
}
}
return len;
}
static struct net_if_router *iface_router_lookup(struct net_if *iface,
u8_t family, void *addr)
{
int i;
for (i = 0; i < CONFIG_NET_MAX_ROUTERS; i++) {
if (!routers[i].is_used ||
routers[i].address.family != family ||
routers[i].iface != iface) {
continue;
}
if ((IS_ENABLED(CONFIG_NET_IPV6) && family == AF_INET6 &&
net_ipv6_addr_cmp(net_if_router_ipv6(&routers[i]),
(struct in6_addr *)addr)) ||
(IS_ENABLED(CONFIG_NET_IPV4) && family == AF_INET &&
net_ipv4_addr_cmp(net_if_router_ipv4(&routers[i]),
(struct in_addr *)addr))) {
return &routers[i];
}
}
return NULL;
}
static void iface_router_notify_deletion(struct net_if_router *router,
const char *delete_reason)
{
if (IS_ENABLED(CONFIG_NET_IPV6) &&
router->address.family == AF_INET6) {
NET_DBG("IPv6 router %s %s",
log_strdup(net_sprint_ipv6_addr(
net_if_router_ipv6(router))),
delete_reason);
net_mgmt_event_notify_with_info(NET_EVENT_IPV6_ROUTER_DEL,
router->iface,
&router->address.in6_addr,
sizeof(struct in6_addr));
} else if (IS_ENABLED(CONFIG_NET_IPV4) &&
router->address.family == AF_INET) {
NET_DBG("IPv4 router %s %s",
log_strdup(net_sprint_ipv4_addr(
net_if_router_ipv4(router))),
delete_reason);
net_mgmt_event_notify_with_info(NET_EVENT_IPV4_ROUTER_DEL,
router->iface,
&router->address.in_addr,
sizeof(struct in_addr));
}
}
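/* (Re)arm the single shared router timer so that it fires when the
 * earliest remaining router lifetime expires. Called whenever a
 * lifetime-limited router is added, updated or removed.
 */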
static void iface_router_run_timer(u32_t current_time)
{
struct net_if_router *router, *next;
u32_t new_timer = UINT_MAX;
if (k_delayed_work_remaining_get(&router_timer)) {
k_delayed_work_cancel(&router_timer);
}
SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_router_timers,
router, next, node) {
u32_t current_timer = router->life_start +
K_SECONDS(router->lifetime) - current_time;
new_timer = MIN(current_timer, new_timer);
}
if (new_timer != UINT_MAX) {
k_delayed_work_submit(&router_timer, new_timer);
}
}
static void iface_router_expired(struct k_work *work)
{
u32_t current_time = k_uptime_get_32();
struct net_if_router *router, *next;
ARG_UNUSED(work);
SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_router_timers,
router, next, node) {
if ((s32_t)(router->life_start +
K_SECONDS(router->lifetime) - current_time) > 0) {
/* We have to loop over all active routers as their
 * lifetimes differ from each other.
 */
continue;
}
iface_router_notify_deletion(router, "has expired");
router->is_used = false;
}
iface_router_run_timer(current_time);
}
static struct net_if_router *iface_router_add(struct net_if *iface,
u8_t family, void *addr,
bool is_default,
u16_t lifetime)
{
int i;
for (i = 0; i < CONFIG_NET_MAX_ROUTERS; i++) {
if (routers[i].is_used) {
continue;
}
routers[i].is_used = true;
routers[i].iface = iface;
routers[i].address.family = family;
if (lifetime) {
routers[i].is_default = true;
routers[i].is_infinite = false;
routers[i].lifetime = lifetime;
routers[i].life_start = k_uptime_get_32();
sys_slist_append(&active_router_timers,
&routers[i].node);
iface_router_run_timer(routers[i].life_start);
} else {
routers[i].is_default = false;
routers[i].is_infinite = true;
routers[i].lifetime = 0;
}
if (IS_ENABLED(CONFIG_NET_IPV6) && family == AF_INET6) {
memcpy(net_if_router_ipv6(&routers[i]), addr,
sizeof(struct in6_addr));
net_mgmt_event_notify_with_info(
NET_EVENT_IPV6_ROUTER_ADD, iface,
&routers[i].address.in6_addr,
sizeof(struct in6_addr));
NET_DBG("interface %p router %s lifetime %u default %d "
"added", iface,
log_strdup(net_sprint_ipv6_addr(
(struct in6_addr *)addr)),
lifetime, routers[i].is_default);
} else if (IS_ENABLED(CONFIG_NET_IPV4) && family == AF_INET) {
memcpy(net_if_router_ipv4(&routers[i]), addr,
sizeof(struct in_addr));
routers[i].is_default = is_default;
net_mgmt_event_notify_with_info(
NET_EVENT_IPV4_ROUTER_ADD, iface,
&routers[i].address.in_addr,
sizeof(struct in_addr));
NET_DBG("interface %p router %s lifetime %u default %d "
"added", iface,
log_strdup(net_sprint_ipv4_addr(
(struct in_addr *)addr)),
lifetime, is_default);
}
return &routers[i];
}
return NULL;
}
static bool iface_router_rm(struct net_if_router *router)
{
if (!router->is_used) {
return false;
}
iface_router_notify_deletion(router, "has been removed");
/* We recompute the timer only if the router was time limited */
if (sys_slist_find_and_remove(&active_router_timers, &router->node)) {
iface_router_run_timer(k_uptime_get_32());
}
router->is_used = false;
return true;
}
static struct net_if_router *iface_router_find_default(struct net_if *iface,
u8_t family, void *addr)
{
int i;
/* TODO: addr will need to be handled */
ARG_UNUSED(addr);
for (i = 0; i < CONFIG_NET_MAX_ROUTERS; i++) {
if (!routers[i].is_used ||
!routers[i].is_default ||
routers[i].address.family != family) {
continue;
}
if (iface && iface != routers[i].iface) {
continue;
}
return &routers[i];
}
return NULL;
}
static void iface_router_init(void)
{
k_delayed_work_init(&router_timer, iface_router_expired);
sys_slist_init(&active_router_timers);
}
#else
#define iface_router_init(...)
#endif
#if defined(CONFIG_NET_IPV6)
int net_if_config_ipv6_get(struct net_if *iface, struct net_if_ipv6 **ipv6)
{
int i;
if (iface->config.ip.ipv6) {
if (ipv6) {
*ipv6 = iface->config.ip.ipv6;
}
return 0;
}
for (i = 0; i < ARRAY_SIZE(ipv6_addresses); i++) {
if (ipv6_addresses[i].iface) {
continue;
}
iface->config.ip.ipv6 = &ipv6_addresses[i].ipv6;
ipv6_addresses[i].iface = iface;
if (ipv6) {
*ipv6 = &ipv6_addresses[i].ipv6;
}
return 0;
}
return -ESRCH;
}
int net_if_config_ipv6_put(struct net_if *iface)
{
int i;
if (!iface->config.ip.ipv6) {
return -EALREADY;
}
for (i = 0; i < ARRAY_SIZE(ipv6_addresses); i++) {
if (ipv6_addresses[i].iface != iface) {
continue;
}
iface->config.ip.ipv6 = NULL;
ipv6_addresses[i].iface = NULL;
return 0;
}
return -ESRCH;
}
#if defined(CONFIG_NET_IPV6_MLD)
static void join_mcast_allnodes(struct net_if *iface)
{
struct in6_addr addr;
int ret;
net_ipv6_addr_create_ll_allnodes_mcast(&addr);
ret = net_ipv6_mld_join(iface, &addr);
if (ret < 0 && ret != -EALREADY) {
NET_ERR("Cannot join all nodes address %s (%d)",
log_strdup(net_sprint_ipv6_addr(&addr)), ret);
}
}
static void join_mcast_solicit_node(struct net_if *iface,
struct in6_addr *my_addr)
{
struct in6_addr addr;
int ret;
/* Join the needed multicast groups, RFC 4291 ch 2.8 */
net_ipv6_addr_create_solicited_node(my_addr, &addr);
ret = net_ipv6_mld_join(iface, &addr);
if (ret < 0 && ret != -EALREADY) {
NET_ERR("Cannot join solicit node address %s (%d)",
log_strdup(net_sprint_ipv6_addr(&addr)), ret);
}
}
static void leave_mcast_all(struct net_if *iface)
{
struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
int i;
if (!ipv6) {
return;
}
for (i = 0; i < NET_IF_MAX_IPV6_MADDR; i++) {
if (!ipv6->mcast[i].is_used ||
!ipv6->mcast[i].is_joined) {
continue;
}
net_ipv6_mld_leave(iface, &ipv6->mcast[i].address.in6_addr);
}
}
static void join_mcast_nodes(struct net_if *iface, struct in6_addr *addr)
{
enum net_l2_flags flags = 0;
if (net_if_l2(iface)->get_flags) {
flags = net_if_l2(iface)->get_flags(iface);
}
if (flags & NET_L2_MULTICAST) {
join_mcast_allnodes(iface);
if (!(flags & NET_L2_MULTICAST_SKIP_JOIN_SOLICIT_NODE)) {
join_mcast_solicit_node(iface, addr);
}
}
}
#else
#define join_mcast_allnodes(...)
#define join_mcast_solicit_node(...)
#define leave_mcast_all(...)
#define join_mcast_nodes(...)
#endif /* CONFIG_NET_IPV6_MLD */
#if defined(CONFIG_NET_IPV6_DAD)
#define DAD_TIMEOUT K_MSEC(100)
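/* All tentative addresses share one delayed work item. Addresses are
 * appended to active_dad_timers in start order, so the list head is
 * always the next one to expire; when the work runs, every entry whose
 * DAD_TIMEOUT has passed is marked as preferred.
 */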
static void dad_timeout(struct k_work *work)
{
u32_t current_time = k_uptime_get_32();
struct net_if_addr *ifaddr, *next;
ARG_UNUSED(work);
SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_dad_timers,
ifaddr, next, dad_node) {
struct net_if_addr *tmp;
struct net_if *iface;
if ((s32_t)(ifaddr->dad_start +
DAD_TIMEOUT - current_time) > 0) {
break;
}
/* Removing the ifaddr from active_dad_timers list */
sys_slist_remove(&active_dad_timers, NULL, &ifaddr->dad_node);
NET_DBG("DAD succeeded for %s",
log_strdup(net_sprint_ipv6_addr(
&ifaddr->address.in6_addr)));
ifaddr->addr_state = NET_ADDR_PREFERRED;
/* Because we do not know the interface at this point,
 * we need to look it up.
 */
iface = NULL;
tmp = net_if_ipv6_addr_lookup(&ifaddr->address.in6_addr,
&iface);
if (tmp == ifaddr) {
net_mgmt_event_notify_with_info(
NET_EVENT_IPV6_DAD_SUCCEED,
iface, &ifaddr->address.in6_addr,
sizeof(struct in6_addr));
/* The address gets added to the neighbor cache, which is not
 * needed in this case as the address is our own.
 */
net_ipv6_nbr_rm(iface, &ifaddr->address.in6_addr);
}
ifaddr = NULL;
}
if (ifaddr) {
k_delayed_work_submit(&dad_timer,
ifaddr->dad_start +
DAD_TIMEOUT - current_time);
}
}
static void net_if_ipv6_start_dad(struct net_if *iface,
struct net_if_addr *ifaddr)
{
ifaddr->addr_state = NET_ADDR_TENTATIVE;
if (net_if_is_up(iface)) {
NET_DBG("Interface %p ll addr %s tentative IPv6 addr %s",
iface,
log_strdup(net_sprint_ll_addr(
net_if_get_link_addr(iface)->addr,
net_if_get_link_addr(iface)->len)),
log_strdup(net_sprint_ipv6_addr(
&ifaddr->address.in6_addr)));
ifaddr->dad_count = 1U;
if (!net_ipv6_start_dad(iface, ifaddr)) {
ifaddr->dad_start = k_uptime_get_32();
sys_slist_append(&active_dad_timers, &ifaddr->dad_node);
if (!k_delayed_work_remaining_get(&dad_timer)) {
k_delayed_work_submit(&dad_timer, DAD_TIMEOUT);
}
}
} else {
NET_DBG("Interface %p is down, starting DAD for %s later.",
iface,
log_strdup(net_sprint_ipv6_addr(
&ifaddr->address.in6_addr)));
}
}
void net_if_start_dad(struct net_if *iface)
{
struct net_if_addr *ifaddr;
struct net_if_ipv6 *ipv6;
struct in6_addr addr = { };
int i;
NET_DBG("Starting DAD for iface %p", iface);
if (net_if_config_ipv6_get(iface, &ipv6) < 0) {
NET_WARN("Cannot do DAD IPv6 config is not valid.");
return;
}
if (!ipv6) {
return;
}
net_ipv6_addr_create_iid(&addr, net_if_get_link_addr(iface));
ifaddr = net_if_ipv6_addr_add(iface, &addr, NET_ADDR_AUTOCONF, 0);
if (!ifaddr) {
NET_ERR("Cannot add %s address to interface %p, DAD fails",
log_strdup(net_sprint_ipv6_addr(&addr)), iface);
}
/* Start DAD for all the addresses that were added earlier when
* the interface was down.
*/
for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
if (!ipv6->unicast[i].is_used ||
ipv6->unicast[i].address.family != AF_INET6 ||
&ipv6->unicast[i] == ifaddr) {
continue;
}
net_if_ipv6_start_dad(iface, &ipv6->unicast[i]);
}
}
void net_if_ipv6_dad_failed(struct net_if *iface, const struct in6_addr *addr)
{
struct net_if_addr *ifaddr;
ifaddr = net_if_ipv6_addr_lookup(addr, &iface);
if (!ifaddr) {
NET_ERR("Cannot find %s address in interface %p",
log_strdup(net_sprint_ipv6_addr(addr)), iface);
return;
}
sys_slist_find_and_remove(&active_dad_timers, &ifaddr->dad_node);
net_mgmt_event_notify_with_info(NET_EVENT_IPV6_DAD_FAILED, iface,
&ifaddr->address.in6_addr,
sizeof(struct in6_addr));
net_if_ipv6_addr_rm(iface, addr);
}
static inline void iface_ipv6_dad_init(void)
{
k_delayed_work_init(&dad_timer, dad_timeout);
sys_slist_init(&active_dad_timers);
}
#else
static inline void net_if_ipv6_start_dad(struct net_if *iface,
struct net_if_addr *ifaddr)
{
ifaddr->addr_state = NET_ADDR_PREFERRED;
}
#define iface_ipv6_dad_init(...)
#endif /* CONFIG_NET_IPV6_DAD */
#if defined(CONFIG_NET_IPV6_ND)
#define RS_TIMEOUT K_SECONDS(1)
#define RS_COUNT 3
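/* Router solicitation retransmission: all interfaces share one delayed
 * work item. If the interface config is still on the active list when
 * the timer fires, the solicitation is resent, up to RS_COUNT attempts
 * spaced RS_TIMEOUT apart. net_if_stop_rs() below cancels the
 * retransmission.
 */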
static void rs_timeout(struct k_work *work)
{
u32_t current_time = k_uptime_get_32();
struct net_if_ipv6 *ipv6, *next;
ARG_UNUSED(work);
SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_rs_timers,
ipv6, next, rs_node) {
struct net_if *iface;
if ((s32_t)(ipv6->rs_start + RS_TIMEOUT - current_time) > 0) {
break;
}
/* Removing the ipv6 from active_rs_timers list */
sys_slist_remove(&active_rs_timers, NULL, &ipv6->rs_node);
/* Did not receive RA yet. */
ipv6->rs_count++;
for (iface = __net_if_start; iface != __net_if_end; iface++) {
if (iface->config.ip.ipv6 == ipv6) {
break;
}
}
if (iface != __net_if_end) {
NET_DBG("RS no respond iface %p count %d",
iface, ipv6->rs_count);
if (ipv6->rs_count < RS_COUNT) {
net_if_start_rs(iface);
}
} else {
NET_DBG("Interface IPv6 config %p not found", ipv6);
}
ipv6 = NULL;
}
if (ipv6) {
k_delayed_work_submit(&rs_timer,
ipv6->rs_start +
RS_TIMEOUT - current_time);
}
}
void net_if_start_rs(struct net_if *iface)
{
struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
if (!ipv6) {
return;
}
NET_DBG("Starting ND/RS for iface %p", iface);
if (!net_ipv6_start_rs(iface)) {
ipv6->rs_start = k_uptime_get_32();
sys_slist_append(&active_rs_timers, &ipv6->rs_node);
if (!k_delayed_work_remaining_get(&rs_timer)) {
k_delayed_work_submit(&rs_timer, RS_TIMEOUT);
}
}
}
void net_if_stop_rs(struct net_if *iface)
{
struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
if (!ipv6) {
return;
}
NET_DBG("Stopping ND/RS for iface %p", iface);
sys_slist_find_and_remove(&active_rs_timers, &ipv6->rs_node);
}
static inline void iface_ipv6_nd_init(void)
{
k_delayed_work_init(&rs_timer, rs_timeout);
sys_slist_init(&active_rs_timers);
}
#else
#define net_if_start_rs(...)
#define net_if_stop_rs(...)
#define iface_ipv6_nd_init(...)
#endif /* CONFIG_NET_IPV6_ND */
struct net_if_addr *net_if_ipv6_addr_lookup(const struct in6_addr *addr,
struct net_if **ret)
{
struct net_if *iface;
for (iface = __net_if_start; iface != __net_if_end; iface++) {
struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
int i;
if (!ipv6) {
continue;
}
for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
if (!ipv6->unicast[i].is_used ||
ipv6->unicast[i].address.family != AF_INET6) {
continue;
}
if (net_ipv6_is_prefix(
addr->s6_addr,
ipv6->unicast[i].address.in6_addr.s6_addr,
128)) {
if (ret) {
*ret = iface;
}
return &ipv6->unicast[i];
}
}
}
return NULL;
}
struct net_if_addr *net_if_ipv6_addr_lookup_by_iface(struct net_if *iface,
struct in6_addr *addr)
{
struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
int i;
if (!ipv6) {
return NULL;
}
for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
if (!ipv6->unicast[i].is_used ||
ipv6->unicast[i].address.family != AF_INET6) {
continue;
}
if (net_ipv6_is_prefix(
addr->s6_addr,
ipv6->unicast[i].address.in6_addr.s6_addr,
128)) {
return &ipv6->unicast[i];
}
}
return NULL;
}
int z_impl_net_if_ipv6_addr_lookup_by_index(const struct in6_addr *addr)
{
struct net_if *iface = NULL;
struct net_if_addr *if_addr;
if_addr = net_if_ipv6_addr_lookup(addr, &iface);
if (!if_addr) {
return 0;
}
return net_if_get_by_iface(iface);
}
#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER(net_if_ipv6_addr_lookup_by_index, addr)
{
struct in6_addr addr_v6;
Z_OOPS(z_user_from_copy(&addr_v6, (void *)addr, sizeof(addr_v6)));
return z_impl_net_if_ipv6_addr_lookup_by_index(&addr_v6);
}
#endif
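/* Address and prefix lifetimes can be longer than the maximum timeout
 * the kernel can handle, so they are stored as wrap_counter full
 * NET_TIMEOUT_MAX_VALUE periods plus a remaining timer_timeout.
 * check_timeout() reports expiry only when no full periods remain and
 * the remaining timeout has elapsed.
 */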
static bool check_timeout(u32_t start, s32_t timeout, u32_t counter,
u32_t current_time)
{
if (counter > 0) {
return false;
}
if ((s32_t)((start + (u32_t)timeout) - current_time) > 0) {
return false;
}
return true;
}
static void address_expired(struct net_if_addr *ifaddr)
{
NET_DBG("IPv6 address %s is deprecated",
log_strdup(net_sprint_ipv6_addr(&ifaddr->address.in6_addr)));
ifaddr->addr_state = NET_ADDR_DEPRECATED;
ifaddr->lifetime.timer_timeout = 0;
ifaddr->lifetime.wrap_counter = 0;
sys_slist_find_and_remove(&active_address_lifetime_timers,
&ifaddr->lifetime.node);
}
static bool address_manage_timeout(struct net_if_addr *ifaddr,
u32_t current_time, u32_t *next_wakeup)
{
if (check_timeout(ifaddr->lifetime.timer_start,
ifaddr->lifetime.timer_timeout,
ifaddr->lifetime.wrap_counter,
current_time)) {
address_expired(ifaddr);
return true;
}
if (current_time == NET_TIMEOUT_MAX_VALUE) {
ifaddr->lifetime.timer_start = k_uptime_get_32();
ifaddr->lifetime.wrap_counter--;
}
if (ifaddr->lifetime.wrap_counter > 0) {
*next_wakeup = NET_TIMEOUT_MAX_VALUE;
} else {
*next_wakeup = ifaddr->lifetime.timer_timeout;
}
return false;
}
static void address_lifetime_timeout(struct k_work *work)
{
u64_t timeout_update = UINT64_MAX;
u32_t current_time = k_uptime_get_32();
bool found = false;
struct net_if_addr *current, *next;
ARG_UNUSED(work);
SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_address_lifetime_timers,
current, next, lifetime.node) {
u32_t next_timeout;
bool is_timeout;
is_timeout = address_manage_timeout(current, current_time,
&next_timeout);
if (!is_timeout) {
if (next_timeout < timeout_update) {
timeout_update = next_timeout;
found = true;
}
}
if (current == next) {
break;
}
}
if (found) {
/* If we are near the upper limit of the s32_t timeout, then lower it
 * a bit so that the kernel timeout variable will not overflow.
 */
if (timeout_update >= NET_TIMEOUT_MAX_VALUE) {
timeout_update = NET_TIMEOUT_MAX_VALUE;
}
NET_DBG("Waiting for %d ms", (s32_t)timeout_update);
k_delayed_work_submit(&address_lifetime_timer, timeout_update);
}
}
#if defined(CONFIG_NET_TEST)
void net_address_lifetime_timeout(void)
{
address_lifetime_timeout(NULL);
}
#endif
static void address_submit_work(struct net_if_addr *ifaddr)
{
s32_t remaining;
remaining = k_delayed_work_remaining_get(&address_lifetime_timer);
if (!remaining || (ifaddr->lifetime.wrap_counter == 0 &&
ifaddr->lifetime.timer_timeout < remaining)) {
k_delayed_work_cancel(&address_lifetime_timer);
if (ifaddr->lifetime.wrap_counter > 0 && remaining == 0) {
k_delayed_work_submit(&address_lifetime_timer,
NET_TIMEOUT_MAX_VALUE);
} else {
k_delayed_work_submit(&address_lifetime_timer,
ifaddr->lifetime.timer_timeout);
}
NET_DBG("Next wakeup in %d ms",
k_delayed_work_remaining_get(&address_lifetime_timer));
}
}
static void address_start_timer(struct net_if_addr *ifaddr, u32_t vlifetime)
{
u64_t expire_timeout = K_SECONDS((u64_t)vlifetime);
sys_slist_append(&active_address_lifetime_timers,
&ifaddr->lifetime.node);
ifaddr->lifetime.timer_start = k_uptime_get_32();
ifaddr->lifetime.wrap_counter = expire_timeout /
(u64_t)NET_TIMEOUT_MAX_VALUE;
ifaddr->lifetime.timer_timeout = expire_timeout -
(u64_t)NET_TIMEOUT_MAX_VALUE *
(u64_t)ifaddr->lifetime.wrap_counter;
address_submit_work(ifaddr);
}
void net_if_ipv6_addr_update_lifetime(struct net_if_addr *ifaddr,
u32_t vlifetime)
{
NET_DBG("Updating expire time of %s by %u secs",
log_strdup(net_sprint_ipv6_addr(&ifaddr->address.in6_addr)),
vlifetime);
ifaddr->addr_state = NET_ADDR_PREFERRED;
address_start_timer(ifaddr, vlifetime);
}
static struct net_if_addr *ipv6_addr_find(struct net_if *iface,
struct in6_addr *addr)
{
struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
int i;
for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
if (!ipv6->unicast[i].is_used) {
continue;
}
if (net_ipv6_addr_cmp(
addr, &ipv6->unicast[i].address.in6_addr)) {
return &ipv6->unicast[i];
}
}
return NULL;
}
static inline void net_if_addr_init(struct net_if_addr *ifaddr,
struct in6_addr *addr,
enum net_addr_type addr_type,
u32_t vlifetime)
{
ifaddr->is_used = true;
ifaddr->address.family = AF_INET6;
ifaddr->addr_type = addr_type;
net_ipaddr_copy(&ifaddr->address.in6_addr, addr);
/* FIXME - set the mcast addr for this node */
if (vlifetime) {
ifaddr->is_infinite = false;
NET_DBG("Expiring %s in %u secs",
log_strdup(net_sprint_ipv6_addr(addr)),
vlifetime);
net_if_ipv6_addr_update_lifetime(ifaddr, vlifetime);
} else {
ifaddr->is_infinite = true;
}
}
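/* Minimal usage sketch (the address literal is illustrative only):
 *
 *   struct in6_addr addr;
 *
 *   if (net_addr_pton(AF_INET6, "2001:db8::1", &addr) == 0) {
 *       net_if_ipv6_addr_add(net_if_get_default(), &addr,
 *                            NET_ADDR_MANUAL, 0);
 *   }
 *
 * A zero vlifetime means the address never expires; a non-zero value
 * starts a lifetime timer handled by address_lifetime_timeout() above.
 */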
struct net_if_addr *net_if_ipv6_addr_add(struct net_if *iface,
struct in6_addr *addr,
enum net_addr_type addr_type,
u32_t vlifetime)
{
struct net_if_addr *ifaddr;
struct net_if_ipv6 *ipv6;
int i;
if (net_if_config_ipv6_get(iface, &ipv6) < 0) {
return NULL;
}
ifaddr = ipv6_addr_find(iface, addr);
if (ifaddr) {
return ifaddr;
}
for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
if (ipv6->unicast[i].is_used) {
continue;
}
net_if_addr_init(&ipv6->unicast[i], addr, addr_type,
vlifetime);
NET_DBG("[%d] interface %p address %s type %s added", i,
iface, log_strdup(net_sprint_ipv6_addr(addr)),
net_addr_type2str(addr_type));
if (!(l2_flags_get(iface) & NET_L2_POINT_TO_POINT)) {
/* RFC 4862 5.4.2
* Before sending a Neighbor Solicitation, an interface
* MUST join the all-nodes multicast address and the
* solicited-node multicast address of the tentative
* address.
*/
/* The allnodes multicast group is only joined once as
* net_ipv6_mcast_join() checks if we have already
* joined.
*/
join_mcast_nodes(iface,
&ipv6->unicast[i].address.in6_addr);
net_if_ipv6_start_dad(iface, &ipv6->unicast[i]);
}
net_mgmt_event_notify_with_info(
NET_EVENT_IPV6_ADDR_ADD, iface,
&ipv6->unicast[i].address.in6_addr,
sizeof(struct in6_addr));
return &ipv6->unicast[i];
}
return NULL;
}
bool net_if_ipv6_addr_rm(struct net_if *iface, const struct in6_addr *addr)
{
struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
int i;
NET_ASSERT(addr);
if (!ipv6) {
return false;
}
for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
struct in6_addr maddr;
if (!ipv6->unicast[i].is_used) {
continue;
}
if (!net_ipv6_addr_cmp(&ipv6->unicast[i].address.in6_addr,
addr)) {
continue;
}
if (!ipv6->unicast[i].is_infinite) {
sys_slist_find_and_remove(
&active_address_lifetime_timers,
&ipv6->unicast[i].lifetime.node);
if (sys_slist_is_empty(
&active_address_lifetime_timers)) {
k_delayed_work_cancel(&address_lifetime_timer);
}
}
ipv6->unicast[i].is_used = false;
net_ipv6_addr_create_solicited_node(addr, &maddr);
net_if_ipv6_maddr_rm(iface, &maddr);
NET_DBG("[%d] interface %p address %s type %s removed",
i, iface, log_strdup(net_sprint_ipv6_addr(addr)),
net_addr_type2str(ipv6->unicast[i].addr_type));
/* Using the IPv6 address pointer here can give false
 * info if someone adds a new IP address into this position
 * in the address array. This is quite unlikely though.
 */
net_mgmt_event_notify_with_info(
NET_EVENT_IPV6_ADDR_DEL,
iface,
&ipv6->unicast[i].address.in6_addr,
sizeof(struct in6_addr));
return true;
}
return false;
}
bool z_impl_net_if_ipv6_addr_add_by_index(int index,
struct in6_addr *addr,
enum net_addr_type addr_type,
u32_t vlifetime)
{
struct net_if *iface;
iface = net_if_get_by_index(index);
if (!iface) {
return false;
}
return net_if_ipv6_addr_add(iface, addr, addr_type, vlifetime) ?
true : false;
}
#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER(net_if_ipv6_addr_add_by_index, index, addr, addr_type,
vlifetime)
{
#if defined(CONFIG_NET_IF_USERSPACE_ACCESS)
struct in6_addr addr_v6;
Z_OOPS(z_user_from_copy(&addr_v6, (void *)addr, sizeof(addr_v6)));
return z_impl_net_if_ipv6_addr_add_by_index(index,
&addr_v6,
addr_type,
vlifetime);
#else
return false;
#endif /* CONFIG_NET_IF_USERSPACE_ACCESS */
}
#endif /* CONFIG_USERSPACE */
bool z_impl_net_if_ipv6_addr_rm_by_index(int index,
const struct in6_addr *addr)
{
struct net_if *iface;
iface = net_if_get_by_index(index);
if (!iface) {
return false;
}
return net_if_ipv6_addr_rm(iface, addr);
}
#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER(net_if_ipv6_addr_rm_by_index, index, addr)
{
#if defined(CONFIG_NET_IF_USERSPACE_ACCESS)
struct in6_addr addr_v6;
Z_OOPS(z_user_from_copy(&addr_v6, (void *)addr, sizeof(addr_v6)));
return z_impl_net_if_ipv6_addr_rm_by_index(index, &addr_v6);
#else
return false;
#endif /* CONFIG_NET_IF_USERSPACE_ACCESS */
}
#endif /* CONFIG_USERSPACE */
struct net_if_mcast_addr *net_if_ipv6_maddr_add(struct net_if *iface,
const struct in6_addr *addr)
{
struct net_if_ipv6 *ipv6;
int i;
if (net_if_config_ipv6_get(iface, &ipv6) < 0) {
return NULL;
}
if (!net_ipv6_is_addr_mcast(addr)) {
NET_DBG("Address %s is not a multicast address.",
log_strdup(net_sprint_ipv6_addr(addr)));
return NULL;
}
for (i = 0; i < NET_IF_MAX_IPV6_MADDR; i++) {
if (ipv6->mcast[i].is_used) {
continue;
}
ipv6->mcast[i].is_used = true;
ipv6->mcast[i].address.family = AF_INET6;
memcpy(&ipv6->mcast[i].address.in6_addr, addr, 16);
NET_DBG("[%d] interface %p address %s added", i, iface,
log_strdup(net_sprint_ipv6_addr(addr)));
net_mgmt_event_notify_with_info(
NET_EVENT_IPV6_MADDR_ADD, iface,
&ipv6->mcast[i].address.in6_addr,
sizeof(struct in6_addr));
return &ipv6->mcast[i];
}
return NULL;
}
bool net_if_ipv6_maddr_rm(struct net_if *iface, const struct in6_addr *addr)
{
struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
int i;
if (!ipv6) {
return false;
}
for (i = 0; i < NET_IF_MAX_IPV6_MADDR; i++) {
if (!ipv6->mcast[i].is_used) {
continue;
}
if (!net_ipv6_addr_cmp(&ipv6->mcast[i].address.in6_addr,
addr)) {
continue;
}
ipv6->mcast[i].is_used = false;
NET_DBG("[%d] interface %p address %s removed",
i, iface, log_strdup(net_sprint_ipv6_addr(addr)));
net_mgmt_event_notify_with_info(
NET_EVENT_IPV6_MADDR_DEL, iface,
&ipv6->mcast[i].address.in6_addr,
sizeof(struct in6_addr));
return true;
}
return false;
}
struct net_if_mcast_addr *net_if_ipv6_maddr_lookup(const struct in6_addr *maddr,
struct net_if **ret)
{
struct net_if *iface;
for (iface = __net_if_start; iface != __net_if_end; iface++) {
struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
int i;
if (ret && *ret && iface != *ret) {
continue;
}
if (!ipv6) {
continue;
}
for (i = 0; i < NET_IF_MAX_IPV6_MADDR; i++) {
if (!ipv6->mcast[i].is_used ||
ipv6->mcast[i].address.family != AF_INET6) {
continue;
}
if (net_ipv6_is_prefix(
maddr->s6_addr,
ipv6->mcast[i].address.in6_addr.s6_addr,
128)) {
if (ret) {
*ret = iface;
}
return &ipv6->mcast[i];
}
}
}
return NULL;
}
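/* Register a multicast join/leave monitor for the given interface.
 * The node is removed first so that re-registering the same monitor
 * simply moves it to the front of the callback list.
 */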
void net_if_mcast_mon_register(struct net_if_mcast_monitor *mon,
struct net_if *iface,
net_if_mcast_callback_t cb)
{
sys_slist_find_and_remove(&mcast_monitor_callbacks, &mon->node);
sys_slist_prepend(&mcast_monitor_callbacks, &mon->node);
mon->iface = iface;
mon->cb = cb;
}
void net_if_mcast_mon_unregister(struct net_if_mcast_monitor *mon)
{
sys_slist_find_and_remove(&mcast_monitor_callbacks, &mon->node);
}
void net_if_mcast_monitor(struct net_if *iface,
const struct in6_addr *addr,
bool is_joined)
{
struct net_if_mcast_monitor *mon, *tmp;
SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&mcast_monitor_callbacks,
mon, tmp, node) {
if (iface == mon->iface) {
mon->cb(iface, addr, is_joined);
}
}
}
static void remove_prefix_addresses(struct net_if *iface,
struct net_if_ipv6 *ipv6,
struct in6_addr *addr,
u8_t len)
{
int i;
for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
if (!ipv6->unicast[i].is_used ||
ipv6->unicast[i].address.family != AF_INET6 ||
ipv6->unicast[i].addr_type != NET_ADDR_AUTOCONF) {
continue;
}
if (net_ipv6_is_prefix(
addr->s6_addr,
ipv6->unicast[i].address.in6_addr.s6_addr,
len)) {
net_if_ipv6_addr_rm(iface,
&ipv6->unicast[i].address.in6_addr);
}
}
}
static void prefix_lifetime_expired(struct net_if_ipv6_prefix *ifprefix)
{
struct net_if_ipv6 *ipv6;
NET_DBG("Prefix %s/%d expired",
log_strdup(net_sprint_ipv6_addr(&ifprefix->prefix)),
ifprefix->len);
ifprefix->is_used = false;
if (net_if_config_ipv6_get(ifprefix->iface, &ipv6) < 0) {
return;
}
/* Also remove all autoconfigured addresses if they have the same
 * prefix.
 */
remove_prefix_addresses(ifprefix->iface, ipv6, &ifprefix->prefix,
ifprefix->len);
net_mgmt_event_notify_with_info(
NET_EVENT_IPV6_PREFIX_DEL, ifprefix->iface,
&ifprefix->prefix, sizeof(struct in6_addr));
}
static void prefix_timer_remove(struct net_if_ipv6_prefix *ifprefix)
{
NET_DBG("IPv6 prefix %s/%d removed",
log_strdup(net_sprint_ipv6_addr(&ifprefix->prefix)),
ifprefix->len);
ifprefix->lifetime.timer_timeout = 0;
ifprefix->lifetime.wrap_counter = 0;
sys_slist_find_and_remove(&active_prefix_lifetime_timers,
&ifprefix->lifetime.node);
}
static bool prefix_manage_timeout(struct net_if_ipv6_prefix *ifprefix,
u32_t current_time, u32_t *next_wakeup)
{
if (check_timeout(ifprefix->lifetime.timer_start,
ifprefix->lifetime.timer_timeout,
ifprefix->lifetime.wrap_counter,
current_time)) {
prefix_lifetime_expired(ifprefix);
return true;
}
if (current_time == NET_TIMEOUT_MAX_VALUE) {
ifprefix->lifetime.wrap_counter--;
}
if (ifprefix->lifetime.wrap_counter > 0) {
*next_wakeup = NET_TIMEOUT_MAX_VALUE;
} else {
*next_wakeup = ifprefix->lifetime.timer_timeout;
}
return false;
}
static void prefix_lifetime_timeout(struct k_work *work)
{
u64_t timeout_update = UINT64_MAX;
u32_t current_time = k_uptime_get_32();
bool found = false;
struct net_if_ipv6_prefix *current, *next;
ARG_UNUSED(work);
SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_prefix_lifetime_timers,
current, next, lifetime.node) {
u32_t next_timeout;
bool is_timeout;
is_timeout = prefix_manage_timeout(current, current_time,
&next_timeout);
if (!is_timeout) {
if (next_timeout < timeout_update) {
timeout_update = next_timeout;
found = true;
}
}
if (current == next) {
break;
}
}
if (found) {
/* If we are near the upper limit of the s32_t timeout, then lower it
 * a bit so that the kernel timeout will not overflow.
 */
if (timeout_update >= NET_TIMEOUT_MAX_VALUE) {
timeout_update = NET_TIMEOUT_MAX_VALUE;
}
NET_DBG("Waiting for %d ms", (u32_t)timeout_update);
k_delayed_work_submit(&prefix_lifetime_timer, timeout_update);
}
}
static void prefix_submit_work(struct net_if_ipv6_prefix *ifprefix)
{
s32_t remaining;
remaining = k_delayed_work_remaining_get(&prefix_lifetime_timer);
if (!remaining || (ifprefix->lifetime.wrap_counter == 0 &&
ifprefix->lifetime.timer_timeout < remaining)) {
k_delayed_work_cancel(&prefix_lifetime_timer);
if (ifprefix->lifetime.wrap_counter > 0 && remaining == 0) {
k_delayed_work_submit(&prefix_lifetime_timer,
NET_TIMEOUT_MAX_VALUE);
} else {
k_delayed_work_submit(&prefix_lifetime_timer,
ifprefix->lifetime.timer_timeout);
}
NET_DBG("Next wakeup in %d ms",
k_delayed_work_remaining_get(&prefix_lifetime_timer));
}
}
static void prefix_start_timer(struct net_if_ipv6_prefix *ifprefix,
u32_t lifetime)
{
u64_t expire_timeout = K_SECONDS((u64_t)lifetime);
sys_slist_append(&active_prefix_lifetime_timers,
&ifprefix->lifetime.node);
ifprefix->lifetime.timer_start = k_uptime_get_32();
ifprefix->lifetime.wrap_counter = expire_timeout /
(u64_t)NET_TIMEOUT_MAX_VALUE;
ifprefix->lifetime.timer_timeout = expire_timeout -
(u64_t)NET_TIMEOUT_MAX_VALUE *
(u64_t)ifprefix->lifetime.wrap_counter;
prefix_submit_work(ifprefix);
}
static struct net_if_ipv6_prefix *ipv6_prefix_find(struct net_if *iface,
struct in6_addr *prefix,
u8_t prefix_len)
{
struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
int i;
if (!ipv6) {
return NULL;
}
for (i = 0; i < NET_IF_MAX_IPV6_PREFIX; i++) {
if (!ipv6->prefix[i].is_used) {
continue;
}
if (net_ipv6_addr_cmp(prefix, &ipv6->prefix[i].prefix) &&
prefix_len == ipv6->prefix[i].len) {
return &ipv6->prefix[i];
}
}
return NULL;
}
static void net_if_ipv6_prefix_init(struct net_if *iface,
struct net_if_ipv6_prefix *ifprefix,
struct in6_addr *addr, u8_t len,
u32_t lifetime)
{
ifprefix->is_used = true;
ifprefix->len = len;
ifprefix->iface = iface;
net_ipaddr_copy(&ifprefix->prefix, addr);
if (lifetime == NET_IPV6_ND_INFINITE_LIFETIME) {
ifprefix->is_infinite = true;
} else {
ifprefix->is_infinite = false;
}
}
struct net_if_ipv6_prefix *net_if_ipv6_prefix_add(struct net_if *iface,
struct in6_addr *prefix,
u8_t len,
u32_t lifetime)
{
struct net_if_ipv6_prefix *ifprefix;
struct net_if_ipv6 *ipv6;
int i;
if (net_if_config_ipv6_get(iface, &ipv6) < 0) {
return NULL;
}
ifprefix = ipv6_prefix_find(iface, prefix, len);
if (ifprefix) {
return ifprefix;
}
if (!ipv6) {
return NULL;
}
for (i = 0; i < NET_IF_MAX_IPV6_PREFIX; i++) {
if (ipv6->prefix[i].is_used) {
continue;
}
net_if_ipv6_prefix_init(iface, &ipv6->prefix[i], prefix,
len, lifetime);
NET_DBG("[%d] interface %p prefix %s/%d added", i, iface,
log_strdup(net_sprint_ipv6_addr(prefix)), len);
net_mgmt_event_notify_with_info(
NET_EVENT_IPV6_PREFIX_ADD, iface,
&ipv6->prefix[i].prefix, sizeof(struct in6_addr));
return &ipv6->prefix[i];
}
return NULL;
}
bool net_if_ipv6_prefix_rm(struct net_if *iface, struct in6_addr *addr,
u8_t len)
{
struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
int i;
if (!ipv6) {
return false;
}
for (i = 0; i < NET_IF_MAX_IPV6_PREFIX; i++) {
if (!ipv6->prefix[i].is_used) {
continue;
}
if (!net_ipv6_addr_cmp(&ipv6->prefix[i].prefix, addr) ||
ipv6->prefix[i].len != len) {
continue;
}
net_if_ipv6_prefix_unset_timer(&ipv6->prefix[i]);
ipv6->prefix[i].is_used = false;
/* Also remove all autoconfigured addresses if they have the same
 * prefix.
 */
remove_prefix_addresses(iface, ipv6, addr, len);
net_mgmt_event_notify_with_info(
NET_EVENT_IPV6_PREFIX_DEL, iface,
&ipv6->prefix[i].prefix, sizeof(struct in6_addr));
return true;
}
return false;
}
struct net_if_ipv6_prefix *net_if_ipv6_prefix_get(struct net_if *iface,
struct in6_addr *addr)
{
struct net_if_ipv6_prefix *prefix = NULL;
struct net_if_ipv6 *ipv6;
int i;
if (!iface) {
iface = net_if_get_default();
}
ipv6 = iface->config.ip.ipv6;
if (!ipv6) {
return NULL;
}
for (i = 0; i < NET_IF_MAX_IPV6_PREFIX; i++) {
if (!ipv6->prefix[i].is_used) {
continue;
}
if (net_ipv6_is_prefix(ipv6->prefix[i].prefix.s6_addr,
addr->s6_addr,
ipv6->prefix[i].len)) {
if (!prefix || prefix->len > ipv6->prefix[i].len) {
prefix = &ipv6->prefix[i];
}
}
}
return prefix;
}
struct net_if_ipv6_prefix *net_if_ipv6_prefix_lookup(struct net_if *iface,
struct in6_addr *addr,
u8_t len)
{
struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
int i;
if (!ipv6) {
return NULL;
}
for (i = 0; i < NET_IF_MAX_IPV6_PREFIX; i++) {
if (!ipv6->prefix[i].is_used) {
continue;
}
if (net_ipv6_is_prefix(ipv6->prefix[i].prefix.s6_addr,
addr->s6_addr, len)) {
return &ipv6->prefix[i];
}
}
return NULL;
}
bool net_if_ipv6_addr_onlink(struct net_if **iface, struct in6_addr *addr)
{
struct net_if *tmp;
for (tmp = __net_if_start; tmp != __net_if_end; tmp++) {
struct net_if_ipv6 *ipv6 = tmp->config.ip.ipv6;
int i;
if (iface && *iface && *iface != tmp) {
continue;
}
if (!ipv6) {
continue;
}
for (i = 0; i < NET_IF_MAX_IPV6_PREFIX; i++) {
if (ipv6->prefix[i].is_used &&
net_ipv6_is_prefix(ipv6->prefix[i].prefix.s6_addr,
addr->s6_addr,
ipv6->prefix[i].len)) {
if (iface) {
*iface = tmp;
}
return true;
}
}
}
return false;
}
void net_if_ipv6_prefix_set_timer(struct net_if_ipv6_prefix *prefix,
u32_t lifetime)
{
/* No need to set a timer for infinite timeout */
if (lifetime == NET_IPV6_ND_INFINITE_LIFETIME) {
return;
}
NET_DBG("Prefix lifetime %u sec", lifetime);
prefix_start_timer(prefix, lifetime);
}
void net_if_ipv6_prefix_unset_timer(struct net_if_ipv6_prefix *prefix)
{
if (!prefix->is_used) {
return;
}
prefix_timer_remove(prefix);
}
struct net_if_router *net_if_ipv6_router_lookup(struct net_if *iface,
struct in6_addr *addr)
{
return iface_router_lookup(iface, AF_INET6, addr);
}
struct net_if_router *net_if_ipv6_router_find_default(struct net_if *iface,
struct in6_addr *addr)
{
return iface_router_find_default(iface, AF_INET6, addr);
}
void net_if_ipv6_router_update_lifetime(struct net_if_router *router,
u16_t lifetime)
{
NET_DBG("Updating expire time of %s by %u secs",
log_strdup(net_sprint_ipv6_addr(&router->address.in6_addr)),
lifetime);
router->life_start = k_uptime_get_32();
router->lifetime = lifetime;
iface_router_run_timer(router->life_start);
}
struct net_if_router *net_if_ipv6_router_add(struct net_if *iface,
struct in6_addr *addr,
u16_t lifetime)
{
return iface_router_add(iface, AF_INET6, addr, false, lifetime);
}
bool net_if_ipv6_router_rm(struct net_if_router *router)
{
return iface_router_rm(router);
}
struct in6_addr *net_if_ipv6_get_ll(struct net_if *iface,
enum net_addr_state addr_state)
{
struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
int i;
if (!ipv6) {
return NULL;
}
for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
if (!ipv6->unicast[i].is_used ||
(addr_state != NET_ADDR_ANY_STATE &&
ipv6->unicast[i].addr_state != addr_state) ||
ipv6->unicast[i].address.family != AF_INET6) {
continue;
}
if (net_ipv6_is_ll_addr(&ipv6->unicast[i].address.in6_addr)) {
return &ipv6->unicast[i].address.in6_addr;
}
}
return NULL;
}
struct in6_addr *net_if_ipv6_get_ll_addr(enum net_addr_state state,
struct net_if **iface)
{
struct net_if *tmp;
for (tmp = __net_if_start; tmp != __net_if_end; tmp++) {
struct in6_addr *addr;
addr = net_if_ipv6_get_ll(tmp, state);
if (addr) {
if (iface) {
*iface = tmp;
}
return addr;
}
}
return NULL;
}
static inline struct in6_addr *check_global_addr(struct net_if *iface,
enum net_addr_state state)
{
struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
int i;
if (!ipv6) {
return NULL;
}
for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
if (!ipv6->unicast[i].is_used ||
(ipv6->unicast[i].addr_state != state) ||
ipv6->unicast[i].address.family != AF_INET6) {
continue;
}
if (!net_ipv6_is_ll_addr(&ipv6->unicast[i].address.in6_addr)) {
return &ipv6->unicast[i].address.in6_addr;
}
}
return NULL;
}
struct in6_addr *net_if_ipv6_get_global_addr(enum net_addr_state state,
struct net_if **iface)
{
struct net_if *tmp;
for (tmp = __net_if_start; tmp != __net_if_end; tmp++) {
struct in6_addr *addr;
if (iface && *iface && tmp != *iface) {
continue;
}
addr = check_global_addr(tmp, state);
if (addr) {
if (iface) {
*iface = tmp;
}
return addr;
}
}
return NULL;
}
static u8_t get_diff_ipv6(const struct in6_addr *src,
const struct in6_addr *dst)
{
return get_ipaddr_diff((const u8_t *)src, (const u8_t *)dst, 16);
}
static inline bool is_proper_ipv6_address(struct net_if_addr *addr)
{
if (addr->is_used && addr->addr_state == NET_ADDR_PREFERRED &&
addr->address.family == AF_INET6 &&
!net_ipv6_is_ll_addr(&addr->address.in6_addr)) {
return true;
}
return false;
}
static struct in6_addr *net_if_ipv6_get_best_match(struct net_if *iface,
const struct in6_addr *dst,
u8_t *best_so_far)
{
struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
struct in6_addr *src = NULL;
u8_t len;
int i;
if (!ipv6) {
return NULL;
}
for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
if (!is_proper_ipv6_address(&ipv6->unicast[i])) {
continue;
}
len = get_diff_ipv6(dst, &ipv6->unicast[i].address.in6_addr);
if (len >= *best_so_far) {
/* Mesh local address can only be selected for the same
* subnet.
*/
if (ipv6->unicast[i].is_mesh_local && len < 64) {
continue;
}
*best_so_far = len;
src = &ipv6->unicast[i].address.in6_addr;
}
}
return src;
}
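/* Source address selection: for global destinations the preferred,
 * non-link-local address sharing the longest prefix with the
 * destination wins; for link-local or multicast destinations a
 * preferred link-local address is used. If nothing matches, the
 * unspecified address is returned.
 */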
const struct in6_addr *net_if_ipv6_select_src_addr(struct net_if *dst_iface,
const struct in6_addr *dst)
{
struct in6_addr *src = NULL;
u8_t best_match = 0U;
struct net_if *iface;
if (!net_ipv6_is_ll_addr(dst) && !net_ipv6_is_addr_mcast(dst)) {
for (iface = __net_if_start;
!dst_iface && iface != __net_if_end;
iface++) {
struct in6_addr *addr;
addr = net_if_ipv6_get_best_match(iface, dst,
&best_match);
if (addr) {
src = addr;
}
}
/* If the caller has supplied an interface, then use that */
if (dst_iface) {
src = net_if_ipv6_get_best_match(dst_iface, dst,
&best_match);
}
} else {
for (iface = __net_if_start;
!dst_iface && iface != __net_if_end;
iface++) {
struct in6_addr *addr;
addr = net_if_ipv6_get_ll(iface, NET_ADDR_PREFERRED);
if (addr) {
src = addr;
break;
}
}
if (dst_iface) {
src = net_if_ipv6_get_ll(dst_iface, NET_ADDR_PREFERRED);
}
}
if (!src) {
return net_ipv6_unspecified_address();
}
return src;
}
struct net_if *net_if_ipv6_select_src_iface(const struct in6_addr *dst)
{
const struct in6_addr *src;
struct net_if *iface;
src = net_if_ipv6_select_src_addr(NULL, dst);
if (src == net_ipv6_unspecified_address()) {
return net_if_get_default();
}
if (!net_if_ipv6_addr_lookup(src, &iface)) {
return net_if_get_default();
}
return iface;
}
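/* Reachable time is randomized between MIN_RANDOM_NUMER/DENOM (1/2) and
 * MAX_RANDOM_NUMER/DENOM (3/2) of base_reachable_time, per RFC 4861.
 * With the default base of 30000 ms this yields a value in the range
 * [15000, 45000) ms.
 */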
u32_t net_if_ipv6_calc_reachable_time(struct net_if_ipv6 *ipv6)
{
u32_t min_reachable, max_reachable;
min_reachable = (MIN_RANDOM_NUMER * ipv6->base_reachable_time)
/ MIN_RANDOM_DENOM;
max_reachable = (MAX_RANDOM_NUMER * ipv6->base_reachable_time)
/ MAX_RANDOM_DENOM;
NET_DBG("min_reachable:%u max_reachable:%u", min_reachable,
max_reachable);
return min_reachable +
sys_rand32_get() % (max_reachable - min_reachable);
}
static void iface_ipv6_start(struct net_if *iface)
{
if (IS_ENABLED(CONFIG_NET_IPV6_DAD)) {
net_if_start_dad(iface);
} else {
struct net_if_ipv6 *ipv6 __unused = iface->config.ip.ipv6;
join_mcast_nodes(iface,
&ipv6->mcast[0].address.in6_addr);
}
net_if_start_rs(iface);
}
static void iface_ipv6_init(int if_count)
{
int i;
iface_ipv6_dad_init();
iface_ipv6_nd_init();
k_delayed_work_init(&address_lifetime_timer, address_lifetime_timeout);
k_delayed_work_init(&prefix_lifetime_timer, prefix_lifetime_timeout);
if (if_count > ARRAY_SIZE(ipv6_addresses)) {
NET_WARN("You have %lu IPv6 net_if addresses but %d "
"network interfaces", ARRAY_SIZE(ipv6_addresses),
if_count);
NET_WARN("Consider increasing CONFIG_NET_IF_MAX_IPV6_COUNT "
"value.");
}
for (i = 0; i < ARRAY_SIZE(ipv6_addresses); i++) {
ipv6_addresses[i].ipv6.hop_limit = CONFIG_NET_INITIAL_HOP_LIMIT;
ipv6_addresses[i].ipv6.base_reachable_time = REACHABLE_TIME;
net_if_ipv6_set_reachable_time(&ipv6_addresses[i].ipv6);
}
}
#else
#define join_mcast_allnodes(...)
#define join_mcast_solicit_node(...)
#define leave_mcast_all(...)
#define join_mcast_nodes(...)
#define iface_ipv6_start(...)
#define iface_ipv6_init(...)
#endif /* CONFIG_NET_IPV6 */
#if defined(CONFIG_NET_IPV4)
int net_if_config_ipv4_get(struct net_if *iface, struct net_if_ipv4 **ipv4)
{
int i;
if (iface->config.ip.ipv4) {
if (ipv4) {
*ipv4 = iface->config.ip.ipv4;
}
return 0;
}
for (i = 0; i < ARRAY_SIZE(ipv4_addresses); i++) {
if (ipv4_addresses[i].iface) {
continue;
}
iface->config.ip.ipv4 = &ipv4_addresses[i].ipv4;
ipv4_addresses[i].iface = iface;
if (ipv4) {
*ipv4 = &ipv4_addresses[i].ipv4;
}
return 0;
}
return -ESRCH;
}
int net_if_config_ipv4_put(struct net_if *iface)
{
int i;
if (!iface->config.ip.ipv4) {
return -EALREADY;
}
for (i = 0; i < ARRAY_SIZE(ipv4_addresses); i++) {
if (ipv4_addresses[i].iface != iface) {
continue;
}
iface->config.ip.ipv4 = NULL;
ipv4_addresses[i].iface = NULL;
return 0;
}
return -ESRCH;
}
struct net_if_router *net_if_ipv4_router_lookup(struct net_if *iface,
struct in_addr *addr)
{
return iface_router_lookup(iface, AF_INET, addr);
}
struct net_if_router *net_if_ipv4_router_find_default(struct net_if *iface,
struct in_addr *addr)
{
return iface_router_find_default(iface, AF_INET, addr);
}
struct net_if_router *net_if_ipv4_router_add(struct net_if *iface,
struct in_addr *addr,
bool is_default,
u16_t lifetime)
{
return iface_router_add(iface, AF_INET, addr, is_default, lifetime);
}
bool net_if_ipv4_router_rm(struct net_if_router *router)
{
return iface_router_rm(router);
}
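/* Return true if the given address is in the same subnet as one of the
 * interface's IPv4 unicast addresses. For example, with an interface
 * address of 192.0.2.1 and netmask 255.255.255.0, 192.0.2.42 matches
 * while 198.51.100.1 does not (addresses are documentation examples).
 */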
bool net_if_ipv4_addr_mask_cmp(struct net_if *iface,
const struct in_addr *addr)
{
struct net_if_ipv4 *ipv4 = iface->config.ip.ipv4;
u32_t subnet;
int i;
if (!ipv4) {
return false;
}
subnet = UNALIGNED_GET(&addr->s_addr) & ipv4->netmask.s_addr;
for (i = 0; i < NET_IF_MAX_IPV4_ADDR; i++) {
if (!ipv4->unicast[i].is_used ||
ipv4->unicast[i].address.family != AF_INET) {
continue;
}
if ((ipv4->unicast[i].address.in_addr.s_addr &
ipv4->netmask.s_addr) == subnet) {
return true;
}
}
return false;
}
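/* An address is treated as the subnet-directed broadcast address when
 * it is on-link for this interface and all of its host bits are set,
 * e.g. 192.0.2.255 for a 192.0.2.0/24 subnet (illustrative values).
 */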
static bool ipv4_is_broadcast_address(struct net_if *iface,
const struct in_addr *addr)
{
struct net_if_ipv4 *ipv4 = iface->config.ip.ipv4;
if (!ipv4) {
return false;
}
if (!net_if_ipv4_addr_mask_cmp(iface, addr)) {
return false;
}
if ((UNALIGNED_GET(&addr->s_addr) & ~ipv4->netmask.s_addr) ==
~ipv4->netmask.s_addr) {
return true;
}
return false;
}
bool net_if_ipv4_is_addr_bcast(struct net_if *iface,
const struct in_addr *addr)
{
if (iface) {
return ipv4_is_broadcast_address(iface, addr);
}
for (iface = __net_if_start; iface != __net_if_end; iface++) {
bool ret;
ret = ipv4_is_broadcast_address(iface, addr);
if (ret) {
return ret;
}
}
return false;
}
struct net_if *net_if_ipv4_select_src_iface(const struct in_addr *dst)
{
struct net_if *iface;
for (iface = __net_if_start; iface != __net_if_end; iface++) {
bool ret;
ret = net_if_ipv4_addr_mask_cmp(iface, dst);
if (ret) {
return iface;
}
}
return net_if_get_default();
}
static u8_t get_diff_ipv4(const struct in_addr *src,
const struct in_addr *dst)
{
return get_ipaddr_diff((const u8_t *)src, (const u8_t *)dst, 4);
}
static inline bool is_proper_ipv4_address(struct net_if_addr *addr)
{
if (addr->is_used && addr->addr_state == NET_ADDR_PREFERRED &&
addr->address.family == AF_INET &&
!net_ipv4_is_ll_addr(&addr->address.in_addr)) {
return true;
}
return false;
}
static struct in_addr *net_if_ipv4_get_best_match(struct net_if *iface,
const struct in_addr *dst,
u8_t *best_so_far)
{
struct net_if_ipv4 *ipv4 = iface->config.ip.ipv4;
struct in_addr *src = NULL;
u8_t len;
int i;
if (!ipv4) {
return NULL;
}
for (i = 0; i < NET_IF_MAX_IPV4_ADDR; i++) {
if (!is_proper_ipv4_address(&ipv4->unicast[i])) {
continue;
}
len = get_diff_ipv4(dst, &ipv4->unicast[i].address.in_addr);
if (len >= *best_so_far) {
*best_so_far = len;
src = &ipv4->unicast[i].address.in_addr;
}
}
return src;
}
static struct in_addr *if_ipv4_get_addr(struct net_if *iface,
enum net_addr_state addr_state, bool ll)
{
struct net_if_ipv4 *ipv4;
int i;
if (!iface) {
return NULL;
}
ipv4 = iface->config.ip.ipv4;
if (!ipv4) {
return NULL;
}
for (i = 0; i < NET_IF_MAX_IPV4_ADDR; i++) {
if (!ipv4->unicast[i].is_used ||
(addr_state != NET_ADDR_ANY_STATE &&
ipv4->unicast[i].addr_state != addr_state) ||
ipv4->unicast[i].address.family != AF_INET) {
continue;
}
if (net_ipv4_is_ll_addr(&ipv4->unicast[i].address.in_addr)) {
if (!ll) {
continue;
}
} else {
if (ll) {
continue;
}
}
return &ipv4->unicast[i].address.in_addr;
}
return NULL;
}
struct in_addr *net_if_ipv4_get_ll(struct net_if *iface,
enum net_addr_state addr_state)
{
return if_ipv4_get_addr(iface, addr_state, true);
}
struct in_addr *net_if_ipv4_get_global_addr(struct net_if *iface,
enum net_addr_state addr_state)
{
return if_ipv4_get_addr(iface, addr_state, false);
}
const struct in_addr *net_if_ipv4_select_src_addr(struct net_if *dst_iface,
const struct in_addr *dst)
{
struct in_addr *src = NULL;
u8_t best_match = 0U;
struct net_if *iface;
if (!net_ipv4_is_ll_addr(dst) && !net_ipv4_is_addr_mcast(dst)) {
for (iface = __net_if_start;
!dst_iface && iface != __net_if_end;
iface++) {
struct in_addr *addr;
addr = net_if_ipv4_get_best_match(iface, dst,
&best_match);
if (addr) {
src = addr;
}
}
/* If the caller has supplied an interface, then use that */
if (dst_iface) {
src = net_if_ipv4_get_best_match(dst_iface, dst,
&best_match);
}
} else {
for (iface = __net_if_start;
!dst_iface && iface != __net_if_end;
iface++) {
struct in_addr *addr;
addr = net_if_ipv4_get_ll(iface, NET_ADDR_PREFERRED);
if (addr) {
src = addr;
break;
}
}
if (dst_iface) {
src = net_if_ipv4_get_ll(dst_iface, NET_ADDR_PREFERRED);
}
}
if (!src) {
src = net_if_ipv4_get_global_addr(dst_iface,
NET_ADDR_PREFERRED);
if (src) {
return src;
}
return net_ipv4_unspecified_address();
}
return src;
}
struct net_if_addr *net_if_ipv4_addr_lookup(const struct in_addr *addr,
struct net_if **ret)
{
struct net_if *iface;
for (iface = __net_if_start; iface != __net_if_end; iface++) {
struct net_if_ipv4 *ipv4 = iface->config.ip.ipv4;
int i;
if (!ipv4) {
continue;
}
for (i = 0; i < NET_IF_MAX_IPV4_ADDR; i++) {
if (!ipv4->unicast[i].is_used ||
ipv4->unicast[i].address.family != AF_INET) {
continue;
}
if (UNALIGNED_GET(&addr->s4_addr32[0]) ==
ipv4->unicast[i].address.in_addr.s_addr) {
if (ret) {
*ret = iface;
}
return &ipv4->unicast[i];
}
}
}
return NULL;
}
int z_impl_net_if_ipv4_addr_lookup_by_index(const struct in_addr *addr)
{
struct net_if_addr *if_addr;
struct net_if *iface = NULL;
if_addr = net_if_ipv4_addr_lookup(addr, &iface);
if (!if_addr) {
return 0;
}
return net_if_get_by_iface(iface);
}
#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER(net_if_ipv4_addr_lookup_by_index, addr)
{
struct in_addr addr_v4;
Z_OOPS(z_user_from_copy(&