rel_1.6.0 init

guocheng.kgc 2020-06-18 20:06:52 +08:00 committed by shengdong.dsd
commit 27b3e2883d
19359 changed files with 8093121 additions and 0 deletions


@@ -0,0 +1,88 @@
NAME := bluetooth
$(NAME)_TYPE := kernel
$(NAME)_MBINS_TYPE := kernel
GLOBAL_INCLUDES += include \
include/drivers \
common/include \
port/include
$(NAME)_INCLUDES += common/tinycrypt/include \
../../rhino/core/include
$(NAME)_COMPONENTS += yloop
ifeq ($(bt_mesh), 1)
$(NAME)_COMPONENTS += protocols.bluetooth.host.bt_mesh
$(NAME)_INCLUDES += include/bluetooth/mesh
GLOBAL_DEFINES += CONFIG_BT_MESH
endif
$(NAME)_SOURCES := common/atomic_c.c \
common/buf.c \
common/log.c \
common/poll.c \
host/uuid.c \
host/hci_core.c \
host/conn.c \
host/l2cap.c \
host/att.c \
host/gatt.c \
host/crypto.c \
host/smp.c \
host/keys.c \
common/tinycrypt/source/cmac_mode.c \
common/tinycrypt/source/aes_encrypt.c \
common/rpa.c \
common/work.c \
port/aos_port.c
$(NAME)_SOURCES += host/hci_ecc.c \
common/tinycrypt/source/utils.c \
common/tinycrypt/source/sha256.c \
common/tinycrypt/source/hmac.c \
common/tinycrypt/source/hmac_prng.c \
common/tinycrypt/source/cmac_mode.c \
common/tinycrypt/source/aes_encrypt.c \
common/tinycrypt/source/ecc.c \
common/tinycrypt/source/ecc_dh.c
bt_controller?=0
ifeq ($(bt_controller), 1)
$(NAME)_COMPONENTS += protocols.bluetooth.controller
GLOBAL_DEFINES += CONFIG_BT_CTLR
endif
ifeq ($(hci_h4),1)
$(NAME)_SOURCES += hci_drivers/h4.c
endif
ifeq ($(COMPILER),)
$(NAME)_CFLAGS += -Wall -Werror
else ifeq ($(COMPILER),gcc)
$(NAME)_CFLAGS += -Wall -Werror
endif
GLOBAL_DEFINES += CONFIG_AOS_BLUETOOTH
GLOBAL_DEFINES += CONFIG_BLUETOOTH
GLOBAL_DEFINES += CONFIG_BT_PERIPHERAL
GLOBAL_DEFINES += CONFIG_BT_SMP
GLOBAL_DEFINES += CONFIG_BT_CONN
GLOBAL_DEFINES += CONFIG_BLE_50
## BLE debug log general control macros (Note: still subject to the global DEBUG setting)
## Enable the macros below if BLE stack debugging is needed
GLOBAL_DEFINES += CONFIG_BT_DEBUG_LOG
GLOBAL_DEFINES += CONFIG_BT_DEBUG
## BLE subsystem debug log control macros
## Enable the macros below if component-specific debugging is needed
#GLOBAL_DEFINES += CONFIG_BT_DEBUG_L2CAP
#GLOBAL_DEFINES += CONFIG_BT_DEBUG_CONN
#GLOBAL_DEFINES += CONFIG_BT_DEBUG_ATT
#GLOBAL_DEFINES += CONFIG_BT_DEBUG_GATT
#GLOBAL_DEFINES += CONFIG_BT_DEBUG_HCI_DRIVER
#GLOBAL_DEFINES += CONFIG_BT_DEBUG_HCI_CORE
#GLOBAL_DEFINES += CONFIG_BT_DEBUG_CORE


@@ -0,0 +1,8 @@
zephyr_library()
zephyr_library_sources(dummy.c)
zephyr_library_sources_ifdef(CONFIG_BT_DEBUG log.c)
zephyr_library_sources_ifdef(CONFIG_BT_RPA rpa.c)
zephyr_library_link_libraries(subsys__bluetooth)


@@ -0,0 +1,112 @@
# Kconfig - Bluetooth common configuration options
#
# Copyright (c) 2017 Nordic Semiconductor ASA
# Copyright (c) 2016 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
if BT_HCI
config BT_HCI_VS_EXT
bool "Zephyr HCI Vendor-Specific Extensions"
default y
help
Enable support for the Zephyr HCI Vendor-Specific Extensions in the
Host and/or Controller.
config BT_HCI_VS_EXT_DETECT
bool "Use heuristics to guess HCI vendor extensions support in advance"
depends on BT_HCI_VS_EXT && !BT_CTLR
default y if BOARD_QEMU_X86 || BOARD_QEMU_CORTEX_M3
help
Use some heuristics to guess in advance whether the controller
supports the HCI vendor extensions, in order to avoid sending
vendor commands to a controller that may interpret them in
completely different ways.
config BT_RPA
# Virtual/hidden option
bool
select TINYCRYPT
select TINYCRYPT_AES
default n
config BT_DEBUG
# Virtual/hidden option to make the conditions more intuitive
bool
choice
prompt "Bluetooth debug type"
depends on BT
default BT_DEBUG_NONE
config BT_DEBUG_NONE
bool "No debug log"
help
Select this to disable all Bluetooth debug logs.
config BT_DEBUG_LOG
bool "Normal printf-style to console"
select BT_DEBUG
select PRINTK
select SYS_LOG
help
This option enables Bluetooth debug going to standard
serial console.
config BT_DEBUG_MONITOR
bool "Monitor protocol over UART"
select BT_DEBUG
select PRINTK
select CONSOLE_HAS_DRIVER
help
Use a custom logging protocol over the console UART
instead of plain-text output. Requires a special application
on the host side that can decode this protocol. Currently
the 'btmon' tool from BlueZ is capable of doing this.
If the target board has two or more external UARTs it is
possible to keep using UART_CONSOLE together with this option;
however, if there is only a single external UART then
UART_CONSOLE needs to be disabled (in which case printk/printf
will get encoded into the monitor protocol).
endchoice
if BT_DEBUG
config BT_DEBUG_COLOR
bool "Use colored logs"
depends on BT_DEBUG_LOG
select SYS_LOG_SHOW_COLOR
default y
help
Use color in the logs. This requires an ANSI capable terminal.
if !HAS_DTS
config BT_MONITOR_ON_DEV_NAME
string "Device Name of Bluetooth monitor logging UART"
depends on BT_DEBUG_MONITOR
default "UART_0"
help
This option specifies the name of UART device to be used
for the Bluetooth monitor logging.
endif
config BT_DEBUG_HCI_DRIVER
bool "Bluetooth HCI driver debug"
depends on BT_DEBUG
help
This option enables debug support for the active
Bluetooth HCI driver, including the Controller-side HCI layer
when included in the build.
config BT_DEBUG_RPA
bool "Bluetooth Resolvable Private Address (RPA) debug"
depends on BT_RPA
help
This option enables debug support for the Bluetooth
Resolvable Private Address (RPA) generation and resolution.
endif # BT_DEBUG
endif # BT_HCI


@@ -0,0 +1,352 @@
/*
* Copyright (c) 2016 Intel Corporation
* Copyright (c) 2011-2014 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file Atomic ops in pure C
*
* This module provides the atomic operators for processors
* which do not support native atomic operations.
*
* The atomic operations are guaranteed to be atomic with respect
* to interrupt service routines, and to operations performed by peer
* processors.
*
* (originally from x86's atomic.c)
*/
#include <atomic.h>
#include <toolchain.h>
#include <arch/cpu.h>
/**
*
* @brief Atomic compare-and-set primitive
*
* This routine provides the compare-and-set operator. If the original value at
* <target> equals <oldValue>, then <newValue> is stored at <target> and the
* function returns 1.
*
* If the original value at <target> does not equal <oldValue>, then the store
* is not done and the function returns 0.
*
* The reading of the original value at <target>, the comparison,
* and the write of the new value (if it occurs) all happen atomically with
* respect to both interrupts and accesses of other processors to <target>.
*
* @param target address to be tested
* @param old_value value to compare against
* @param new_value value to store if the comparison succeeds
* @return Returns 1 if <new_value> is written, 0 otherwise.
*/
int atomic_cas(atomic_t *target, atomic_val_t old_value,
atomic_val_t new_value)
{
unsigned int key;
int ret = 0;
key = irq_lock();
if (*target == old_value) {
*target = new_value;
ret = 1;
}
irq_unlock(key);
return ret;
}
/**
*
* @brief Atomic addition primitive
*
* This routine provides the atomic addition operator. The <value> is
* atomically added to the value at <target>, placing the result at <target>,
* and the old value from <target> is returned.
*
* @param target memory location to add to
* @param value the value to add
*
* @return The previous value from <target>
*/
atomic_val_t atomic_add(atomic_t *target, atomic_val_t value)
{
unsigned int key;
atomic_val_t ret;
key = irq_lock();
ret = *target;
*target += value;
irq_unlock(key);
return ret;
}
/**
*
* @brief Atomic subtraction primitive
*
* This routine provides the atomic subtraction operator. The <value> is
* atomically subtracted from the value at <target>, placing the result at
* <target>, and the old value from <target> is returned.
*
* @param target the memory location to subtract from
* @param value the value to subtract
*
* @return The previous value from <target>
*/
atomic_val_t atomic_sub(atomic_t *target, atomic_val_t value)
{
unsigned int key;
atomic_val_t ret;
key = irq_lock();
ret = *target;
*target -= value;
irq_unlock(key);
return ret;
}
/**
*
* @brief Atomic increment primitive
*
* @param target memory location to increment
*
* This routine provides the atomic increment operator. The value at <target>
* is atomically incremented by 1, and the old value from <target> is returned.
*
* @return The value from <target> before the increment
*/
atomic_val_t atomic_inc(atomic_t *target)
{
unsigned int key;
atomic_val_t ret;
key = irq_lock();
ret = *target;
(*target)++;
irq_unlock(key);
return ret;
}
/**
*
* @brief Atomic decrement primitive
*
* @param target memory location to decrement
*
* This routine provides the atomic decrement operator. The value at <target>
* is atomically decremented by 1, and the old value from <target> is returned.
*
* @return The value from <target> prior to the decrement
*/
atomic_val_t atomic_dec(atomic_t *target)
{
unsigned int key;
atomic_val_t ret;
key = irq_lock();
ret = *target;
(*target)--;
irq_unlock(key);
return ret;
}
/**
*
* @brief Atomic get primitive
*
* @param target memory location to read from
*
* This routine provides the atomic get primitive to atomically read
* a value from <target>. It simply does an ordinary load. Note that <target>
* is expected to be aligned to a 4-byte boundary.
*
* @return The value read from <target>
*/
atomic_val_t atomic_get(const atomic_t *target)
{
return *target;
}
/**
*
* @brief Atomic get-and-set primitive
*
* This routine provides the atomic set operator. The <value> is atomically
* written at <target> and the previous value at <target> is returned.
*
* @param target the memory location to write to
* @param value the value to write
*
* @return The previous value from <target>
*/
atomic_val_t atomic_set(atomic_t *target, atomic_val_t value)
{
unsigned int key;
atomic_val_t ret;
key = irq_lock();
ret = *target;
*target = value;
irq_unlock(key);
return ret;
}
/**
*
* @brief Atomic clear primitive
*
* This routine provides the atomic clear operator. The value of 0 is atomically
* written at <target> and the previous value at <target> is returned. (Hence,
* atomic_clear(pAtomicVar) is equivalent to atomic_set(pAtomicVar, 0).)
*
* @param target the memory location to write
*
* @return The previous value from <target>
*/
atomic_val_t atomic_clear(atomic_t *target)
{
unsigned int key;
atomic_val_t ret;
key = irq_lock();
ret = *target;
*target = 0;
irq_unlock(key);
return ret;
}
/**
*
* @brief Atomic bitwise inclusive OR primitive
*
* This routine provides the atomic bitwise inclusive OR operator. The <value>
* is atomically bitwise OR'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned.
*
* @param target the memory location to be modified
* @param value the value to OR
*
* @return The previous value from <target>
*/
atomic_val_t atomic_or(atomic_t *target, atomic_val_t value)
{
unsigned int key;
atomic_val_t ret;
key = irq_lock();
ret = *target;
*target |= value;
irq_unlock(key);
return ret;
}
/**
*
* @brief Atomic bitwise exclusive OR (XOR) primitive
*
* This routine provides the atomic bitwise exclusive OR operator. The <value>
* is atomically bitwise XOR'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned.
*
* @param target the memory location to be modified
* @param value the value to XOR
*
* @return The previous value from <target>
*/
atomic_val_t atomic_xor(atomic_t *target, atomic_val_t value)
{
unsigned int key;
atomic_val_t ret;
key = irq_lock();
ret = *target;
*target ^= value;
irq_unlock(key);
return ret;
}
/**
*
* @brief Atomic bitwise AND primitive
*
* This routine provides the atomic bitwise AND operator. The <value> is
* atomically bitwise AND'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned.
*
* @param target the memory location to be modified
* @param value the value to AND
*
* @return The previous value from <target>
*/
atomic_val_t atomic_and(atomic_t *target, atomic_val_t value)
{
unsigned int key;
atomic_val_t ret;
key = irq_lock();
ret = *target;
*target &= value;
irq_unlock(key);
return ret;
}
/**
*
* @brief Atomic bitwise NAND primitive
*
* This routine provides the atomic bitwise NAND operator. The <value> is
* atomically bitwise NAND'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned.
*
* @param target the memory location to be modified
* @param value the value to NAND
*
* @return The previous value from <target>
*/
atomic_val_t atomic_nand(atomic_t *target, atomic_val_t value)
{
unsigned int key;
atomic_val_t ret;
key = irq_lock();
ret = *target;
*target = ~(*target & value);
irq_unlock(key);
return ret;
}


@@ -0,0 +1,687 @@
/* buf.c - Buffer management */
/*
* Copyright (c) 2015 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdio.h>
#include <errno.h>
#include <stddef.h>
#include <string.h>
#include <misc/byteorder.h>
#include <net/buf.h>
#include <bluetooth/l2cap.h>
#if defined(CONFIG_NET_BUF_LOG)
#define SYS_LOG_DOMAIN "net/buf"
#define SYS_LOG_LEVEL CONFIG_SYS_LOG_NET_BUF_LEVEL
#include <logging/sys_log.h>
#define NET_BUF_DBG(fmt, ...) SYS_LOG_DBG("(%p) " fmt, k_current_get(), \
##__VA_ARGS__)
#define NET_BUF_ERR(fmt, ...) SYS_LOG_ERR(fmt, ##__VA_ARGS__)
#define NET_BUF_WARN(fmt, ...) SYS_LOG_WRN(fmt, ##__VA_ARGS__)
#define NET_BUF_INFO(fmt, ...) SYS_LOG_INF(fmt, ##__VA_ARGS__)
#define NET_BUF_ASSERT(cond) do { if (!(cond)) { \
NET_BUF_ERR("assert: '" #cond "' failed"); \
} } while (0)
#else
#define NET_BUF_DBG(fmt, ...)
#define NET_BUF_ERR(fmt, ...)
#define NET_BUF_WARN(fmt, ...)
#define NET_BUF_INFO(fmt, ...)
#define NET_BUF_ASSERT(cond)
#endif /* CONFIG_NET_BUF_LOG */
#if CONFIG_NET_BUF_WARN_ALLOC_INTERVAL > 0
#define WARN_ALLOC_INTERVAL K_SECONDS(CONFIG_NET_BUF_WARN_ALLOC_INTERVAL)
#else
#define WARN_ALLOC_INTERVAL K_FOREVER
#endif
#define CMD_BUF_SIZE BT_BUF_RX_SIZE
struct cmd_data {
u8_t type;
u8_t status;
u16_t opcode;
struct k_sem *sync;
};
static struct {
struct net_buf buf;
u8_t data[CMD_BUF_SIZE] __net_buf_align;
u8_t ud[ROUND_UP(sizeof(struct cmd_data), 4)] __net_buf_align;
} _net_buf_hci_cmd_pool_name[CONFIG_BT_HCI_CMD_COUNT];
static struct {
struct net_buf buf;
u8_t data[BT_BUF_RX_SIZE] __net_buf_align;
u8_t ud[ROUND_UP(BT_BUF_USER_DATA_MIN, 4)] __net_buf_align;
} _net_buf_hci_rx_pool_name[CONFIG_BT_RX_BUF_COUNT];
static struct {
struct net_buf buf;
u8_t data[BT_L2CAP_BUF_SIZE(CONFIG_BT_L2CAP_TX_MTU)] __net_buf_align;
u8_t ud[ROUND_UP(CONFIG_BT_L2CAP_TX_USER_DATA_SIZE, 4)] __net_buf_align;
} _net_buf_acl_tx_pool_name[CONFIG_BT_L2CAP_TX_BUF_COUNT];
#ifdef CONFIG_BT_MESH
#define BT_MESH_ADV_DATA_SIZE 29
#define BT_MESH_ADV_USER_DATA_SIZE 4
static struct {
struct net_buf buf;
u8_t data[BT_MESH_ADV_DATA_SIZE] __net_buf_align;
u8_t ud[ROUND_UP(BT_MESH_ADV_USER_DATA_SIZE, 4)] __net_buf_align;
} _net_buf_adv_buf_pool_name[CONFIG_BT_MESH_ADV_BUF_COUNT];
#endif
struct net_buf_pool hci_cmd_pool __net_buf_align = NET_BUF_POOL_INITIALIZER(hci_cmd_pool, \
_net_buf_hci_cmd_pool_name, CONFIG_BT_HCI_CMD_COUNT, \
CMD_BUF_SIZE, sizeof(struct cmd_data), NULL);
struct net_buf_pool hci_rx_pool __net_buf_align = NET_BUF_POOL_INITIALIZER(hci_rx_pool, \
_net_buf_hci_rx_pool_name, CONFIG_BT_RX_BUF_COUNT, \
BT_BUF_RX_SIZE, BT_BUF_USER_DATA_MIN, NULL);
struct net_buf_pool acl_tx_pool __net_buf_align = NET_BUF_POOL_INITIALIZER(acl_tx_pool, \
_net_buf_acl_tx_pool_name, CONFIG_BT_L2CAP_TX_BUF_COUNT, \
BT_L2CAP_BUF_SIZE(CONFIG_BT_L2CAP_TX_MTU), \
CONFIG_BT_L2CAP_TX_USER_DATA_SIZE, NULL);
#ifdef CONFIG_BT_MESH
struct net_buf_pool adv_buf_pool __net_buf_align = NET_BUF_POOL_INITIALIZER(adv_buf_pool, \
_net_buf_adv_buf_pool_name, CONFIG_BT_MESH_ADV_BUF_COUNT, \
BT_MESH_ADV_DATA_SIZE, BT_MESH_ADV_USER_DATA_SIZE, NULL);
struct net_buf_pool *net_buf_pool_list[] = { &hci_cmd_pool, &hci_rx_pool, &acl_tx_pool, &adv_buf_pool };
#else
struct net_buf_pool *net_buf_pool_list[] = { &hci_cmd_pool, &hci_rx_pool, &acl_tx_pool };
#endif
struct net_buf_pool *net_buf_pool_get(int id)
{
return net_buf_pool_list[id];
}
static int pool_id(struct net_buf_pool *pool)
{
int index;
for (index = 0; index < (sizeof(net_buf_pool_list) / sizeof(net_buf_pool_list[0])); index++) {
if (net_buf_pool_list[index] == pool) {
break;
}
}
assert(index < (sizeof(net_buf_pool_list) / sizeof(net_buf_pool_list[0])));
return index;
}
/* Helpers to access the storage array, since we don't have access to its
* type at this point anymore.
*/
#define BUF_SIZE(pool) (sizeof(struct net_buf) + \
ROUND_UP(pool->buf_size, 4) + \
ROUND_UP(pool->user_data_size, 4))
#define UNINIT_BUF(pool, n) (struct net_buf *)(((u8_t *)(pool->__bufs)) + \
((n) * BUF_SIZE(pool)))
int net_buf_id(struct net_buf *buf)
{
struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);
u8_t *pool_start = (u8_t *)pool->__bufs;
u8_t *buf_ptr = (u8_t *)buf;
return (buf_ptr - pool_start) / BUF_SIZE(pool);
}
static inline struct net_buf *pool_get_uninit(struct net_buf_pool *pool,
u16_t uninit_count)
{
struct net_buf *buf;
buf = UNINIT_BUF(pool, pool->buf_count - uninit_count);
buf->pool_id = pool_id(pool);
buf->size = pool->buf_size;
return buf;
}
void net_buf_reset(struct net_buf *buf)
{
NET_BUF_ASSERT(buf->flags == 0);
NET_BUF_ASSERT(buf->frags == NULL);
buf->len = 0;
buf->data = buf->__buf;
}
#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *net_buf_alloc_debug(struct net_buf_pool *pool, s32_t timeout,
const char *func, int line)
#else
struct net_buf *net_buf_alloc(struct net_buf_pool *pool, s32_t timeout)
#endif
{
struct net_buf *buf;
unsigned int key;
NET_BUF_ASSERT(pool);
NET_BUF_DBG("%s():%d: pool %p timeout %d", func, line, pool, timeout);
/* We need to lock interrupts temporarily to prevent race conditions
* when accessing pool->uninit_count.
*/
key = irq_lock();
/* If there are uninitialized buffers we're guaranteed to succeed
* with the allocation one way or another.
*/
if (pool->uninit_count) {
u16_t uninit_count;
/* If this is not the first access to the pool, we can
* be opportunistic and try to fetch a previously used
* buffer from the LIFO with K_NO_WAIT.
*/
if (pool->uninit_count < pool->buf_count) {
buf = k_lifo_get(&pool->free, K_NO_WAIT);
if (buf) {
irq_unlock(key);
goto success;
}
}
uninit_count = pool->uninit_count--;
irq_unlock(key);
buf = pool_get_uninit(pool, uninit_count);
goto success;
}
irq_unlock(key);
#if defined(CONFIG_NET_BUF_LOG) && SYS_LOG_LEVEL >= SYS_LOG_LEVEL_WARNING
if (timeout == K_FOREVER) {
u32_t ref = k_uptime_get_32();
buf = k_lifo_get(&pool->free, K_NO_WAIT);
while (!buf) {
#if defined(CONFIG_NET_BUF_POOL_USAGE)
NET_BUF_WARN("%s():%d: Pool %s low on buffers.",
func, line, pool->name);
#else
NET_BUF_WARN("%s():%d: Pool %p low on buffers.",
func, line, pool);
#endif
buf = k_lifo_get(&pool->free, WARN_ALLOC_INTERVAL);
#if defined(CONFIG_NET_BUF_POOL_USAGE)
NET_BUF_WARN("%s():%d: Pool %s blocked for %u secs",
func, line, pool->name,
(k_uptime_get_32() - ref) / MSEC_PER_SEC);
#else
NET_BUF_WARN("%s():%d: Pool %p blocked for %u secs",
func, line, pool,
(k_uptime_get_32() - ref) / MSEC_PER_SEC);
#endif
}
} else {
buf = k_lifo_get(&pool->free, timeout);
}
#else
buf = k_lifo_get(&pool->free, timeout);
#endif
if (!buf) {
NET_BUF_ERR("%s():%d: Failed to get free buffer", func, line);
return NULL;
}
success:
NET_BUF_DBG("allocated buf %p", buf);
buf->ref = 1;
buf->flags = 0;
buf->frags = NULL;
net_buf_reset(buf);
#if defined(CONFIG_NET_BUF_POOL_USAGE)
pool->avail_count--;
NET_BUF_ASSERT(pool->avail_count >= 0);
#endif
return buf;
}
#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *net_buf_get_debug(struct k_fifo *fifo, s32_t timeout,
const char *func, int line)
#else
struct net_buf *net_buf_get(struct k_fifo *fifo, s32_t timeout)
#endif
{
struct net_buf *buf, *frag;
NET_BUF_DBG("%s():%d: fifo %p timeout %d", func, line, fifo, timeout);
buf = k_fifo_get(fifo, timeout);
if (!buf) {
return NULL;
}
NET_BUF_DBG("%s():%d: buf %p fifo %p", func, line, buf, fifo);
/* Get any fragments belonging to this buffer */
for (frag = buf; (frag->flags & NET_BUF_FRAGS); frag = frag->frags) {
frag->frags = k_fifo_get(fifo, K_NO_WAIT);
NET_BUF_ASSERT(frag->frags);
/* The fragments flag is only for FIFO-internal usage */
frag->flags &= ~NET_BUF_FRAGS;
}
/* Mark the end of the fragment list */
frag->frags = NULL;
return buf;
}
void net_buf_reserve(struct net_buf *buf, size_t reserve)
{
NET_BUF_ASSERT(buf);
NET_BUF_ASSERT(buf->len == 0);
NET_BUF_DBG("buf %p reserve %zu", buf, reserve);
buf->data = buf->__buf + reserve;
}
void net_buf_slist_put(sys_slist_t *list, struct net_buf *buf)
{
struct net_buf *tail;
unsigned int key;
NET_BUF_ASSERT(list);
NET_BUF_ASSERT(buf);
for (tail = buf; tail->frags; tail = tail->frags) {
tail->flags |= NET_BUF_FRAGS;
}
key = irq_lock();
sys_slist_append_list(list, &buf->node, &tail->node);
irq_unlock(key);
}
struct net_buf *net_buf_slist_get(sys_slist_t *list)
{
struct net_buf *buf, *frag;
unsigned int key;
NET_BUF_ASSERT(list);
key = irq_lock();
buf = (void *)sys_slist_get(list);
irq_unlock(key);
if (!buf) {
return NULL;
}
/* Get any fragments belonging to this buffer */
for (frag = buf; (frag->flags & NET_BUF_FRAGS); frag = frag->frags) {
key = irq_lock();
frag->frags = (void *)sys_slist_get(list);
irq_unlock(key);
NET_BUF_ASSERT(frag->frags);
/* The fragments flag is only for list-internal usage */
frag->flags &= ~NET_BUF_FRAGS;
}
/* Mark the end of the fragment list */
frag->frags = NULL;
return buf;
}
void net_buf_put(struct k_fifo *fifo, struct net_buf *buf)
{
struct net_buf *tail;
NET_BUF_ASSERT(fifo);
NET_BUF_ASSERT(buf);
for (tail = buf; tail->frags; tail = tail->frags) {
tail->flags |= NET_BUF_FRAGS;
}
k_fifo_put_list(fifo, buf, tail);
}
#if defined(CONFIG_NET_BUF_LOG)
void net_buf_unref_debug(struct net_buf *buf, const char *func, int line)
#else
void net_buf_unref(struct net_buf *buf)
#endif
{
NET_BUF_ASSERT(buf);
while (buf) {
struct net_buf *frags = buf->frags;
struct net_buf_pool *pool;
#if defined(CONFIG_NET_BUF_LOG)
if (!buf->ref) {
NET_BUF_ERR("%s():%d: buf %p double free", func, line,
buf);
return;
}
#endif
NET_BUF_DBG("buf %p ref %u pool_id %u frags %p", buf, buf->ref,
buf->pool_id, buf->frags);
if (--buf->ref > 0) {
return;
}
buf->frags = NULL;
pool = net_buf_pool_get(buf->pool_id);
#if defined(CONFIG_NET_BUF_POOL_USAGE)
pool->avail_count++;
NET_BUF_ASSERT(pool->avail_count <= pool->buf_count);
#endif
if (pool->destroy) {
pool->destroy(buf);
} else {
net_buf_destroy(buf);
}
buf = frags;
}
}
struct net_buf *net_buf_ref(struct net_buf *buf)
{
NET_BUF_ASSERT(buf);
NET_BUF_DBG("buf %p (old) ref %u pool_id %u",
buf, buf->ref, buf->pool_id);
buf->ref++;
return buf;
}
struct net_buf *net_buf_clone(struct net_buf *buf, s32_t timeout)
{
struct net_buf_pool *pool;
struct net_buf *clone;
NET_BUF_ASSERT(buf);
pool = net_buf_pool_get(buf->pool_id);
clone = net_buf_alloc(pool, timeout);
if (!clone) {
return NULL;
}
net_buf_reserve(clone, net_buf_headroom(buf));
/* TODO: Add reference to the original buffer instead of copying it. */
memcpy(net_buf_add(clone, buf->len), buf->data, buf->len);
return clone;
}
struct net_buf *net_buf_frag_last(struct net_buf *buf)
{
NET_BUF_ASSERT(buf);
while (buf->frags) {
buf = buf->frags;
}
return buf;
}
void net_buf_frag_insert(struct net_buf *parent, struct net_buf *frag)
{
NET_BUF_ASSERT(parent);
NET_BUF_ASSERT(frag);
if (parent->frags) {
net_buf_frag_last(frag)->frags = parent->frags;
}
/* Take ownership of the fragment reference */
parent->frags = frag;
}
struct net_buf *net_buf_frag_add(struct net_buf *head, struct net_buf *frag)
{
NET_BUF_ASSERT(frag);
if (!head) {
return net_buf_ref(frag);
}
net_buf_frag_insert(net_buf_frag_last(head), frag);
return head;
}
#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *net_buf_frag_del_debug(struct net_buf *parent,
struct net_buf *frag,
const char *func, int line)
#else
struct net_buf *net_buf_frag_del(struct net_buf *parent, struct net_buf *frag)
#endif
{
struct net_buf *next_frag;
NET_BUF_ASSERT(frag);
if (parent) {
NET_BUF_ASSERT(parent->frags);
NET_BUF_ASSERT(parent->frags == frag);
parent->frags = frag->frags;
}
next_frag = frag->frags;
frag->frags = NULL;
#if defined(CONFIG_NET_BUF_LOG)
net_buf_unref_debug(frag, func, line);
#else
net_buf_unref(frag);
#endif
return next_frag;
}
#if defined(CONFIG_NET_BUF_SIMPLE_LOG)
#define NET_BUF_SIMPLE_DBG(fmt, ...) NET_BUF_DBG(fmt, ##__VA_ARGS__)
#define NET_BUF_SIMPLE_ERR(fmt, ...) NET_BUF_ERR(fmt, ##__VA_ARGS__)
#define NET_BUF_SIMPLE_WARN(fmt, ...) NET_BUF_WARN(fmt, ##__VA_ARGS__)
#define NET_BUF_SIMPLE_INFO(fmt, ...) NET_BUF_INFO(fmt, ##__VA_ARGS__)
#define NET_BUF_SIMPLE_ASSERT(cond) NET_BUF_ASSERT(cond)
#else
#define NET_BUF_SIMPLE_DBG(fmt, ...)
#define NET_BUF_SIMPLE_ERR(fmt, ...)
#define NET_BUF_SIMPLE_WARN(fmt, ...)
#define NET_BUF_SIMPLE_INFO(fmt, ...)
#define NET_BUF_SIMPLE_ASSERT(cond)
#endif /* CONFIG_NET_BUF_SIMPLE_LOG */
void *net_buf_simple_add(struct net_buf_simple *buf, size_t len)
{
u8_t *tail = net_buf_simple_tail(buf);
NET_BUF_SIMPLE_DBG("buf %p len %zu", buf, len);
NET_BUF_SIMPLE_ASSERT(net_buf_simple_tailroom(buf) >= len);
buf->len += len;
return tail;
}
void *net_buf_simple_add_mem(struct net_buf_simple *buf, const void *mem,
size_t len)
{
NET_BUF_SIMPLE_DBG("buf %p len %zu", buf, len);
return memcpy(net_buf_simple_add(buf, len), mem, len);
}
u8_t *net_buf_simple_add_u8(struct net_buf_simple *buf, u8_t val)
{
u8_t *u8;
NET_BUF_SIMPLE_DBG("buf %p val 0x%02x", buf, val);
u8 = net_buf_simple_add(buf, 1);
*u8 = val;
return u8;
}
void net_buf_simple_add_le16(struct net_buf_simple *buf, u16_t val)
{
NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);
val = sys_cpu_to_le16(val);
memcpy(net_buf_simple_add(buf, sizeof(val)), &val, sizeof(val));
}
void net_buf_simple_add_be16(struct net_buf_simple *buf, u16_t val)
{
NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);
val = sys_cpu_to_be16(val);
memcpy(net_buf_simple_add(buf, sizeof(val)), &val, sizeof(val));
}
void net_buf_simple_add_le32(struct net_buf_simple *buf, u32_t val)
{
NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);
val = sys_cpu_to_le32(val);
memcpy(net_buf_simple_add(buf, sizeof(val)), &val, sizeof(val));
}
void net_buf_simple_add_be32(struct net_buf_simple *buf, u32_t val)
{
NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);
val = sys_cpu_to_be32(val);
memcpy(net_buf_simple_add(buf, sizeof(val)), &val, sizeof(val));
}
void *net_buf_simple_push(struct net_buf_simple *buf, size_t len)
{
NET_BUF_SIMPLE_DBG("buf %p len %zu", buf, len);
NET_BUF_SIMPLE_ASSERT(net_buf_simple_headroom(buf) >= len);
buf->data -= len;
buf->len += len;
return buf->data;
}
void net_buf_simple_push_le16(struct net_buf_simple *buf, u16_t val)
{
NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);
val = sys_cpu_to_le16(val);
memcpy(net_buf_simple_push(buf, sizeof(val)), &val, sizeof(val));
}
void net_buf_simple_push_be16(struct net_buf_simple *buf, u16_t val)
{
NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);
val = sys_cpu_to_be16(val);
memcpy(net_buf_simple_push(buf, sizeof(val)), &val, sizeof(val));
}
void net_buf_simple_push_u8(struct net_buf_simple *buf, u8_t val)
{
u8_t *data = net_buf_simple_push(buf, 1);
*data = val;
}
void *net_buf_simple_pull(struct net_buf_simple *buf, size_t len)
{
NET_BUF_SIMPLE_DBG("buf %p len %zu", buf, len);
NET_BUF_SIMPLE_ASSERT(buf->len >= len);
buf->len -= len;
return buf->data += len;
}
u8_t net_buf_simple_pull_u8(struct net_buf_simple *buf)
{
u8_t val;
val = buf->data[0];
net_buf_simple_pull(buf, 1);
return val;
}
u16_t net_buf_simple_pull_le16(struct net_buf_simple *buf)
{
u16_t val;
val = UNALIGNED_GET((u16_t *)buf->data);
net_buf_simple_pull(buf, sizeof(val));
return sys_le16_to_cpu(val);
}
u16_t net_buf_simple_pull_be16(struct net_buf_simple *buf)
{
u16_t val;
val = UNALIGNED_GET((u16_t *)buf->data);
net_buf_simple_pull(buf, sizeof(val));
return sys_be16_to_cpu(val);
}
u32_t net_buf_simple_pull_le32(struct net_buf_simple *buf)
{
u32_t val;
val = UNALIGNED_GET((u32_t *)buf->data);
net_buf_simple_pull(buf, sizeof(val));
return sys_le32_to_cpu(val);
}
u32_t net_buf_simple_pull_be32(struct net_buf_simple *buf)
{
u32_t val;
val = UNALIGNED_GET((u32_t *)buf->data);
net_buf_simple_pull(buf, sizeof(val));
return sys_be32_to_cpu(val);
}
size_t net_buf_simple_headroom(struct net_buf_simple *buf)
{
return buf->data - buf->__buf;
}
size_t net_buf_simple_tailroom(struct net_buf_simple *buf)
{
return buf->size - net_buf_simple_headroom(buf) - buf->len;
}
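A small encode/decode sketch for the net_buf_simple helpers above; the buffer is assumed to be already initialized (normally via the declaration helpers in net/buf.h, not shown here), and the opcode and value are hypothetical:

/* Hypothetical round trip: append an opcode and a little-endian
 * 16-bit value, then parse them back out of the same buffer.
 */
static u16_t build_and_parse(struct net_buf_simple *msg)
{
	net_buf_simple_add_u8(msg, 0x01);       /* opcode */
	net_buf_simple_add_le16(msg, 0x1234);   /* payload, stored LE */

	(void)net_buf_simple_pull_u8(msg);      /* consume the opcode */
	return net_buf_simple_pull_le16(msg);   /* returns 0x1234 */
}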


@@ -0,0 +1,35 @@
/**
* @file dummy.c
* Static compilation checks.
*/
/*
* Copyright (c) 2017 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <config.h>
/* The Bluetooth subsystem requires the system workqueue to execute at a
* cooperative priority to function correctly. If this build assert triggers,
* verify your configuration to ensure that cooperative threads are enabled
* and that the system workqueue priority is negative (cooperative).
*/
//BUILD_ASSERT(CONFIG_SYSTEM_WORKQUEUE_PRIORITY < 0);
/* The Bluetooth subsystem requires the Tx thread to execute at higher priority
* than the Rx thread as the Tx thread needs to process the acknowledgements
* before new Rx data is processed. This is a necessity to correctly detect
* transaction violations in ATT and SMP protocols.
*/
//BUILD_ASSERT(CONFIG_BT_HCI_TX_PRIO < CONFIG_BT_RX_PRIO);
#if defined(CONFIG_BT_CTLR)
/* The Bluetooth Controller's priority receive thread priority shall be higher
* than the Bluetooth Host's Tx and the Controller's receive thread priority.
* This is required in order to dispatch Number of Completed Packets event
* before any new data arrives on a connection to the Host threads.
*/
BUILD_ASSERT(CONFIG_BT_CTLR_RX_PRIO < CONFIG_BT_HCI_TX_PRIO);
#endif /* CONFIG_BT_CTLR */


@@ -0,0 +1,15 @@
/* cpu.h - automatically selects the correct arch.h file to include */
/*
* Copyright (c) 1997-2014 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef __ARCHCPU_H__
#define __ARCHCPU_H__
unsigned int irq_lock(void);
void irq_unlock(unsigned int key);
#endif /* __ARCHCPU_H__ */


@@ -0,0 +1,420 @@
/* atomic operations */
/*
* Copyright (c) 1997-2015, Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef __ATOMIC_H__
#define __ATOMIC_H__
#ifdef __cplusplus
extern "C" {
#endif
typedef int atomic_t;
typedef atomic_t atomic_val_t;
/**
* @defgroup atomic_apis Atomic Services APIs
* @ingroup kernel_apis
* @{
*/
/**
* @brief Atomic compare-and-set.
*
* This routine performs an atomic compare-and-set on @a target. If the current
* value of @a target equals @a old_value, @a target is set to @a new_value.
* If the current value of @a target does not equal @a old_value, @a target
* is left unchanged.
*
* @param target Address of atomic variable.
* @param old_value Original value to compare against.
* @param new_value New value to store.
* @return 1 if @a new_value is written, 0 otherwise.
*/
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
static inline int atomic_cas(atomic_t *target, atomic_val_t old_value,
atomic_val_t new_value)
{
return __atomic_compare_exchange_n(target, &old_value, new_value,
0, __ATOMIC_SEQ_CST,
__ATOMIC_SEQ_CST);
}
#else
extern int atomic_cas(atomic_t *target, atomic_val_t old_value,
atomic_val_t new_value);
#endif
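A minimal usage sketch (not part of this header) of the compare-and-set primitive declared above, using a hypothetical counter; the retry loop is the conventional lock-free update pattern built on atomic_get() and atomic_cas():

/* Hypothetical lock-free increment: retry until no other context
 * modified the counter between the read and the compare-and-set.
 */
static void counter_inc(atomic_t *counter)
{
	atomic_val_t old;

	do {
		old = atomic_get(counter);
	} while (!atomic_cas(counter, old, old + 1));
}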
/**
*
* @brief Atomic addition.
*
* This routine performs an atomic addition on @a target.
*
* @param target Address of atomic variable.
* @param value Value to add.
*
* @return Previous value of @a target.
*/
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
static inline atomic_val_t atomic_add(atomic_t *target, atomic_val_t value)
{
return __atomic_fetch_add(target, value, __ATOMIC_SEQ_CST);
}
#else
extern atomic_val_t atomic_add(atomic_t *target, atomic_val_t value);
#endif
/**
*
* @brief Atomic subtraction.
*
* This routine performs an atomic subtraction on @a target.
*
* @param target Address of atomic variable.
* @param value Value to subtract.
*
* @return Previous value of @a target.
*/
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
static inline atomic_val_t atomic_sub(atomic_t *target, atomic_val_t value)
{
return __atomic_fetch_sub(target, value, __ATOMIC_SEQ_CST);
}
#else
extern atomic_val_t atomic_sub(atomic_t *target, atomic_val_t value);
#endif
/**
*
* @brief Atomic increment.
*
* This routine performs an atomic increment by 1 on @a target.
*
* @param target Address of atomic variable.
*
* @return Previous value of @a target.
*/
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
static inline atomic_val_t atomic_inc(atomic_t *target)
{
return atomic_add(target, 1);
}
#else
extern atomic_val_t atomic_inc(atomic_t *target);
#endif
/**
*
* @brief Atomic decrement.
*
* This routine performs an atomic decrement by 1 on @a target.
*
* @param target Address of atomic variable.
*
* @return Previous value of @a target.
*/
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
static inline atomic_val_t atomic_dec(atomic_t *target)
{
return atomic_sub(target, 1);
}
#else
extern atomic_val_t atomic_dec(atomic_t *target);
#endif
/**
*
* @brief Atomic get.
*
* This routine performs an atomic read on @a target.
*
* @param target Address of atomic variable.
*
* @return Value of @a target.
*/
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
static inline atomic_val_t atomic_get(const atomic_t *target)
{
return __atomic_load_n(target, __ATOMIC_SEQ_CST);
}
#else
extern atomic_val_t atomic_get(const atomic_t *target);
#endif
/**
*
* @brief Atomic get-and-set.
*
* This routine atomically sets @a target to @a value and returns
* the previous value of @a target.
*
* @param target Address of atomic variable.
* @param value Value to write to @a target.
*
* @return Previous value of @a target.
*/
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
static inline atomic_val_t atomic_set(atomic_t *target, atomic_val_t value)
{
/* This builtin, as described by Intel, is not a traditional
* test-and-set operation, but rather an atomic exchange operation. It
* writes value into *ptr, and returns the previous contents of *ptr.
*/
return __atomic_exchange_n(target, value, __ATOMIC_SEQ_CST);
}
#else
extern atomic_val_t atomic_set(atomic_t *target, atomic_val_t value);
#endif
/**
*
* @brief Atomic clear.
*
* This routine atomically sets @a target to zero and returns its previous
* value. (Hence, it is equivalent to atomic_set(target, 0).)
*
* @param target Address of atomic variable.
*
* @return Previous value of @a target.
*/
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
static inline atomic_val_t atomic_clear(atomic_t *target)
{
return atomic_set(target, 0);
}
#else
extern atomic_val_t atomic_clear(atomic_t *target);
#endif
/**
*
* @brief Atomic bitwise inclusive OR.
*
* This routine atomically sets @a target to the bitwise inclusive OR of
* @a target and @a value.
*
* @param target Address of atomic variable.
* @param value Value to OR.
*
* @return Previous value of @a target.
*/
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
static inline atomic_val_t atomic_or(atomic_t *target, atomic_val_t value)
{
return __atomic_fetch_or(target, value, __ATOMIC_SEQ_CST);
}
#else
extern atomic_val_t atomic_or(atomic_t *target, atomic_val_t value);
#endif
/**
*
* @brief Atomic bitwise exclusive OR (XOR).
*
* This routine atomically sets @a target to the bitwise exclusive OR (XOR) of
* @a target and @a value.
*
* @param target Address of atomic variable.
* @param value Value to XOR
*
* @return Previous value of @a target.
*/
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
static inline atomic_val_t atomic_xor(atomic_t *target, atomic_val_t value)
{
return __atomic_fetch_xor(target, value, __ATOMIC_SEQ_CST);
}
#else
extern atomic_val_t atomic_xor(atomic_t *target, atomic_val_t value);
#endif
/**
*
* @brief Atomic bitwise AND.
*
* This routine atomically sets @a target to the bitwise AND of @a target
* and @a value.
*
* @param target Address of atomic variable.
* @param value Value to AND.
*
* @return Previous value of @a target.
*/
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
static inline atomic_val_t atomic_and(atomic_t *target, atomic_val_t value)
{
return __atomic_fetch_and(target, value, __ATOMIC_SEQ_CST);
}
#else
extern atomic_val_t atomic_and(atomic_t *target, atomic_val_t value);
#endif
/**
*
* @brief Atomic bitwise NAND.
*
* This routine atomically sets @a target to the bitwise NAND of @a target
* and @a value. (This operation is equivalent to target = ~(target & value).)
*
* @param target Address of atomic variable.
* @param value Value to NAND.
*
* @return Previous value of @a target.
*/
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
static inline atomic_val_t atomic_nand(atomic_t *target, atomic_val_t value)
{
return __atomic_fetch_nand(target, value, __ATOMIC_SEQ_CST);
}
#else
extern atomic_val_t atomic_nand(atomic_t *target, atomic_val_t value);
#endif
/**
* @brief Initialize an atomic variable.
*
* This macro can be used to initialize an atomic variable. For example,
* @code atomic_t my_var = ATOMIC_INIT(75); @endcode
*
* @param i Value to assign to atomic variable.
*/
#define ATOMIC_INIT(i) (i)
/**
* @cond INTERNAL_HIDDEN
*/
#define ATOMIC_BITS (sizeof(atomic_val_t) * 8)
#define ATOMIC_MASK(bit) (1 << ((bit) & (ATOMIC_BITS - 1)))
#define ATOMIC_ELEM(addr, bit) ((addr) + ((bit) / ATOMIC_BITS))
/**
* INTERNAL_HIDDEN @endcond
*/
/**
* @brief Define an array of atomic variables.
*
* This macro defines an array of atomic variables containing at least
* @a num_bits bits.
*
* @note
* If used from file scope, the bits of the array are initialized to zero;
* if used from within a function, the bits are left uninitialized.
*
* @param name Name of array of atomic variables.
* @param num_bits Number of bits needed.
*/
#define ATOMIC_DEFINE(name, num_bits) \
atomic_t name[1 + ((num_bits) - 1) / ATOMIC_BITS]
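A short sketch (hypothetical flag set, not part of this header) showing how ATOMIC_DEFINE() pairs with the per-bit helpers defined below; it assumes this header is included:

/* Hypothetical array of 40 per-connection flag bits; ATOMIC_DEFINE()
 * sizes the backing atomic_t array, the *_bit() helpers address
 * individual bits within it.
 */
ATOMIC_DEFINE(conn_flags, 40);

static void mark_encrypted(int idx)
{
	atomic_set_bit(conn_flags, idx);
}

static int is_encrypted(int idx)
{
	return atomic_test_bit(conn_flags, idx);
}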
/**
* @brief Atomically test a bit.
*
* This routine tests whether bit number @a bit of @a target is set or not.
* The target may be a single atomic variable or an array of them.
*
* @param target Address of atomic variable or array.
* @param bit Bit number (starting from 0).
*
* @return 1 if the bit was set, 0 if it wasn't.
*/
static inline int atomic_test_bit(const atomic_t *target, int bit)
{
atomic_val_t val = atomic_get(ATOMIC_ELEM(target, bit));
return (1 & (val >> (bit & (ATOMIC_BITS - 1))));
}
/**
* @brief Atomically test and clear a bit.
*
* Atomically clear bit number @a bit of @a target and return its old value.
* The target may be a single atomic variable or an array of them.
*
* @param target Address of atomic variable or array.
* @param bit Bit number (starting from 0).
*
* @return 1 if the bit was set, 0 if it wasn't.
*/
static inline int atomic_test_and_clear_bit(atomic_t *target, int bit)
{
atomic_val_t mask = ATOMIC_MASK(bit);
atomic_val_t old;
old = atomic_and(ATOMIC_ELEM(target, bit), ~mask);
return (old & mask) != 0;
}
/**
* @brief Atomically set a bit.
*
* Atomically set bit number @a bit of @a target and return its old value.
* The target may be a single atomic variable or an array of them.
*
* @param target Address of atomic variable or array.
* @param bit Bit number (starting from 0).
*
* @return 1 if the bit was set, 0 if it wasn't.
*/
static inline int atomic_test_and_set_bit(atomic_t *target, int bit)
{
atomic_val_t mask = ATOMIC_MASK(bit);
atomic_val_t old;
old = atomic_or(ATOMIC_ELEM(target, bit), mask);
return (old & mask) != 0;
}
/**
* @brief Atomically clear a bit.
*
* Atomically clear bit number @a bit of @a target.
* The target may be a single atomic variable or an array of them.
*
* @param target Address of atomic variable or array.
* @param bit Bit number (starting from 0).
*
* @return N/A
*/
static inline void atomic_clear_bit(atomic_t *target, int bit)
{
atomic_val_t mask = ATOMIC_MASK(bit);
atomic_and(ATOMIC_ELEM(target, bit), ~mask);
}
/**
* @brief Atomically set a bit.
*
* Atomically set bit number @a bit of @a target.
* The target may be a single atomic variable or an array of them.
*
* @param target Address of atomic variable or array.
* @param bit Bit number (starting from 0).
*
* @return N/A
*/
static inline void atomic_set_bit(atomic_t *target, int bit)
{
atomic_val_t mask = ATOMIC_MASK(bit);
atomic_or(ATOMIC_ELEM(target, bit), mask);
}
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* __ATOMIC_H__ */


@@ -0,0 +1,109 @@
/** @file
* @brief Bluetooth subsystem logging helpers.
*/
/*
* Copyright (c) 2017 Nordic Semiconductor ASA
* Copyright (c) 2015-2016 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef __BT_LOG_H
#define __BT_LOG_H
#include <zephyr.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>
#ifdef __cplusplus
extern "C" {
#endif
#if !defined(BT_DBG_ENABLED)
#define BT_DBG_ENABLED 1
#endif
#if defined(CONFIG_BT_DEBUG_MONITOR)
#include <stdio.h>
/* These defines follow the values used by syslog(2) */
#define BT_LOG_ERR 3
#define BT_LOG_WARN 4
#define BT_LOG_INFO 6
#define BT_LOG_DBG 7
__printf_like(2, 3) void bt_log(int prio, const char *fmt, ...);
#define BT_DBG(fmt, ...) \
if (BT_DBG_ENABLED) { \
bt_log(BT_LOG_DBG, "%s (%p): " fmt, \
__func__, k_current_get(), ##__VA_ARGS__); \
}
#define BT_ERR(fmt, ...) bt_log(BT_LOG_ERR, "%s: " fmt, \
__func__, ##__VA_ARGS__)
#define BT_WARN(fmt, ...) bt_log(BT_LOG_WARN, "%s: " fmt, \
__func__, ##__VA_ARGS__)
#define BT_INFO(fmt, ...) bt_log(BT_LOG_INFO, fmt, ##__VA_ARGS__)
/* Enabling debug increases stack size requirement */
#define BT_STACK_DEBUG_EXTRA 300
#elif defined(CONFIG_BT_DEBUG_LOG)
#if !defined(SYS_LOG_DOMAIN)
#define SYS_LOG_DOMAIN "bt"
#endif
#define SYS_LOG_LEVEL SYS_LOG_LEVEL_DEBUG
#define BT_DBG(fmt, ...) \
if (BT_DBG_ENABLED) { \
SYS_LOG_DBG(fmt,\
##__VA_ARGS__); \
}
#define BT_ERR(fmt, ...) SYS_LOG_ERR(fmt, ##__VA_ARGS__)
#define BT_WARN(fmt, ...) SYS_LOG_WRN(fmt, ##__VA_ARGS__)
#define BT_INFO(fmt, ...) SYS_LOG_INF(fmt, ##__VA_ARGS__)
/* Enabling debug increases stack size requirement considerably */
#define BT_STACK_DEBUG_EXTRA 300
#else
#define BT_DBG(fmt, ...)
#define BT_ERR BT_DBG
#define BT_WARN BT_DBG
#define BT_INFO BT_DBG
#define BT_STACK_DEBUG_EXTRA 0
#endif
#define BT_ASSERT(cond) if (!(cond)) { \
BT_ERR("assert: '" #cond "' failed"); \
k_oops(); \
}
#define BT_STACK(name, size) \
K_THREAD_STACK_MEMBER(name, (size) + BT_STACK_DEBUG_EXTRA)
#define BT_STACK_NOINIT(name, size) \
K_THREAD_STACK_DEFINE(name, (size) + BT_STACK_DEBUG_EXTRA)
/* This helper is only available when BT_DEBUG is enabled */
const char *bt_hex(const void *buf, size_t len);
/* These helpers are only safe to call from internal threads, as they are
* not thread-safe
*/
const char *bt_addr_str(const bt_addr_t *addr);
const char *bt_addr_le_str(const bt_addr_le_t *addr);
#ifdef __cplusplus
}
#endif
#endif /* __BT_LOG_H */
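The BT_DBG_ENABLED guard above is intended to be defined by each source file before this header is included, so per-module debug output can be switched independently. A minimal sketch, assuming a hypothetical module gated by CONFIG_BT_DEBUG_GATT, that the header is reachable as "common/log.h", and that the stack's u16_t typedef is available:

/* Hypothetical module setup: choose the per-module debug switch
 * before pulling in the logging helpers.
 */
#if defined(CONFIG_BT_DEBUG_GATT)
#define BT_DBG_ENABLED 1
#else
#define BT_DBG_ENABLED 0
#endif
#include "common/log.h"

static void notify_sent(u16_t handle)
{
	BT_DBG("notification sent, handle 0x%04x", handle);
}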


@@ -0,0 +1,16 @@
/* rpa.h - Bluetooth Resolvable Private Addresses (RPA) generation and
* resolution
*/
/*
* Copyright (c) 2017 Nordic Semiconductor ASA
* Copyright (c) 2015-2016 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>
bool bt_rpa_irk_matches(const u8_t irk[16], const bt_addr_t *addr);
int bt_rpa_create(const u8_t irk[16], bt_addr_t *rpa);


@@ -0,0 +1,133 @@
/* errno.h - errno numbers */
/*
* Copyright (c) 1984-1999, 2012 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
/*
* Copyright (c) 1982, 1986 Regents of the University of California.
* All rights reserved. The Berkeley software License Agreement
* specifies the terms and conditions for redistribution.
*
* @(#)errno.h 7.1 (Berkeley) 6/4/86
*/
#ifndef __INCerrnoh
#define __INCerrnoh
#ifdef __cplusplus
extern "C" {
#endif
extern int *__errno(void);
#define errno (*__errno())
/*
* POSIX Error codes
*/
#define EPERM 1 /* Not owner */
#define ENOENT 2 /* No such file or directory */
#define ESRCH 3 /* No such context */
#define EINTR 4 /* Interrupted system call */
#define EIO 5 /* I/O error */
#define ENXIO 6 /* No such device or address */
#define E2BIG 7 /* Arg list too long */
#define ENOEXEC 8 /* Exec format error */
#define EBADF 9 /* Bad file number */
#define ECHILD 10 /* No children */
#define EAGAIN 11 /* No more contexts */
#define ENOMEM 12 /* Not enough core */
#define EACCES 13 /* Permission denied */
#define EFAULT 14 /* Bad address */
#define ENOTEMPTY 15 /* Directory not empty */
#define EBUSY 16 /* Mount device busy */
#define EEXIST 17 /* File exists */
#define EXDEV 18 /* Cross-device link */
#define ENODEV 19 /* No such device */
#define ENOTDIR 20 /* Not a directory */
#define EISDIR 21 /* Is a directory */
#define EINVAL 22 /* Invalid argument */
#define ENFILE 23 /* File table overflow */
#define EMFILE 24 /* Too many open files */
#define ENOTTY 25 /* Not a typewriter */
#define ENAMETOOLONG 26 /* File name too long */
#define EFBIG 27 /* File too large */
#define ENOSPC 28 /* No space left on device */
#define ESPIPE 29 /* Illegal seek */
#define EROFS 30 /* Read-only file system */
#define EMLINK 31 /* Too many links */
#define EPIPE 32 /* Broken pipe */
#define EDEADLK 33 /* Resource deadlock avoided */
#define ENOLCK 34 /* No locks available */
#define ENOTSUP 35 /* Unsupported value */
#define EMSGSIZE 36 /* Message size */
/* ANSI math software */
#define EDOM 37 /* Argument too large */
#define ERANGE 38 /* Result too large */
/* ipc/network software */
/* argument errors */
#define EDESTADDRREQ 40 /* Destination address required */
#define EPROTOTYPE 41 /* Protocol wrong type for socket */
#define ENOPROTOOPT 42 /* Protocol not available */
#define EPROTONOSUPPORT 43 /* Protocol not supported */
#define ESOCKTNOSUPPORT 44 /* Socket type not supported */
#define EOPNOTSUPP 45 /* Operation not supported on socket */
#define EPFNOSUPPORT 46 /* Protocol family not supported */
#define EAFNOSUPPORT 47 /* Addr family not supported */
#define EADDRINUSE 48 /* Address already in use */
#define EADDRNOTAVAIL 49 /* Can't assign requested address */
#define ENOTSOCK 50 /* Socket operation on non-socket */
/* operational errors */
#define ENETUNREACH 51 /* Network is unreachable */
#define ENETRESET 52 /* Network dropped connection on reset */
#define ECONNABORTED 53 /* Software caused connection abort */
#define ECONNRESET 54 /* Connection reset by peer */
#define ENOBUFS 55 /* No buffer space available */
#define EISCONN 56 /* Socket is already connected */
#define ENOTCONN 57 /* Socket is not connected */
#define ESHUTDOWN 58 /* Can't send after socket shutdown */
#define ETOOMANYREFS 59 /* Too many references: can't splice */
#define ETIMEDOUT 60 /* Connection timed out */
#define ECONNREFUSED 61 /* Connection refused */
#define ENETDOWN 62 /* Network is down */
#define ETXTBSY 63 /* Text file busy */
#define ELOOP 64 /* Too many levels of symbolic links */
#define EHOSTUNREACH 65 /* No route to host */
#define ENOTBLK 66 /* Block device required */
#define EHOSTDOWN 67 /* Host is down */
/* non-blocking and interrupt i/o */
#define EINPROGRESS 68 /* Operation now in progress */
#define EALREADY 69 /* Operation already in progress */
#define EWOULDBLOCK EAGAIN /* Operation would block */
#define ENOSYS 71 /* Function not implemented */
/* aio errors (should be under posix) */
#define ECANCELED 72 /* Operation canceled */
#define ERRMAX 81
/* specific STREAMS errno values */
#define ENOSR 74 /* Insufficient memory */
#define ENOSTR 75 /* STREAMS device required */
#define EPROTO 76 /* Generic STREAMS error */
#define EBADMSG 77 /* Invalid STREAMS message */
#define ENODATA 78 /* Missing expected message data */
#define ETIME 79 /* STREAMS timeout occurred */
#define ENOMSG 80 /* Unexpected message type */
#ifdef __cplusplus
}
#endif
#endif /* __INCerrnoh */


@@ -0,0 +1,253 @@
/*
* Copyright (c) 2015 Intel corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Public interface for configuring interrupts
*/
#ifndef _IRQ_H_
#define _IRQ_H_
/* Pull in the arch-specific implementations */
#include <arch/cpu.h>
#include <arch_isr.h>
#ifndef _ASMLANGUAGE
#include <toolchain.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @defgroup isr_apis Interrupt Service Routine APIs
* @ingroup kernel_apis
* @{
*/
/**
* @brief Initialize an interrupt handler.
*
* This routine initializes an interrupt handler for an IRQ. The IRQ must be
* subsequently enabled before the interrupt handler begins servicing
* interrupts.
*
* @warning
* Although this routine is invoked at run-time, all of its arguments must be
* computable by the compiler at build time.
*
* @param irq_p IRQ line number.
* @param priority_p Interrupt priority.
* @param isr_p Address of interrupt service routine.
* @param isr_param_p Parameter passed to interrupt service routine.
* @param flags_p Architecture-specific IRQ configuration flags.
*
* @return Interrupt vector assigned to this interrupt.
*/
#define IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
_ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p)
/**
* @brief Initialize a 'direct' interrupt handler.
*
* This routine initializes an interrupt handler for an IRQ. The IRQ must be
* subsequently enabled via irq_enable() before the interrupt handler begins
* servicing interrupts.
*
* These ISRs are designed for performance-critical interrupt handling and do
* not go through common interrupt handling code. They must be implemented in
* such a way that it is safe to put them directly in the vector table. For
* ISRs written in C, the ISR_DIRECT_DECLARE() macro will do this
* automatically. For ISRs written in assembly it is entirely up to the
* developer to ensure that the right steps are taken.
*
* This type of interrupt currently has a few limitations compared to normal
* Zephyr interrupts:
* - No parameters are passed to the ISR.
* - No stack switch is done, the ISR will run on the interrupted context's
* stack, unless the architecture automatically does the stack switch in HW.
* - Interrupt locking state is unchanged from how the HW sets it when the ISR
* runs. On arches that enter ISRs with interrupts locked, they will remain
* locked.
* - Scheduling decisions are now optional, controlled by the return value of
* ISRs implemented with the ISR_DIRECT_DECLARE() macro
* - The call into the OS to exit power management idle state is now optional.
* Normal interrupts always do this before the ISR is run, but when it runs
* is now controlled by the placement of a ISR_DIRECT_PM() macro, or omitted
* entirely.
*
* @warning
* Although this routine is invoked at run-time, all of its arguments must be
* computable by the compiler at build time.
*
* @param irq_p IRQ line number.
* @param priority_p Interrupt priority.
* @param isr_p Address of interrupt service routine.
* @param flags_p Architecture-specific IRQ configuration flags.
*
* @return Interrupt vector assigned to this interrupt.
*/
#define IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) \
_ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p)
/**
* @brief Common tasks before executing the body of an ISR
*
* This macro must be at the beginning of all direct interrupts and performs
* minimal architecture-specific tasks before the ISR itself can run. It takes
* no arguments and has no return value.
*/
#define ISR_DIRECT_HEADER() _ARCH_ISR_DIRECT_HEADER()
/**
* @brief Common tasks before exiting the body of an ISR
*
* This macro must be at the end of all direct interrupts and performs
* minimal architecture-specific tasks like EOI. It has no return value.
*
* In a normal interrupt, a check is done at end of interrupt to invoke
* _Swap() logic if the current thread is preemptible and there is another
* thread ready to run in the kernel's ready queue cache. This is now optional
* and controlled by the check_reschedule argument. If unsure, set to nonzero.
* On systems that do stack switching and nested interrupt tracking in software,
* _Swap() should only be called if this was a non-nested interrupt.
*
* @param check_reschedule If nonzero, additionally invoke scheduling logic
*/
#define ISR_DIRECT_FOOTER(check_reschedule) \
_ARCH_ISR_DIRECT_FOOTER(check_reschedule)
/**
* @brief Perform power management idle exit logic
*
* This macro may optionally be invoked somewhere in between ISR_DIRECT_HEADER()
* and ISR_DIRECT_FOOTER() invocations. It performs the tasks necessary to
* exit power management idle state. It takes no parameters and returns no
* value. It may be omitted, but be careful!
*/
#define ISR_DIRECT_PM() _ARCH_ISR_DIRECT_PM()
/**
* @brief Helper macro to declare a direct interrupt service routine.
*
* This will declare the function in a proper way and automatically include
* the ISR_DIRECT_FOOTER() and ISR_DIRECT_HEADER() macros. The function should
* return nonzero status if a scheduling decision should potentially be made.
* See ISR_DIRECT_FOOTER() for more details on the scheduling decision.
*
* For architectures that support 'regular' and 'fast' interrupt types, where
* these interrupt types require different assembly language handling of
* registers by the ISR, this will always generate code for the 'fast'
* interrupt type.
*
* Example usage:
*
* ISR_DIRECT_DECLARE(my_isr)
* {
* bool done = do_stuff();
* ISR_DIRECT_PM(); <-- done after do_stuff() due to latency concerns
* if (!done) {
* return 0; <-- Don't bother checking if we have to _Swap()
* }
* k_sem_give(some_sem);
* return 1;
* }
*
* @param name symbol name of the ISR
*/
#define ISR_DIRECT_DECLARE(name) _ARCH_ISR_DIRECT_DECLARE(name)
/**
* @brief Lock interrupts.
*
* This routine disables all interrupts on the CPU. It returns an unsigned
* integer "lock-out key", which is an architecture-dependent indicator of
* whether interrupts were locked prior to the call. The lock-out key must be
* passed to irq_unlock() to re-enable interrupts.
*
* This routine can be called recursively, as long as the caller keeps track
* of each lock-out key that is generated. Interrupts are re-enabled by
* passing each of the keys to irq_unlock() in the reverse order they were
* acquired. (That is, each call to irq_lock() must be balanced by
* a corresponding call to irq_unlock().)
*
* @note
* This routine can be called by ISRs or by threads. If it is called by a
* thread, the interrupt lock is thread-specific; this means that interrupts
* remain disabled only while the thread is running. If the thread performs an
* operation that allows another thread to run (for example, giving a semaphore
* or sleeping for N milliseconds), the interrupt lock no longer applies and
* interrupts may be re-enabled while other processing occurs. When the thread
* once again becomes the current thread, the kernel re-establishes its
* interrupt lock; this ensures the thread won't be interrupted until it has
* explicitly released the interrupt lock it established.
*
* @warning
* The lock-out key should never be used to manually re-enable interrupts
* or to inspect or manipulate the contents of the CPU's interrupt bits.
*
* @return Lock-out key.
*/
/*#define irq_lock() _arch_irq_lock()*/
/**
* @brief Unlock interrupts.
*
* This routine reverses the effect of a previous call to irq_lock() using
* the associated lock-out key. The caller must call the routine once for
* each time it called irq_lock(), supplying the keys in the reverse order
* they were acquired, before interrupts are enabled.
*
* @note Can be called by ISRs.
*
* @param key Lock-out key generated by irq_lock().
*
* @return N/A
*/
/*#define irq_unlock(key) _arch_irq_unlock(key)*/
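A minimal critical-section sketch using the lock/unlock pair described above; the shared counter is hypothetical and only illustrates balanced use of the lock-out key:

/* Hypothetical shared state updated from both thread and ISR context. */
static unsigned int shared_events;

static void record_event(void)
{
	unsigned int key = irq_lock();	/* disable interrupts, keep the key */

	shared_events++;		/* safely touch the shared state */

	irq_unlock(key);		/* restore the previous interrupt state */
}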
/**
* @brief Enable an IRQ.
*
* This routine enables interrupts from source @a irq.
*
* @param irq IRQ line.
*
* @return N/A
*/
#define irq_enable(irq) _arch_irq_enable(irq)
/**
* @brief Disable an IRQ.
*
* This routine disables interrupts from source @a irq.
*
* @param irq IRQ line.
*
* @return N/A
*/
#define irq_disable(irq) _arch_irq_disable(irq)
/**
* @brief Get IRQ enable state.
*
* This routine indicates if interrupts from source @a irq are enabled.
*
* @param irq IRQ line.
*
* @return interrupt enable state, true or false
*/
#define irq_is_enabled(irq) _arch_irq_is_enabled(irq)
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* _IRQ_H_ */


@@ -0,0 +1,27 @@
/* Macros for tagging symbols and putting them in the correct sections. */
/*
* Copyright (c) 2013-2014, Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef _section_tags__h_
#define _section_tags__h_
#include <toolchain.h>
#if !defined(_ASMLANGUAGE)
#define __noinit __in_section_unique(NOINIT)
#define __irq_vector_table _GENERIC_SECTION(IRQ_VECTOR_TABLE)
#define __sw_isr_table _GENERIC_SECTION(SW_ISR_TABLE)
#if defined(CONFIG_ARM)
#define __kinetis_flash_config_section __in_section_unique(KINETIS_FLASH_CONFIG)
#define __ti_ccfg_section _GENERIC_SECTION(TI_CCFG)
#endif /* CONFIG_ARM */
#endif /* !_ASMLANGUAGE */
#endif /* _section_tags__h_ */


@@ -0,0 +1,58 @@
/*
* Copyright (c) 2013-2014, Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Definitions of various linker Sections.
*
* Linker Section declarations used by linker script, C files and Assembly
* files.
*/
#ifndef _SECTIONS_H
#define _SECTIONS_H
#define _TEXT_SECTION_NAME text
#define _RODATA_SECTION_NAME rodata
#define _CTOR_SECTION_NAME ctors
/* Linker issue with XIP where the name "data" cannot be used */
#define _DATA_SECTION_NAME datas
#define _BSS_SECTION_NAME bss
#define _NOINIT_SECTION_NAME noinit
#define _APP_DATA_SECTION_NAME app_datas
#define _APP_BSS_SECTION_NAME app_bss
#define _APP_NOINIT_SECTION_NAME app_noinit
#define _UNDEFINED_SECTION_NAME undefined
/* Various text section names */
#define TEXT text
#if defined(CONFIG_X86)
#define TEXT_START text_start /* beginning of TEXT section */
#else
#define TEXT_START text /* beginning of TEXT section */
#endif
/* Various data type section names */
#define BSS bss
#define RODATA rodata
#define DATA data
#define NOINIT noinit
/* Interrupts */
#define IRQ_VECTOR_TABLE .gnu.linkonce.irq_vector_table
#define SW_ISR_TABLE .gnu.linkonce.sw_isr_table
/* Architecture-specific sections */
#if defined(CONFIG_ARM)
#define KINETIS_FLASH_CONFIG kinetis_flash_config
#define TI_CCFG .ti_ccfg
#endif
#include <linker/section_tags.h>
#endif /* _SECTIONS_H */

View file

@ -0,0 +1,114 @@
/*
* Copyright (c) 2011-2014 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Debug aid
*
*
* The __ASSERT() macro can be used inside kernel code.
*
* Assertions are enabled by setting the __ASSERT_ON symbol to a non-zero value.
* There are two ways to do this:
* a) Use the ASSERT and ASSERT_LEVEL kconfig options
* b) Add "CFLAGS += -D__ASSERT_ON=<level>" at the end of a project's Makefile
* The Makefile method takes precedence over the kconfig option if both are
* used.
*
* Specifying an assertion level of 1 causes the compiler to issue warnings that
* the kernel contains debug-type __ASSERT() statements; this reminder is issued
* since assertion code is not normally present in a final product. Specifying
* assertion level 2 suppresses these warnings.
*
* The __ASSERT_EVAL() macro can also be used inside kernel code.
*
 * It makes use of the __ASSERT() macro, but has some extra flexibility. It
 * allows the developer to specify different actions depending on whether the
 * __ASSERT() macro is enabled or not. This can be particularly useful to
 * prevent the compiler from emitting diagnostics (errors, warnings or remarks)
 * about variables that are assigned a value but are only used by __ASSERT(),
 * and are therefore unused when the __ASSERT() macro is disabled.
*
* Consider the following example:
*
* int x;
*
* x = foo ();
* __ASSERT (x != 0, "foo() returned zero!");
*
* If __ASSERT() is disabled, then 'x' is assigned a value, but never used.
* This type of situation can be resolved using the __ASSERT_EVAL() macro.
*
* __ASSERT_EVAL ((void) foo(),
* int x = foo(),
* x != 0,
* "foo() returned zero!");
*
* The first parameter tells __ASSERT_EVAL() what to do if __ASSERT() is
* disabled. The second parameter tells __ASSERT_EVAL() what to do if
* __ASSERT() is enabled. The third and fourth parameters are the parameters
* it passes to __ASSERT().
*
* The __ASSERT_NO_MSG() macro can be used to perform an assertion that reports
* the failed test and its location, but lacks additional debugging information
* provided to assist the user in diagnosing the problem; its use is
* discouraged.
*/
#ifndef ___ASSERT__H_
#define ___ASSERT__H_
#ifdef CONFIG_ASSERT
#ifndef __ASSERT_ON
#define __ASSERT_ON CONFIG_ASSERT_LEVEL
#endif
#endif
#ifdef __ASSERT_ON
#if (__ASSERT_ON < 0) || (__ASSERT_ON > 2)
#error "Invalid __ASSERT() level: must be between 0 and 2"
#endif
#if __ASSERT_ON
#include <misc/printk.h>
#define __ASSERT(test, fmt, ...) \
do { \
if (!(test)) { \
printk("ASSERTION FAIL [%s] @ %s:%d:\n\t", \
_STRINGIFY(test), \
__FILE__, \
__LINE__); \
printk(fmt, ##__VA_ARGS__); \
for (;;) \
; /* spin thread */ \
} \
} while ((0))
#define __ASSERT_EVAL(expr1, expr2, test, fmt, ...) \
do { \
expr2; \
__ASSERT(test, fmt, ##__VA_ARGS__); \
} while (0)
#if (__ASSERT_ON == 1)
#warning "__ASSERT() statements are ENABLED"
#endif
#else
#define __ASSERT(test, fmt, ...) \
do {/* nothing */ \
} while ((0))
#define __ASSERT_EVAL(expr1, expr2, test, fmt, ...) expr1
#endif
#else
#define __ASSERT(test, fmt, ...) \
do {/* nothing */ \
} while ((0))
#define __ASSERT_EVAL(expr1, expr2, test, fmt, ...) expr1
#endif
#define __ASSERT_NO_MSG(test) __ASSERT(test, "")
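/*
 * Illustrative usage sketch (not part of the original header), assuming
 * CONFIG_ASSERT is enabled so __ASSERT_ON is non-zero; bar() is a
 * hypothetical helper.
 *
 *     int err = bar();
 *
 *     __ASSERT(err == 0, "bar() failed with %d", err);
 *     __ASSERT_NO_MSG(err == 0);
 */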
#endif /* ___ASSERT__H_ */

View file

@ -0,0 +1,325 @@
/** @file
* @brief Byte order helpers.
*/
/*
* Copyright (c) 2015-2016, Intel Corporation.
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef __BYTEORDER_H__
#define __BYTEORDER_H__
#include <zephyr/types.h>
#include <stddef.h>
#include <misc/__assert.h>
/* Internal helpers only used by the sys_* APIs further below */
#define __bswap_16(x) ((u16_t) ((((x) >> 8) & 0xff) | (((x) & 0xff) << 8)))
#define __bswap_32(x) ((u32_t) ((((x) >> 24) & 0xff) | \
(((x) >> 8) & 0xff00) | \
(((x) & 0xff00) << 8) | \
(((x) & 0xff) << 24)))
#define __bswap_64(x) ((u64_t) ((((x) >> 56) & 0xff) | \
(((x) >> 40) & 0xff00) | \
(((x) >> 24) & 0xff0000) | \
(((x) >> 8) & 0xff000000) | \
(((x) & 0xff000000) << 8) | \
(((x) & 0xff0000) << 24) | \
(((x) & 0xff00) << 40) | \
(((x) & 0xff) << 56)))
/** @def sys_le16_to_cpu
* @brief Convert 16-bit integer from little-endian to host endianness.
*
* @param val 16-bit integer in little-endian format.
*
* @return 16-bit integer in host endianness.
*/
/** @def sys_cpu_to_le16
* @brief Convert 16-bit integer from host endianness to little-endian.
*
* @param val 16-bit integer in host endianness.
*
* @return 16-bit integer in little-endian format.
*/
/** @def sys_be16_to_cpu
* @brief Convert 16-bit integer from big-endian to host endianness.
*
* @param val 16-bit integer in big-endian format.
*
* @return 16-bit integer in host endianness.
*/
/** @def sys_cpu_to_be16
* @brief Convert 16-bit integer from host endianness to big-endian.
*
* @param val 16-bit integer in host endianness.
*
* @return 16-bit integer in big-endian format.
*/
/** @def sys_le32_to_cpu
* @brief Convert 32-bit integer from little-endian to host endianness.
*
* @param val 32-bit integer in little-endian format.
*
* @return 32-bit integer in host endianness.
*/
/** @def sys_cpu_to_le32
* @brief Convert 32-bit integer from host endianness to little-endian.
*
* @param val 32-bit integer in host endianness.
*
* @return 32-bit integer in little-endian format.
*/
/** @def sys_be32_to_cpu
* @brief Convert 32-bit integer from big-endian to host endianness.
*
* @param val 32-bit integer in big-endian format.
*
* @return 32-bit integer in host endianness.
*/
/** @def sys_cpu_to_be32
* @brief Convert 32-bit integer from host endianness to big-endian.
*
* @param val 32-bit integer in host endianness.
*
* @return 32-bit integer in big-endian format.
*/
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define sys_le16_to_cpu(val) (val)
#define sys_cpu_to_le16(val) (val)
#define sys_be16_to_cpu(val) __bswap_16(val)
#define sys_cpu_to_be16(val) __bswap_16(val)
#define sys_le32_to_cpu(val) (val)
#define sys_cpu_to_le32(val) (val)
#define sys_le64_to_cpu(val) (val)
#define sys_cpu_to_le64(val) (val)
#define sys_be32_to_cpu(val) __bswap_32(val)
#define sys_cpu_to_be32(val) __bswap_32(val)
#define sys_be64_to_cpu(val) __bswap_64(val)
#define sys_cpu_to_be64(val) __bswap_64(val)
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define sys_le16_to_cpu(val) __bswap_16(val)
#define sys_cpu_to_le16(val) __bswap_16(val)
#define sys_be16_to_cpu(val) (val)
#define sys_cpu_to_be16(val) (val)
#define sys_le32_to_cpu(val) __bswap_32(val)
#define sys_cpu_to_le32(val) __bswap_32(val)
#define sys_le64_to_cpu(val) __bswap_64(val)
#define sys_cpu_to_le64(val) __bswap_64(val)
#define sys_be32_to_cpu(val) (val)
#define sys_cpu_to_be32(val) (val)
#define sys_be64_to_cpu(val) (val)
#define sys_cpu_to_be64(val) (val)
#else
#error "Unknown byte order"
#endif
/**
* @brief Put a 16-bit integer as big-endian to arbitrary location.
*
* Put a 16-bit integer, originally in host endianness, to a
* potentially unaligned memory location in big-endian format.
*
* @param val 16-bit integer in host endianness.
* @param dst Destination memory address to store the result.
*/
static inline void sys_put_be16(u16_t val, u8_t dst[2])
{
dst[0] = val >> 8;
dst[1] = val;
}
/**
* @brief Put a 32-bit integer as big-endian to arbitrary location.
*
* Put a 32-bit integer, originally in host endianness, to a
* potentially unaligned memory location in big-endian format.
*
* @param val 32-bit integer in host endianness.
* @param dst Destination memory address to store the result.
*/
static inline void sys_put_be32(u32_t val, u8_t dst[4])
{
sys_put_be16(val >> 16, dst);
sys_put_be16(val, &dst[2]);
}
/**
* @brief Put a 16-bit integer as little-endian to arbitrary location.
*
* Put a 16-bit integer, originally in host endianness, to a
* potentially unaligned memory location in little-endian format.
*
* @param val 16-bit integer in host endianness.
* @param dst Destination memory address to store the result.
*/
static inline void sys_put_le16(u16_t val, u8_t dst[2])
{
dst[0] = val;
dst[1] = val >> 8;
}
/**
* @brief Put a 32-bit integer as little-endian to arbitrary location.
*
* Put a 32-bit integer, originally in host endianness, to a
* potentially unaligned memory location in little-endian format.
*
* @param val 32-bit integer in host endianness.
* @param dst Destination memory address to store the result.
*/
static inline void sys_put_le32(u32_t val, u8_t dst[4])
{
sys_put_le16(val, dst);
sys_put_le16(val >> 16, &dst[2]);
}
/**
* @brief Put a 64-bit integer as little-endian to arbitrary location.
*
* Put a 64-bit integer, originally in host endianness, to a
* potentially unaligned memory location in little-endian format.
*
* @param val 64-bit integer in host endianness.
* @param dst Destination memory address to store the result.
*/
static inline void sys_put_le64(u64_t val, u8_t dst[8])
{
sys_put_le32(val, dst);
sys_put_le32(val >> 32, &dst[4]);
}
/**
* @brief Get a 16-bit integer stored in big-endian format.
*
* Get a 16-bit integer, stored in big-endian format in a potentially
* unaligned memory location, and convert it to the host endianness.
*
* @param src Location of the big-endian 16-bit integer to get.
*
* @return 16-bit integer in host endianness.
*/
static inline u16_t sys_get_be16(const u8_t src[2])
{
return ((u16_t)src[0] << 8) | src[1];
}
/**
* @brief Get a 32-bit integer stored in big-endian format.
*
* Get a 32-bit integer, stored in big-endian format in a potentially
* unaligned memory location, and convert it to the host endianness.
*
* @param src Location of the big-endian 32-bit integer to get.
*
* @return 32-bit integer in host endianness.
*/
static inline u32_t sys_get_be32(const u8_t src[4])
{
return ((u32_t)sys_get_be16(&src[0]) << 16) | sys_get_be16(&src[2]);
}
/**
* @brief Get a 16-bit integer stored in little-endian format.
*
* Get a 16-bit integer, stored in little-endian format in a potentially
* unaligned memory location, and convert it to the host endianness.
*
* @param src Location of the little-endian 16-bit integer to get.
*
* @return 16-bit integer in host endianness.
*/
static inline u16_t sys_get_le16(const u8_t src[2])
{
return ((u16_t)src[1] << 8) | src[0];
}
/**
* @brief Get a 32-bit integer stored in little-endian format.
*
* Get a 32-bit integer, stored in little-endian format in a potentially
* unaligned memory location, and convert it to the host endianness.
*
* @param src Location of the little-endian 32-bit integer to get.
*
* @return 32-bit integer in host endianness.
*/
static inline u32_t sys_get_le32(const u8_t src[4])
{
return ((u32_t)sys_get_le16(&src[2]) << 16) | sys_get_le16(&src[0]);
}
/**
* @brief Get a 64-bit integer stored in little-endian format.
*
* Get a 64-bit integer, stored in little-endian format in a potentially
* unaligned memory location, and convert it to the host endianness.
*
* @param src Location of the little-endian 64-bit integer to get.
*
* @return 64-bit integer in host endianness.
*/
static inline u64_t sys_get_le64(const u8_t src[8])
{
return ((u64_t)sys_get_le32(&src[4]) << 32) | sys_get_le32(&src[0]);
}
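/*
 * Illustrative usage sketch (not part of the original header): round-tripping
 * a 32-bit value through an unaligned little-endian buffer.
 *
 *     u8_t buf[4];
 *     u32_t val = 0x12345678;
 *
 *     sys_put_le32(val, buf);     // buf = { 0x78, 0x56, 0x34, 0x12 }
 *     val = sys_get_le32(buf);    // val == 0x12345678 again
 */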
/**
* @brief Swap one buffer content into another
*
* Copy the content of src buffer into dst buffer in reversed order,
 * i.e. src[n] will be put in dst[end-n], where n is an index and 'end' is
 * the last index in both arrays.
* The 2 memory pointers must be pointing to different areas, and have
* a minimum size of given length.
*
* @param dst A valid pointer on a memory area where to copy the data in
* @param src A valid pointer on a memory area where to copy the data from
* @param length Size of both dst and src memory areas
*/
static inline void sys_memcpy_swap(void *dst, const void *src, size_t length)
{
__ASSERT(((src < dst && (src + length) <= dst) ||
(src > dst && (dst + length) <= src)),
"Source and destination buffers must not overlap");
src += length - 1;
for (; length > 0; length--) {
*((u8_t *)dst++) = *((u8_t *)src--);
}
}
/**
* @brief Swap buffer content
*
* In-place memory swap, where final content will be reversed.
 * i.e. buf[n] will be put in buf[end-n], where n is an index and 'end' is
 * the last index of buf.
*
* @param buf A valid pointer on a memory area to swap
* @param length Size of buf memory area
*/
static inline void sys_mem_swap(void *buf, size_t length)
{
size_t i;
for (i = 0; i < (length/2); i++) {
u8_t tmp = ((u8_t *)buf)[i];
((u8_t *)buf)[i] = ((u8_t *)buf)[length - 1 - i];
((u8_t *)buf)[length - 1 - i] = tmp;
}
}
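/*
 * Illustrative usage sketch (not part of the original header): reversing the
 * byte order of a 6-byte address, first into a second buffer and then in
 * place.
 *
 *     u8_t addr[6] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06 };
 *     u8_t rev[6];
 *
 *     sys_memcpy_swap(rev, addr, sizeof(rev));  // rev  = { 06 05 04 03 02 01 }
 *     sys_mem_swap(addr, sizeof(addr));         // addr = { 06 05 04 03 02 01 }
 */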
#endif /* __BYTEORDER_H__ */

View file

@ -0,0 +1,494 @@
/*
* Copyright (c) 2013-2015 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Doubly-linked list implementation
*
* Doubly-linked list implementation using inline macros/functions.
* This API is not thread safe, and thus if a list is used across threads,
* calls to functions must be protected with synchronization primitives.
*
* The lists are expected to be initialized such that both the head and tail
* pointers point to the list itself. Initializing the lists in such a fashion
* simplifies the adding and removing of nodes to/from the list.
*/
#ifndef _misc_dlist__h_
#define _misc_dlist__h_
#include <stddef.h>
#ifdef __cplusplus
extern "C" {
#endif
struct _dnode {
union {
struct _dnode *head; /* ptr to head of list (sys_dlist_t) */
struct _dnode *next; /* ptr to next node (sys_dnode_t) */
};
union {
struct _dnode *tail; /* ptr to tail of list (sys_dlist_t) */
struct _dnode *prev; /* ptr to previous node (sys_dnode_t) */
};
};
typedef struct _dnode sys_dlist_t;
typedef struct _dnode sys_dnode_t;
/**
* @brief Provide the primitive to iterate on a list
* Note: the loop is unsafe and thus __dn should not be removed
*
* User _MUST_ add the loop statement curly braces enclosing its own code:
*
* SYS_DLIST_FOR_EACH_NODE(l, n) {
* <user code>
* }
*
* This and other SYS_DLIST_*() macros are not thread safe.
*
* @param __dl A pointer on a sys_dlist_t to iterate on
* @param __dn A sys_dnode_t pointer to peek each node of the list
*/
#define SYS_DLIST_FOR_EACH_NODE(__dl, __dn) \
for (__dn = sys_dlist_peek_head(__dl); __dn; \
__dn = sys_dlist_peek_next(__dl, __dn))
/**
* @brief Provide the primitive to iterate on a list, from a node in the list
* Note: the loop is unsafe and thus __dn should not be removed
*
* User _MUST_ add the loop statement curly braces enclosing its own code:
*
* SYS_DLIST_ITERATE_FROM_NODE(l, n) {
* <user code>
* }
*
* Like SYS_DLIST_FOR_EACH_NODE(), but __dn already contains a node in the list
* where to start searching for the next entry from. If NULL, it starts from
* the head.
*
* This and other SYS_DLIST_*() macros are not thread safe.
*
* @param __dl A pointer on a sys_dlist_t to iterate on
* @param __dn A sys_dnode_t pointer to peek each node of the list;
* it contains the starting node, or NULL to start from the head
*/
#define SYS_DLIST_ITERATE_FROM_NODE(__dl, __dn) \
for (__dn = __dn ? sys_dlist_peek_next_no_check(__dl, __dn) \
: sys_dlist_peek_head(__dl); \
__dn; \
__dn = sys_dlist_peek_next(__dl, __dn))
/**
* @brief Provide the primitive to safely iterate on a list
* Note: __dn can be removed, it will not break the loop.
*
* User _MUST_ add the loop statement curly braces enclosing its own code:
*
* SYS_DLIST_FOR_EACH_NODE_SAFE(l, n, s) {
* <user code>
* }
*
* This and other SYS_DLIST_*() macros are not thread safe.
*
* @param __dl A pointer on a sys_dlist_t to iterate on
* @param __dn A sys_dnode_t pointer to peek each node of the list
* @param __dns A sys_dnode_t pointer for the loop to run safely
*/
#define SYS_DLIST_FOR_EACH_NODE_SAFE(__dl, __dn, __dns) \
for (__dn = sys_dlist_peek_head(__dl), \
__dns = sys_dlist_peek_next(__dl, __dn); \
__dn; __dn = __dns, \
__dns = sys_dlist_peek_next(__dl, __dn))
/*
* @brief Provide the primitive to resolve the container of a list node
* Note: it is safe to use with NULL pointer nodes
*
* @param __dn A pointer on a sys_dnode_t to get its container
* @param __cn Container struct type pointer
* @param __n The field name of sys_dnode_t within the container struct
*/
#define SYS_DLIST_CONTAINER(__dn, __cn, __n) \
(__dn ? CONTAINER_OF(__dn, __typeof__(*__cn), __n) : NULL)
/*
* @brief Provide the primitive to peek container of the list head
*
* @param __dl A pointer on a sys_dlist_t to peek
* @param __cn Container struct type pointer
* @param __n The field name of sys_dnode_t within the container struct
*/
#define SYS_DLIST_PEEK_HEAD_CONTAINER(__dl, __cn, __n) \
SYS_DLIST_CONTAINER(sys_dlist_peek_head(__dl), __cn, __n)
/*
* @brief Provide the primitive to peek the next container
*
* @param __dl A pointer on a sys_dlist_t to peek
* @param __cn Container struct type pointer
* @param __n The field name of sys_dnode_t within the container struct
*/
#define SYS_DLIST_PEEK_NEXT_CONTAINER(__dl, __cn, __n) \
((__cn) ? SYS_DLIST_CONTAINER(sys_dlist_peek_next(__dl, &(__cn->__n)), \
__cn, __n) : NULL)
/**
* @brief Provide the primitive to iterate on a list under a container
* Note: the loop is unsafe and thus __cn should not be detached
*
* User _MUST_ add the loop statement curly braces enclosing its own code:
*
* SYS_DLIST_FOR_EACH_CONTAINER(l, c, n) {
* <user code>
* }
*
* @param __dl A pointer on a sys_dlist_t to iterate on
* @param __cn A pointer to peek each entry of the list
* @param __n The field name of sys_dnode_t within the container struct
*/
#define SYS_DLIST_FOR_EACH_CONTAINER(__dl, __cn, __n) \
for (__cn = SYS_DLIST_PEEK_HEAD_CONTAINER(__dl, __cn, __n); __cn; \
__cn = SYS_DLIST_PEEK_NEXT_CONTAINER(__dl, __cn, __n))
/**
* @brief Provide the primitive to safely iterate on a list under a container
* Note: __cn can be detached, it will not break the loop.
*
* User _MUST_ add the loop statement curly braces enclosing its own code:
*
* SYS_DLIST_FOR_EACH_CONTAINER_SAFE(l, c, cn, n) {
* <user code>
* }
*
* @param __dl A pointer on a sys_dlist_t to iterate on
* @param __cn A pointer to peek each entry of the list
* @param __cns A pointer for the loop to run safely
* @param __n The field name of sys_dnode_t within the container struct
*/
#define SYS_DLIST_FOR_EACH_CONTAINER_SAFE(__dl, __cn, __cns, __n) \
for (__cn = SYS_DLIST_PEEK_HEAD_CONTAINER(__dl, __cn, __n), \
__cns = SYS_DLIST_PEEK_NEXT_CONTAINER(__dl, __cn, __n); __cn; \
__cn = __cns, \
__cns = SYS_DLIST_PEEK_NEXT_CONTAINER(__dl, __cn, __n))
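/*
 * Illustrative usage sketch (not part of the original header): a hypothetical
 * container with an embedded sys_dnode_t, iterated with the safe variant so
 * the current entry may be removed.
 *
 *     struct item {
 *         sys_dnode_t node;
 *         int value;
 *     };
 *
 *     sys_dlist_t list;
 *     struct item *it, *next;
 *
 *     sys_dlist_init(&list);
 *     // ... append items ...
 *     SYS_DLIST_FOR_EACH_CONTAINER_SAFE(&list, it, next, node) {
 *         if (it->value < 0) {
 *             sys_dlist_remove(&it->node);
 *         }
 *     }
 */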
/**
* @brief initialize list
*
* @param list the doubly-linked list
*
* @return N/A
*/
static inline void sys_dlist_init(sys_dlist_t *list)
{
list->head = (sys_dnode_t *)list;
list->tail = (sys_dnode_t *)list;
}
#define SYS_DLIST_STATIC_INIT(ptr_to_list) {{(ptr_to_list)}, {(ptr_to_list)}}
/**
* @brief check if a node is the list's head
*
* @param list the doubly-linked list to operate on
* @param node the node to check
*
* @return 1 if node is the head, 0 otherwise
*/
static inline int sys_dlist_is_head(sys_dlist_t *list, sys_dnode_t *node)
{
return list->head == node;
}
/**
* @brief check if a node is the list's tail
*
* @param list the doubly-linked list to operate on
* @param node the node to check
*
* @return 1 if node is the tail, 0 otherwise
*/
static inline int sys_dlist_is_tail(sys_dlist_t *list, sys_dnode_t *node)
{
return list->tail == node;
}
/**
* @brief check if the list is empty
*
* @param list the doubly-linked list to operate on
*
* @return 1 if empty, 0 otherwise
*/
static inline int sys_dlist_is_empty(sys_dlist_t *list)
{
return list->head == list;
}
/**
* @brief check if more than one node present
*
* This and other sys_dlist_*() functions are not thread safe.
*
* @param list the doubly-linked list to operate on
*
* @return 1 if multiple nodes, 0 otherwise
*/
static inline int sys_dlist_has_multiple_nodes(sys_dlist_t *list)
{
return list->head != list->tail;
}
/**
* @brief get a reference to the head item in the list
*
* @param list the doubly-linked list to operate on
*
* @return a pointer to the head element, NULL if list is empty
*/
static inline sys_dnode_t *sys_dlist_peek_head(sys_dlist_t *list)
{
return sys_dlist_is_empty(list) ? NULL : list->head;
}
/**
* @brief get a reference to the head item in the list
*
* The list must be known to be non-empty.
*
* @param list the doubly-linked list to operate on
*
* @return a pointer to the head element
*/
static inline sys_dnode_t *sys_dlist_peek_head_not_empty(sys_dlist_t *list)
{
return list->head;
}
/**
* @brief get a reference to the next item in the list, node is not NULL
*
* Faster than sys_dlist_peek_next() if node is known not to be NULL.
*
* @param list the doubly-linked list to operate on
* @param node the node from which to get the next element in the list
*
* @return a pointer to the next element from a node, NULL if node is the tail
*/
static inline sys_dnode_t *sys_dlist_peek_next_no_check(sys_dlist_t *list,
sys_dnode_t *node)
{
return (node == list->tail) ? NULL : node->next;
}
/**
* @brief get a reference to the next item in the list
*
* @param list the doubly-linked list to operate on
* @param node the node from which to get the next element in the list
*
* @return a pointer to the next element from a node, NULL if node is the tail
* or NULL (when node comes from reading the head of an empty list).
*/
static inline sys_dnode_t *sys_dlist_peek_next(sys_dlist_t *list,
sys_dnode_t *node)
{
return node ? sys_dlist_peek_next_no_check(list, node) : NULL;
}
/**
* @brief get a reference to the tail item in the list
*
* @param list the doubly-linked list to operate on
*
* @return a pointer to the tail element, NULL if list is empty
*/
static inline sys_dnode_t *sys_dlist_peek_tail(sys_dlist_t *list)
{
return sys_dlist_is_empty(list) ? NULL : list->tail;
}
/**
* @brief add node to tail of list
*
* This and other sys_dlist_*() functions are not thread safe.
*
* @param list the doubly-linked list to operate on
* @param node the element to append
*
* @return N/A
*/
static inline void sys_dlist_append(sys_dlist_t *list, sys_dnode_t *node)
{
node->next = list;
node->prev = list->tail;
list->tail->next = node;
list->tail = node;
}
/**
* @brief add node to head of list
*
* This and other sys_dlist_*() functions are not thread safe.
*
* @param list the doubly-linked list to operate on
* @param node the element to append
*
* @return N/A
*/
static inline void sys_dlist_prepend(sys_dlist_t *list, sys_dnode_t *node)
{
node->next = list->head;
node->prev = list;
list->head->prev = node;
list->head = node;
}
/**
* @brief insert node after a node
*
* Insert a node after a specified node in a list.
* This and other sys_dlist_*() functions are not thread safe.
*
* @param list the doubly-linked list to operate on
* @param insert_point the insert point in the list: if NULL, insert at head
* @param node the element to append
*
* @return N/A
*/
static inline void sys_dlist_insert_after(sys_dlist_t *list,
sys_dnode_t *insert_point, sys_dnode_t *node)
{
if (!insert_point) {
sys_dlist_prepend(list, node);
} else {
node->next = insert_point->next;
node->prev = insert_point;
insert_point->next->prev = node;
insert_point->next = node;
}
}
/**
* @brief insert node before a node
*
* Insert a node before a specified node in a list.
* This and other sys_dlist_*() functions are not thread safe.
*
* @param list the doubly-linked list to operate on
* @param insert_point the insert point in the list: if NULL, insert at tail
* @param node the element to insert
*
* @return N/A
*/
static inline void sys_dlist_insert_before(sys_dlist_t *list,
sys_dnode_t *insert_point, sys_dnode_t *node)
{
if (!insert_point) {
sys_dlist_append(list, node);
} else {
node->prev = insert_point->prev;
node->next = insert_point;
insert_point->prev->next = node;
insert_point->prev = node;
}
}
/**
* @brief insert node at position
*
 * Insert a node in a location depending on an external condition. The cond()
* function checks if the node is to be inserted _before_ the current node
* against which it is checked.
* This and other sys_dlist_*() functions are not thread safe.
*
* @param list the doubly-linked list to operate on
* @param node the element to insert
* @param cond a function that determines if the current node is the correct
* insert point
* @param data parameter to cond()
*
* @return N/A
*/
static inline void sys_dlist_insert_at(sys_dlist_t *list, sys_dnode_t *node,
int (*cond)(sys_dnode_t *, void *), void *data)
{
if (sys_dlist_is_empty(list)) {
sys_dlist_append(list, node);
} else {
sys_dnode_t *pos = sys_dlist_peek_head(list);
while (pos && !cond(pos, data)) {
pos = sys_dlist_peek_next(list, pos);
}
sys_dlist_insert_before(list, pos, node);
}
}
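/*
 * Illustrative usage sketch (not part of the original header): keeping the
 * list sorted by value with a hypothetical comparator; cond() returns true
 * when the new node must be inserted before the node it is checked against.
 *
 *     static int goes_before(sys_dnode_t *pos, void *data)
 *     {
 *         struct item *cur = CONTAINER_OF(pos, struct item, node);
 *         struct item *ins = data;
 *
 *         return ins->value < cur->value;
 *     }
 *
 *     sys_dlist_insert_at(&list, &new_item->node, goes_before, new_item);
 */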
/**
* @brief remove a specific node from a list
*
* The list is implicit from the node. The node must be part of a list.
* This and other sys_dlist_*() functions are not thread safe.
*
* @param node the node to remove
*
* @return N/A
*/
static inline void sys_dlist_remove(sys_dnode_t *node)
{
node->prev->next = node->next;
node->next->prev = node->prev;
}
/**
* @brief get the first node in a list
*
* This and other sys_dlist_*() functions are not thread safe.
*
* @param list the doubly-linked list to operate on
*
* @return the first node in the list, NULL if list is empty
*/
static inline sys_dnode_t *sys_dlist_get(sys_dlist_t *list)
{
sys_dnode_t *node;
if (sys_dlist_is_empty(list)) {
return NULL;
}
node = list->head;
sys_dlist_remove(node);
return node;
}
#ifdef __cplusplus
}
#endif
#endif /* _misc_dlist__h_ */

View file

@ -0,0 +1,29 @@
/* printk.h - low-level debug output */
/*
* Copyright (c) 2010-2012, 2014 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef _PRINTK_H_
#define _PRINTK_H_
#include <stddef.h>
#include <stdarg.h>
#include <stdio.h>
#include <zephyr.h>
#ifdef __cplusplus
extern "C" {
#endif
#define snprintk snprintf
#define printk printf
#ifdef __cplusplus
}
#endif
#endif

View file

@ -0,0 +1,468 @@
/*
* Copyright (c) 2016 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
*
* @brief Single-linked list implementation
*
* Single-linked list implementation using inline macros/functions.
* This API is not thread safe, and thus if a list is used across threads,
* calls to functions must be protected with synchronization primitives.
*/
#ifndef __SLIST_H__
#define __SLIST_H__
#include <stddef.h>
#include <stdbool.h>
#ifdef __cplusplus
extern "C" {
#endif
struct _snode {
struct _snode *next;
};
typedef struct _snode sys_snode_t;
struct _slist {
sys_snode_t *head;
sys_snode_t *tail;
};
typedef struct _slist sys_slist_t;
/**
* @brief Provide the primitive to iterate on a list
* Note: the loop is unsafe and thus __sn should not be removed
*
* User _MUST_ add the loop statement curly braces enclosing its own code:
*
* SYS_SLIST_FOR_EACH_NODE(l, n) {
* <user code>
* }
*
* This and other SYS_SLIST_*() macros are not thread safe.
*
* @param __sl A pointer on a sys_slist_t to iterate on
* @param __sn A sys_snode_t pointer to peek each node of the list
*/
#define SYS_SLIST_FOR_EACH_NODE(__sl, __sn) \
for (__sn = sys_slist_peek_head(__sl); __sn; \
__sn = sys_slist_peek_next(__sn))
/**
* @brief Provide the primitive to iterate on a list, from a node in the list
* Note: the loop is unsafe and thus __sn should not be removed
*
* User _MUST_ add the loop statement curly braces enclosing its own code:
*
* SYS_SLIST_ITERATE_FROM_NODE(l, n) {
* <user code>
* }
*
 * Like SYS_SLIST_FOR_EACH_NODE(), but __sn already contains a node in the
 * list from which to start searching for the next entry. If NULL, it starts
 * from the head.
*
* This and other SYS_SLIST_*() macros are not thread safe.
*
* @param __sl A pointer on a sys_slist_t to iterate on
 * @param __sn A sys_snode_t pointer to peek each node of the list;
 *             it contains the starting node, or NULL to start from the head
*/
#define SYS_SLIST_ITERATE_FROM_NODE(__sl, __sn) \
for (__sn = __sn ? sys_slist_peek_next_no_check(__sn) \
: sys_slist_peek_head(__sl); \
__sn; \
__sn = sys_slist_peek_next(__sn))
/**
* @brief Provide the primitive to safely iterate on a list
* Note: __sn can be removed, it will not break the loop.
*
* User _MUST_ add the loop statement curly braces enclosing its own code:
*
* SYS_SLIST_FOR_EACH_NODE_SAFE(l, n, s) {
* <user code>
* }
*
* This and other SYS_SLIST_*() macros are not thread safe.
*
* @param __sl A pointer on a sys_slist_t to iterate on
* @param __sn A sys_snode_t pointer to peek each node of the list
* @param __sns A sys_snode_t pointer for the loop to run safely
*/
#define SYS_SLIST_FOR_EACH_NODE_SAFE(__sl, __sn, __sns) \
for (__sn = sys_slist_peek_head(__sl), \
__sns = sys_slist_peek_next(__sn); \
__sn; __sn = __sns, \
__sns = sys_slist_peek_next(__sn))
/*
* @brief Provide the primitive to resolve the container of a list node
* Note: it is safe to use with NULL pointer nodes
*
 * @param __ln A pointer on a sys_snode_t to get its container
 * @param __cn Container struct type pointer
 * @param __n The field name of sys_snode_t within the container struct
*/
#define SYS_SLIST_CONTAINER(__ln, __cn, __n) \
((__ln) ? CONTAINER_OF((__ln), __typeof__(*(__cn)), __n) : NULL)
/*
* @brief Provide the primitive to peek container of the list head
*
* @param __sl A pointer on a sys_slist_t to peek
* @param __cn Container struct type pointer
 * @param __n The field name of sys_snode_t within the container struct
*/
#define SYS_SLIST_PEEK_HEAD_CONTAINER(__sl, __cn, __n) \
SYS_SLIST_CONTAINER(sys_slist_peek_head(__sl), __cn, __n)
/*
* @brief Provide the primitive to peek container of the list tail
*
* @param __sl A pointer on a sys_slist_t to peek
* @param __cn Container struct type pointer
 * @param __n The field name of sys_snode_t within the container struct
*/
#define SYS_SLIST_PEEK_TAIL_CONTAINER(__sl, __cn, __n) \
SYS_SLIST_CONTAINER(sys_slist_peek_tail(__sl), __cn, __n)
/*
* @brief Provide the primitive to peek the next container
*
* @param __cn Container struct type pointer
 * @param __n The field name of sys_snode_t within the container struct
*/
#define SYS_SLIST_PEEK_NEXT_CONTAINER(__cn, __n) \
((__cn) ? SYS_SLIST_CONTAINER(sys_slist_peek_next(&((__cn)->__n)), \
__cn, __n) : NULL)
/**
* @brief Provide the primitive to iterate on a list under a container
* Note: the loop is unsafe and thus __cn should not be detached
*
* User _MUST_ add the loop statement curly braces enclosing its own code:
*
* SYS_SLIST_FOR_EACH_CONTAINER(l, c, n) {
* <user code>
* }
*
* @param __sl A pointer on a sys_slist_t to iterate on
* @param __cn A pointer to peek each entry of the list
 * @param __n The field name of sys_snode_t within the container struct
*/
#define SYS_SLIST_FOR_EACH_CONTAINER(__sl, __cn, __n) \
for (__cn = SYS_SLIST_PEEK_HEAD_CONTAINER(__sl, __cn, __n); __cn; \
__cn = SYS_SLIST_PEEK_NEXT_CONTAINER(__cn, __n))
/**
* @brief Provide the primitive to safely iterate on a list under a container
* Note: __cn can be detached, it will not break the loop.
*
* User _MUST_ add the loop statement curly braces enclosing its own code:
*
 * SYS_SLIST_FOR_EACH_CONTAINER_SAFE(l, c, cn, n) {
* <user code>
* }
*
* @param __sl A pointer on a sys_slist_t to iterate on
* @param __cn A pointer to peek each entry of the list
* @param __cns A pointer for the loop to run safely
 * @param __n The field name of sys_snode_t within the container struct
*/
#define SYS_SLIST_FOR_EACH_CONTAINER_SAFE(__sl, __cn, __cns, __n) \
for (__cn = SYS_SLIST_PEEK_HEAD_CONTAINER(__sl, __cn, __n), \
__cns = SYS_SLIST_PEEK_NEXT_CONTAINER(__cn, __n); __cn; \
__cn = __cns, __cns = SYS_SLIST_PEEK_NEXT_CONTAINER(__cn, __n))
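/*
 * Illustrative usage sketch (not part of the original header): a hypothetical
 * container with an embedded sys_snode_t, visited in insertion order.
 *
 *     struct entry {
 *         sys_snode_t node;
 *         int value;
 *     };
 *
 *     sys_slist_t list;
 *     struct entry *e;
 *
 *     sys_slist_init(&list);
 *     // ... append entries ...
 *     SYS_SLIST_FOR_EACH_CONTAINER(&list, e, node) {
 *         // e->value is visited in insertion order
 *     }
 */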
/**
* @brief Initialize a list
*
* @param list A pointer on the list to initialize
*/
static inline void sys_slist_init(sys_slist_t *list)
{
list->head = NULL;
list->tail = NULL;
}
#define SYS_SLIST_STATIC_INIT(ptr_to_list) {NULL, NULL}
/**
* @brief Test if the given list is empty
*
* @param list A pointer on the list to test
*
* @return a boolean, true if it's empty, false otherwise
*/
static inline bool sys_slist_is_empty(sys_slist_t *list)
{
return (!list->head);
}
/**
* @brief Peek the first node from the list
*
 * @param list A pointer on the list to peek the first node from
*
* @return A pointer on the first node of the list (or NULL if none)
*/
static inline sys_snode_t *sys_slist_peek_head(sys_slist_t *list)
{
return list->head;
}
/**
* @brief Peek the last node from the list
*
 * @param list A pointer on the list to peek the last node from
*
* @return A pointer on the last node of the list (or NULL if none)
*/
static inline sys_snode_t *sys_slist_peek_tail(sys_slist_t *list)
{
return list->tail;
}
/**
* @brief Peek the next node from current node, node is not NULL
*
 * Faster than sys_slist_peek_next() if node is known not to be NULL.
*
* @param node A pointer on the node where to peek the next node
*
* @return a pointer on the next node (or NULL if none)
*/
static inline sys_snode_t *sys_slist_peek_next_no_check(sys_snode_t *node)
{
return node->next;
}
/**
* @brief Peek the next node from current node
*
* @param node A pointer on the node where to peek the next node
*
* @return a pointer on the next node (or NULL if none)
*/
static inline sys_snode_t *sys_slist_peek_next(sys_snode_t *node)
{
return node ? sys_slist_peek_next_no_check(node) : NULL;
}
/**
* @brief Prepend a node to the given list
*
* This and other sys_slist_*() functions are not thread safe.
*
* @param list A pointer on the list to affect
* @param node A pointer on the node to prepend
*/
static inline void sys_slist_prepend(sys_slist_t *list,
sys_snode_t *node)
{
node->next = list->head;
list->head = node;
if (!list->tail) {
list->tail = list->head;
}
}
/**
* @brief Append a node to the given list
*
* This and other sys_slist_*() functions are not thread safe.
*
* @param list A pointer on the list to affect
* @param node A pointer on the node to append
*/
static inline void sys_slist_append(sys_slist_t *list,
sys_snode_t *node)
{
node->next = NULL;
if (!list->tail) {
list->tail = node;
list->head = node;
} else {
list->tail->next = node;
list->tail = node;
}
}
/**
* @brief Append a list to the given list
*
 * Append a singly-linked, NULL-terminated list to @a list. Each node of the
 * appended list must hold the pointer to its next node as its first element.
* This and other sys_slist_*() functions are not thread safe.
*
* @param list A pointer on the list to affect
* @param head A pointer to the first element of the list to append
* @param tail A pointer to the last element of the list to append
*/
static inline void sys_slist_append_list(sys_slist_t *list,
void *head, void *tail)
{
if (!list->tail) {
list->head = (sys_snode_t *)head;
list->tail = (sys_snode_t *)tail;
} else {
list->tail->next = (sys_snode_t *)head;
list->tail = (sys_snode_t *)tail;
}
}
/**
* @brief merge two slists, appending the second one to the first
*
* When the operation is completed, the appending list is empty.
* This and other sys_slist_*() functions are not thread safe.
*
* @param list A pointer on the list to affect
* @param list_to_append A pointer to the list to append.
*/
static inline void sys_slist_merge_slist(sys_slist_t *list,
sys_slist_t *list_to_append)
{
sys_slist_append_list(list, list_to_append->head,
list_to_append->tail);
sys_slist_init(list_to_append);
}
/**
* @brief Insert a node to the given list
*
* This and other sys_slist_*() functions are not thread safe.
*
* @param list A pointer on the list to affect
* @param prev A pointer on the previous node
* @param node A pointer on the node to insert
*/
static inline void sys_slist_insert(sys_slist_t *list,
sys_snode_t *prev,
sys_snode_t *node)
{
if (!prev) {
sys_slist_prepend(list, node);
} else if (!prev->next) {
sys_slist_append(list, node);
} else {
node->next = prev->next;
prev->next = node;
}
}
/**
* @brief Fetch and remove the first node of the given list
*
* List must be known to be non-empty.
* This and other sys_slist_*() functions are not thread safe.
*
* @param list A pointer on the list to affect
*
* @return A pointer to the first node of the list
*/
static inline sys_snode_t *sys_slist_get_not_empty(sys_slist_t *list)
{
sys_snode_t *node = list->head;
list->head = node->next;
if (list->tail == node) {
list->tail = list->head;
}
return node;
}
/**
* @brief Fetch and remove the first node of the given list
*
* This and other sys_slist_*() functions are not thread safe.
*
* @param list A pointer on the list to affect
*
* @return A pointer to the first node of the list (or NULL if empty)
*/
static inline sys_snode_t *sys_slist_get(sys_slist_t *list)
{
return sys_slist_is_empty(list) ? NULL : sys_slist_get_not_empty(list);
}
/**
* @brief Remove a node
*
* This and other sys_slist_*() functions are not thread safe.
*
* @param list A pointer on the list to affect
* @param prev_node A pointer on the previous node
* (can be NULL, which means the node is the list's head)
* @param node A pointer on the node to remove
*/
static inline void sys_slist_remove(sys_slist_t *list,
sys_snode_t *prev_node,
sys_snode_t *node)
{
if (!prev_node) {
list->head = node->next;
/* Was node also the tail? */
if (list->tail == node) {
list->tail = list->head;
}
} else {
prev_node->next = node->next;
/* Was node the tail? */
if (list->tail == node) {
list->tail = prev_node;
}
}
node->next = NULL;
}
/**
* @brief Find and remove a node from a list
*
* This and other sys_slist_*() functions are not thread safe.
*
* @param list A pointer on the list to affect
* @param node A pointer on the node to remove from the list
*
* @return true if node was removed
*/
static inline bool sys_slist_find_and_remove(sys_slist_t *list,
sys_snode_t *node)
{
sys_snode_t *prev = NULL;
sys_snode_t *test;
SYS_SLIST_FOR_EACH_NODE(list, test) {
if (test == node) {
sys_slist_remove(list, prev, node);
return true;
}
prev = test;
}
return false;
}
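/*
 * Illustrative usage sketch (not part of the original header): using the list
 * as a simple FIFO of nodes.
 *
 *     sys_slist_t fifo;
 *     sys_snode_t a, b;
 *     sys_snode_t *out;
 *
 *     sys_slist_init(&fifo);
 *     sys_slist_append(&fifo, &a);
 *     sys_slist_append(&fifo, &b);
 *     out = sys_slist_get(&fifo);   // returns &a; &b stays queued
 */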
#ifdef __cplusplus
}
#endif
#endif /* __SLIST_H__ */

View file

@ -0,0 +1,87 @@
/**
* @file stack.h
* Stack usage analysis helpers
*/
/*
* Copyright (c) 2015 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef _MISC_STACK_H_
#define _MISC_STACK_H_
#include <misc/printk.h>
#if defined(CONFIG_INIT_STACKS)
static inline size_t stack_unused_space_get(const char *stack, size_t size)
{
size_t unused = 0;
int i;
#ifdef CONFIG_STACK_SENTINEL
/* The first 4 bytes of the stack buffer are reserved for the sentinel
 * value, so they won't be 0xAAAAAAAA for thread stacks.
*/
stack += 4;
#endif
/* TODO: Currently all supported platforms have downward stack growth and
 * there is no Kconfig option to configure it, so the "else" branch below is
 * always built. When support for a platform with upward (or configurable)
 * stack growth is added, this check should be revisited to confirm that the
 * correct Kconfig option is used.
 */
#if defined(STACK_GROWS_UP)
for (i = size - 1; i >= 0; i--) {
if ((unsigned char)stack[i] == 0xaa) {
unused++;
} else {
break;
}
}
#else
for (i = 0; i < size; i++) {
if ((unsigned char)stack[i] == 0xaa) {
unused++;
} else {
break;
}
}
#endif
return unused;
}
#else
static inline size_t stack_unused_space_get(const char *stack, size_t size)
{
return 0;
}
#endif
#if defined(CONFIG_INIT_STACKS) && defined(CONFIG_PRINTK)
static inline void stack_analyze(const char *name, const char *stack,
unsigned int size)
{
unsigned int pcnt, unused = 0;
unused = stack_unused_space_get(stack, size);
/* Calculate the real size reserved for the stack */
pcnt = ((size - unused) * 100) / size;
printk("%s (real size %u):\tunused %u\tusage %u / %u (%u %%)\n", name,
size, unused, size - unused, size, pcnt);
}
#else
static inline void stack_analyze(const char *name, const char *stack,
unsigned int size)
{
}
#endif
#define STACK_ANALYZE(name, sym) \
stack_analyze(name, K_THREAD_STACK_BUFFER(sym), \
K_THREAD_STACK_SIZEOF(sym))
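/*
 * Illustrative usage sketch (not part of the original header), assuming
 * CONFIG_INIT_STACKS and CONFIG_PRINTK are enabled and that the stack was
 * defined with the K_THREAD_STACK_DEFINE() macro, so the buffer/size helpers
 * used by STACK_ANALYZE() apply.
 *
 *     K_THREAD_STACK_DEFINE(rx_stack, 1024);
 *     ...
 *     STACK_ANALYZE("rx thread", rx_stack);
 */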
#endif /* _MISC_STACK_H_ */

View file

@ -0,0 +1,305 @@
/*
* Copyright (c) 2011-2014, Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Misc utilities
*
* Misc utilities usable by the kernel and application code.
*/
#ifndef _UTIL__H_
#define _UTIL__H_
#ifdef __cplusplus
extern "C" {
#endif
#ifndef _ASMLANGUAGE
#include <zephyr/types.h>
/* Helpers to pass an int as a pointer or vice-versa.
 * These are available for 32-bit architectures:
*/
#define POINTER_TO_UINT(x) ((u32_t) (x))
#define UINT_TO_POINTER(x) ((void *) (x))
#define POINTER_TO_INT(x) ((s32_t) (x))
#define INT_TO_POINTER(x) ((void *) (x))
/* Evaluates to 0 if cond is true-ish; compile error otherwise */
#define ZERO_OR_COMPILE_ERROR(cond) ((int) sizeof(char[1 - 2 * !(cond)]) - 1)
/* Evaluates to 0 if array is an array; compile error if not array (e.g.
* pointer)
*/
#define IS_ARRAY(array) \
ZERO_OR_COMPILE_ERROR( \
!__builtin_types_compatible_p(__typeof__(array), \
__typeof__(&(array)[0])))
/* Evaluates to number of elements in an array; compile error if not
* an array (e.g. pointer)
*/
#define ARRAY_SIZE(array) \
((unsigned long) (IS_ARRAY(array) + \
(sizeof(array) / sizeof((array)[0]))))
/* Evaluates to 1 if ptr is part of array, 0 otherwise; compile error if
* "array" argument is not an array (e.g. "ptr" and "array" mixed up)
*/
#define PART_OF_ARRAY(array, ptr) \
((ptr) && ((ptr) >= &array[0] && (ptr) < &array[ARRAY_SIZE(array)]))
#define CONTAINER_OF(ptr, type, field) \
((type *)(((char *)(ptr)) - offsetof(type, field)))
/* round "x" up/down to next multiple of "align" (which must be a power of 2) */
#define ROUND_UP(x, align) \
(((unsigned long)(x) + ((unsigned long)align - 1)) & \
~((unsigned long)align - 1))
#define ROUND_DOWN(x, align) ((unsigned long)(x) & ~((unsigned long)align - 1))
#define ceiling_fraction(numerator, divider) \
(((numerator) + ((divider) - 1)) / (divider))
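/*
 * Illustrative results (not part of the original header) for the rounding
 * helpers on small constants:
 *
 *     ROUND_UP(10, 8)             // 16
 *     ROUND_DOWN(10, 8)           // 8
 *     ceiling_fraction(10, 8)     // 2, i.e. (10 + 7) / 8
 */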
#ifdef INLINED
#define INLINE inline
#else
#define INLINE
#endif
#ifndef max
#define max(a, b) (((a) > (b)) ? (a) : (b))
#endif
#ifndef min
#define min(a, b) (((a) < (b)) ? (a) : (b))
#endif
static inline int is_power_of_two(unsigned int x)
{
return (x != 0) && !(x & (x - 1));
}
static inline s64_t arithmetic_shift_right(s64_t value, u8_t shift)
{
s64_t sign_ext;
if (shift == 0) {
return value;
}
/* extract sign bit */
sign_ext = (value >> 63) & 1;
/* make all bits of sign_ext be the same as the value's sign bit */
sign_ext = -sign_ext;
/* shift value and fill opened bit positions with sign bit */
return (value >> shift) | (sign_ext << (64 - shift));
}
#endif /* !_ASMLANGUAGE */
/* KB, MB, GB */
#define KB(x) ((x) << 10)
#define MB(x) (KB(x) << 10)
#define GB(x) (MB(x) << 10)
/* KHZ, MHZ */
#define KHZ(x) ((x) * 1000)
#define MHZ(x) (KHZ(x) * 1000)
#define BIT_MASK(n) (BIT(n) - 1)
/**
* @brief Check for macro definition in compiler-visible expressions
*
* This trick was pioneered in Linux as the config_enabled() macro.
* The madness has the effect of taking a macro value that may be
* defined to "1" (e.g. CONFIG_MYFEATURE), or may not be defined at
* all and turning it into a literal expression that can be used at
* "runtime". That is, it works similarly to
* "defined(CONFIG_MYFEATURE)" does except that it is an expansion
* that can exist in a standard expression and be seen by the compiler
* and optimizer. Thus much ifdef usage can be replaced with cleaner
* expressions like:
*
* if (IS_ENABLED(CONFIG_MYFEATURE))
* myfeature_enable();
*
* INTERNAL
* First pass just to expand any existing macros, we need the macro
* value to be e.g. a literal "1" at expansion time in the next macro,
* not "(1)", etc... Standard recursive expansion does not work.
*/
#define IS_ENABLED(config_macro) _IS_ENABLED1(config_macro)
/* Now stick on a "_XXXX" prefix, it will now be "_XXXX1" if config_macro
* is "1", or just "_XXXX" if it's undefined.
* ENABLED: _IS_ENABLED2(_XXXX1)
* DISABLED _IS_ENABLED2(_XXXX)
*/
#define _IS_ENABLED1(config_macro) _IS_ENABLED2(_XXXX##config_macro)
/* Here's the core trick, we map "_XXXX1" to "_YYYY," (i.e. a string
* with a trailing comma), so it has the effect of making this a
* two-argument tuple to the preprocessor only in the case where the
* value is defined to "1"
* ENABLED: _YYYY, <--- note comma!
* DISABLED: _XXXX
*/
#define _XXXX1 _YYYY,
/* Then we append an extra argument to fool the gcc preprocessor into
* accepting it as a varargs macro.
* arg1 arg2 arg3
* ENABLED: _IS_ENABLED3(_YYYY, 1, 0)
* DISABLED _IS_ENABLED3(_XXXX 1, 0)
*/
#define _IS_ENABLED2(one_or_two_args) _IS_ENABLED3(one_or_two_args 1, 0)
/* And our second argument is thus now cooked to be 1 in the case
* where the value is defined to 1, and 0 if not:
*/
#define _IS_ENABLED3(ignore_this, val, ...) val
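/*
 * Illustrative expansion trace (not part of the original header), assuming
 * CONFIG_MYFEATURE is defined to 1:
 *
 *     IS_ENABLED(CONFIG_MYFEATURE)
 *       -> _IS_ENABLED1(1)
 *       -> _IS_ENABLED2(_XXXX1)
 *       -> _IS_ENABLED3(_YYYY, 1, 0)   // _XXXX1 expands to "_YYYY,"
 *       -> 1
 *
 * If CONFIG_MYFEATURE is undefined, the concatenation yields the single
 * token _XXXXCONFIG_MYFEATURE, so the trailing ", 0" supplies val and the
 * whole expression resolves to 0.
 */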
/**
* Macros for doing code-generation with the preprocessor.
*
 * Generally it is better to generate code with the preprocessor than
 * to copy-paste code or to generate it with the build system,
 * Python scripts, etc.
*
* http://stackoverflow.com/a/12540675
*/
#define UTIL_EMPTY(...)
#define UTIL_DEFER(...) __VA_ARGS__ UTIL_EMPTY()
#define UTIL_OBSTRUCT(...) __VA_ARGS__ UTIL_DEFER(UTIL_EMPTY)()
#define UTIL_EXPAND(...) __VA_ARGS__
#define UTIL_EVAL(...) UTIL_EVAL1(UTIL_EVAL1(UTIL_EVAL1(__VA_ARGS__)))
#define UTIL_EVAL1(...) UTIL_EVAL2(UTIL_EVAL2(UTIL_EVAL2(__VA_ARGS__)))
#define UTIL_EVAL2(...) UTIL_EVAL3(UTIL_EVAL3(UTIL_EVAL3(__VA_ARGS__)))
#define UTIL_EVAL3(...) UTIL_EVAL4(UTIL_EVAL4(UTIL_EVAL4(__VA_ARGS__)))
#define UTIL_EVAL4(...) UTIL_EVAL5(UTIL_EVAL5(UTIL_EVAL5(__VA_ARGS__)))
#define UTIL_EVAL5(...) __VA_ARGS__
#define UTIL_CAT(a, ...) UTIL_PRIMITIVE_CAT(a, __VA_ARGS__)
#define UTIL_PRIMITIVE_CAT(a, ...) a##__VA_ARGS__
#define UTIL_INC(x) UTIL_PRIMITIVE_CAT(UTIL_INC_, x)
#define UTIL_INC_0 1
#define UTIL_INC_1 2
#define UTIL_INC_2 3
#define UTIL_INC_3 4
#define UTIL_INC_4 5
#define UTIL_INC_5 6
#define UTIL_INC_6 7
#define UTIL_INC_7 8
#define UTIL_INC_8 9
#define UTIL_INC_9 10
#define UTIL_INC_10 11
#define UTIL_INC_11 12
#define UTIL_INC_12 13
#define UTIL_INC_13 14
#define UTIL_INC_14 15
#define UTIL_INC_15 16
#define UTIL_INC_16 17
#define UTIL_INC_17 18
#define UTIL_INC_18 19
#define UTIL_INC_19 19
#define UTIL_DEC(x) UTIL_PRIMITIVE_CAT(UTIL_DEC_, x)
#define UTIL_DEC_0 0
#define UTIL_DEC_1 0
#define UTIL_DEC_2 1
#define UTIL_DEC_3 2
#define UTIL_DEC_4 3
#define UTIL_DEC_5 4
#define UTIL_DEC_6 5
#define UTIL_DEC_7 6
#define UTIL_DEC_8 7
#define UTIL_DEC_9 8
#define UTIL_DEC_10 9
#define UTIL_DEC_11 10
#define UTIL_DEC_12 11
#define UTIL_DEC_13 12
#define UTIL_DEC_14 13
#define UTIL_DEC_15 14
#define UTIL_DEC_16 15
#define UTIL_DEC_17 16
#define UTIL_DEC_18 17
#define UTIL_DEC_19 18
#define UTIL_CHECK_N(x, n, ...) n
#define UTIL_CHECK(...) UTIL_CHECK_N(__VA_ARGS__, 0,)
#define UTIL_NOT(x) UTIL_CHECK(UTIL_PRIMITIVE_CAT(UTIL_NOT_, x))
#define UTIL_NOT_0 ~, 1,
#define UTIL_COMPL(b) UTIL_PRIMITIVE_CAT(UTIL_COMPL_, b)
#define UTIL_COMPL_0 1
#define UTIL_COMPL_1 0
#define UTIL_BOOL(x) UTIL_COMPL(UTIL_NOT(x))
#define UTIL_IIF(c) UTIL_PRIMITIVE_CAT(UTIL_IIF_, c)
#define UTIL_IIF_0(t, ...) __VA_ARGS__
#define UTIL_IIF_1(t, ...) t
#define UTIL_IF(c) UTIL_IIF(UTIL_BOOL(c))
#define UTIL_EAT(...)
#define UTIL_EXPAND(...) __VA_ARGS__
#define UTIL_WHEN(c) UTIL_IF(c)(UTIL_EXPAND, UTIL_EAT)
#define UTIL_REPEAT(count, macro, ...) \
UTIL_WHEN(count) \
( \
UTIL_OBSTRUCT(UTIL_REPEAT_INDIRECT) () \
( \
UTIL_DEC(count), macro, __VA_ARGS__ \
) \
UTIL_OBSTRUCT(macro) \
( \
UTIL_DEC(count), __VA_ARGS__ \
) \
)
#define UTIL_REPEAT_INDIRECT() UTIL_REPEAT
/**
* Generates a sequence of code.
 * Useful for generating code like:
*
* NRF_PWM0, NRF_PWM1, NRF_PWM2,
*
* @arg LEN: The length of the sequence. Must be defined and less than
* 20.
*
* @arg F(i, F_ARG): A macro function that accepts two arguments.
* F is called repeatedly, the first argument
* is the index in the sequence, and the second argument is the third
* argument given to UTIL_LISTIFY.
*
* Example:
*
* \#define FOO(i, _) NRF_PWM ## i ,
* { UTIL_LISTIFY(PWM_COUNT, FOO) }
* // The above two lines will generate the below:
* { NRF_PWM0 , NRF_PWM1 , }
*
* @note Calling UTIL_LISTIFY with undefined arguments has undefined
* behaviour.
*/
#define UTIL_LISTIFY(LEN, F, F_ARG) UTIL_EVAL(UTIL_REPEAT(LEN, F, F_ARG))
#ifdef __cplusplus
}
#endif
#endif /* _UTIL__H_ */

File diff suppressed because it is too large

View file

@ -0,0 +1,10 @@
/*
* Copyright (c) 2017 Linaro Limited
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef _SOC_H_
#define _SOC_H_
#endif /* _SOC_H_ */

View file

@ -0,0 +1,84 @@
/*
* Copyright (c) 2014, Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Software-managed ISR table
*
* Data types for a software-managed ISR table, with a parameter per-ISR.
*/
#ifndef _SW_ISR_TABLE__H_
#define _SW_ISR_TABLE__H_
#ifdef __cplusplus
extern "C" {
#endif
#if !defined(_ASMLANGUAGE)
#include <zephyr/types.h>
#include <toolchain.h>
/*
* Note the order: arg first, then ISR. This allows a table entry to be
* loaded arg -> r0, isr -> r3 in _isr_wrapper with one ldmia instruction,
* on ARM Cortex-M (Thumb2).
*/
struct _isr_table_entry {
void *arg;
void (*isr)(void *);
};
/* The software ISR table itself, an array of these structures indexed by the
* irq line
*/
extern struct _isr_table_entry _sw_isr_table[];
/*
* Data structure created in a special binary .intlist section for each
* configured interrupt. gen_irq_tables.py pulls this out of the binary and
* uses it to create the IRQ vector table and the _sw_isr_table.
*
* More discussion in include/linker/intlist.ld
*/
struct _isr_list {
/** IRQ line number */
s32_t irq;
/** Flags for this IRQ, see ISR_FLAG_* definitions */
s32_t flags;
/** ISR to call */
void *func;
/** Parameter for non-direct IRQs */
void *param;
};
/** This interrupt gets put directly in the vector table */
#define ISR_FLAG_DIRECT (1 << 0)
#define _MK_ISR_NAME(x, y) __isr_ ## x ## _irq_ ## y
/* Create an instance of struct _isr_list which gets put in the .intList
* section. This gets consumed by gen_isr_tables.py which creates the vector
* and/or SW ISR tables.
*/
extern void os_hwi_set_handler(uint32_t irq, void *func, uint32_t param);
#define _ISR_DECLARE(irq, flags, func, param) \
static struct _isr_list _GENERIC_SECTION(.intList) __used \
_MK_ISR_NAME(func, __COUNTER__) = \
{irq, flags, &func, (void *)param}; \
os_hwi_set_handler(irq, func, param)
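/*
 * Illustrative usage sketch (not part of the original header): registering a
 * hypothetical handler for IRQ line 5 with a device argument; in this port
 * the macro also installs the handler via os_hwi_set_handler().
 *
 *     void my_isr(void *arg);
 *
 *     _ISR_DECLARE(5, 0, my_isr, &my_dev);
 */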
#define IRQ_TABLE_SIZE (CONFIG_NUM_IRQS - CONFIG_GEN_IRQ_START_VECTOR)
#endif /* _ASMLANGUAGE */
#ifdef __cplusplus
}
#endif
#endif /* _SW_ISR_TABLE__H_ */

View file

@ -0,0 +1,26 @@
/*
* Copyright (c) 2010-2014, Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Macros to abstract toolchain specific capabilities
*
* This file contains various macros to abstract compiler capabilities that
* utilize toolchain specific attributes and/or pragmas.
*/
#ifndef _TOOLCHAIN_H
#define _TOOLCHAIN_H
#if defined(__XCC__)
#include <toolchain/xcc.h>
#elif defined(__GNUC__) || (defined(_LINKER) && defined(__GCC_LINKER_CMD__))
#include <toolchain/gcc.h>
#else
#include <toolchain/other.h>
#endif
#endif /* _TOOLCHAIN_H */

View file

@ -0,0 +1,131 @@
/*
* Copyright (c) 2010-2014 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Common toolchain abstraction
*
* Macros to abstract compiler capabilities (common to all toolchains).
*/
/* Abstract use of extern keyword for compatibility between C and C++ */
#ifdef __cplusplus
#define EXTERN_C extern "C"
#else
#define EXTERN_C extern
#endif
/* Use TASK_ENTRY_CPP to tag task entry points defined in C++ files. */
#ifdef __cplusplus
#define TASK_ENTRY_CPP extern "C"
#endif
/*
* Generate a reference to an external symbol.
* The reference indicates to the linker that the symbol is required
* by the module containing the reference and should be included
* in the image if the module is in the image.
*
* The assembler directive ".set" is used to define a local symbol.
* No memory is allocated, and the local symbol does not appear in
* the symbol table.
*/
#ifdef _ASMLANGUAGE
#define REQUIRES(sym) .set sym ## _Requires, sym
#else
#define REQUIRES(sym) __asm__ (".set " # sym "_Requires, " # sym "\n\t");
#endif
#ifdef _ASMLANGUAGE
#define SECTION .section
#endif
/*
* If the project is being built for speed (i.e. not for minimum size) then
* align functions and branches in executable sections to improve performance.
*/
#ifdef _ASMLANGUAGE
#ifdef CONFIG_X86
#ifdef PERF_OPT
#define PERFOPT_ALIGN .balign 16
#else
#define PERFOPT_ALIGN .balign 1
#endif
#elif defined(CONFIG_ARM)
#ifdef CONFIG_ISA_THUMB
#define PERFOPT_ALIGN .balign 2
#else
#define PERFOPT_ALIGN .balign 4
#endif
#elif defined(CONFIG_ARC)
#define PERFOPT_ALIGN .balign 4
#elif defined(CONFIG_NIOS2) || defined(CONFIG_RISCV32) || \
defined(CONFIG_XTENSA)
#define PERFOPT_ALIGN .balign 4
#else
#error Architecture unsupported
#endif
#define GC_SECTION(sym) SECTION .text.##sym, "ax"
#endif /* _ASMLANGUAGE */
/* force inlining a function */
#if !defined(_ASMLANGUAGE)
#define ALWAYS_INLINE inline __attribute__((always_inline))
#endif
#define _STRINGIFY(x) #x
#define STRINGIFY(s) _STRINGIFY(s)
/* Indicate that an array will be used for stack space. */
#if !defined(_ASMLANGUAGE)
/* don't use this anymore, use K_DECLARE_STACK instead. Remove for 1.11 */
#define __stack __aligned(STACK_ALIGN) __DEPRECATED_MACRO
#endif
/* concatenate the values of the arguments into one */
#define _DO_CONCAT(x, y) x ## y
#define _CONCAT(x, y) _DO_CONCAT(x, y)
/* Additionally used as a sentinel by gen_syscalls.py to identify what
* functions are system calls
*
 * Note that POSIX unit tests still don't generate the system call stubs, so
 * until https://github.com/zephyrproject-rtos/zephyr/issues/5006 is
 * fixed (possibly via #4174), we introduce this hack -- which prevents us
 * from testing system calls in POSIX unit testing (currently not used).
*/
#ifndef ZTEST_UNITTEST
#define __syscall static inline
#else
#define __syscall
#endif /* #ifndef ZTEST_UNITTEST */
#ifndef BUILD_ASSERT
/* compile-time assertion that makes the build fail */
#define BUILD_ASSERT(EXPR) typedef char __build_assert_failure[(EXPR) ? 1 : -1]
#endif
#ifndef BUILD_ASSERT_MSG
/* build assertion with message -- common implementation swallows message. */
#define BUILD_ASSERT_MSG(EXPR, MSG) BUILD_ASSERT(EXPR)
#endif
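/*
 * Illustrative usage sketch (not part of the original header): failing the
 * build when a type does not have the expected size; BUILD_ASSERT_MSG()
 * behaves the same but also carries a message on toolchains that support it.
 *
 *     BUILD_ASSERT(sizeof(u32_t) == 4);
 */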

View file

@ -0,0 +1,315 @@
/*
* Copyright (c) 2010-2014,2017 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief GCC toolchain abstraction
*
* Macros to abstract compiler capabilities for GCC toolchain.
*/
/*
* GCC 4.6 and higher have _Static_assert built in, and its output is
* easier to understand than the common BUILD_ASSERT macros.
*/
#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
#define BUILD_ASSERT(EXPR) _Static_assert(EXPR, "")
#define BUILD_ASSERT_MSG(EXPR, MSG) _Static_assert(EXPR, MSG)
#endif
#include <toolchain/common.h>
#define ALIAS_OF(of) __attribute__((alias(#of)))
#define FUNC_ALIAS(real_func, new_alias, return_type) \
return_type new_alias() ALIAS_OF(real_func)
#define CODE_UNREACHABLE __builtin_unreachable()
#define FUNC_NORETURN __attribute__((__noreturn__))
/* The GNU assembler for Cortex-M3 uses # for immediate values, not
* comments, so the @nobits# trick does not work.
*/
#if defined(CONFIG_ARM)
#define _NODATA_SECTION(segment) __attribute__((section(#segment)))
#else
#define _NODATA_SECTION(segment) \
__attribute__((section(#segment ",\"wa\",@nobits#")))
#endif
/* Unaligned access */
#define UNALIGNED_GET(p) \
__extension__ ({ \
struct __attribute__((__packed__)) { \
__typeof__(*(p)) __v; \
} *__p = (__typeof__(__p)) (p); \
__p->__v; \
})
#define UNALIGNED_PUT(v, p) \
do { \
struct __attribute__((__packed__)) { \
__typeof__(*p) __v; \
} *__p = (__typeof__(__p)) (p); \
__p->__v = (v); \
} while (0)
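/*
 * Illustrative usage sketch (not part of the original header): reading and
 * writing a 16-bit value at a possibly unaligned offset in a byte buffer.
 *
 *     u8_t buf[3];
 *     u16_t v;
 *
 *     v = UNALIGNED_GET((u16_t *)&buf[1]);
 *     UNALIGNED_PUT((u16_t)(v + 1), (u16_t *)&buf[1]);
 */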
/* Double indirection to ensure section names are expanded before
* stringification
*/
#define __GENERIC_SECTION(segment) __attribute__((section(STRINGIFY(segment))))
#define _GENERIC_SECTION(segment) __GENERIC_SECTION(segment)
#define ___in_section(a, b, c) \
__attribute__((section("." _STRINGIFY(a) \
"." _STRINGIFY(b) \
"." _STRINGIFY(c))))
#define __in_section(a, b, c) ___in_section(a, b, c)
#define __in_section_unique(seg) ___in_section(seg, __FILE__, __COUNTER__)
#ifdef CONFIG_APPLICATION_MEMORY
#define __kernel __in_section_unique(kernel)
#define __kernel_noinit __in_section_unique(kernel_noinit)
#define __kernel_bss __in_section_unique(kernel_bss)
#else
#define __kernel
#define __kernel_noinit __noinit
#define __kernel_bss
#endif
#ifndef __packed
#define __packed __attribute__((__packed__))
#endif
#ifndef __aligned
#define __aligned(x) __attribute__((__aligned__(x)))
#endif
#define __may_alias __attribute__((__may_alias__))
#ifndef __printf_like
#define __printf_like(f, a) __attribute__((format (printf, f, a)))
#endif
#define __used __attribute__((__used__))
#define __deprecated __attribute__((deprecated))
#define ARG_UNUSED(x) (void)(x)
#define popcount(x) __builtin_popcount(x)
#define __weak __attribute__((__weak__))
#define __unused __attribute__((__unused__))
/* Be *very* careful with this, you cannot filter it out with -Wno-deprecated,
* which has implications for -Werror
*/
#define __DEPRECATED_MACRO _Pragma("GCC warning \"Macro is deprecated\"")
/* These macros allow having ARM asm functions callable from thumb */
#if defined(_ASMLANGUAGE) && !defined(_LINKER)
#ifdef CONFIG_ARM
#if defined(CONFIG_ISA_THUMB)
#define FUNC_CODE() \
.code 16; \
.thumb_func;
#define FUNC_INSTR(a) \
BX pc; \
NOP; \
.code 32; \
A##a:
#elif defined(CONFIG_ISA_THUMB2)
#define FUNC_CODE() .thumb;
#define FUNC_INSTR(a)
#elif defined(CONFIG_ISA_ARM)
#define FUNC_CODE() .code 32;
#define FUNC_INSTR(a)
#else
#error unknown instruction set
#endif /* ISA */
#else
#define FUNC_CODE()
#define FUNC_INSTR(a)
#endif /* !CONFIG_ARM */
#endif /* _ASMLANGUAGE && !_LINKER */
/*
* These macros are used to declare assembly language symbols that need
* to be typed properly(func or data) to be visible to the OMF tool.
* So that the build tool could mark them as an entry point to be linked
* correctly. This is an elfism. Use #if 0 for a.out.
*/
#if defined(_ASMLANGUAGE) && !defined(_LINKER)
#if defined(CONFIG_ARM) || defined(CONFIG_NIOS2) || defined(CONFIG_RISCV32) \
|| defined(CONFIG_XTENSA)
#define GTEXT(sym) .global sym; .type sym, %function
#define GDATA(sym) .global sym; .type sym, %object
#define WTEXT(sym) .weak sym; .type sym, %function
#define WDATA(sym) .weak sym; .type sym, %object
#elif defined(CONFIG_ARC)
/*
* Need to use assembly macros because ';' is interpreted as the start of
* a single line comment in the ARC assembler.
*/
.macro glbl_text symbol
.globl \symbol
.type \symbol, %function
.endm
.macro glbl_data symbol
.globl \symbol
.type \symbol, %object
.endm
.macro weak_data symbol
.weak \symbol
.type \symbol, %object
.endm
#define GTEXT(sym) glbl_text sym
#define GDATA(sym) glbl_data sym
#define WDATA(sym) weak_data sym
#else /* !CONFIG_ARM && !CONFIG_ARC */
#define GTEXT(sym) .globl sym; .type sym, @function
#define GDATA(sym) .globl sym; .type sym, @object
#endif
/*
* These macros specify the section in which a given function or variable
* resides.
*
* - SECTION_FUNC allows only one function to reside in a sub-section
* - SECTION_SUBSEC_FUNC allows multiple functions to reside in a sub-section
* This ensures that garbage collection only discards the section
* if all functions in the sub-section are not referenced.
*/
#if defined(CONFIG_ARC)
/*
* Need to use assembly macros because ';' is interpreted as the start of
* a single line comment in the ARC assembler.
*
* Also, '\()' is needed in the .section directive of these macros for
* correct substitution of the 'section' variable.
*/
.macro section_var section, symbol
.section .\section\().\symbol
\symbol :
.endm
.macro section_func section, symbol
.section .\section\().\symbol, "ax"
FUNC_CODE()
PERFOPT_ALIGN
\symbol :
FUNC_INSTR(\symbol)
.endm
.macro section_subsec_func section, subsection, symbol
.section .\section\().\subsection, "ax"
PERFOPT_ALIGN
\symbol :
.endm
#define SECTION_VAR(sect, sym) section_var sect, sym
#define SECTION_FUNC(sect, sym) section_func sect, sym
#define SECTION_SUBSEC_FUNC(sect, subsec, sym) \
section_subsec_func sect, subsec, sym
#else /* !CONFIG_ARC */
#define SECTION_VAR(sect, sym) .section .sect.##sym; sym :
#define SECTION_FUNC(sect, sym) \
.section .sect.sym, "ax"; \
FUNC_CODE() \
PERFOPT_ALIGN; sym : \
FUNC_INSTR(sym)
#define SECTION_SUBSEC_FUNC(sect, subsec, sym) \
.section .sect.subsec, "ax"; PERFOPT_ALIGN; sym :
#endif /* CONFIG_ARC */
#endif /* _ASMLANGUAGE && !_LINKER */
#if defined(CONFIG_ARM) && defined(_ASMLANGUAGE)
#if defined(CONFIG_ISA_THUMB2)
/* '.syntax unified' is a gcc-ism used in thumb-2 asm files */
#define _ASM_FILE_PROLOGUE .text; .syntax unified; .thumb
#elif defined(CONFIG_ISA_THUMB)
#define _ASM_FILE_PROLOGUE .text; .code 16
#else
#define _ASM_FILE_PROLOGUE .text; .code 32
#endif
#endif
/*
* These macros generate absolute symbols for GCC
*/
/* create an extern reference to the absolute symbol */
#define GEN_OFFSET_EXTERN(name) extern const char name[]
#define GEN_ABS_SYM_BEGIN(name) \
EXTERN_C void name(void); \
void name(void) \
{
#define GEN_ABS_SYM_END }
#if defined(CONFIG_ARM)
/*
 * GNU/ARM backend does not have a proper operand modifier that avoids
 * producing a '#' prefix before the value, unlike %0 for PowerPC, Intel,
 * and MIPS. The workaround performed here is using %B0, which converts
 * the value to ~(value). Thus "n"(~(value)) is set in the operand
 * constraint to output (value) in the ARM-specific GEN_OFFSET macro.
*/
#define GEN_ABSOLUTE_SYM(name, value) \
__asm__(".globl\t" #name "\n\t.equ\t" #name \
",%B0" \
"\n\t.type\t" #name ",%%object" : : "n"(~(value)))
#elif defined(CONFIG_X86) || defined(CONFIG_ARC)
#define GEN_ABSOLUTE_SYM(name, value) \
__asm__(".globl\t" #name "\n\t.equ\t" #name \
",%c0" \
"\n\t.type\t" #name ",@object" : : "n"(value))
#elif defined(CONFIG_NIOS2) || defined(CONFIG_RISCV32) || defined(CONFIG_XTENSA)
/* No special prefixes necessary for constants in this arch AFAICT */
#define GEN_ABSOLUTE_SYM(name, value) \
__asm__(".globl\t" #name "\n\t.equ\t" #name \
",%0" \
"\n\t.type\t" #name ",%%object" : : "n"(value))
#else
#error processor architecture not supported
#endif
#define compiler_barrier() do { \
__asm__ __volatile__ ("" ::: "memory"); \
} while ((0))

View file

@ -0,0 +1,39 @@
/*
* Copyright (c) 2017 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef _XCC_TOOLCHAIN_H_
#define _XCC_TOOLCHAIN_H_
#include <toolchain/gcc.h>
/* XCC doesn't support __COUNTER__ but this should be good enough */
#define __COUNTER__ __LINE__
#undef __in_section_unique
#define __in_section_unique(seg) \
__attribute__((section("." STRINGIFY(seg) "." STRINGIFY(__COUNTER__))))
#ifndef __GCC_LINKER_CMD__
#include <xtensa/config/core.h>
/*
* XCC does not define the following macros with the expected names, but the
* HAL defines similar ones. Thus we include it and define the missing macros
* ourselves.
*/
#ifndef __BYTE_ORDER__
#define __BYTE_ORDER__ XCHAL_MEMORY_ORDER
#endif
#ifndef __ORDER_BIG_ENDIAN__
#define __ORDER_BIG_ENDIAN__ XTHAL_BIGENDIAN
#endif
#ifndef __ORDER_LITTLE_ENDIAN__
#define __ORDER_LITTLE_ENDIAN__ XTHAL_LITTLEENDIAN
#endif
#endif /* __GCC_LINKER_CMD__ */
#endif

View file

@ -0,0 +1,52 @@
/*
* Copyright (C) 2015-2017 Alibaba Group Holding Limited
*/
#ifndef WORK_H
#define WORK_H
#include "atomic.h"
#include "zephyr.h"
struct k_work_q {
struct k_fifo fifo;
};
int k_work_q_start();
enum {
K_WORK_STATE_PENDING,
};
struct k_work;
/* work define*/
typedef void (*k_work_handler_t)(struct k_work *work);
struct k_work {
void *_reserved;
k_work_handler_t handler;
atomic_t flags[1];
};
#define _K_WORK_INITIALIZER(work_handler) \
{ \
._reserved = NULL, \
.handler = work_handler, \
.flags = { 0 } \
}
#define K_WORK_INITIALIZER DEPRECATED_MACRO _K_WORK_INITIALIZER
int k_work_init(struct k_work *work, k_work_handler_t handler);
void k_work_submit(struct k_work *work);
/*delay work define*/
struct k_delayed_work {
struct k_work work;
struct k_work_q *work_q;
k_timer_t timer;
};
void k_delayed_work_init(struct k_delayed_work *work, k_work_handler_t handler);
int k_delayed_work_submit(struct k_delayed_work *work, uint32_t delay);
int k_delayed_work_cancel(struct k_delayed_work *work);
s32_t k_delayed_work_remaining_get(struct k_delayed_work *work);
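/* Editor's illustration (not part of this port): a minimal delayed-work
 * flow against the API above. The handler name, work item and delay value
 * are hypothetical; the delay unit is assumed to be milliseconds.
 *
 *   static void adv_timeout(struct k_work *work)
 *   {
 *           // restart advertising, etc.
 *   }
 *
 *   static struct k_delayed_work adv_work;
 *
 *   k_delayed_work_init(&adv_work, adv_timeout);
 *   k_delayed_work_submit(&adv_work, 100);   // fire after ~100 ms
 *   ...
 *   k_delayed_work_cancel(&adv_work);        // if no longer needed
 */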
#endif /* WORK_H */

View file

@ -0,0 +1,30 @@
/*
* Copyright (c) 2017 Linaro Limited
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef __Z_TYPES_H__
#define __Z_TYPES_H__
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef signed char s8_t;
typedef signed short s16_t;
typedef signed int s32_t;
typedef signed long long s64_t;
typedef unsigned char u8_t;
typedef unsigned short u16_t;
typedef unsigned int u32_t;
typedef unsigned long long u64_t;
#ifdef __cplusplus
}
#endif
#endif /* __Z_TYPES_H__ */

View file

@ -0,0 +1,193 @@
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief ARM Cortex-M interrupt management
*
*
* Interrupt management: enabling/disabling and dynamic ISR
* connecting/replacing. SW_ISR_TABLE_DYNAMIC has to be enabled for
* connecting ISRs at runtime.
*/
//#include <kernel.h>
#include <arch/cpu.h>
//#include <arch/arm/cortex_m/cmsis.h>
#include <misc/__assert.h>
#include <toolchain.h>
#include <linker/sections.h>
#include <sw_isr_table.h>
#include <irq.h>
#include <nrf.h>
#include <exc.h>
//#include <kernel_structs.h>
//#include <logging/kernel_event_logger.h>
extern void __reserved(void);
#define NUM_IRQS_PER_REG 32
#define REG_FROM_IRQ(irq) (irq / NUM_IRQS_PER_REG)
#define BIT_FROM_IRQ(irq) (irq % NUM_IRQS_PER_REG)
/**
*
* @brief Enable an interrupt line
*
* Enable the interrupt. After this call, the CPU will receive interrupts for
* the specified <irq>.
*
* @return N/A
*/
void _arch_irq_enable(unsigned int irq)
{
NVIC_EnableIRQ((IRQn_Type)irq);
}
/**
*
* @brief Disable an interrupt line
*
* Disable an interrupt line. After this call, the CPU will stop receiving
* interrupts for the specified <irq>.
*
* @return N/A
*/
void _arch_irq_disable(unsigned int irq)
{
NVIC_DisableIRQ((IRQn_Type)irq);
}
/**
* @brief Return IRQ enable state
*
* @param irq IRQ line
* @return interrupt enable state, true or false
*/
int _arch_irq_is_enabled(unsigned int irq)
{
return NVIC->ISER[REG_FROM_IRQ(irq)] & (1 << BIT_FROM_IRQ(irq));
}
/**
* @internal
*
* @brief Set an interrupt's priority
*
* The priority is verified if ASSERT_ON is enabled. The maximum number
* of priority levels is a little complex, as there are some hardware
* priority levels which are reserved: three for various types of exceptions,
* and possibly one additional to support zero latency interrupts.
*
* @return N/A
*/
void _irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags)
{
/* Hardware priority levels 0 and 1 reserved for Kernel use.
* So we add 2 to the requested priority level. If we support
* ZLI, 2 is also reserved so we add 3.
*/
#if CONFIG_ZERO_LATENCY_IRQS
/* If we have zero latency interrupts, that makes priority level 2
* a case with special semantics; it is not masked by irq_lock().
* Our policy is to express priority levels with special properties
* via flags
*/
if (flags & IRQ_ZERO_LATENCY) {
prio = 2;
} else {
prio += _IRQ_PRIO_OFFSET;
}
#else
ARG_UNUSED(flags);
prio += _IRQ_PRIO_OFFSET;
#endif
/* The last priority level is also used by PendSV exception, but
* allow other interrupts to use the same level, even if it ends up
* affecting performance (can still be useful on systems with a
* reduced set of priorities, like Cortex-M0/M0+).
*/
__ASSERT(prio <= ((1 << CONFIG_NUM_IRQ_PRIO_BITS) - 1),
"invalid priority %d! values must be less than %d\n",
prio - _IRQ_PRIO_OFFSET,
(1 << CONFIG_NUM_IRQ_PRIO_BITS) - (_IRQ_PRIO_OFFSET));
NVIC_SetPriority((IRQn_Type)irq, prio);
}
/**
*
* @brief Spurious interrupt handler
*
* Installed in all dynamic interrupt slots at boot time. Throws an error if
* called.
*
* See __reserved().
*
* @return N/A
*/
void _irq_spurious(void *unused)
{
ARG_UNUSED(unused);
__reserved();
}
/* FIXME: IRQ direct inline functions have to be placed here and not in
* arch/cpu.h as inline functions due to nasty circular dependency between
* arch/cpu.h and kernel_structs.h; the inline functions typically need to
* perform operations on _kernel. For now, leave as regular functions, a
* future iteration will resolve this.
* We have a similar issue with the k_event_logger functions.
*
* See https://jira.zephyrproject.org/browse/ZEP-1595
*/
#ifdef CONFIG_SYS_POWER_MANAGEMENT
void _arch_isr_direct_pm(void)
{
#if defined(CONFIG_ARMV6_M)
int key;
/* irq_lock() does what we want for this CPU */
key = irq_lock();
#elif defined(CONFIG_ARMV7_M)
/* Lock all interrupts. irq_lock() will on this CPU only disable those
* lower than BASEPRI, which is not what we want. See comments in
* arch/arm/core/isr_wrapper.S
*/
__asm__ volatile("cpsid i" : : : "memory");
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M */
if (_kernel.idle) {
s32_t idle_val = _kernel.idle;
_kernel.idle = 0;
_sys_power_save_idle_exit(idle_val);
}
#if defined(CONFIG_ARMV6_M)
irq_unlock(key);
#elif defined(CONFIG_ARMV7_M)
__asm__ volatile("cpsie i" : : : "memory");
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M */
}
#endif
#if defined(CONFIG_KERNEL_EVENT_LOGGER_SLEEP) || \
defined(CONFIG_KERNEL_EVENT_LOGGER_INTERRUPT)
void _arch_isr_direct_header(void)
{
_sys_k_event_logger_interrupt();
_sys_k_event_logger_exit_sleep();
}
#endif

View file

@ -0,0 +1,76 @@
/* log.c - logging helpers */
/*
* Copyright (c) 2017 Nordic Semiconductor ASA
* Copyright (c) 2016 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
/* Helper for printk parameters to convert from binary to hex.
* We declare multiple buffers so the helper can be used multiple times
* in a single printk call.
*/
#include <stddef.h>
#include <zephyr/types.h>
#include <zephyr.h>
#include <misc/util.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>
const char *bt_hex(const void *buf, size_t len)
{
static const char hex[] = "0123456789abcdef";
static char hexbufs[4][129];
static u8_t curbuf;
const u8_t *b = buf;
unsigned int mask;
char *str;
int i;
mask = irq_lock();
str = hexbufs[curbuf++];
curbuf %= ARRAY_SIZE(hexbufs);
irq_unlock(mask);
len = min(len, (sizeof(hexbufs[0]) - 1) / 2);
for (i = 0; i < len; i++) {
str[i * 2] = hex[b[i] >> 4];
str[i * 2 + 1] = hex[b[i] & 0xf];
}
str[i * 2] = '\0';
return str;
}
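/* Editor's note: bt_hex() rotates through four static buffers so that up
 * to four conversions can appear in a single log statement, e.g.
 * (illustrative only, 'key' and 'rand' are hypothetical):
 *
 *   BT_DBG("LTK %s, rand %s", bt_hex(key, 16), bt_hex(rand, 8));
 *
 * Because the buffers are static and recycled, a returned pointer is only
 * valid until the buffer is reused four calls later.
 */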
#if defined(CONFIG_BT_DEBUG)
const char *bt_addr_str(const bt_addr_t *addr)
{
static char bufs[2][BT_ADDR_STR_LEN];
static u8_t cur;
char *str;
str = bufs[cur++];
cur %= ARRAY_SIZE(bufs);
bt_addr_to_str(addr, str, sizeof(bufs[cur]));
return str;
}
const char *bt_addr_le_str(const bt_addr_le_t *addr)
{
static char bufs[2][BT_ADDR_LE_STR_LEN];
static u8_t cur;
char *str;
str = bufs[cur++];
cur %= ARRAY_SIZE(bufs);
bt_addr_le_to_str(addr, str, sizeof(bufs[cur]));
return str;
}
#endif /* CONFIG_BT_DEBUG */

View file

@ -0,0 +1,240 @@
/*
* Copyright (c) 2017 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
*
* @brief Kernel asynchronous event polling interface.
*
* This polling mechanism allows waiting on multiple events concurrently,
* either events triggered directly, or from kernel objects or other kernel
* constructs.
*/
#include <stdio.h>
#include <zephyr.h>
#include <zephyr/types.h>
#include <misc/slist.h>
#include <misc/dlist.h>
#include <misc/__assert.h>
struct k_sem g_poll_sem;
void k_poll_event_init(struct k_poll_event *event, u32_t type,
int mode, void *obj)
{
__ASSERT(mode == K_POLL_MODE_NOTIFY_ONLY,
"only NOTIFY_ONLY mode is supported\n");
__ASSERT(type < (1 << _POLL_NUM_TYPES), "invalid type\n");
__ASSERT(obj, "must provide an object\n");
event->poller = NULL;
/* event->tag is left uninitialized: the user will set it if needed */
event->type = type;
event->state = K_POLL_STATE_NOT_READY;
event->mode = mode;
event->unused = 0;
event->obj = obj;
}
/* must be called with interrupts locked */
static inline int is_condition_met(struct k_poll_event *event, u32_t *state)
{
switch (event->type) {
case K_POLL_TYPE_SEM_AVAILABLE:
if (k_sem_count_get(event->sem) > 0) {
*state = K_POLL_STATE_SEM_AVAILABLE;
return 1;
}
break;
case K_POLL_TYPE_DATA_AVAILABLE:
if (!k_queue_is_empty(event->queue)) {
*state = K_POLL_STATE_FIFO_DATA_AVAILABLE;
return 1;
}
break;
case K_POLL_TYPE_SIGNAL:
if (event->signal->signaled) {
*state = K_POLL_STATE_SIGNALED;
return 1;
}
break;
case K_POLL_TYPE_IGNORE:
return 0;
default:
__ASSERT(0, "invalid event type (0x%x)\n", event->type);
break;
}
return 0;
}
static inline void add_event(sys_dlist_t *events, struct k_poll_event *event,
struct _poller *poller)
{
sys_dlist_append(events, &event->_node);
}
/* must be called with interrupts locked */
static inline int register_event(struct k_poll_event *event,
struct _poller *poller)
{
switch (event->type) {
case K_POLL_TYPE_SEM_AVAILABLE:
__ASSERT(event->sem, "invalid semaphore\n");
add_event(&event->sem->poll_events, event, poller);
break;
case K_POLL_TYPE_DATA_AVAILABLE:
__ASSERT(event->queue, "invalid queue\n");
add_event(&event->queue->poll_events, event, poller);
break;
case K_POLL_TYPE_SIGNAL:
__ASSERT(event->signal, "invalid poll signal\n");
add_event(&event->signal->poll_events, event, poller);
break;
case K_POLL_TYPE_IGNORE:
/* nothing to do */
break;
default:
__ASSERT(0, "invalid event type\n");
break;
}
event->poller = poller;
return 0;
}
/* must be called with interrupts locked */
static inline void clear_event_registration(struct k_poll_event *event)
{
event->poller = NULL;
switch (event->type) {
case K_POLL_TYPE_SEM_AVAILABLE:
__ASSERT(event->sem, "invalid semaphore\n");
sys_dlist_remove(&event->_node);
break;
case K_POLL_TYPE_DATA_AVAILABLE:
__ASSERT(event->queue, "invalid queue\n");
sys_dlist_remove(&event->_node);
break;
case K_POLL_TYPE_SIGNAL:
__ASSERT(event->signal, "invalid poll signal\n");
sys_dlist_remove(&event->_node);
break;
case K_POLL_TYPE_IGNORE:
/* nothing to do */
break;
default:
__ASSERT(0, "invalid event type\n");
break;
}
}
/* must be called with interrupts locked */
static inline void clear_event_registrations(struct k_poll_event *events,
int last_registered,
unsigned int key)
{
for (; last_registered >= 0; last_registered--) {
clear_event_registration(&events[last_registered]);
irq_unlock(key);
key = irq_lock();
}
}
static inline void set_event_ready(struct k_poll_event *event, u32_t state)
{
event->poller = NULL;
event->state |= state;
}
static bool polling_events(struct k_poll_event *events, int num_events,
s32_t timeout, int *last_registered)
{
int rc;
bool polling = true;
unsigned int key;
for (int ii = 0; ii < num_events; ii++) {
u32_t state;
key = irq_lock();
if (is_condition_met(&events[ii], &state)) {
set_event_ready(&events[ii], state);
polling = false;
} else if (timeout != K_NO_WAIT && polling) {
rc = register_event(&events[ii], NULL);
if (rc == 0) {
++(*last_registered);
} else {
__ASSERT(0, "unexpected return code\n");
}
}
irq_unlock(key);
}
return polling;
}
int k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
{
__ASSERT(events, "NULL events\n");
__ASSERT(num_events > 0, "zero events\n");
int last_registered = -1;
unsigned int key;
bool polling = true;
/* find events whose condition is already fulfilled */
polling = polling_events(events, num_events, timeout, &last_registered);
if (polling == false) {
goto exit;
}
k_sem_take(&g_poll_sem, timeout);
last_registered = -1;
polling_events(events, num_events, timeout, &last_registered);
exit:
key = irq_lock();
clear_event_registrations(events, last_registered, key);
irq_unlock(key);
return 0;
}
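/* Editor's sketch of a typical caller (not part of this port); the signal
 * object and the K_FOREVER timeout are hypothetical:
 *
 *   static struct k_poll_signal sig;
 *   struct k_poll_event evt;
 *
 *   k_poll_event_init(&evt, K_POLL_TYPE_SIGNAL,
 *                     K_POLL_MODE_NOTIFY_ONLY, &sig);
 *   k_poll(&evt, 1, K_FOREVER);           // returns once the event fires
 *   if (evt.state & K_POLL_STATE_SIGNALED) {
 *           // handle sig.result
 *   }
 *   evt.state = K_POLL_STATE_NOT_READY;   // re-arm before polling again
 */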
/* must be called with interrupts locked */
static int _signal_poll_event(struct k_poll_event *event, u32_t state,
int *must_reschedule)
{
*must_reschedule = 0;
set_event_ready(event, state);
return 0;
}
int k_poll_signal(struct k_poll_signal *signal, int result)
{
unsigned int key = irq_lock();
struct k_poll_event *poll_event;
int must_reschedule;
signal->result = result;
signal->signaled = 1;
poll_event = (struct k_poll_event *)sys_dlist_get(&signal->poll_events);
if (!poll_event) {
irq_unlock(key);
return 0;
}
int rc = _signal_poll_event(poll_event, K_POLL_STATE_SIGNALED,
&must_reschedule);
k_sem_give(&g_poll_sem);
irq_unlock(key);
return rc;
}

View file

@ -0,0 +1,103 @@
/**
* @file rpa.c
* Resolvable Private Address Generation and Resolution
*/
/*
* Copyright (c) 2017 Nordic Semiconductor ASA
* Copyright (c) 2015-2016 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr.h>
#include <stddef.h>
#include <errno.h>
#include <string.h>
#include <atomic.h>
#include <misc/util.h>
#include <misc/byteorder.h>
#include <misc/stack.h>
#include <tinycrypt/constants.h>
#include <tinycrypt/aes.h>
#include <tinycrypt/utils.h>
#include <tinycrypt/cmac_mode.h>
#define BT_DBG_ENABLED IS_ENABLED(CONFIG_BT_DEBUG_RPA)
#include "common/log.h"
#if defined(CONFIG_BT_SMP) || defined(CONFIG_BT_CTLR_PRIVACY)
static int ah(const u8_t irk[16], const u8_t r[3], u8_t out[3])
{
u8_t res[16];
int err;
BT_DBG("irk %s, r %s", bt_hex(irk, 16), bt_hex(r, 3));
/* r' = padding || r */
memcpy(res, r, 3);
memset(res + 3, 0, 13);
err = bt_encrypt_le(irk, res, res);
if (err) {
return err;
}
/* The output of the random address function ah is:
* ah(h, r) = e(k, r') mod 2^24
* The output of the security function e is then truncated to 24 bits
* by taking the least significant 24 bits of the output of e as the
* result of ah.
*/
memcpy(out, res, 3);
return 0;
}
#endif
#if defined(CONFIG_BT_SMP) || defined(CONFIG_BT_CTLR_PRIVACY)
bool bt_rpa_irk_matches(const u8_t irk[16], const bt_addr_t *addr)
{
u8_t hash[3];
int err;
BT_DBG("IRK %s bdaddr %s", bt_hex(irk, 16), bt_addr_str(addr));
err = ah(irk, addr->val + 3, hash);
if (err) {
return false;
}
return !memcmp(addr->val, hash, 3);
}
#endif
#if defined(CONFIG_BT_PRIVACY) || defined(CONFIG_BT_CTLR_PRIVACY)
int bt_rpa_create(const u8_t irk[16], bt_addr_t *rpa)
{
int err;
err = bt_rand(rpa->val + 3, 3);
if (err) {
return err;
}
BT_ADDR_SET_RPA(rpa);
err = ah(irk, rpa->val + 3, rpa->val);
if (err) {
return err;
}
BT_DBG("Created RPA %s", bt_addr_str((bt_addr_t *)rpa->val));
return 0;
}
#else
int bt_rpa_create(const u8_t irk[16], bt_addr_t *rpa)
{
return -ENOTSUP;
}
#endif /* CONFIG_BT_PRIVACY */
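/* Editor's illustration (not part of the upstream file): generating and
 * later resolving an RPA with the helpers above. 'irk' is a hypothetical
 * 16-byte identity resolving key shared during pairing.
 *
 *   bt_addr_t rpa;
 *
 *   if (!bt_rpa_create(irk, &rpa)) {
 *           // advertise using 'rpa' ...
 *   }
 *   // peer side: check whether an incoming RPA was generated from 'irk'
 *   if (bt_rpa_irk_matches(irk, &rpa)) {
 *           // address resolves to the bonded identity
 *   }
 */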

View file

@ -0,0 +1,116 @@
# Kconfig - Cryptography primitive options for TinyCrypt version 2.0
#
# Copyright (c) 2015 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
config TINYCRYPT
bool
prompt "TinyCrypt Support"
default n
help
This option enables the TinyCrypt cryptography library.
config TINYCRYPT_CTR_PRNG
bool
prompt "PRNG in counter mode"
depends on TINYCRYPT
default n
help
This option enables support for the pseudo-random number
generator in counter mode.
config TINYCRYPT_SHA256
bool
prompt "SHA-256 Hash function support"
depends on TINYCRYPT
default n
help
This option enables support for SHA-256
hash function primitive.
config TINYCRYPT_SHA256_HMAC
bool
prompt "HMAC (via SHA256) message auth support"
depends on TINYCRYPT_SHA256
default n
help
This option enables support for HMAC using SHA-256
message authentication code.
config TINYCRYPT_SHA256_HMAC_PRNG
bool
prompt "PRNG (via HMAC-SHA256) support"
depends on TINYCRYPT_SHA256_HMAC
default n
help
This option enables support for pseudo-random number
generator.
config TINYCRYPT_ECC_DH
bool
prompt "ECC_DH anonymous key agreement protocol"
depends on TINYCRYPT
select ENTROPY_GENERATOR
default n
help
This option enables support for the Elliptic curve
Diffie-Hellman anonymous key agreement protocol.
Enabling ECC requires a cryptographically secure random number
generator.
config TINYCRYPT_ECC_DSA
bool
prompt "ECC_DSA digital signature algorithm"
depends on TINYCRYPT
select ENTROPY_GENERATOR
default n
help
This option enables support for the Elliptic Curve Digital
Signature Algorithm (ECDSA).
Enabling ECC requires a cryptographically secure random number
generator.
config TINYCRYPT_AES
bool
prompt "AES-128 decrypt/encrypt"
depends on TINYCRYPT
default n
help
This option enables support for AES-128 decrypt and encrypt.
config TINYCRYPT_AES_CBC
bool
prompt "AES-128 block cipher"
depends on TINYCRYPT_AES
default n
help
This option enables support for AES-128 block cipher mode.
config TINYCRYPT_AES_CTR
bool
prompt "AES-128 counter mode"
depends on TINYCRYPT_AES
default n
help
This option enables support for AES-128 counter mode.
config TINYCRYPT_AES_CCM
bool
prompt "AES-128 CCM mode"
depends on TINYCRYPT_AES
default n
help
This option enables support for AES-128 CCM mode.
config TINYCRYPT_AES_CMAC
bool
prompt "AES-128 CMAC mode"
depends on TINYCRYPT_AES
default n
help
This option enables support for AES-128 CMAC mode.

View file

@ -0,0 +1,71 @@
The TinyCrypt library in Zephyr is a downstream of an externally maintained
open source project. The original upstream code can be found at:
https://github.com/01org/tinycrypt
At revision c214460d7f760e2a75908cb41000afcc0bfca282, version 0.2.7
Any changes to the local version should include Zephyr's TinyCrypt
maintainer in the review. That can be found via the git history.
The following is the license information for this code:
================================================================================
TinyCrypt Cryptographic Library
================================================================================
Copyright (c) 2017, Intel Corporation. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
- Neither the name of the Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================================================
Copyright (c) 2013, Kenneth MacKay
All rights reserved.
https://github.com/kmackay/micro-ecc
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View file

@ -0,0 +1,130 @@
/* aes.h - TinyCrypt interface to an AES-128 implementation */
/*
* Copyright (C) 2017 by Intel Corporation, All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file
* @brief -- Interface to an AES-128 implementation.
*
* Overview: AES-128 is a NIST approved block cipher specified in
* FIPS 197. Block ciphers are deterministic algorithms that
* perform a transformation specified by a symmetric key in fixed-
* length data sets, also called blocks.
*
* Security: AES-128 provides approximately 128 bits of security.
*
* Usage: 1) call tc_aes128_set_encrypt/decrypt_key to set the key.
*
* 2) call tc_aes_encrypt/decrypt to process the data.
*/
#ifndef __TC_AES_H__
#define __TC_AES_H__
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
#define Nb (4) /* number of columns (32-bit words) comprising the state */
#define Nk (4) /* number of 32-bit words comprising the key */
#define Nr (10) /* number of rounds */
#define TC_AES_BLOCK_SIZE (Nb*Nk)
#define TC_AES_KEY_SIZE (Nb*Nk)
typedef struct tc_aes_key_sched_struct {
unsigned int words[Nb*(Nr+1)];
} *TCAesKeySched_t;
/**
* @brief Set AES-128 encryption key
* Uses key k to initialize s
* @return returns TC_CRYPTO_SUCCESS (1)
* returns TC_CRYPTO_FAIL (0) if: s == NULL or k == NULL
* @note This implementation skips the additional steps required for keys
* larger than 128 bits, and must not be used for AES-192 or
* AES-256 key schedule -- see FIPS 197 for details
* @param s IN/OUT -- initialized struct tc_aes_key_sched_struct
* @param k IN -- points to the AES key
*/
int tc_aes128_set_encrypt_key(TCAesKeySched_t s, const uint8_t *k);
/**
* @brief AES-128 Encryption procedure
* Encrypts contents of in buffer into out buffer under key;
* schedule s
* @note Assumes s was initialized by aes_set_encrypt_key;
* out and in point to 16 byte buffers
* @return returns TC_CRYPTO_SUCCESS (1)
* returns TC_CRYPTO_FAIL (0) if: out == NULL or in == NULL or s == NULL
* @param out IN/OUT -- buffer to receive ciphertext block
* @param in IN -- a plaintext block to encrypt
* @param s IN -- initialized AES key schedule
*/
int tc_aes_encrypt(uint8_t *out, const uint8_t *in,
const TCAesKeySched_t s);
/**
* @brief Set the AES-128 decryption key
* Uses key k to initialize s
* @return returns TC_CRYPTO_SUCCESS (1)
* returns TC_CRYPTO_FAIL (0) if: s == NULL or k == NULL
* @note This is the implementation of the straightforward inverse cipher
* using the cipher documented in FIPS-197 figure 12, not the
* equivalent inverse cipher presented in Figure 15
* @warning This routine skips the additional steps required for keys larger
* than 128, and must not be used for AES-192 or AES-256 key
* schedule -- see FIPS 197 for details
* @param s IN/OUT -- initialized struct tc_aes_key_sched_struct
* @param k IN -- points to the AES key
*/
int tc_aes128_set_decrypt_key(TCAesKeySched_t s, const uint8_t *k);
/**
 * @brief AES-128 Decryption procedure
 * Decrypts in buffer into out buffer under key schedule s
 * @return returns TC_CRYPTO_SUCCESS (1)
 * returns TC_CRYPTO_FAIL (0) if: out is NULL or in is NULL or s is NULL
 * @note Assumes s was initialized by aes_set_decrypt_key;
 * out and in point to 16 byte buffers
 * @param out IN/OUT -- buffer to receive the decrypted plaintext block
 * @param in IN -- a ciphertext block to decrypt
* @param s IN -- initialized AES key schedule
*/
int tc_aes_decrypt(uint8_t *out, const uint8_t *in,
const TCAesKeySched_t s);
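/* Editor's sketch of the two-step usage described above (not part of
 * upstream TinyCrypt); 'key', 'plaintext' and 'ciphertext' are
 * hypothetical 16-byte arrays:
 *
 *   struct tc_aes_key_sched_struct sched;
 *
 *   (void)tc_aes128_set_encrypt_key(&sched, key);
 *   (void)tc_aes_encrypt(ciphertext, plaintext, &sched);
 *
 *   (void)tc_aes128_set_decrypt_key(&sched, key);
 *   (void)tc_aes_decrypt(plaintext, ciphertext, &sched);
 *
 * Each call returns TC_CRYPTO_SUCCESS (1) or TC_CRYPTO_FAIL (0).
 */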
#ifdef __cplusplus
}
#endif
#endif /* __TC_AES_H__ */

View file

@ -0,0 +1,151 @@
/* cbc_mode.h - TinyCrypt interface to a CBC mode implementation */
/*
* Copyright (C) 2017 by Intel Corporation, All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file
* @brief Interface to a CBC mode implementation.
*
* Overview: CBC (for "cipher block chaining") mode is a NIST approved mode of
* operation defined in SP 800-38a. It can be used with any block
* cipher to provide confidentiality of strings whose lengths are
* multiples of the block_size of the underlying block cipher.
* TinyCrypt hard codes AES as the block cipher.
*
* Security: CBC mode provides data confidentiality given that the maximum
* number q of blocks encrypted under a single key satisfies
* q < 2^63, which is not a practical constraint (it is considered a
* good practice to replace the encryption when q == 2^56). CBC mode
* provides NO data integrity.
*
* CBC mode assumes that the IV value input into the
* tc_cbc_mode_encrypt is randomly generated. The TinyCrypt library
* provides HMAC-PRNG module, which generates suitable IVs. Other
* methods for generating IVs are acceptable, provided that the
* values of the IVs generated appear random to any adversary,
* including someone with complete knowledge of the system design.
*
* The randomness property on which CBC mode's security depends is
* the unpredictability of the IV. Since it is unpredictable, this
* means in practice that CBC mode requires that the IV is stored
* somehow with the ciphertext in order to recover the plaintext.
*
* TinyCrypt CBC encryption prepends the IV to the ciphertext,
 * because this affords a more efficient (fewer buffers) decryption.
* Hence tc_cbc_mode_encrypt assumes the ciphertext buffer is always
* 16 bytes larger than the plaintext buffer.
*
* Requires: AES-128
*
* Usage: 1) call tc_cbc_mode_encrypt to encrypt data.
*
* 2) call tc_cbc_mode_decrypt to decrypt data.
*
*/
#ifndef __TC_CBC_MODE_H__
#define __TC_CBC_MODE_H__
#include <tinycrypt/aes.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief CBC encryption procedure
* CBC encrypts inlen bytes of the in buffer into the out buffer
* using the encryption key schedule provided, prepends iv to out
* @return returns TC_CRYPTO_SUCCESS (1)
* returns TC_CRYPTO_FAIL (0) if:
* out == NULL or
* in == NULL or
* ctr == NULL or
* sched == NULL or
* inlen == 0 or
* (inlen % TC_AES_BLOCK_SIZE) != 0 or
* (outlen % TC_AES_BLOCK_SIZE) != 0 or
* outlen != inlen + TC_AES_BLOCK_SIZE
* @note Assumes: - sched has been configured by aes_set_encrypt_key
* - iv contains a 16 byte random string
* - out buffer is large enough to hold the ciphertext + iv
* - out buffer is a contiguous buffer
* - in holds the plaintext and is a contiguous buffer
* - inlen gives the number of bytes in the in buffer
* @param out IN/OUT -- buffer to receive the ciphertext
* @param outlen IN -- length of ciphertext buffer in bytes
* @param in IN -- plaintext to encrypt
* @param inlen IN -- length of plaintext buffer in bytes
* @param iv IN -- the IV for the this encrypt/decrypt
* @param sched IN -- AES key schedule for this encrypt
*/
int tc_cbc_mode_encrypt(uint8_t *out, unsigned int outlen, const uint8_t *in,
unsigned int inlen, const uint8_t *iv,
const TCAesKeySched_t sched);
/**
* @brief CBC decryption procedure
* CBC decrypts inlen bytes of the in buffer into the out buffer
* using the provided encryption key schedule
* @return returns TC_CRYPTO_SUCCESS (1)
* returns TC_CRYPTO_FAIL (0) if:
* out == NULL or
* in == NULL or
* sched == NULL or
* inlen == 0 or
* outlen == 0 or
* (inlen % TC_AES_BLOCK_SIZE) != 0 or
* (outlen % TC_AES_BLOCK_SIZE) != 0 or
* outlen != inlen + TC_AES_BLOCK_SIZE
* @note Assumes:- in == iv + ciphertext, i.e. the iv and the ciphertext are
* contiguous. This allows for a very efficient decryption
* algorithm that would not otherwise be possible
* - sched was configured by aes_set_decrypt_key
* - out buffer is large enough to hold the decrypted plaintext
* and is a contiguous buffer
* - inlen gives the number of bytes in the in buffer
* @param out IN/OUT -- buffer to receive decrypted data
* @param outlen IN -- length of plaintext buffer in bytes
* @param in IN -- ciphertext to decrypt, including IV
* @param inlen IN -- length of ciphertext buffer in bytes
* @param iv IN -- the IV for the this encrypt/decrypt
* @param sched IN -- AES key schedule for this decrypt
*
*/
int tc_cbc_mode_decrypt(uint8_t *out, unsigned int outlen, const uint8_t *in,
unsigned int inlen, const uint8_t *iv,
const TCAesKeySched_t sched);
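/* Editor's sketch (not part of upstream TinyCrypt): encrypting one
 * 16-byte block as described in the Usage section above. 'key', 'iv' and
 * 'plain' are hypothetical; the ciphertext buffer carries the IV in
 * front, so it is one block larger than the plaintext:
 *
 *   struct tc_aes_key_sched_struct sched;
 *   uint8_t cipher[2 * TC_AES_BLOCK_SIZE];
 *
 *   (void)tc_aes128_set_encrypt_key(&sched, key);
 *   (void)tc_cbc_mode_encrypt(cipher, sizeof(cipher),
 *                             plain, TC_AES_BLOCK_SIZE, iv, &sched);
 */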
#ifdef __cplusplus
}
#endif
#endif /* __TC_CBC_MODE_H__ */

View file

@ -0,0 +1,211 @@
/* ccm_mode.h - TinyCrypt interface to a CCM mode implementation */
/*
* Copyright (C) 2017 by Intel Corporation, All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file
* @brief Interface to a CCM mode implementation.
*
* Overview: CCM (for "Counter with CBC-MAC") mode is a NIST approved mode of
* operation defined in SP 800-38C.
*
* TinyCrypt CCM implementation accepts:
*
* 1) Both non-empty payload and associated data (it encrypts and
* authenticates the payload and also authenticates the associated
* data);
* 2) Non-empty payload and empty associated data (it encrypts and
* authenticates the payload);
* 3) Non-empty associated data and empty payload (it degenerates to
* an authentication mode on the associated data).
*
* TinyCrypt CCM implementation accepts associated data of any length
* between 0 and (2^16 - 2^8) bytes.
*
* Security: The mac length parameter is an important parameter to estimate the
* security against collision attacks (that aim at finding different
* messages that produce the same authentication tag). TinyCrypt CCM
* implementation accepts any even integer between 4 and 16, as
* suggested in SP 800-38C.
*
* RFC-3610, which also specifies CCM, presents a few relevant
* security suggestions, such as: it is recommended for most
* applications to use a mac length greater than 8. Besides, the
* usage of the same nonce for two different messages which are
* encrypted with the same key destroys the security of CCM mode.
*
* Requires: AES-128
*
* Usage: 1) call tc_ccm_config to configure.
*
* 2) call tc_ccm_mode_encrypt to encrypt data and generate tag.
*
* 3) call tc_ccm_mode_decrypt to decrypt data and verify tag.
*/
#ifndef __TC_CCM_MODE_H__
#define __TC_CCM_MODE_H__
#include <tinycrypt/aes.h>
#include <stddef.h>
#ifdef __cplusplus
extern "C" {
#endif
/* max additional authenticated size in bytes: 2^16 - 2^8 = 65280 */
#define TC_CCM_AAD_MAX_BYTES 0xff00
/* max message size in bytes: 2^(8L) = 2^16 = 65536 */
#define TC_CCM_PAYLOAD_MAX_BYTES 0x10000
/* struct tc_ccm_mode_struct represents the state of a CCM computation */
typedef struct tc_ccm_mode_struct {
TCAesKeySched_t sched; /* AES key schedule */
uint8_t *nonce; /* nonce required by CCM */
unsigned int mlen; /* mac length in bytes (parameter t in SP-800 38C) */
} *TCCcmMode_t;
/**
* @brief CCM configuration procedure
* @return returns TC_CRYPTO_SUCCESS (1)
* returns TC_CRYPTO_FAIL (0) if:
* c == NULL or
* sched == NULL or
* nonce == NULL or
* mlen != {4, 6, 8, 10, 12, 16}
* @param c -- CCM state
* @param sched IN -- AES key schedule
* @param nonce IN - nonce
* @param nlen -- nonce length in bytes
* @param mlen -- mac length in bytes (parameter t in SP-800 38C)
*/
int tc_ccm_config(TCCcmMode_t c, TCAesKeySched_t sched, uint8_t *nonce,
unsigned int nlen, unsigned int mlen);
/**
* @brief CCM tag generation and encryption procedure
* @return returns TC_CRYPTO_SUCCESS (1)
* returns TC_CRYPTO_FAIL (0) if:
* out == NULL or
* c == NULL or
* ((plen > 0) and (payload == NULL)) or
* ((alen > 0) and (associated_data == NULL)) or
* (alen >= TC_CCM_AAD_MAX_BYTES) or
* (plen >= TC_CCM_PAYLOAD_MAX_BYTES) or
* (olen < plen + maclength)
*
* @param out OUT -- encrypted data
* @param olen IN -- output length in bytes
* @param associated_data IN -- associated data
* @param alen IN -- associated data length in bytes
* @param payload IN -- payload
* @param plen IN -- payload length in bytes
* @param c IN -- CCM state
*
* @note: out buffer should be at least (plen + c->mlen) bytes long.
*
* @note: The sequence b for encryption is formatted as follows:
* b = [FLAGS | nonce | counter ], where:
* FLAGS is 1 byte long
* nonce is 13 bytes long
* counter is 2 bytes long
* The byte FLAGS is composed by the following 8 bits:
* 0-2 bits: used to represent the value of q-1
 * 3-7 bits: always 0's
*
* @note: The sequence b for authentication is formatted as follows:
* b = [FLAGS | nonce | length(mac length)], where:
* FLAGS is 1 byte long
* nonce is 13 bytes long
* length(mac length) is 2 bytes long
* The byte FLAGS is composed by the following 8 bits:
* 0-2 bits: used to represent the value of q-1
* 3-5 bits: mac length (encoded as: (mlen-2)/2)
* 6: Adata (0 if alen == 0, and 1 otherwise)
* 7: always 0
*/
int tc_ccm_generation_encryption(uint8_t *out, unsigned int olen,
const uint8_t *associated_data,
unsigned int alen, const uint8_t *payload,
unsigned int plen, TCCcmMode_t c);
/**
* @brief CCM decryption and tag verification procedure
* @return returns TC_CRYPTO_SUCCESS (1)
* returns TC_CRYPTO_FAIL (0) if:
* out == NULL or
* c == NULL or
* ((plen > 0) and (payload == NULL)) or
* ((alen > 0) and (associated_data == NULL)) or
* (alen >= TC_CCM_AAD_MAX_BYTES) or
* (plen >= TC_CCM_PAYLOAD_MAX_BYTES) or
* (olen < plen - c->mlen)
*
* @param out OUT -- decrypted data
* @param associated_data IN -- associated data
* @param alen IN -- associated data length in bytes
* @param payload IN -- payload
* @param plen IN -- payload length in bytes
* @param c IN -- CCM state
*
* @note: out buffer should be at least (plen - c->mlen) bytes long.
*
* @note: The sequence b for encryption is formatted as follows:
* b = [FLAGS | nonce | counter ], where:
* FLAGS is 1 byte long
* nonce is 13 bytes long
* counter is 2 bytes long
* The byte FLAGS is composed by the following 8 bits:
* 0-2 bits: used to represent the value of q-1
 * 3-7 bits: always 0's
*
* @note: The sequence b for authentication is formatted as follows:
* b = [FLAGS | nonce | length(mac length)], where:
* FLAGS is 1 byte long
* nonce is 13 bytes long
* length(mac length) is 2 bytes long
* The byte FLAGS is composed by the following 8 bits:
* 0-2 bits: used to represent the value of q-1
* 3-5 bits: mac length (encoded as: (mlen-2)/2)
* 6: Adata (0 if alen == 0, and 1 otherwise)
* 7: always 0
*/
int tc_ccm_decryption_verification(uint8_t *out, unsigned int olen,
const uint8_t *associated_data,
unsigned int alen, const uint8_t *payload, unsigned int plen,
TCCcmMode_t c);
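/* Editor's sketch of the three-step usage above (not upstream code), with
 * a 13-byte nonce and an 8-byte MAC; 'key', 'nonce', 'aad' and 'msg' are
 * hypothetical buffers:
 *
 *   struct tc_ccm_mode_struct c;
 *   struct tc_aes_key_sched_struct sched;
 *   uint8_t out[sizeof(msg) + 8];
 *
 *   (void)tc_aes128_set_encrypt_key(&sched, key);
 *   (void)tc_ccm_config(&c, &sched, nonce, 13, 8);
 *   (void)tc_ccm_generation_encryption(out, sizeof(out), aad, sizeof(aad),
 *                                      msg, sizeof(msg), &c);
 */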
#ifdef __cplusplus
}
#endif
#endif /* __TC_CCM_MODE_H__ */

View file

@ -0,0 +1,194 @@
/* cmac_mode.h -- interface to a CMAC implementation */
/*
* Copyright (C) 2017 by Intel Corporation, All Rights Reserved
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file
* @brief Interface to a CMAC implementation.
*
* Overview: CMAC is defined NIST in SP 800-38B, and is the standard algorithm
* for computing a MAC using a block cipher. It can compute the MAC
* for a byte string of any length. It is distinguished from CBC-MAC
* in the processing of the final message block; CMAC uses a
 * different technique depending on whether the final message block is full
* size or only partial, while CBC-MAC uses the same technique for
* both. This difference permits CMAC to be applied to variable
* length messages, while all messages authenticated by CBC-MAC must
* be the same length.
*
* Security: AES128-CMAC mode of operation offers 64 bits of security against
* collision attacks. Note however that an external attacker cannot
* generate the tags him/herself without knowing the MAC key. In this
* sense, to attack the collision property of AES128-CMAC, an
* external attacker would need the cooperation of the legal user to
* produce an exponentially high number of tags (e.g. 2^64) to
* finally be able to look for collisions and benefit from them. As
 * an extra precaution, the current implementation allows at most
* 2^48 calls to the tc_cmac_update function before re-calling
* tc_cmac_setup (allowing a new key to be set), as suggested in
* Appendix B of SP 800-38B.
*
* Requires: AES-128
*
* Usage: This implementation provides a "scatter-gather" interface, so that
* the CMAC value can be computed incrementally over a message
* scattered in different segments throughout memory. Experience shows
* this style of interface tends to minimize the burden of programming
* correctly. Like all symmetric key operations, it is session
* oriented.
*
* To begin a CMAC session, use tc_cmac_setup to initialize a struct
* tc_cmac_struct with encryption key and buffer. Our implementation
 * always assumes that the AES key is the same size as the block
* cipher block size. Once setup, this data structure can be used for
* many CMAC computations.
*
* Once the state has been setup with a key, computing the CMAC of
* some data requires three steps:
*
* (1) first use tc_cmac_init to initialize a new CMAC computation.
* (2) next mix all of the data into the CMAC computation state using
* tc_cmac_update. If all of the data resides in a single data
* segment then only one tc_cmac_update call is needed; if data
* is scattered throughout memory in n data segments, then n calls
* will be needed. CMAC IS ORDER SENSITIVE, to be able to detect
* attacks that swap bytes, so the order in which data is mixed
* into the state is critical!
* (3) Once all of the data for a message has been mixed, use
* tc_cmac_final to compute the CMAC tag value.
*
* Steps (1)-(3) can be repeated as many times as you want to CMAC
* multiple messages. A practical limit is 2^48 1K messages before you
* have to change the key.
*
* Once you are done computing CMAC with a key, it is a good idea to
* destroy the state so an attacker cannot recover the key; use
* tc_cmac_erase to accomplish this.
*/
#ifndef __TC_CMAC_MODE_H__
#define __TC_CMAC_MODE_H__
#include <tinycrypt/aes.h>
#include <stddef.h>
#ifdef __cplusplus
extern "C" {
#endif
/* padding for last message block */
#define TC_CMAC_PADDING 0x80
/* struct tc_cmac_struct represents the state of a CMAC computation */
typedef struct tc_cmac_struct {
/* initialization vector */
uint8_t iv[TC_AES_BLOCK_SIZE];
/* used if message length is a multiple of block_size bytes */
uint8_t K1[TC_AES_BLOCK_SIZE];
/* used if message length isn't a multiple block_size bytes */
uint8_t K2[TC_AES_BLOCK_SIZE];
/* where to put bytes that didn't fill a block */
uint8_t leftover[TC_AES_BLOCK_SIZE];
/* identifies the encryption key */
unsigned int keyid;
/* next available leftover location */
unsigned int leftover_offset;
/* AES key schedule */
TCAesKeySched_t sched;
/* calls to tc_cmac_update left before re-key */
uint64_t countdown;
} *TCCmacState_t;
/**
* @brief Configures the CMAC state to use the given AES key
* @return returns TC_CRYPTO_SUCCESS (1) after having configured the CMAC state
* returns TC_CRYPTO_FAIL (0) if:
* s == NULL or
* key == NULL
*
* @param s IN/OUT -- the state to set up
* @param key IN -- the key to use
* @param sched IN -- AES key schedule
*/
int tc_cmac_setup(TCCmacState_t s, const uint8_t *key,
TCAesKeySched_t sched);
/**
* @brief Erases the CMAC state
* @return returns TC_CRYPTO_SUCCESS (1) after having configured the CMAC state
* returns TC_CRYPTO_FAIL (0) if:
* s == NULL
*
* @param s IN/OUT -- the state to erase
*/
int tc_cmac_erase(TCCmacState_t s);
/**
* @brief Initializes a new CMAC computation
* @return returns TC_CRYPTO_SUCCESS (1) after having initialized the CMAC state
* returns TC_CRYPTO_FAIL (0) if:
* s == NULL
*
* @param s IN/OUT -- the state to initialize
*/
int tc_cmac_init(TCCmacState_t s);
/**
* @brief Incrementally computes CMAC over the next data segment
* @return returns TC_CRYPTO_SUCCESS (1) after successfully updating the CMAC state
* returns TC_CRYPTO_FAIL (0) if:
* s == NULL or
* if data == NULL when dlen > 0
*
* @param s IN/OUT -- the CMAC state
* @param data IN -- the next data segment to MAC
* @param dlen IN -- the length of data in bytes
*/
int tc_cmac_update(TCCmacState_t s, const uint8_t *data, size_t dlen);
/**
* @brief Generates the tag from the CMAC state
* @return returns TC_CRYPTO_SUCCESS (1) after successfully generating the tag
* returns TC_CRYPTO_FAIL (0) if:
* tag == NULL or
* s == NULL
*
* @param tag OUT -- the CMAC tag
* @param s IN -- CMAC state
*/
int tc_cmac_final(uint8_t *tag, TCCmacState_t s);
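/* Editor's sketch of steps (1)-(3) above (not upstream code); 'key' and
 * the data segments are hypothetical:
 *
 *   struct tc_cmac_struct state;
 *   struct tc_aes_key_sched_struct sched;
 *   uint8_t tag[TC_AES_BLOCK_SIZE];
 *
 *   (void)tc_cmac_setup(&state, key, &sched);
 *   (void)tc_cmac_init(&state);
 *   (void)tc_cmac_update(&state, seg1, seg1_len);
 *   (void)tc_cmac_update(&state, seg2, seg2_len);   // order matters
 *   (void)tc_cmac_final(tag, &state);
 *   (void)tc_cmac_erase(&state);                    // wipe key material
 */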
#ifdef __cplusplus
}
#endif
#endif /* __TC_CMAC_MODE_H__ */

View file

@ -0,0 +1,61 @@
/* constants.h - TinyCrypt interface to constants */
/*
* Copyright (C) 2017 by Intel Corporation, All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file
* @brief -- Interface to constants.
*
*/
#ifndef __TC_CONSTANTS_H__
#define __TC_CONSTANTS_H__
#ifdef __cplusplus
extern "C" {
#endif
#include <stdbool.h>
#ifndef NULL
#define NULL ((void *)0)
#endif
#define TC_CRYPTO_SUCCESS 1
#define TC_CRYPTO_FAIL 0
#define TC_ZERO_BYTE 0x00
#ifdef __cplusplus
}
#endif
#endif /* __TC_CONSTANTS_H__ */

View file

@ -0,0 +1,108 @@
/* ctr_mode.h - TinyCrypt interface to CTR mode */
/*
* Copyright (C) 2017 by Intel Corporation, All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file
* @brief Interface to CTR mode.
*
* Overview: CTR (pronounced "counter") mode is a NIST approved mode of
* operation defined in SP 800-38a. It can be used with any
* block cipher to provide confidentiality of strings of any
* length. TinyCrypt hard codes AES128 as the block cipher.
*
 * Security: CTR mode achieves confidentiality only if the counter value is
 *           never reused with the same encryption key. If the counter is
 *           repeated, then an adversary might be able to defeat the scheme.
*
 *           The usual way to ensure distinct counter values is to initialize
 *           the counter to a given value (0, for example) and increment it
 *           every time a new block is enciphered. This naturally leads to a
 *           limit on the number q of blocks that can be enciphered with the
 *           same key: q < 2^(counter size).
*
 *           TinyCrypt uses a counter of 32 bits. This means that after 2^32
 *           block encryptions, the counter will be reused (thus losing CTR
 *           security). 2^32 block encryptions should be enough for most
* applications targeting constrained devices. Applications intended
* to encrypt a larger number of blocks must replace the key after
* 2^32 block encryptions.
*
* CTR mode provides NO data integrity.
*
* Requires: AES-128
*
* Usage: 1) call tc_ctr_mode to process the data to encrypt/decrypt.
*
*/
#ifndef __TC_CTR_MODE_H__
#define __TC_CTR_MODE_H__
#include <tinycrypt/aes.h>
#include <tinycrypt/constants.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief CTR mode encryption/decryption procedure.
* CTR mode encrypts (or decrypts) inlen bytes from in buffer into out buffer
* @return returns TC_CRYPTO_SUCCESS (1)
* returns TC_CRYPTO_FAIL (0) if:
* out == NULL or
* in == NULL or
* ctr == NULL or
* sched == NULL or
* inlen == 0 or
* outlen == 0 or
* inlen != outlen
* @note Assumes:- The current value in ctr has NOT been used with sched
* - out points to inlen bytes
* - in points to inlen bytes
* - ctr is an integer counter in littleEndian format
* - sched was initialized by aes_set_encrypt_key
* @param out OUT -- produced ciphertext (plaintext)
* @param outlen IN -- length of ciphertext buffer in bytes
* @param in IN -- data to encrypt (or decrypt)
* @param inlen IN -- length of input data in bytes
* @param ctr IN/OUT -- the current counter value
* @param sched IN -- an initialized AES key schedule
*/
int tc_ctr_mode(uint8_t *out, unsigned int outlen, const uint8_t *in,
unsigned int inlen, uint8_t *ctr, const TCAesKeySched_t sched);
#ifdef __cplusplus
}
#endif
#endif /* __TC_CTR_MODE_H__ */
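/*
 * A minimal usage sketch of tc_ctr_mode(): encrypting a buffer as described
 * in the Usage note above. tc_aes128_set_encrypt_key() and the TC_AES_*
 * sizes are assumed from <tinycrypt/aes.h>; the all-zero key and counter
 * block are placeholder example values only. Decryption is the same call
 * with the ciphertext as input and an identical starting counter block.
 */
#include <tinycrypt/aes.h>
#include <tinycrypt/ctr_mode.h>
#include <tinycrypt/constants.h>

static int example_ctr_encrypt(uint8_t *out, const uint8_t *in, unsigned int len)
{
	struct tc_aes_key_sched_struct sched;
	const uint8_t key[TC_AES_KEY_SIZE] = { 0 }; /* example key only */
	uint8_t ctr[TC_AES_BLOCK_SIZE] = { 0 };     /* nonce || counter, counter starts at 0 */

	if (tc_aes128_set_encrypt_key(&sched, key) != TC_CRYPTO_SUCCESS) {
		return TC_CRYPTO_FAIL;
	}
	/* out and in are both len bytes; ctr is advanced in place */
	return tc_ctr_mode(out, len, in, len, ctr, &sched);
}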

View file

@ -0,0 +1,166 @@
/* ctr_prng.h - TinyCrypt interface to a CTR-PRNG implementation */
/*
* Copyright (c) 2016, Chris Morrison
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file
* @brief Interface to a CTR-PRNG implementation.
*
* Overview: A pseudo-random number generator (PRNG) generates a sequence
* of numbers that have a distribution close to the one expected
* for a sequence of truly random numbers. The NIST Special
* Publication 800-90A specifies several mechanisms to generate
* sequences of pseudo random numbers, including the CTR-PRNG one
* which is based on AES. TinyCrypt implements CTR-PRNG with
* AES-128.
*
* Security: A cryptographically secure PRNG depends on the existence of an
* entropy source to provide a truly random seed as well as the
* security of the primitives used as the building blocks (AES-128
* in this instance).
*
* Requires: - AES-128
*
* Usage: 1) call tc_ctr_prng_init to seed the prng context
*
* 2) call tc_ctr_prng_reseed to mix in additional entropy into
* the prng context
*
* 3) call tc_ctr_prng_generate to output the pseudo-random data
*
* 4) call tc_ctr_prng_uninstantiate to zero out the prng context
*/
#ifndef __TC_CTR_PRNG_H__
#define __TC_CTR_PRNG_H__
#include <tinycrypt/aes.h>
#define TC_CTR_PRNG_RESEED_REQ -1
#ifdef __cplusplus
extern "C" {
#endif
typedef struct {
/* updated each time another BLOCKLEN_BYTES bytes are produced */
uint8_t V[TC_AES_BLOCK_SIZE];
/* updated whenever the PRNG is reseeded */
struct tc_aes_key_sched_struct key;
/* number of requests since initialization/reseeding */
uint64_t reseedCount;
} TCCtrPrng_t;
/**
* @brief CTR-PRNG initialization procedure
* Initializes prng context with entropy and personalization string (if any)
* @return returns TC_CRYPTO_SUCCESS (1)
* returns TC_CRYPTO_FAIL (0) if:
* ctx == NULL,
* entropy == NULL,
* entropyLen < (TC_AES_KEY_SIZE + TC_AES_BLOCK_SIZE)
* @note Only the first (TC_AES_KEY_SIZE + TC_AES_BLOCK_SIZE) bytes of
* both the entropy and personalization inputs are used -
* supplying additional bytes has no effect.
* @param ctx IN/OUT -- the PRNG context to initialize
* @param entropy IN -- entropy used to seed the PRNG
* @param entropyLen IN -- entropy length in bytes
* @param personalization IN -- personalization string used to seed the PRNG
* (may be null)
* @param plen IN -- personalization length in bytes
*
*/
int tc_ctr_prng_init(TCCtrPrng_t * const ctx,
uint8_t const * const entropy,
unsigned int entropyLen,
uint8_t const * const personalization,
unsigned int pLen);
/**
* @brief CTR-PRNG reseed procedure
* Mixes entropy and additional_input into the prng context
* @return returns TC_CRYPTO_SUCCESS (1)
* returns TC_CRYPTO_FAIL (0) if:
* ctx == NULL,
* entropy == NULL,
* entropylen < (TC_AES_KEY_SIZE + TC_AES_BLOCK_SIZE)
* @note It is better to reseed an existing prng context rather than
 *       re-initialize it, so that any existing entropy in the context is
 *       preserved. This offers some protection against undetected failures
* of the entropy source.
* @note Assumes tc_ctr_prng_init has been called for ctx
* @param ctx IN/OUT -- the PRNG state
* @param entropy IN -- entropy to mix into the prng
* @param entropylen IN -- length of entropy in bytes
* @param additional_input IN -- additional input to the prng (may be null)
* @param additionallen IN -- additional input length in bytes
*/
int tc_ctr_prng_reseed(TCCtrPrng_t * const ctx,
uint8_t const * const entropy,
unsigned int entropyLen,
uint8_t const * const additional_input,
unsigned int additionallen);
/**
* @brief CTR-PRNG generate procedure
* Generates outlen pseudo-random bytes into out buffer, updates prng
* @return returns TC_CRYPTO_SUCCESS (1)
* returns TC_CTR_PRNG_RESEED_REQ (-1) if a reseed is needed
* returns TC_CRYPTO_FAIL (0) if:
* ctx == NULL,
* out == NULL,
* outlen >= 2^16
* @note Assumes tc_ctr_prng_init has been called for ctx
* @param ctx IN/OUT -- the PRNG context
* @param additional_input IN -- additional input to the prng (may be null)
* @param additionallen IN -- additional input length in bytes
* @param out IN/OUT -- buffer to receive output
* @param outlen IN -- size of out buffer in bytes
*/
int tc_ctr_prng_generate(TCCtrPrng_t * const ctx,
uint8_t const * const additional_input,
unsigned int additionallen,
uint8_t * const out,
unsigned int outlen);
/**
* @brief CTR-PRNG uninstantiate procedure
* Zeroes the internal state of the supplied prng context
* @return none
* @param ctx IN/OUT -- the PRNG context
*/
void tc_ctr_prng_uninstantiate(TCCtrPrng_t * const ctx);
#ifdef __cplusplus
}
#endif
#endif /* __TC_CTR_PRNG_H__ */
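/*
 * A minimal usage sketch of the init/generate/reseed/uninstantiate flow from
 * the Usage note above. The zero-filled entropy buffer is only a placeholder
 * (a real application must fill it from a hardware or OS entropy source), and
 * the TC_AES_* sizes are assumed from <tinycrypt/aes.h>.
 */
#include <tinycrypt/ctr_prng.h>
#include <tinycrypt/constants.h>

static int example_ctr_prng(uint8_t *out, unsigned int outlen)
{
	TCCtrPrng_t ctx;
	uint8_t entropy[TC_AES_KEY_SIZE + TC_AES_BLOCK_SIZE] = { 0 }; /* placeholder: use real entropy */
	const uint8_t pers[] = "device-id+timestamp";                 /* optional personalization */
	int rc;

	if (tc_ctr_prng_init(&ctx, entropy, sizeof(entropy),
			     pers, sizeof(pers) - 1) != TC_CRYPTO_SUCCESS) {
		return TC_CRYPTO_FAIL;
	}

	rc = tc_ctr_prng_generate(&ctx, NULL, 0, out, outlen); /* outlen must be < 2^16 */
	if (rc == TC_CTR_PRNG_RESEED_REQ) {
		/* mix in fresh entropy, then retry the generate call */
		(void)tc_ctr_prng_reseed(&ctx, entropy, sizeof(entropy), NULL, 0);
		rc = tc_ctr_prng_generate(&ctx, NULL, 0, out, outlen);
	}

	tc_ctr_prng_uninstantiate(&ctx);
	return rc;
}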

View file

@ -0,0 +1,545 @@
/* ecc.h - TinyCrypt interface to common ECC functions */
/* Copyright (c) 2014, Kenneth MacKay
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright (C) 2017 by Intel Corporation, All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file
* @brief -- Interface to common ECC functions.
*
* Overview: This software is an implementation of common functions
* necessary to elliptic curve cryptography. This implementation uses
* curve NIST p-256.
*
* Security: The curve NIST p-256 provides approximately 128 bits of security.
*
*/
#ifndef __TC_UECC_H__
#define __TC_UECC_H__
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Word size (4 bytes considering 32-bits architectures) */
#define uECC_WORD_SIZE 4
/* setting max number of calls to prng: */
#ifndef uECC_RNG_MAX_TRIES
#define uECC_RNG_MAX_TRIES 64
#endif
/* defining data types to store word and bit counts: */
typedef int8_t wordcount_t;
typedef int16_t bitcount_t;
/* defining data type for comparison result: */
typedef int8_t cmpresult_t;
/* defining data type to store ECC coordinate/point in 32bits words: */
typedef unsigned int uECC_word_t;
/* defining data type to store an ECC coordinate/point in 64bits words: */
typedef uint64_t uECC_dword_t;
/* defining masks useful for ecc computations: */
#define HIGH_BIT_SET 0x80000000
#define uECC_WORD_BITS 32
#define uECC_WORD_BITS_SHIFT 5
#define uECC_WORD_BITS_MASK 0x01F
/* Number of 32-bit words needed to represent an element of the curve p-256: */
#define NUM_ECC_WORDS 8
/* Number of bytes needed to represent an element of the curve p-256: */
#define NUM_ECC_BYTES (uECC_WORD_SIZE*NUM_ECC_WORDS)
/* structure that represents an elliptic curve (e.g. p256):*/
struct uECC_Curve_t;
typedef const struct uECC_Curve_t * uECC_Curve;
struct uECC_Curve_t {
wordcount_t num_words;
wordcount_t num_bytes;
bitcount_t num_n_bits;
uECC_word_t p[NUM_ECC_WORDS];
uECC_word_t n[NUM_ECC_WORDS];
uECC_word_t G[NUM_ECC_WORDS * 2];
uECC_word_t b[NUM_ECC_WORDS];
void (*double_jacobian)(uECC_word_t * X1, uECC_word_t * Y1, uECC_word_t * Z1,
uECC_Curve curve);
void (*x_side)(uECC_word_t *result, const uECC_word_t *x, uECC_Curve curve);
void (*mmod_fast)(uECC_word_t *result, uECC_word_t *product);
};
/*
 * @brief computes the doubling of a point in Jacobian coordinates, in place.
* @param X1 IN/OUT -- x coordinate
* @param Y1 IN/OUT -- y coordinate
* @param Z1 IN/OUT -- z coordinate
* @param curve IN -- elliptic curve
*/
void double_jacobian_default(uECC_word_t * X1, uECC_word_t * Y1,
uECC_word_t * Z1, uECC_Curve curve);
/*
* @brief Computes x^3 + ax + b. result must not overlap x.
* @param result OUT -- x^3 + ax + b
* @param x IN -- value of x
* @param curve IN -- elliptic curve
*/
void x_side_default(uECC_word_t *result, const uECC_word_t *x,
uECC_Curve curve);
/*
* @brief Computes result = product % curve_p
* from http://www.nsa.gov/ia/_files/nist-routines.pdf
* @param result OUT -- product % curve_p
* @param product IN -- value to be reduced mod curve_p
*/
void vli_mmod_fast_secp256r1(unsigned int *result, unsigned int *product);
/* Bytes to words ordering: */
#define BYTES_TO_WORDS_8(a, b, c, d, e, f, g, h) 0x##d##c##b##a, 0x##h##g##f##e
#define BYTES_TO_WORDS_4(a, b, c, d) 0x##d##c##b##a
#define BITS_TO_WORDS(num_bits) \
((num_bits + ((uECC_WORD_SIZE * 8) - 1)) / (uECC_WORD_SIZE * 8))
#define BITS_TO_BYTES(num_bits) ((num_bits + 7) / 8)
/* definition of curve NIST p-256: */
static const struct uECC_Curve_t curve_secp256r1 = {
NUM_ECC_WORDS,
NUM_ECC_BYTES,
256, /* num_n_bits */ {
BYTES_TO_WORDS_8(FF, FF, FF, FF, FF, FF, FF, FF),
BYTES_TO_WORDS_8(FF, FF, FF, FF, 00, 00, 00, 00),
BYTES_TO_WORDS_8(00, 00, 00, 00, 00, 00, 00, 00),
BYTES_TO_WORDS_8(01, 00, 00, 00, FF, FF, FF, FF)
}, {
BYTES_TO_WORDS_8(51, 25, 63, FC, C2, CA, B9, F3),
BYTES_TO_WORDS_8(84, 9E, 17, A7, AD, FA, E6, BC),
BYTES_TO_WORDS_8(FF, FF, FF, FF, FF, FF, FF, FF),
BYTES_TO_WORDS_8(00, 00, 00, 00, FF, FF, FF, FF)
}, {
BYTES_TO_WORDS_8(96, C2, 98, D8, 45, 39, A1, F4),
BYTES_TO_WORDS_8(A0, 33, EB, 2D, 81, 7D, 03, 77),
BYTES_TO_WORDS_8(F2, 40, A4, 63, E5, E6, BC, F8),
BYTES_TO_WORDS_8(47, 42, 2C, E1, F2, D1, 17, 6B),
BYTES_TO_WORDS_8(F5, 51, BF, 37, 68, 40, B6, CB),
BYTES_TO_WORDS_8(CE, 5E, 31, 6B, 57, 33, CE, 2B),
BYTES_TO_WORDS_8(16, 9E, 0F, 7C, 4A, EB, E7, 8E),
BYTES_TO_WORDS_8(9B, 7F, 1A, FE, E2, 42, E3, 4F)
}, {
BYTES_TO_WORDS_8(4B, 60, D2, 27, 3E, 3C, CE, 3B),
BYTES_TO_WORDS_8(F6, B0, 53, CC, B0, 06, 1D, 65),
BYTES_TO_WORDS_8(BC, 86, 98, 76, 55, BD, EB, B3),
BYTES_TO_WORDS_8(E7, 93, 3A, AA, D8, 35, C6, 5A)
},
&double_jacobian_default,
&x_side_default,
&vli_mmod_fast_secp256r1
};
uECC_Curve uECC_secp256r1(void);
/*
* @brief Generates a random integer in the range 0 < random < top.
* Both random and top have num_words words.
* @param random OUT -- random integer in the range 0 < random < top
* @param top IN -- upper limit
* @param num_words IN -- number of words
* @return a random integer in the range 0 < random < top
*/
int uECC_generate_random_int(uECC_word_t *random, const uECC_word_t *top,
wordcount_t num_words);
/* uECC_RNG_Function type
* The RNG function should fill 'size' random bytes into 'dest'. It should
* return 1 if 'dest' was filled with random data, or 0 if the random data could
* not be generated. The filled-in values should be either truly random, or from
* a cryptographically-secure PRNG.
*
* A correctly functioning RNG function must be set (using uECC_set_rng())
* before calling uECC_make_key() or uECC_sign().
*
* Setting a correctly functioning RNG function improves the resistance to
* side-channel attacks for uECC_shared_secret().
*
* A correct RNG function is set by default. If you are building on another
* POSIX-compliant system that supports /dev/random or /dev/urandom, you can
* define uECC_POSIX to use the predefined RNG.
*/
typedef int(*uECC_RNG_Function)(uint8_t *dest, unsigned int size);
/*
* @brief Set the function that will be used to generate random bytes. The RNG
* function should return 1 if the random data was generated, or 0 if the random
* data could not be generated.
*
* @note On platforms where there is no predefined RNG function, this must be
* called before uECC_make_key() or uECC_sign() are used.
*
* @param rng_function IN -- function that will be used to generate random bytes
*/
void uECC_set_rng(uECC_RNG_Function rng_function);
/*
* @brief provides current uECC_RNG_Function.
* @return Returns the function that will be used to generate random bytes.
*/
uECC_RNG_Function uECC_get_rng(void);
/*
* @brief computes the size of a private key for the curve in bytes.
* @param curve IN -- elliptic curve
* @return size of a private key for the curve in bytes.
*/
int uECC_curve_private_key_size(uECC_Curve curve);
/*
* @brief computes the size of a public key for the curve in bytes.
* @param curve IN -- elliptic curve
* @return the size of a public key for the curve in bytes.
*/
int uECC_curve_public_key_size(uECC_Curve curve);
/*
* @brief Compute the corresponding public key for a private key.
* @param private_key IN -- The private key to compute the public key for
* @param public_key OUT -- Will be filled in with the corresponding public key
* @param curve
* @return Returns 1 if key was computed successfully, 0 if an error occurred.
*/
int uECC_compute_public_key(const uint8_t *private_key,
uint8_t *public_key, uECC_Curve curve);
/*
* @brief Compute public-key.
* @return corresponding public-key.
* @param result OUT -- public-key
* @param private_key IN -- private-key
* @param curve IN -- elliptic curve
*/
uECC_word_t EccPoint_compute_public_key(uECC_word_t *result,
uECC_word_t *private_key, uECC_Curve curve);
/*
* @brief Regularize the bitcount for the private key so that attackers cannot
* use a side channel attack to learn the number of leading zeros.
* @return Regularized k
* @param k IN -- private-key
* @param k0 IN/OUT -- regularized k
* @param k1 IN/OUT -- regularized k
* @param curve IN -- elliptic curve
*/
uECC_word_t regularize_k(const uECC_word_t * const k, uECC_word_t *k0,
uECC_word_t *k1, uECC_Curve curve);
/*
* @brief Point multiplication algorithm using Montgomery's ladder with co-Z
* coordinates. See http://eprint.iacr.org/2011/338.pdf.
* @note Result may overlap point.
* @param result OUT -- returns scalar*point
* @param point IN -- elliptic curve point
* @param scalar IN -- scalar
* @param initial_Z IN -- initial value for z
* @param num_bits IN -- number of bits in scalar
* @param curve IN -- elliptic curve
*/
void EccPoint_mult(uECC_word_t * result, const uECC_word_t * point,
const uECC_word_t * scalar, const uECC_word_t * initial_Z,
bitcount_t num_bits, uECC_Curve curve);
/*
* @brief Constant-time comparison to zero - secure way to compare long integers
* @param vli IN -- very long integer
* @param num_words IN -- number of words in the vli
* @return 1 if vli == 0, 0 otherwise.
*/
uECC_word_t uECC_vli_isZero(const uECC_word_t *vli, wordcount_t num_words);
/*
* @brief Check if 'point' is the point at infinity
* @param point IN -- elliptic curve point
* @param curve IN -- elliptic curve
 * @return 1 if 'point' is the point at infinity, 0 otherwise.
*/
uECC_word_t EccPoint_isZero(const uECC_word_t *point, uECC_Curve curve);
/*
* @brief computes the sign of left - right, in constant time.
* @param left IN -- left term to be compared
* @param right IN -- right term to be compared
* @param num_words IN -- number of words
* @return the sign of left - right
*/
cmpresult_t uECC_vli_cmp(const uECC_word_t *left, const uECC_word_t *right,
wordcount_t num_words);
/*
* @brief computes sign of left - right, not in constant time.
* @note should not be used if inputs are part of a secret
* @param left IN -- left term to be compared
* @param right IN -- right term to be compared
* @param num_words IN -- number of words
* @return the sign of left - right
*/
cmpresult_t uECC_vli_cmp_unsafe(const uECC_word_t *left, const uECC_word_t *right,
wordcount_t num_words);
/*
* @brief Computes result = (left - right) % mod.
* @note Assumes that (left < mod) and (right < mod), and that result does not
* overlap mod.
* @param result OUT -- (left - right) % mod
 * @param left IN -- left term in modular subtraction
* @param right IN -- right term in modular subtraction
* @param mod IN -- mod
* @param num_words IN -- number of words
*/
void uECC_vli_modSub(uECC_word_t *result, const uECC_word_t *left,
const uECC_word_t *right, const uECC_word_t *mod,
wordcount_t num_words);
/*
* @brief Computes P' = (x1', y1', Z3), P + Q = (x3, y3, Z3) or
* P => P', Q => P + Q
* @note assumes Input P = (x1, y1, Z), Q = (x2, y2, Z)
* @param X1 IN -- x coordinate of P
* @param Y1 IN -- y coordinate of P
* @param X2 IN -- x coordinate of Q
* @param Y2 IN -- y coordinate of Q
* @param curve IN -- elliptic curve
*/
void XYcZ_add(uECC_word_t * X1, uECC_word_t * Y1, uECC_word_t * X2,
uECC_word_t * Y2, uECC_Curve curve);
/*
* @brief Computes (x1 * z^2, y1 * z^3)
* @param X1 IN -- previous x1 coordinate
* @param Y1 IN -- previous y1 coordinate
* @param Z IN -- z value
* @param curve IN -- elliptic curve
*/
void apply_z(uECC_word_t * X1, uECC_word_t * Y1, const uECC_word_t * const Z,
uECC_Curve curve);
/*
* @brief Check if bit is set.
* @return Returns nonzero if bit 'bit' of vli is set.
* @warning It is assumed that the value provided in 'bit' is within the
* boundaries of the word-array 'vli'.
* @note The bit ordering layout assumed for vli is: {31, 30, ..., 0},
* {63, 62, ..., 32}, {95, 94, ..., 64}, {127, 126,..., 96} for a vli consisting
* of 4 uECC_word_t elements.
*/
uECC_word_t uECC_vli_testBit(const uECC_word_t *vli, bitcount_t bit);
/*
* @brief Computes result = product % mod, where product is 2N words long.
 * @param result OUT -- product % mod
 * @param product IN -- the 2N-word value to be reduced
 * @param mod IN -- modulus
* @param num_words IN -- number of words
* @warning Currently only designed to work for curve_p or curve_n.
*/
void uECC_vli_mmod(uECC_word_t *result, uECC_word_t *product,
const uECC_word_t *mod, wordcount_t num_words);
/*
* @brief Computes modular product (using curve->mmod_fast)
 * @param result OUT -- (left * right) % curve_p
* @param left IN -- left term in product
* @param right IN -- right term in product
* @param curve IN -- elliptic curve
*/
void uECC_vli_modMult_fast(uECC_word_t *result, const uECC_word_t *left,
const uECC_word_t *right, uECC_Curve curve);
/*
* @brief Computes result = left - right.
* @note Can modify in place.
* @param result OUT -- left - right
* @param left IN -- left term in subtraction
* @param right IN -- right term in subtraction
* @param num_words IN -- number of words
* @return borrow
*/
uECC_word_t uECC_vli_sub(uECC_word_t *result, const uECC_word_t *left,
const uECC_word_t *right, wordcount_t num_words);
/*
* @brief Constant-time comparison function(secure way to compare long ints)
* @param left IN -- left term in comparison
* @param right IN -- right term in comparison
* @param num_words IN -- number of words
* @return Returns 0 if left == right, 1 otherwise.
*/
uECC_word_t uECC_vli_equal(const uECC_word_t *left, const uECC_word_t *right,
wordcount_t num_words);
/*
* @brief Computes (left * right) % mod
* @param result OUT -- (left * right) % mod
* @param left IN -- left term in product
* @param right IN -- right term in product
* @param mod IN -- mod
* @param num_words IN -- number of words
*/
void uECC_vli_modMult(uECC_word_t *result, const uECC_word_t *left,
const uECC_word_t *right, const uECC_word_t *mod,
wordcount_t num_words);
/*
* @brief Computes (1 / input) % mod
* @note All VLIs are the same size.
* @note See "Euclid's GCD to Montgomery Multiplication to the Great Divide"
* @param result OUT -- (1 / input) % mod
* @param input IN -- value to be modular inverted
* @param mod IN -- mod
* @param num_words -- number of words
*/
void uECC_vli_modInv(uECC_word_t *result, const uECC_word_t *input,
const uECC_word_t *mod, wordcount_t num_words);
/*
* @brief Sets dest = src.
* @param dest OUT -- destination buffer
* @param src IN -- origin buffer
* @param num_words IN -- number of words
*/
void uECC_vli_set(uECC_word_t *dest, const uECC_word_t *src,
wordcount_t num_words);
/*
* @brief Computes (left + right) % mod.
 * @note Assumes that (left < mod) and (right < mod), and that result does not
* overlap mod.
* @param result OUT -- (left + right) % mod.
* @param left IN -- left term in addition
* @param right IN -- right term in addition
* @param mod IN -- mod
* @param num_words IN -- number of words
*/
void uECC_vli_modAdd(uECC_word_t *result, const uECC_word_t *left,
const uECC_word_t *right, const uECC_word_t *mod,
wordcount_t num_words);
/*
* @brief Counts the number of bits required to represent vli.
* @param vli IN -- very long integer
* @param max_words IN -- number of words
* @return number of bits in given vli
*/
bitcount_t uECC_vli_numBits(const uECC_word_t *vli,
const wordcount_t max_words);
/*
* @brief Erases (set to 0) vli
* @param vli IN -- very long integer
* @param num_words IN -- number of words
*/
void uECC_vli_clear(uECC_word_t *vli, wordcount_t num_words);
/*
* @brief check if it is a valid point in the curve
* @param point IN -- point to be checked
* @param curve IN -- elliptic curve
* @return 0 if point is valid
* @exception returns -1 if it is a point at infinity
 * @exception returns -2 if x or y is not smaller than p,
* @exception returns -3 if y^2 != x^3 + ax + b.
*/
int uECC_valid_point(const uECC_word_t *point, uECC_Curve curve);
/*
* @brief Check if a public key is valid.
* @param public_key IN -- The public key to be checked.
* @return returns 0 if the public key is valid
* @exception returns -1 if it is a point at infinity
 * @exception returns -2 if x or y is not smaller than p,
* @exception returns -3 if y^2 != x^3 + ax + b.
* @exception returns -4 if public key is the group generator.
*
* @note Note that you are not required to check for a valid public key before
* using any other uECC functions. However, you may wish to avoid spending CPU
* time computing a shared secret or verifying a signature using an invalid
* public key.
*/
int uECC_valid_public_key(const uint8_t *public_key, uECC_Curve curve);
/*
* @brief Converts an integer in uECC native format to big-endian bytes.
* @param bytes OUT -- bytes representation
* @param num_bytes IN -- number of bytes
* @param native IN -- uECC native representation
*/
void uECC_vli_nativeToBytes(uint8_t *bytes, int num_bytes,
const unsigned int *native);
/*
* @brief Converts big-endian bytes to an integer in uECC native format.
* @param native OUT -- uECC native representation
* @param bytes IN -- bytes representation
* @param num_bytes IN -- number of bytes
*/
void uECC_vli_bytesToNative(unsigned int *native, const uint8_t *bytes,
int num_bytes);
#ifdef __cplusplus
}
#endif
#endif /* __TC_UECC_H__ */
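/*
 * A minimal usage sketch: registering a platform RNG through uECC_set_rng()
 * before any key generation or signing, as the uECC_RNG_Function notes above
 * require. board_trng_read() is a hypothetical platform entropy routine
 * (assumed here to return 0 on success); it is not part of TinyCrypt.
 */
#include <tinycrypt/ecc.h>

extern int board_trng_read(uint8_t *buf, unsigned int len); /* hypothetical HW TRNG hook */

static int example_rng(uint8_t *dest, unsigned int size)
{
	/* uECC_RNG_Function contract: return 1 on success, 0 on failure */
	return (board_trng_read(dest, size) == 0) ? 1 : 0;
}

static void example_install_rng(void)
{
	uECC_set_rng(example_rng);
}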

View file

@ -0,0 +1,131 @@
/* ecc_dh.h - TinyCrypt interface to EC-DH implementation */
/*
* Copyright (c) 2014, Kenneth MacKay
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/* Copyright (C) 2017 by Intel Corporation, All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file
* @brief -- Interface to EC-DH implementation.
*
* Overview: This software is an implementation of EC-DH. This implementation
* uses curve NIST p-256.
*
* Security: The curve NIST p-256 provides approximately 128 bits of security.
*/
#ifndef __TC_ECC_DH_H__
#define __TC_ECC_DH_H__
#include <tinycrypt/ecc.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Create a public/private key pair.
* @return returns TC_CRYPTO_SUCCESS (1) if the key pair was generated successfully
* returns TC_CRYPTO_FAIL (0) if error while generating key pair
*
* @param p_public_key OUT -- Will be filled in with the public key. Must be at
* least 2 * the curve size (in bytes) long. For curve secp256r1, p_public_key
* must be 64 bytes long.
* @param p_private_key OUT -- Will be filled in with the private key. Must be as
* long as the curve order (for secp256r1, p_private_key must be 32 bytes long).
*
* @note side-channel countermeasure: algorithm strengthened against timing
* attack.
* @warning A cryptographically-secure PRNG function must be set (using
* uECC_set_rng()) before calling uECC_make_key().
*/
int uECC_make_key(uint8_t *p_public_key, uint8_t *p_private_key, uECC_Curve curve);
#ifdef ENABLE_TESTS
/**
* @brief Create a public/private key pair given a specific d.
*
* @note THIS FUNCTION SHOULD BE CALLED ONLY FOR TEST PURPOSES. Refer to
* uECC_make_key() function for real applications.
*/
int uECC_make_key_with_d(uint8_t *p_public_key, uint8_t *p_private_key,
unsigned int *d, uECC_Curve curve);
#endif
/**
* @brief Compute a shared secret given your secret key and someone else's
* public key.
* @return returns TC_CRYPTO_SUCCESS (1) if the shared secret was computed successfully
* returns TC_CRYPTO_FAIL (0) otherwise
*
* @param p_secret OUT -- Will be filled in with the shared secret value. Must be
* the same size as the curve size (for curve secp256r1, secret must be 32 bytes
 * long).
* @param p_public_key IN -- The public key of the remote party.
* @param p_private_key IN -- Your private key.
*
* @warning It is recommended to use the output of uECC_shared_secret() as the
* input of a recommended Key Derivation Function (see NIST SP 800-108) in
* order to produce a cryptographically secure symmetric key.
*/
int uECC_shared_secret(const uint8_t *p_public_key, const uint8_t *p_private_key,
uint8_t *p_secret, uECC_Curve curve);
#ifdef __cplusplus
}
#endif
#endif /* __TC_ECC_DH_H__ */
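/*
 * A minimal usage sketch of ECDH key agreement over secp256r1 with
 * uECC_make_key() and uECC_shared_secret(). Assumes a cryptographically
 * secure RNG has already been registered with uECC_set_rng(); peer_pub is
 * the remote device's 64-byte public key, and the resulting secret should
 * be fed into a KDF rather than used directly.
 */
#include <tinycrypt/ecc_dh.h>
#include <tinycrypt/constants.h>

static int example_ecdh(const uint8_t peer_pub[2 * NUM_ECC_BYTES],
			uint8_t secret[NUM_ECC_BYTES])
{
	uint8_t pub[2 * NUM_ECC_BYTES]; /* 64 bytes for secp256r1 */
	uint8_t priv[NUM_ECC_BYTES];    /* 32 bytes for secp256r1 */
	uECC_Curve curve = uECC_secp256r1();

	if (uECC_make_key(pub, priv, curve) != TC_CRYPTO_SUCCESS) {
		return TC_CRYPTO_FAIL;
	}
	/* 'pub' would be sent to the peer at this point */
	return uECC_shared_secret(peer_pub, priv, secret, curve);
}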

View file

@ -0,0 +1,139 @@
/* ecc_dsa.h - TinyCrypt interface to EC-DSA implementation */
/*
* Copyright (c) 2014, Kenneth MacKay
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright (C) 2017 by Intel Corporation, All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file
* @brief -- Interface to EC-DSA implementation.
*
* Overview: This software is an implementation of EC-DSA. This implementation
* uses curve NIST p-256.
*
* Security: The curve NIST p-256 provides approximately 128 bits of security.
*
* Usage: - To sign: Compute a hash of the data you wish to sign (SHA-2 is
* recommended) and pass it in to ecdsa_sign function along with your
* private key and a random number. You must use a new non-predictable
* random number to generate each new signature.
* - To verify a signature: Compute the hash of the signed data using
* the same hash as the signer and pass it to this function along with
* the signer's public key and the signature values (r and s).
*/
#ifndef __TC_ECC_DSA_H__
#define __TC_ECC_DSA_H__
#include <tinycrypt/ecc.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Generate an ECDSA signature for a given hash value.
 * @return returns TC_CRYPTO_SUCCESS (1) if the signature was generated successfully
* returns TC_CRYPTO_FAIL (0) if an error occurred.
*
* @param p_private_key IN -- Your private key.
* @param p_message_hash IN -- The hash of the message to sign.
* @param p_hash_size IN -- The size of p_message_hash in bytes.
* @param p_signature OUT -- Will be filled in with the signature value. Must be
* at least 2 * curve size long (for secp256r1, signature must be 64 bytes long).
*
* @warning A cryptographically-secure PRNG function must be set (using
* uECC_set_rng()) before calling uECC_sign().
* @note Usage: Compute a hash of the data you wish to sign (SHA-2 is
* recommended) and pass it in to this function along with your private key.
* @note side-channel countermeasure: algorithm strengthened against timing
* attack.
*/
int uECC_sign(const uint8_t *p_private_key, const uint8_t *p_message_hash,
unsigned p_hash_size, uint8_t *p_signature, uECC_Curve curve);
#ifdef ENABLE_TESTS
/*
* THIS FUNCTION SHOULD BE CALLED FOR TEST PURPOSES ONLY.
* Refer to uECC_sign() function for real applications.
*/
int uECC_sign_with_k(const uint8_t *private_key, const uint8_t *message_hash,
unsigned int hash_size, uECC_word_t *k, uint8_t *signature,
uECC_Curve curve);
#endif
/**
* @brief Verify an ECDSA signature.
 * @return returns TC_CRYPTO_SUCCESS (1) if the signature is valid
 * returns TC_CRYPTO_FAIL (0) if the signature is invalid.
*
* @param p_public_key IN -- The signer's public key.
* @param p_message_hash IN -- The hash of the signed data.
* @param p_hash_size IN -- The size of p_message_hash in bytes.
* @param p_signature IN -- The signature values.
*
* @note Usage: Compute the hash of the signed data using the same hash as the
* signer and pass it to this function along with the signer's public key and
* the signature values (hash_size and signature).
*/
int uECC_verify(const uint8_t *p_public_key, const uint8_t *p_message_hash,
unsigned int p_hash_size, const uint8_t *p_signature, uECC_Curve curve);
#ifdef __cplusplus
}
#endif
#endif /* __TC_ECC_DSA_H__ */
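/*
 * A minimal usage sketch: signing and verifying a 32-byte message digest
 * over secp256r1 with uECC_sign()/uECC_verify(). Assumes uECC_set_rng() has
 * already been called and that 'hash' was computed by the caller (e.g. with
 * SHA-256); the buffer sizes follow the parameter notes above.
 */
#include <tinycrypt/ecc_dsa.h>
#include <tinycrypt/constants.h>

static int example_sign_and_verify(const uint8_t priv[NUM_ECC_BYTES],
				   const uint8_t pub[2 * NUM_ECC_BYTES],
				   const uint8_t hash[32])
{
	uint8_t sig[2 * NUM_ECC_BYTES]; /* r || s, 64 bytes for secp256r1 */
	uECC_Curve curve = uECC_secp256r1();

	if (uECC_sign(priv, hash, 32, sig, curve) != TC_CRYPTO_SUCCESS) {
		return TC_CRYPTO_FAIL;
	}
	return uECC_verify(pub, hash, 32, sig, curve);
}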

View file

@ -0,0 +1,81 @@
/* uECC_platform_specific.h - Interface to platform specific functions*/
/* Copyright (c) 2014, Kenneth MacKay
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.*/
/*
* Copyright (C) 2017 by Intel Corporation, All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* uECC_platform_specific.h -- Interface to platform specific functions
*/
#ifndef __UECC_PLATFORM_SPECIFIC_H_
#define __UECC_PLATFORM_SPECIFIC_H_
/*
* The RNG function should fill 'size' random bytes into 'dest'. It should
* return 1 if 'dest' was filled with random data, or 0 if the random data could
* not be generated. The filled-in values should be either truly random, or from
* a cryptographically-secure PRNG.
*
* A cryptographically-secure PRNG function must be set (using uECC_set_rng())
* before calling uECC_make_key() or uECC_sign().
*
* Setting a cryptographically-secure PRNG function improves the resistance to
* side-channel attacks for uECC_shared_secret().
*
* A correct PRNG function is set by default (default_RNG_defined = 1) and works
* for some platforms, such as Unix and Linux. For other platforms, you may need
* to provide another PRNG function.
*/
#define default_RNG_defined 1
int default_CSPRNG(uint8_t *dest, unsigned int size);
#endif /* __UECC_PLATFORM_SPECIFIC_H_ */

View file

@ -0,0 +1,139 @@
/* hmac.h - TinyCrypt interface to an HMAC implementation */
/*
* Copyright (C) 2017 by Intel Corporation, All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file
* @brief Interface to an HMAC implementation.
*
* Overview: HMAC is a message authentication code based on hash functions.
* TinyCrypt hard codes SHA-256 as the hash function. A message
* authentication code based on hash functions is also called a
* keyed cryptographic hash function since it performs a
* transformation specified by a key in an arbitrary length data
* set into a fixed length data set (also called tag).
*
* Security: The security of the HMAC depends on the length of the key and
* on the security of the hash function. Note that HMAC primitives
* are much less affected by collision attacks than their
* corresponding hash functions.
*
* Requires: SHA-256
*
* Usage: 1) call tc_hmac_set_key to set the HMAC key.
*
* 2) call tc_hmac_init to initialize a struct hash_state before
* processing the data.
*
* 3) call tc_hmac_update to process the next input segment;
* tc_hmac_update can be called as many times as needed to process
* all of the segments of the input; the order is important.
*
 *           4) call tc_hmac_final to output the tag.
*/
#ifndef __TC_HMAC_H__
#define __TC_HMAC_H__
#include <tinycrypt/sha256.h>
#ifdef __cplusplus
extern "C" {
#endif
struct tc_hmac_state_struct {
/* the internal state required by h */
struct tc_sha256_state_struct hash_state;
/* HMAC key schedule */
uint8_t key[2*TC_SHA256_BLOCK_SIZE];
};
typedef struct tc_hmac_state_struct *TCHmacState_t;
/**
* @brief HMAC set key procedure
* Configures ctx to use key
* @return returns TC_CRYPTO_SUCCESS (1)
* returns TC_CRYPTO_FAIL (0) if
* ctx == NULL or
* key == NULL or
* key_size == 0
 * @param ctx IN/OUT -- the struct tc_hmac_state_struct to initialize
* @param key IN -- the HMAC key to configure
* @param key_size IN -- the HMAC key size
*/
int tc_hmac_set_key(TCHmacState_t ctx, const uint8_t *key,
unsigned int key_size);
/**
* @brief HMAC init procedure
* Initializes ctx to begin the next HMAC operation
* @return returns TC_CRYPTO_SUCCESS (1)
* returns TC_CRYPTO_FAIL (0) if: ctx == NULL or key == NULL
* @param ctx IN/OUT -- struct tc_hmac_state_struct buffer to init
*/
int tc_hmac_init(TCHmacState_t ctx);
/**
* @brief HMAC update procedure
* Mixes data_length bytes addressed by data into state
 * @return returns TC_CRYPTO_SUCCESS (1)
* returns TC_CRYPTO_FAIL (0) if: ctx == NULL or key == NULL
* @note Assumes state has been initialized by tc_hmac_init
* @param ctx IN/OUT -- state of HMAC computation so far
* @param data IN -- data to incorporate into state
* @param data_length IN -- size of data in bytes
*/
int tc_hmac_update(TCHmacState_t ctx, const void *data,
unsigned int data_length);
/**
* @brief HMAC final procedure
* Writes the HMAC tag into the tag buffer
* @return returns TC_CRYPTO_SUCCESS (1)
* returns TC_CRYPTO_FAIL (0) if:
* tag == NULL or
* ctx == NULL or
* key == NULL or
* taglen != TC_SHA256_DIGEST_SIZE
* @note ctx is erased before exiting. This should never be changed/removed.
 * @note Assumes the tag buffer is at least sizeof(hmac_tag_size(state)) bytes
* state has been initialized by tc_hmac_init
* @param tag IN/OUT -- buffer to receive computed HMAC tag
* @param taglen IN -- size of tag in bytes
* @param ctx IN/OUT -- the HMAC state for computing tag
*/
int tc_hmac_final(uint8_t *tag, unsigned int taglen, TCHmacState_t ctx);
#ifdef __cplusplus
}
#endif
#endif /*__TC_HMAC_H__*/
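/*
 * A minimal usage sketch: computing an HMAC-SHA256 tag with the
 * set_key/init/update/final sequence from the Usage note above. The key and
 * message bytes are placeholder example values only.
 */
#include <tinycrypt/hmac.h>
#include <tinycrypt/constants.h>

static int example_hmac_sha256(uint8_t tag[TC_SHA256_DIGEST_SIZE])
{
	struct tc_hmac_state_struct ctx;
	const uint8_t key[] = "example-hmac-key";
	const uint8_t msg[] = "message to authenticate";

	if (tc_hmac_set_key(&ctx, key, sizeof(key) - 1) != TC_CRYPTO_SUCCESS) {
		return TC_CRYPTO_FAIL;
	}
	(void)tc_hmac_init(&ctx);
	(void)tc_hmac_update(&ctx, msg, sizeof(msg) - 1);
	return tc_hmac_final(tag, TC_SHA256_DIGEST_SIZE, &ctx); /* 32-byte tag */
}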

View file

@ -0,0 +1,164 @@
/* hmac_prng.h - TinyCrypt interface to an HMAC-PRNG implementation */
/*
* Copyright (C) 2017 by Intel Corporation, All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file
* @brief Interface to an HMAC-PRNG implementation.
*
* Overview: A pseudo-random number generator (PRNG) generates a sequence
* of numbers that have a distribution close to the one expected
* for a sequence of truly random numbers. The NIST Special
* Publication 800-90A specifies several mechanisms to generate
* sequences of pseudo random numbers, including the HMAC-PRNG one
* which is based on HMAC. TinyCrypt implements HMAC-PRNG with
* certain modifications from the NIST SP 800-90A spec.
*
* Security: A cryptographically secure PRNG depends on the existence of an
* entropy source to provide a truly random seed as well as the
* security of the primitives used as the building blocks (HMAC and
* SHA256, for TinyCrypt).
*
* The NIST SP 800-90A standard tolerates a null personalization,
* while TinyCrypt requires a non-null personalization. This is
* because a personalization string (the host name concatenated
* with a time stamp, for example) is easily computed and might be
* the last line of defense against failure of the entropy source.
*
* Requires: - SHA-256
* - HMAC
*
* Usage: 1) call tc_hmac_prng_init to set the HMAC key and process the
* personalization data.
*
* 2) call tc_hmac_prng_reseed to process the seed and additional
* input.
*
 *           3) call tc_hmac_prng_generate to output the pseudo-random data.
*/
#ifndef __TC_HMAC_PRNG_H__
#define __TC_HMAC_PRNG_H__
#include <tinycrypt/sha256.h>
#include <tinycrypt/hmac.h>
#ifdef __cplusplus
extern "C" {
#endif
#define TC_HMAC_PRNG_RESEED_REQ -1
struct tc_hmac_prng_struct {
/* the HMAC instance for this PRNG */
struct tc_hmac_state_struct h;
/* the PRNG key */
uint8_t key[TC_SHA256_DIGEST_SIZE];
/* PRNG state */
uint8_t v[TC_SHA256_DIGEST_SIZE];
/* calls to tc_hmac_prng_generate left before re-seed */
unsigned int countdown;
};
typedef struct tc_hmac_prng_struct *TCHmacPrng_t;
/**
* @brief HMAC-PRNG initialization procedure
* Initializes prng with personalization, disables tc_hmac_prng_generate
* @return returns TC_CRYPTO_SUCCESS (1)
* returns TC_CRYPTO_FAIL (0) if:
* prng == NULL,
* personalization == NULL,
* plen > MAX_PLEN
* @note Assumes: - personalization != NULL.
* The personalization is a platform unique string (e.g., the host
* name) and is the last line of defense against failure of the
* entropy source
* @warning NIST SP 800-90A specifies 3 items as seed material during
* initialization: entropy seed, personalization, and an optional
 *          nonce. TinyCrypt requires instead a non-null personalization
* (which is easily computed) and indirectly requires an entropy
* seed (since the reseed function is mandatorily called after
* init)
* @param prng IN/OUT -- the PRNG state to initialize
* @param personalization IN -- personalization string
* @param plen IN -- personalization length in bytes
*/
int tc_hmac_prng_init(TCHmacPrng_t prng,
const uint8_t *personalization,
unsigned int plen);
/**
* @brief HMAC-PRNG reseed procedure
* Mixes seed into prng, enables tc_hmac_prng_generate
* @return returns TC_CRYPTO_SUCCESS (1)
* returns TC_CRYPTO_FAIL (0) if:
* prng == NULL,
* seed == NULL,
* seedlen < MIN_SLEN,
 *                seedlen > MAX_SLEN,
* additional_input != (const uint8_t *) 0 && additionallen == 0,
* additional_input != (const uint8_t *) 0 && additionallen > MAX_ALEN
* @note Assumes:- tc_hmac_prng_init has been called for prng
* - seed has sufficient entropy.
*
* @param prng IN/OUT -- the PRNG state
* @param seed IN -- entropy to mix into the prng
* @param seedlen IN -- length of seed in bytes
* @param additional_input IN -- additional input to the prng
* @param additionallen IN -- additional input length in bytes
*/
int tc_hmac_prng_reseed(TCHmacPrng_t prng, const uint8_t *seed,
unsigned int seedlen, const uint8_t *additional_input,
unsigned int additionallen);
/**
* @brief HMAC-PRNG generate procedure
* Generates outlen pseudo-random bytes into out buffer, updates prng
* @return returns TC_CRYPTO_SUCCESS (1)
* returns TC_HMAC_PRNG_RESEED_REQ (-1) if a reseed is needed
* returns TC_CRYPTO_FAIL (0) if:
* out == NULL,
* prng == NULL,
* outlen == 0,
* outlen >= MAX_OUT
* @note Assumes tc_hmac_prng_init has been called for prng
* @param out IN/OUT -- buffer to receive output
* @param outlen IN -- size of out buffer in bytes
* @param prng IN/OUT -- the PRNG state
*/
int tc_hmac_prng_generate(uint8_t *out, unsigned int outlen, TCHmacPrng_t prng);
#ifdef __cplusplus
}
#endif
#endif /* __TC_HMAC_PRNG_H__ */
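/*
 * A minimal usage sketch of the init/reseed/generate flow from the Usage
 * note above. The zero-filled seed is only a placeholder (a real application
 * must fill it from an entropy source), and the 32-byte seed length and the
 * personalization string are example assumptions.
 */
#include <tinycrypt/hmac_prng.h>
#include <tinycrypt/constants.h>

static int example_hmac_prng(uint8_t *out, unsigned int outlen)
{
	struct tc_hmac_prng_struct prng;
	const uint8_t pers[] = "hostname-build-2020"; /* non-null personalization required */
	uint8_t seed[32] = { 0 };                     /* placeholder: use real entropy */
	int rc;

	if (tc_hmac_prng_init(&prng, pers, sizeof(pers) - 1) != TC_CRYPTO_SUCCESS) {
		return TC_CRYPTO_FAIL;
	}
	/* a reseed with real entropy is required after init, before generate */
	if (tc_hmac_prng_reseed(&prng, seed, sizeof(seed), NULL, 0) != TC_CRYPTO_SUCCESS) {
		return TC_CRYPTO_FAIL;
	}
	rc = tc_hmac_prng_generate(out, outlen, &prng);
	if (rc == TC_HMAC_PRNG_RESEED_REQ) {
		/* mix in fresh entropy, then retry the generate call */
		(void)tc_hmac_prng_reseed(&prng, seed, sizeof(seed), NULL, 0);
		rc = tc_hmac_prng_generate(out, outlen, &prng);
	}
	return rc;
}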

View file

@ -0,0 +1,129 @@
/* sha256.h - TinyCrypt interface to a SHA-256 implementation */
/*
* Copyright (C) 2017 by Intel Corporation, All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file
* @brief Interface to a SHA-256 implementation.
*
* Overview: SHA-256 is a NIST approved cryptographic hashing algorithm
* specified in FIPS 180. A hash algorithm maps data of arbitrary
* size to data of fixed length.
*
* Security: SHA-256 provides 128 bits of security against collision attacks
* and 256 bits of security against pre-image attacks. SHA-256 does
* NOT behave like a random oracle, but it can be used as one if
* the string being hashed is prefix-free encoded before hashing.
*
* Usage: 1) call tc_sha256_init to initialize a struct
* tc_sha256_state_struct before hashing a new string.
*
* 2) call tc_sha256_update to hash the next string segment;
* tc_sha256_update can be called as many times as needed to hash
* all of the segments of a string; the order is important.
*
 *          3) call tc_sha256_final to output the digest from a hashing
* operation.
*/
#ifndef __TC_SHA256_H__
#define __TC_SHA256_H__
#include <stddef.h>
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
#define TC_SHA256_BLOCK_SIZE (64)
#define TC_SHA256_DIGEST_SIZE (32)
#define TC_SHA256_STATE_BLOCKS (TC_SHA256_DIGEST_SIZE/4)
struct tc_sha256_state_struct {
unsigned int iv[TC_SHA256_STATE_BLOCKS];
uint64_t bits_hashed;
uint8_t leftover[TC_SHA256_BLOCK_SIZE];
size_t leftover_offset;
};
typedef struct tc_sha256_state_struct *TCSha256State_t;
/**
* @brief SHA256 initialization procedure
* Initializes s
* @return returns TC_CRYPTO_SUCCESS (1)
* returns TC_CRYPTO_FAIL (0) if s == NULL
* @param s Sha256 state struct
*/
int tc_sha256_init(TCSha256State_t s);
/**
* @brief SHA256 update procedure
* Hashes data_length bytes addressed by data into state s
* @return returns TC_CRYPTO_SUCCESS (1)
* returns TC_CRYPTO_FAIL (0) if:
* s == NULL,
* s->iv == NULL,
* data == NULL
* @note Assumes s has been initialized by tc_sha256_init
* @warning The state buffer 'leftover' is left in memory after processing
* If your application intends to have sensitive data in this
 *          buffer, remember to erase it after the data has been processed
* @param s Sha256 state struct
* @param data message to hash
* @param datalen length of message to hash
*/
int tc_sha256_update (TCSha256State_t s, const uint8_t *data, size_t datalen);
/**
* @brief SHA256 final procedure
* Inserts the completed hash computation into digest
* @return returns TC_CRYPTO_SUCCESS (1)
* returns TC_CRYPTO_FAIL (0) if:
* s == NULL,
* s->iv == NULL,
* digest == NULL
* @note Assumes: s has been initialized by tc_sha256_init
* digest points to at least TC_SHA256_DIGEST_SIZE bytes
* @warning The state buffer 'leftover' is left in memory after processing
* If your application intends to have sensitive data in this
 *          buffer, remember to erase it after the data has been processed
 * @param digest buffer to receive the computed digest
 * @param s Sha256 state struct
*/
int tc_sha256_final(uint8_t *digest, TCSha256State_t s);
#ifdef __cplusplus
}
#endif
#endif /* __TC_SHA256_H__ */
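/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * header; placed after the include guard purely for illustration). It
 * follows steps 1)-3) from the overview above, hashing a short message in
 * two segments.
 */
static inline int sha256_example(uint8_t digest[TC_SHA256_DIGEST_SIZE])
{
    struct tc_sha256_state_struct s;
    const uint8_t part1[] = { 'a', 'b' };
    const uint8_t part2[] = { 'c' };

    if (tc_sha256_init(&s) != 1) {                /* 1) initialize the state */
        return 0;                                 /* 1 == TC_CRYPTO_SUCCESS */
    }
    (void)tc_sha256_update(&s, part1, sizeof(part1)); /* 2) hash segments in order */
    (void)tc_sha256_update(&s, part2, sizeof(part2));
    return tc_sha256_final(digest, &s);           /* 3) output the digest */
}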

View file

@ -0,0 +1,95 @@
/* utils.h - TinyCrypt interface to platform-dependent run-time operations */
/*
* Copyright (C) 2017 by Intel Corporation, All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file
* @brief Interface to platform-dependent run-time operations.
*
*/
#ifndef __TC_UTILS_H__
#define __TC_UTILS_H__
#include <stdint.h>
#include <stddef.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
 * @brief Copy the buffer 'from' to the buffer 'to'.
* @return returns TC_CRYPTO_SUCCESS (1)
* returns TC_CRYPTO_FAIL (0) if:
* from_len > to_len.
*
* @param to OUT -- destination buffer
* @param to_len IN -- length of destination buffer
* @param from IN -- origin buffer
* @param from_len IN -- length of origin buffer
*/
unsigned int _copy(uint8_t *to, unsigned int to_len,
const uint8_t *from, unsigned int from_len);
/**
* @brief Set the value 'val' into the buffer 'to', 'len' times.
*
* @param to OUT -- destination buffer
* @param val IN -- value to be set in 'to'
* @param len IN -- number of times the value will be copied
*/
void _set(void *to, uint8_t val, unsigned int len);
/*
* @brief AES specific doubling function, which utilizes
* the finite field used by AES.
 * @return Returns 2*a in the AES finite field (GF(2^8))
*
* @param a IN/OUT -- value to be doubled
*/
uint8_t _double_byte(uint8_t a);
/*
* @brief Constant-time algorithm to compare if two sequences of bytes are equal
* @return Returns 0 if equal, and non-zero otherwise
*
* @param a IN -- sequence of bytes a
* @param b IN -- sequence of bytes b
* @param size IN -- size of sequences a and b
*/
int _compare(const uint8_t *a, const uint8_t *b, size_t size);
#ifdef __cplusplus
}
#endif
#endif /* __TC_UTILS_H__ */
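/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * header): _compare() runs in constant time and returns 0 only when the two
 * buffers are equal, which is the property MAC/tag verification relies on.
 */
static inline int tags_match(const uint8_t *expected, const uint8_t *received,
                             size_t taglen)
{
    return _compare(expected, received, taglen) == 0;
}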

View file

@ -0,0 +1,164 @@
/* aes_decrypt.c - TinyCrypt implementation of AES decryption procedure */
/*
* Copyright (C) 2017 by Intel Corporation, All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <tinycrypt/aes.h>
#include <tinycrypt/constants.h>
#include <tinycrypt/utils.h>
static const uint8_t inv_sbox[256] = {
0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x9e,
0x81, 0xf3, 0xd7, 0xfb, 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87,
0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb, 0x54, 0x7b, 0x94, 0x32,
0xa6, 0xc2, 0x23, 0x3d, 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e,
0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2, 0x76, 0x5b, 0xa2, 0x49,
0x6d, 0x8b, 0xd1, 0x25, 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16,
0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92, 0x6c, 0x70, 0x48, 0x50,
0xfd, 0xed, 0xb9, 0xda, 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84,
0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05,
0xb8, 0xb3, 0x45, 0x06, 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02,
0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b, 0x3a, 0x91, 0x11, 0x41,
0x4f, 0x67, 0xdc, 0xea, 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73,
0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, 0xe2, 0xf9, 0x37, 0xe8,
0x1c, 0x75, 0xdf, 0x6e, 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89,
0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b, 0xfc, 0x56, 0x3e, 0x4b,
0xc6, 0xd2, 0x79, 0x20, 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4,
0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31, 0xb1, 0x12, 0x10, 0x59,
0x27, 0x80, 0xec, 0x5f, 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d,
0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef, 0xa0, 0xe0, 0x3b, 0x4d,
0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61,
0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63,
0x55, 0x21, 0x0c, 0x7d
};
int tc_aes128_set_decrypt_key(TCAesKeySched_t s, const uint8_t *k)
{
return tc_aes128_set_encrypt_key(s, k);
}
#define mult8(a)(_double_byte(_double_byte(_double_byte(a))))
#define mult9(a)(mult8(a)^(a))
#define multb(a)(mult8(a)^_double_byte(a)^(a))
#define multd(a)(mult8(a)^_double_byte(_double_byte(a))^(a))
#define multe(a)(mult8(a)^_double_byte(_double_byte(a))^_double_byte(a))
static inline void mult_row_column(uint8_t *out, const uint8_t *in)
{
out[0] = multe(in[0]) ^ multb(in[1]) ^ multd(in[2]) ^ mult9(in[3]);
out[1] = mult9(in[0]) ^ multe(in[1]) ^ multb(in[2]) ^ multd(in[3]);
out[2] = multd(in[0]) ^ mult9(in[1]) ^ multe(in[2]) ^ multb(in[3]);
out[3] = multb(in[0]) ^ multd(in[1]) ^ mult9(in[2]) ^ multe(in[3]);
}
static inline void inv_mix_columns(uint8_t *s)
{
uint8_t t[Nb*Nk];
mult_row_column(t, s);
mult_row_column(&t[Nb], s+Nb);
mult_row_column(&t[2*Nb], s+(2*Nb));
mult_row_column(&t[3*Nb], s+(3*Nb));
(void)_copy(s, sizeof(t), t, sizeof(t));
}
static inline void add_round_key(uint8_t *s, const unsigned int *k)
{
s[0] ^= (uint8_t)(k[0] >> 24); s[1] ^= (uint8_t)(k[0] >> 16);
s[2] ^= (uint8_t)(k[0] >> 8); s[3] ^= (uint8_t)(k[0]);
s[4] ^= (uint8_t)(k[1] >> 24); s[5] ^= (uint8_t)(k[1] >> 16);
s[6] ^= (uint8_t)(k[1] >> 8); s[7] ^= (uint8_t)(k[1]);
s[8] ^= (uint8_t)(k[2] >> 24); s[9] ^= (uint8_t)(k[2] >> 16);
s[10] ^= (uint8_t)(k[2] >> 8); s[11] ^= (uint8_t)(k[2]);
s[12] ^= (uint8_t)(k[3] >> 24); s[13] ^= (uint8_t)(k[3] >> 16);
s[14] ^= (uint8_t)(k[3] >> 8); s[15] ^= (uint8_t)(k[3]);
}
static inline void inv_sub_bytes(uint8_t *s)
{
unsigned int i;
for (i = 0; i < (Nb*Nk); ++i) {
s[i] = inv_sbox[s[i]];
}
}
/*
* This inv_shift_rows also implements the matrix flip required for
* inv_mix_columns, but performs it here to reduce the number of memory
* operations.
*/
static inline void inv_shift_rows(uint8_t *s)
{
uint8_t t[Nb*Nk];
t[0] = s[0]; t[1] = s[13]; t[2] = s[10]; t[3] = s[7];
t[4] = s[4]; t[5] = s[1]; t[6] = s[14]; t[7] = s[11];
t[8] = s[8]; t[9] = s[5]; t[10] = s[2]; t[11] = s[15];
t[12] = s[12]; t[13] = s[9]; t[14] = s[6]; t[15] = s[3];
(void)_copy(s, sizeof(t), t, sizeof(t));
}
int tc_aes_decrypt(uint8_t *out, const uint8_t *in, const TCAesKeySched_t s)
{
uint8_t state[Nk*Nb];
unsigned int i;
if (out == (uint8_t *) 0) {
return TC_CRYPTO_FAIL;
} else if (in == (const uint8_t *) 0) {
return TC_CRYPTO_FAIL;
} else if (s == (TCAesKeySched_t) 0) {
return TC_CRYPTO_FAIL;
}
(void)_copy(state, sizeof(state), in, sizeof(state));
add_round_key(state, s->words + Nb*Nr);
for (i = Nr - 1; i > 0; --i) {
inv_shift_rows(state);
inv_sub_bytes(state);
add_round_key(state, s->words + Nb*i);
inv_mix_columns(state);
}
inv_shift_rows(state);
inv_sub_bytes(state);
add_round_key(state, s->words);
(void)_copy(out, sizeof(state), state, sizeof(state));
/* zeroing out the state buffer */
_set(state, TC_ZERO_BYTE, sizeof(state));
return TC_CRYPTO_SUCCESS;
}
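/*
 * Illustrative round-trip sketch (editor's addition, not part of the
 * original file): encrypts one block and decrypts it back. It assumes
 * struct tc_aes_key_sched_struct, TC_AES_KEY_SIZE and TC_AES_BLOCK_SIZE are
 * provided by tinycrypt/aes.h, as included above; the all-zero key and
 * plaintext are demo values only.
 */
int aes128_roundtrip_example(void)
{
    struct tc_aes_key_sched_struct sched;
    const uint8_t key[TC_AES_KEY_SIZE] = { 0 };   /* demo key only */
    uint8_t plain[TC_AES_BLOCK_SIZE] = { 0 };
    uint8_t cipher[TC_AES_BLOCK_SIZE];
    uint8_t check[TC_AES_BLOCK_SIZE];

    (void)tc_aes128_set_encrypt_key(&sched, key);
    (void)tc_aes_encrypt(cipher, plain, &sched);

    /* the decrypt schedule is the same expanded key (see above) */
    (void)tc_aes128_set_decrypt_key(&sched, key);
    (void)tc_aes_decrypt(check, cipher, &sched);

    return _compare(plain, check, sizeof(plain)) == 0 ?
           TC_CRYPTO_SUCCESS : TC_CRYPTO_FAIL;
}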

View file

@ -0,0 +1,191 @@
/* aes_encrypt.c - TinyCrypt implementation of AES encryption procedure */
/*
* Copyright (C) 2017 by Intel Corporation, All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <tinycrypt/aes.h>
#include <tinycrypt/utils.h>
#include <tinycrypt/constants.h>
static const uint8_t sbox[256] = {
0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b,
0xfe, 0xd7, 0xab, 0x76, 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0,
0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, 0xb7, 0xfd, 0x93, 0x26,
0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2,
0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0,
0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x00, 0xed,
0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f,
0x50, 0x3c, 0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5,
0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, 0xcd, 0x0c, 0x13, 0xec,
0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14,
0xde, 0x5e, 0x0b, 0xdb, 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c,
0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, 0xe7, 0xc8, 0x37, 0x6d,
0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f,
0x4b, 0xbd, 0x8b, 0x8a, 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e,
0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, 0xe1, 0xf8, 0x98, 0x11,
0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f,
0xb0, 0x54, 0xbb, 0x16
};
static inline unsigned int rotword(unsigned int a)
{
return (((a) >> 24)|((a) << 8));
}
#define subbyte(a, o)(sbox[((a) >> (o))&0xff] << (o))
#define subword(a)(subbyte(a, 24)|subbyte(a, 16)|subbyte(a, 8)|subbyte(a, 0))
int tc_aes128_set_encrypt_key(TCAesKeySched_t s, const uint8_t *k)
{
const unsigned int rconst[11] = {
0x00000000, 0x01000000, 0x02000000, 0x04000000, 0x08000000, 0x10000000,
0x20000000, 0x40000000, 0x80000000, 0x1b000000, 0x36000000
};
unsigned int i;
unsigned int t;
if (s == (TCAesKeySched_t) 0) {
return TC_CRYPTO_FAIL;
} else if (k == (const uint8_t *) 0) {
return TC_CRYPTO_FAIL;
}
for (i = 0; i < Nk; ++i) {
s->words[i] = (k[Nb*i]<<24) | (k[Nb*i+1]<<16) |
(k[Nb*i+2]<<8) | (k[Nb*i+3]);
}
for (; i < (Nb * (Nr + 1)); ++i) {
t = s->words[i-1];
if ((i % Nk) == 0) {
t = subword(rotword(t)) ^ rconst[i/Nk];
}
s->words[i] = s->words[i-Nk] ^ t;
}
return TC_CRYPTO_SUCCESS;
}
static inline void add_round_key(uint8_t *s, const unsigned int *k)
{
s[0] ^= (uint8_t)(k[0] >> 24); s[1] ^= (uint8_t)(k[0] >> 16);
s[2] ^= (uint8_t)(k[0] >> 8); s[3] ^= (uint8_t)(k[0]);
s[4] ^= (uint8_t)(k[1] >> 24); s[5] ^= (uint8_t)(k[1] >> 16);
s[6] ^= (uint8_t)(k[1] >> 8); s[7] ^= (uint8_t)(k[1]);
s[8] ^= (uint8_t)(k[2] >> 24); s[9] ^= (uint8_t)(k[2] >> 16);
s[10] ^= (uint8_t)(k[2] >> 8); s[11] ^= (uint8_t)(k[2]);
s[12] ^= (uint8_t)(k[3] >> 24); s[13] ^= (uint8_t)(k[3] >> 16);
s[14] ^= (uint8_t)(k[3] >> 8); s[15] ^= (uint8_t)(k[3]);
}
static inline void sub_bytes(uint8_t *s)
{
unsigned int i;
for (i = 0; i < (Nb * Nk); ++i) {
s[i] = sbox[s[i]];
}
}
#define triple(a)(_double_byte(a)^(a))
static inline void mult_row_column(uint8_t *out, const uint8_t *in)
{
out[0] = _double_byte(in[0]) ^ triple(in[1]) ^ in[2] ^ in[3];
out[1] = in[0] ^ _double_byte(in[1]) ^ triple(in[2]) ^ in[3];
out[2] = in[0] ^ in[1] ^ _double_byte(in[2]) ^ triple(in[3]);
out[3] = triple(in[0]) ^ in[1] ^ in[2] ^ _double_byte(in[3]);
}
static inline void mix_columns(uint8_t *s)
{
uint8_t t[Nb*Nk];
mult_row_column(t, s);
mult_row_column(&t[Nb], s+Nb);
mult_row_column(&t[2 * Nb], s + (2 * Nb));
mult_row_column(&t[3 * Nb], s + (3 * Nb));
(void) _copy(s, sizeof(t), t, sizeof(t));
}
/*
* This shift_rows also implements the matrix flip required for mix_columns, but
* performs it here to reduce the number of memory operations.
*/
static inline void shift_rows(uint8_t *s)
{
uint8_t t[Nb * Nk];
t[0] = s[0]; t[1] = s[5]; t[2] = s[10]; t[3] = s[15];
t[4] = s[4]; t[5] = s[9]; t[6] = s[14]; t[7] = s[3];
t[8] = s[8]; t[9] = s[13]; t[10] = s[2]; t[11] = s[7];
t[12] = s[12]; t[13] = s[1]; t[14] = s[6]; t[15] = s[11];
(void) _copy(s, sizeof(t), t, sizeof(t));
}
int tc_aes_encrypt(uint8_t *out, const uint8_t *in, const TCAesKeySched_t s)
{
uint8_t state[Nk*Nb];
unsigned int i;
if (out == (uint8_t *) 0) {
return TC_CRYPTO_FAIL;
} else if (in == (const uint8_t *) 0) {
return TC_CRYPTO_FAIL;
} else if (s == (TCAesKeySched_t) 0) {
return TC_CRYPTO_FAIL;
}
(void)_copy(state, sizeof(state), in, sizeof(state));
add_round_key(state, s->words);
for (i = 0; i < (Nr - 1); ++i) {
sub_bytes(state);
shift_rows(state);
mix_columns(state);
add_round_key(state, s->words + Nb*(i+1));
}
sub_bytes(state);
shift_rows(state);
add_round_key(state, s->words + Nb*(i+1));
(void)_copy(out, sizeof(state), state, sizeof(state));
/* zeroing out the state buffer */
_set(state, TC_ZERO_BYTE, sizeof(state));
return TC_CRYPTO_SUCCESS;
}
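/*
 * Illustrative known-answer sketch (editor's addition, not part of the
 * original file): encrypts the AES-128 example block from FIPS-197
 * Appendix C.1 and checks the result. struct tc_aes_key_sched_struct is
 * assumed to come from tinycrypt/aes.h, as included above.
 */
int aes128_fips197_example(void)
{
    struct tc_aes_key_sched_struct sched;
    const uint8_t key[16] = {
        0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
        0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
    };
    const uint8_t plain[16] = {
        0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
        0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff
    };
    const uint8_t expected[16] = {
        0x69, 0xc4, 0xe0, 0xd8, 0x6a, 0x7b, 0x04, 0x30,
        0xd8, 0xcd, 0xb7, 0x80, 0x70, 0xb4, 0xc5, 0x5a
    };
    uint8_t out[16];

    (void)tc_aes128_set_encrypt_key(&sched, key);
    (void)tc_aes_encrypt(out, plain, &sched);
    return _compare(out, expected, sizeof(out)) == 0 ?
           TC_CRYPTO_SUCCESS : TC_CRYPTO_FAIL;
}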

View file

@ -0,0 +1,114 @@
/* cbc_mode.c - TinyCrypt implementation of CBC mode encryption & decryption */
/*
* Copyright (C) 2017 by Intel Corporation, All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <tinycrypt/cbc_mode.h>
#include <tinycrypt/constants.h>
#include <tinycrypt/utils.h>
int tc_cbc_mode_encrypt(uint8_t *out, unsigned int outlen, const uint8_t *in,
unsigned int inlen, const uint8_t *iv,
const TCAesKeySched_t sched)
{
uint8_t buffer[TC_AES_BLOCK_SIZE];
unsigned int n, m;
/* input sanity check: */
if (out == (uint8_t *) 0 ||
in == (const uint8_t *) 0 ||
sched == (TCAesKeySched_t) 0 ||
inlen == 0 ||
outlen == 0 ||
(inlen % TC_AES_BLOCK_SIZE) != 0 ||
(outlen % TC_AES_BLOCK_SIZE) != 0 ||
outlen != inlen + TC_AES_BLOCK_SIZE) {
return TC_CRYPTO_FAIL;
}
/* copy iv to the buffer */
(void)_copy(buffer, TC_AES_BLOCK_SIZE, iv, TC_AES_BLOCK_SIZE);
/* copy iv to the output buffer */
(void)_copy(out, TC_AES_BLOCK_SIZE, iv, TC_AES_BLOCK_SIZE);
out += TC_AES_BLOCK_SIZE;
for (n = m = 0; n < inlen; ++n) {
buffer[m++] ^= *in++;
if (m == TC_AES_BLOCK_SIZE) {
(void)tc_aes_encrypt(buffer, buffer, sched);
(void)_copy(out, TC_AES_BLOCK_SIZE,
buffer, TC_AES_BLOCK_SIZE);
out += TC_AES_BLOCK_SIZE;
m = 0;
}
}
return TC_CRYPTO_SUCCESS;
}
int tc_cbc_mode_decrypt(uint8_t *out, unsigned int outlen, const uint8_t *in,
unsigned int inlen, const uint8_t *iv,
const TCAesKeySched_t sched)
{
uint8_t buffer[TC_AES_BLOCK_SIZE];
const uint8_t *p;
unsigned int n, m;
/* sanity check the inputs */
if (out == (uint8_t *) 0 ||
in == (const uint8_t *) 0 ||
sched == (TCAesKeySched_t) 0 ||
inlen == 0 ||
outlen == 0 ||
(inlen % TC_AES_BLOCK_SIZE) != 0 ||
(outlen % TC_AES_BLOCK_SIZE) != 0 ||
outlen != inlen - TC_AES_BLOCK_SIZE) {
return TC_CRYPTO_FAIL;
}
/*
* Note that in == iv + ciphertext, i.e. the iv and the ciphertext are
* contiguous. This allows for a very efficient decryption algorithm
* that would not otherwise be possible.
*/
p = iv;
for (n = m = 0; n < inlen; ++n) {
if ((n % TC_AES_BLOCK_SIZE) == 0) {
(void)tc_aes_decrypt(buffer, in, sched);
in += TC_AES_BLOCK_SIZE;
m = 0;
}
*out++ = buffer[m++] ^ *p++;
}
return TC_CRYPTO_SUCCESS;
}
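/*
 * Illustrative encryption sketch (editor's addition, not part of the
 * original file): per the checks above, plen must be a multiple of
 * TC_AES_BLOCK_SIZE, the output buffer must be one block larger than the
 * input, and the IV is written to out[0..15] followed by the ciphertext.
 * The all-zero IV below is a placeholder; real callers must use a fresh
 * random IV per message.
 */
int cbc_encrypt_example(uint8_t *out, unsigned int outlen,
                        const uint8_t *plain, unsigned int plen,
                        const TCAesKeySched_t sched)
{
    uint8_t iv[TC_AES_BLOCK_SIZE] = { 0 };   /* placeholder IV */

    if (outlen != plen + TC_AES_BLOCK_SIZE) {
        return TC_CRYPTO_FAIL;
    }
    return tc_cbc_mode_encrypt(out, outlen, plain, plen, iv, sched);
}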

View file

@ -0,0 +1,266 @@
/* ccm_mode.c - TinyCrypt implementation of CCM mode */
/*
* Copyright (C) 2017 by Intel Corporation, All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <tinycrypt/ccm_mode.h>
#include <tinycrypt/constants.h>
#include <tinycrypt/utils.h>
#include <stdio.h>
int tc_ccm_config(TCCcmMode_t c, TCAesKeySched_t sched, uint8_t *nonce,
unsigned int nlen, unsigned int mlen)
{
/* input sanity check: */
if (c == (TCCcmMode_t) 0 ||
sched == (TCAesKeySched_t) 0 ||
nonce == (uint8_t *) 0) {
return TC_CRYPTO_FAIL;
} else if (nlen != 13) {
return TC_CRYPTO_FAIL; /* The allowed nonce size is: 13. See documentation.*/
} else if ((mlen < 4) || (mlen > 16) || (mlen & 1)) {
return TC_CRYPTO_FAIL; /* The allowed mac sizes are: 4, 6, 8, 10, 12, 14, 16.*/
}
c->mlen = mlen;
c->sched = sched;
c->nonce = nonce;
return TC_CRYPTO_SUCCESS;
}
/**
* Variation of CBC-MAC mode used in CCM.
*/
static void ccm_cbc_mac(uint8_t *T, const uint8_t *data, unsigned int dlen,
unsigned int flag, TCAesKeySched_t sched)
{
unsigned int i;
if (flag > 0) {
T[0] ^= (uint8_t)(dlen >> 8);
T[1] ^= (uint8_t)(dlen);
dlen += 2; i = 2;
} else {
i = 0;
}
while (i < dlen) {
T[i++ % (Nb * Nk)] ^= *data++;
if (((i % (Nb * Nk)) == 0) || dlen == i) {
(void) tc_aes_encrypt(T, T, sched);
}
}
}
/**
* Variation of CTR mode used in CCM.
* The CTR mode used by CCM is slightly different than the conventional CTR
* mode (the counter is increased before encryption, instead of after
* encryption). Besides, it is assumed that the counter is stored in the last
* 2 bytes of the nonce.
*/
static int ccm_ctr_mode(uint8_t *out, unsigned int outlen, const uint8_t *in,
unsigned int inlen, uint8_t *ctr, const TCAesKeySched_t sched)
{
uint8_t buffer[TC_AES_BLOCK_SIZE];
uint8_t nonce[TC_AES_BLOCK_SIZE];
uint16_t block_num;
unsigned int i;
/* input sanity check: */
if (out == (uint8_t *) 0 ||
in == (uint8_t *) 0 ||
ctr == (uint8_t *) 0 ||
sched == (TCAesKeySched_t) 0 ||
inlen == 0 ||
outlen == 0 ||
outlen != inlen) {
return TC_CRYPTO_FAIL;
}
/* copy the counter to the nonce */
(void) _copy(nonce, sizeof(nonce), ctr, sizeof(nonce));
/* select the last 2 bytes of the nonce to be incremented */
block_num = (uint16_t) ((nonce[14] << 8)|(nonce[15]));
for (i = 0; i < inlen; ++i) {
if ((i % (TC_AES_BLOCK_SIZE)) == 0) {
block_num++;
nonce[14] = (uint8_t)(block_num >> 8);
nonce[15] = (uint8_t)(block_num);
if (!tc_aes_encrypt(buffer, nonce, sched)) {
return TC_CRYPTO_FAIL;
}
}
/* update the output */
*out++ = buffer[i % (TC_AES_BLOCK_SIZE)] ^ *in++;
}
/* update the counter */
ctr[14] = nonce[14]; ctr[15] = nonce[15];
return TC_CRYPTO_SUCCESS;
}
int tc_ccm_generation_encryption(uint8_t *out, unsigned int olen,
const uint8_t *associated_data,
unsigned int alen, const uint8_t *payload,
unsigned int plen, TCCcmMode_t c)
{
/* input sanity check: */
if ((out == (uint8_t *) 0) ||
(c == (TCCcmMode_t) 0) ||
((plen > 0) && (payload == (uint8_t *) 0)) ||
((alen > 0) && (associated_data == (uint8_t *) 0)) ||
(alen >= TC_CCM_AAD_MAX_BYTES) || /* associated data size unsupported */
(plen >= TC_CCM_PAYLOAD_MAX_BYTES) || /* payload size unsupported */
(olen < (plen + c->mlen))) { /* invalid output buffer size */
return TC_CRYPTO_FAIL;
}
uint8_t b[Nb * Nk];
uint8_t tag[Nb * Nk];
unsigned int i;
/* GENERATING THE AUTHENTICATION TAG: */
/* formatting the sequence b for authentication: */
b[0] = ((alen > 0) ? 0x40:0) | (((c->mlen - 2) / 2 << 3)) | (1);
for (i = 1; i <= 13; ++i) {
b[i] = c->nonce[i - 1];
}
b[14] = (uint8_t)(plen >> 8);
b[15] = (uint8_t)(plen);
/* computing the authentication tag using cbc-mac: */
(void) tc_aes_encrypt(tag, b, c->sched);
if (alen > 0) {
ccm_cbc_mac(tag, associated_data, alen, 1, c->sched);
}
if (plen > 0) {
ccm_cbc_mac(tag, payload, plen, 0, c->sched);
}
/* ENCRYPTION: */
/* formatting the sequence b for encryption: */
b[0] = 1; /* q - 1 = 2 - 1 = 1 */
b[14] = b[15] = TC_ZERO_BYTE;
/* encrypting payload using ctr mode: */
ccm_ctr_mode(out, plen, payload, plen, b, c->sched);
b[14] = b[15] = TC_ZERO_BYTE; /* restoring initial counter for ctr_mode (0):*/
/* encrypting b and adding the tag to the output: */
(void) tc_aes_encrypt(b, b, c->sched);
out += plen;
for (i = 0; i < c->mlen; ++i) {
*out++ = tag[i] ^ b[i];
}
return TC_CRYPTO_SUCCESS;
}
int tc_ccm_decryption_verification(uint8_t *out, unsigned int olen,
const uint8_t *associated_data,
unsigned int alen, const uint8_t *payload,
unsigned int plen, TCCcmMode_t c)
{
/* input sanity check: */
if ((out == (uint8_t *) 0) ||
(c == (TCCcmMode_t) 0) ||
((plen > 0) && (payload == (uint8_t *) 0)) ||
((alen > 0) && (associated_data == (uint8_t *) 0)) ||
(alen >= TC_CCM_AAD_MAX_BYTES) || /* associated data size unsupported */
(plen >= TC_CCM_PAYLOAD_MAX_BYTES) || /* payload size unsupported */
(olen < plen - c->mlen)) { /* invalid output buffer size */
return TC_CRYPTO_FAIL;
}
uint8_t b[Nb * Nk];
uint8_t tag[Nb * Nk];
unsigned int i;
/* DECRYPTION: */
/* formatting the sequence b for decryption: */
b[0] = 1; /* q - 1 = 2 - 1 = 1 */
for (i = 1; i < 14; ++i) {
b[i] = c->nonce[i - 1];
}
b[14] = b[15] = TC_ZERO_BYTE; /* initial counter value is 0 */
/* decrypting payload using ctr mode: */
ccm_ctr_mode(out, plen - c->mlen, payload, plen - c->mlen, b, c->sched);
b[14] = b[15] = TC_ZERO_BYTE; /* restoring initial counter value (0) */
/* encrypting b and restoring the tag from input: */
(void) tc_aes_encrypt(b, b, c->sched);
for (i = 0; i < c->mlen; ++i) {
tag[i] = *(payload + plen - c->mlen + i) ^ b[i];
}
/* VERIFYING THE AUTHENTICATION TAG: */
/* formatting the sequence b for authentication: */
b[0] = ((alen > 0) ? 0x40:0)|(((c->mlen - 2) / 2 << 3)) | (1);
for (i = 1; i < 14; ++i) {
b[i] = c->nonce[i - 1];
}
b[14] = (uint8_t)((plen - c->mlen) >> 8);
b[15] = (uint8_t)(plen - c->mlen);
/* computing the authentication tag using cbc-mac: */
(void) tc_aes_encrypt(b, b, c->sched);
if (alen > 0) {
ccm_cbc_mac(b, associated_data, alen, 1, c->sched);
}
if (plen > 0) {
ccm_cbc_mac(b, out, plen - c->mlen, 0, c->sched);
}
/* comparing the received tag and the computed one: */
if (_compare(b, tag, c->mlen) == 0) {
return TC_CRYPTO_SUCCESS;
} else {
/* erase the decrypted buffer in case of mac validation failure: */
_set(out, 0, plen - c->mlen);
return TC_CRYPTO_FAIL;
}
}
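/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): configures CCM with a 13-byte nonce and an 8-byte MAC, then
 * encrypts and authenticates a payload. struct tc_ccm_mode_struct is
 * assumed to come from tinycrypt/ccm_mode.h, as included above; the nonce
 * must be unique per key/message, and olen must be at least plen + 8 here.
 */
int ccm_encrypt_example(uint8_t *out, unsigned int olen,
                        const uint8_t *aad, unsigned int alen,
                        const uint8_t *payload, unsigned int plen,
                        TCAesKeySched_t sched, uint8_t nonce[13])
{
    struct tc_ccm_mode_struct c;

    if (tc_ccm_config(&c, sched, nonce, 13, 8) != TC_CRYPTO_SUCCESS) {
        return TC_CRYPTO_FAIL;
    }
    return tc_ccm_generation_encryption(out, olen, aad, alen,
                                        payload, plen, &c);
}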

View file

@ -0,0 +1,254 @@
/* cmac_mode.c - TinyCrypt CMAC mode implementation */
/*
* Copyright (C) 2017 by Intel Corporation, All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <tinycrypt/aes.h>
#include <tinycrypt/cmac_mode.h>
#include <tinycrypt/constants.h>
#include <tinycrypt/utils.h>
/* max number of calls before the key must be changed (2^48). */
const static uint64_t MAX_CALLS = ((uint64_t)1 << 48);
/*
* gf_wrap -- In our implementation, GF(2^128) is represented as a 16 byte
* array with byte 0 the most significant and byte 15 the least significant.
* High bit carry reduction is based on the primitive polynomial
*
* X^128 + X^7 + X^2 + X + 1,
*
* which leads to the reduction formula X^128 = X^7 + X^2 + X + 1. Indeed,
* since 0 = (X^128 + X^7 + X^2 + 1) mod (X^128 + X^7 + X^2 + X + 1) and since
* addition of polynomials with coefficients in Z/Z(2) is just XOR, we can
* add X^128 to both sides to get
*
* X^128 = (X^7 + X^2 + X + 1) mod (X^128 + X^7 + X^2 + X + 1)
*
* and the coefficients of the polynomial on the right hand side form the
* string 1000 0111 = 0x87, which is the value of gf_wrap.
*
* This gets used in the following way. Doubling in GF(2^128) is just a left
* shift by 1 bit, except when the most significant bit is 1. In the latter
* case, the relation X^128 = X^7 + X^2 + X + 1 says that the high order bit
* that overflows beyond 128 bits can be replaced by addition of
* X^7 + X^2 + X + 1 <--> 0x87 to the low order 128 bits. Since addition
* in GF(2^128) is represented by XOR, we therefore only have to XOR 0x87
* into the low order byte after a left shift when the starting high order
* bit is 1.
*/
const unsigned char gf_wrap = 0x87;
/*
* assumes: out != NULL and points to a GF(2^n) value to receive the
* doubled value;
* in != NULL and points to a 16 byte GF(2^n) value
* to double;
* the in and out buffers do not overlap.
* effects: doubles the GF(2^n) value pointed to by "in" and places
* the result in the GF(2^n) value pointed to by "out."
*/
void gf_double(uint8_t *out, uint8_t *in)
{
/* start with low order byte */
uint8_t *x = in + (TC_AES_BLOCK_SIZE - 1);
/* if msb == 1, we need to add the gf_wrap value, otherwise add 0 */
uint8_t carry = (in[0] >> 7) ? gf_wrap : 0;
out += (TC_AES_BLOCK_SIZE - 1);
for (;;) {
*out-- = (*x << 1) ^ carry;
if (x == in) {
break;
}
carry = *x-- >> 7;
}
}
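/*
 * Worked example (editor's addition): doubling the 16-byte value
 * 80 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 shifts every byte left by
 * one bit; because the original high-order bit was 1, gf_wrap (0x87) is
 * XORed into the low-order byte, giving
 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 85 (0x02 ^ 0x87 = 0x85).
 */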
int tc_cmac_setup(TCCmacState_t s, const uint8_t *key, TCAesKeySched_t sched)
{
/* input sanity check: */
if (s == (TCCmacState_t) 0 ||
key == (const uint8_t *) 0) {
return TC_CRYPTO_FAIL;
}
/* put s into a known state */
_set(s, 0, sizeof(*s));
s->sched = sched;
/* configure the encryption key used by the underlying block cipher */
tc_aes128_set_encrypt_key(s->sched, key);
/* derive s->K1 and s->K2: encrypt the all-zero block into s->iv, then double twice in GF(2^128) */
_set(s->iv, 0, TC_AES_BLOCK_SIZE);
tc_aes_encrypt(s->iv, s->iv, s->sched);
gf_double (s->K1, s->iv);
gf_double (s->K2, s->K1);
/* reset s->iv to 0 in case someone wants to compute now */
tc_cmac_init(s);
return TC_CRYPTO_SUCCESS;
}
int tc_cmac_erase(TCCmacState_t s)
{
if (s == (TCCmacState_t) 0) {
return TC_CRYPTO_FAIL;
}
/* destroy the current state */
_set(s, 0, sizeof(*s));
return TC_CRYPTO_SUCCESS;
}
int tc_cmac_init(TCCmacState_t s)
{
/* input sanity check: */
if (s == (TCCmacState_t) 0) {
return TC_CRYPTO_FAIL;
}
/* CMAC starts with an all zero initialization vector */
_set(s->iv, 0, TC_AES_BLOCK_SIZE);
/* and the leftover buffer is empty */
_set(s->leftover, 0, TC_AES_BLOCK_SIZE);
s->leftover_offset = 0;
/* Set countdown to max number of calls allowed before re-keying: */
s->countdown = MAX_CALLS;
return TC_CRYPTO_SUCCESS;
}
int tc_cmac_update(TCCmacState_t s, const uint8_t *data, size_t data_length)
{
unsigned int i;
/* input sanity check: */
if (s == (TCCmacState_t) 0) {
return TC_CRYPTO_FAIL;
}
if (data_length == 0) {
return TC_CRYPTO_SUCCESS;
}
if (data == (const uint8_t *) 0) {
return TC_CRYPTO_FAIL;
}
if (s->countdown == 0) {
return TC_CRYPTO_FAIL;
}
s->countdown--;
if (s->leftover_offset > 0) {
/* last data added to s didn't end on a TC_AES_BLOCK_SIZE byte boundary */
size_t remaining_space = TC_AES_BLOCK_SIZE - s->leftover_offset;
if (data_length < remaining_space) {
/* still not enough data to encrypt this time either */
_copy(&s->leftover[s->leftover_offset], data_length, data, data_length);
s->leftover_offset += data_length;
return TC_CRYPTO_SUCCESS;
}
/* leftover block is now full; encrypt it first */
_copy(&s->leftover[s->leftover_offset],
remaining_space,
data,
remaining_space);
data_length -= remaining_space;
data += remaining_space;
s->leftover_offset = 0;
for (i = 0; i < TC_AES_BLOCK_SIZE; ++i) {
s->iv[i] ^= s->leftover[i];
}
tc_aes_encrypt(s->iv, s->iv, s->sched);
}
/* CBC encrypt each (except the last) of the data blocks */
while (data_length > TC_AES_BLOCK_SIZE) {
for (i = 0; i < TC_AES_BLOCK_SIZE; ++i) {
s->iv[i] ^= data[i];
}
tc_aes_encrypt(s->iv, s->iv, s->sched);
data += TC_AES_BLOCK_SIZE;
data_length -= TC_AES_BLOCK_SIZE;
}
if (data_length > 0) {
/* save leftover data for next time */
_copy(s->leftover, data_length, data, data_length);
s->leftover_offset = data_length;
}
return TC_CRYPTO_SUCCESS;
}
int tc_cmac_final(uint8_t *tag, TCCmacState_t s)
{
uint8_t *k;
unsigned int i;
/* input sanity check: */
if (tag == (uint8_t *) 0 ||
s == (TCCmacState_t) 0) {
return TC_CRYPTO_FAIL;
}
if (s->leftover_offset == TC_AES_BLOCK_SIZE) {
/* the last message block is a full-sized block */
k = (uint8_t *) s->K1;
} else {
/* the final message block is not a full-sized block */
size_t remaining = TC_AES_BLOCK_SIZE - s->leftover_offset;
_set(&s->leftover[s->leftover_offset], 0, remaining);
s->leftover[s->leftover_offset] = TC_CMAC_PADDING;
k = (uint8_t *) s->K2;
}
for (i = 0; i < TC_AES_BLOCK_SIZE; ++i) {
s->iv[i] ^= s->leftover[i] ^ k[i];
}
tc_aes_encrypt(tag, s->iv, s->sched);
/* erasing state: */
tc_cmac_erase(s);
return TC_CRYPTO_SUCCESS;
}
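/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): computes an AES-CMAC tag over one message. struct tc_cmac_struct
 * and struct tc_aes_key_sched_struct are assumed to come from
 * tinycrypt/cmac_mode.h and tinycrypt/aes.h, as included above;
 * tc_cmac_setup() already initializes the state, and tc_cmac_final() erases
 * it.
 */
int cmac_example(uint8_t tag[TC_AES_BLOCK_SIZE],
                 const uint8_t key[TC_AES_KEY_SIZE],
                 const uint8_t *msg, size_t mlen)
{
    struct tc_cmac_struct state;
    struct tc_aes_key_sched_struct sched;

    if (tc_cmac_setup(&state, key, &sched) != TC_CRYPTO_SUCCESS) {
        return TC_CRYPTO_FAIL;
    }
    (void)tc_cmac_update(&state, msg, mlen);  /* may be called repeatedly */
    return tc_cmac_final(tag, &state);
}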

View file

@ -0,0 +1,85 @@
/* ctr_mode.c - TinyCrypt CTR mode implementation */
/*
* Copyright (C) 2017 by Intel Corporation, All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <tinycrypt/constants.h>
#include <tinycrypt/ctr_mode.h>
#include <tinycrypt/utils.h>
int tc_ctr_mode(uint8_t *out, unsigned int outlen, const uint8_t *in,
unsigned int inlen, uint8_t *ctr, const TCAesKeySched_t sched)
{
uint8_t buffer[TC_AES_BLOCK_SIZE];
uint8_t nonce[TC_AES_BLOCK_SIZE];
unsigned int block_num;
unsigned int i;
/* input sanity check: */
if (out == (uint8_t *) 0 ||
in == (uint8_t *) 0 ||
ctr == (uint8_t *) 0 ||
sched == (TCAesKeySched_t) 0 ||
inlen == 0 ||
outlen == 0 ||
outlen != inlen) {
return TC_CRYPTO_FAIL;
}
/* copy the ctr to the nonce */
(void)_copy(nonce, sizeof(nonce), ctr, sizeof(nonce));
/* select the last 4 bytes of the nonce to be incremented */
block_num = (nonce[12] << 24) | (nonce[13] << 16) |
(nonce[14] << 8) | (nonce[15]);
for (i = 0; i < inlen; ++i) {
if ((i % (TC_AES_BLOCK_SIZE)) == 0) {
/* encrypt data using the current nonce */
if (tc_aes_encrypt(buffer, nonce, sched)) {
block_num++;
nonce[12] = (uint8_t)(block_num >> 24);
nonce[13] = (uint8_t)(block_num >> 16);
nonce[14] = (uint8_t)(block_num >> 8);
nonce[15] = (uint8_t)(block_num);
} else {
return TC_CRYPTO_FAIL;
}
}
/* update the output */
*out++ = buffer[i%(TC_AES_BLOCK_SIZE)] ^ *in++;
}
/* update the counter */
ctr[12] = nonce[12]; ctr[13] = nonce[13];
ctr[14] = nonce[14]; ctr[15] = nonce[15];
return TC_CRYPTO_SUCCESS;
}
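/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): the 16-byte ctr block carries a 12-byte nonce in ctr[0..11] and a
 * big-endian 32-bit counter in ctr[12..15], which tc_ctr_mode() advances as
 * it produces keystream. The same call both encrypts and decrypts; the
 * nonce/counter pair must never be reused under one key.
 */
int ctr_example(uint8_t *out, const uint8_t *in, unsigned int len,
                const uint8_t nonce[12], const TCAesKeySched_t sched)
{
    uint8_t ctr[TC_AES_BLOCK_SIZE];

    (void)_copy(ctr, sizeof(ctr), nonce, 12);  /* nonce portion */
    _set(&ctr[12], 0x00, 4);                   /* counter starts at 0 */
    return tc_ctr_mode(out, len, in, len, ctr, sched);
}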

View file

@ -0,0 +1,283 @@
/* ctr_prng.c - TinyCrypt implementation of CTR-PRNG */
/*
* Copyright (c) 2016, Chris Morrison
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <tinycrypt/ctr_prng.h>
#include <tinycrypt/utils.h>
#include <tinycrypt/constants.h>
#include <string.h>
/*
* This PRNG is based on the CTR_DRBG described in Recommendation for Random
* Number Generation Using Deterministic Random Bit Generators,
* NIST SP 800-90A Rev. 1.
*
* Annotations to particular steps (e.g. 10.2.1.2 Step 1) refer to the steps
* described in that document.
*
*/
/**
* @brief Array incrementer
* Treats the supplied array as one contiguous number (MSB in arr[0]), and
* increments it by one
* @return none
* @param arr IN/OUT -- array to be incremented
* @param len IN -- size of arr in bytes
*/
static void arrInc(uint8_t arr[], unsigned int len)
{
unsigned int i;
if (0 != arr) {
for (i = len; i > 0U; i--) {
if (++arr[i-1] != 0U) {
break;
}
}
}
}
/**
* @brief CTR PRNG update
 * Updates the internal state of the supplied CTR PRNG context
* @return none
* @note Assumes: providedData is (TC_AES_KEY_SIZE + TC_AES_BLOCK_SIZE) bytes long
* @param ctx IN/OUT -- CTR PRNG state
* @param providedData IN -- data used when updating the internal state
*/
static void tc_ctr_prng_update(TCCtrPrng_t * const ctx, uint8_t const * const providedData)
{
if (0 != ctx) {
/* 10.2.1.2 step 1 */
uint8_t temp[TC_AES_KEY_SIZE + TC_AES_BLOCK_SIZE];
unsigned int len = 0U;
/* 10.2.1.2 step 2 */
while (len < sizeof temp) {
unsigned int blocklen = sizeof(temp) - len;
uint8_t output_block[TC_AES_BLOCK_SIZE];
/* 10.2.1.2 step 2.1 */
arrInc(ctx->V, sizeof ctx->V);
/* 10.2.1.2 step 2.2 */
if (blocklen > TC_AES_BLOCK_SIZE) {
blocklen = TC_AES_BLOCK_SIZE;
}
(void)tc_aes_encrypt(output_block, ctx->V, &ctx->key);
/* 10.2.1.2 step 2.3/step 3 */
memcpy(&(temp[len]), output_block, blocklen);
len += blocklen;
}
/* 10.2.1.2 step 4 */
if (0 != providedData) {
unsigned int i;
for (i = 0U; i < sizeof temp; i++) {
temp[i] ^= providedData[i];
}
}
/* 10.2.1.2 step 5 */
(void)tc_aes128_set_encrypt_key(&ctx->key, temp);
/* 10.2.1.2 step 6 */
memcpy(ctx->V, &(temp[TC_AES_KEY_SIZE]), TC_AES_BLOCK_SIZE);
}
}
int tc_ctr_prng_init(TCCtrPrng_t * const ctx,
uint8_t const * const entropy,
unsigned int entropyLen,
uint8_t const * const personalization,
unsigned int pLen)
{
int result = TC_CRYPTO_FAIL;
unsigned int i;
uint8_t personalization_buf[TC_AES_KEY_SIZE + TC_AES_BLOCK_SIZE] = {0U};
uint8_t seed_material[TC_AES_KEY_SIZE + TC_AES_BLOCK_SIZE];
uint8_t zeroArr[TC_AES_BLOCK_SIZE] = {0U};
if (0 != personalization) {
/* 10.2.1.3.1 step 1 */
unsigned int len = pLen;
if (len > sizeof personalization_buf) {
len = sizeof personalization_buf;
}
/* 10.2.1.3.1 step 2 */
memcpy(personalization_buf, personalization, len);
}
if ((0 != ctx) && (0 != entropy) && (entropyLen >= sizeof seed_material)) {
/* 10.2.1.3.1 step 3 */
memcpy(seed_material, entropy, sizeof seed_material);
for (i = 0U; i < sizeof seed_material; i++) {
seed_material[i] ^= personalization_buf[i];
}
/* 10.2.1.3.1 step 4 */
(void)tc_aes128_set_encrypt_key(&ctx->key, zeroArr);
/* 10.2.1.3.1 step 5 */
memset(ctx->V, 0x00, sizeof ctx->V);
/* 10.2.1.3.1 step 6 */
tc_ctr_prng_update(ctx, seed_material);
/* 10.2.1.3.1 step 7 */
ctx->reseedCount = 1U;
result = TC_CRYPTO_SUCCESS;
}
return result;
}
int tc_ctr_prng_reseed(TCCtrPrng_t * const ctx,
uint8_t const * const entropy,
unsigned int entropyLen,
uint8_t const * const additional_input,
unsigned int additionallen)
{
unsigned int i;
int result = TC_CRYPTO_FAIL;
uint8_t additional_input_buf[TC_AES_KEY_SIZE + TC_AES_BLOCK_SIZE] = {0U};
uint8_t seed_material[TC_AES_KEY_SIZE + TC_AES_BLOCK_SIZE];
if (0 != additional_input) {
/* 10.2.1.4.1 step 1 */
unsigned int len = additionallen;
if (len > sizeof additional_input_buf) {
len = sizeof additional_input_buf;
}
/* 10.2.1.4.1 step 2 */
memcpy(additional_input_buf, additional_input, len);
}
unsigned int seedlen = (unsigned int)TC_AES_KEY_SIZE + (unsigned int)TC_AES_BLOCK_SIZE;
if ((0 != ctx) && (entropyLen >= seedlen)) {
/* 10.2.1.4.1 step 3 */
memcpy(seed_material, entropy, sizeof seed_material);
for (i = 0U; i < sizeof seed_material; i++) {
seed_material[i] ^= additional_input_buf[i];
}
/* 10.2.1.4.1 step 4 */
tc_ctr_prng_update(ctx, seed_material);
/* 10.2.1.4.1 step 5 */
ctx->reseedCount = 1U;
result = TC_CRYPTO_SUCCESS;
}
return result;
}
int tc_ctr_prng_generate(TCCtrPrng_t * const ctx,
uint8_t const * const additional_input,
unsigned int additionallen,
uint8_t * const out,
unsigned int outlen)
{
/* 2^48 - see section 10.2.1 */
static const uint64_t MAX_REQS_BEFORE_RESEED = 0x1000000000000ULL;
/* 2^19 bits - see section 10.2.1 */
static const unsigned int MAX_BYTES_PER_REQ = 65536U;
int result = TC_CRYPTO_FAIL;
if ((0 != ctx) && (0 != out) && (outlen < MAX_BYTES_PER_REQ)) {
/* 10.2.1.5.1 step 1 */
if (ctx->reseedCount > MAX_REQS_BEFORE_RESEED) {
result = TC_CTR_PRNG_RESEED_REQ;
} else {
uint8_t additional_input_buf[TC_AES_KEY_SIZE + TC_AES_BLOCK_SIZE] = {0U};
if (0 != additional_input) {
/* 10.2.1.5.1 step 2 */
unsigned int len = additionallen;
if (len > sizeof additional_input_buf) {
len = sizeof additional_input_buf;
}
memcpy(additional_input_buf, additional_input, len);
tc_ctr_prng_update(ctx, additional_input_buf);
}
/* 10.2.1.5.1 step 3 - implicit */
/* 10.2.1.5.1 step 4 */
unsigned int len = 0U;
while (len < outlen) {
unsigned int blocklen = outlen - len;
uint8_t output_block[TC_AES_BLOCK_SIZE];
/* 10.2.1.5.1 step 4.1 */
arrInc(ctx->V, sizeof ctx->V);
/* 10.2.1.5.1 step 4.2 */
(void)tc_aes_encrypt(output_block, ctx->V, &ctx->key);
/* 10.2.1.5.1 step 4.3/step 5 */
if (blocklen > TC_AES_BLOCK_SIZE) {
blocklen = TC_AES_BLOCK_SIZE;
}
memcpy(&(out[len]), output_block, blocklen);
len += blocklen;
}
/* 10.2.1.5.1 step 6 */
tc_ctr_prng_update(ctx, additional_input_buf);
/* 10.2.1.5.1 step 7 */
ctx->reseedCount++;
/* 10.2.1.5.1 step 8 */
result = TC_CRYPTO_SUCCESS;
}
}
return result;
}
void tc_ctr_prng_uninstantiate(TCCtrPrng_t * const ctx)
{
if (0 != ctx) {
memset(ctx->key.words, 0x00, sizeof ctx->key.words);
memset(ctx->V, 0x00, sizeof ctx->V);
ctx->reseedCount = 0U;
}
}
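/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): instantiates the CTR-DRBG, generates outlen bytes, and wipes the
 * state. TCCtrPrng_t is the context structure from tinycrypt/ctr_prng.h,
 * and the caller must supply at least TC_AES_KEY_SIZE + TC_AES_BLOCK_SIZE
 * (32) bytes of real entropy; the zeroed buffer below is a placeholder.
 */
int ctr_prng_example(uint8_t *out, unsigned int outlen)
{
    TCCtrPrng_t ctx;
    uint8_t entropy[TC_AES_KEY_SIZE + TC_AES_BLOCK_SIZE] = { 0 };
    int rc;

    if (tc_ctr_prng_init(&ctx, entropy, sizeof(entropy),
                         (const uint8_t *)0, 0) != TC_CRYPTO_SUCCESS) {
        return TC_CRYPTO_FAIL;
    }
    rc = tc_ctr_prng_generate(&ctx, (const uint8_t *)0, 0, out, outlen);
    if (rc == TC_CTR_PRNG_RESEED_REQ) {
        /* reseed with fresh entropy via tc_ctr_prng_reseed(), then retry */
    }
    tc_ctr_prng_uninstantiate(&ctx);
    return rc;
}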

View file

@ -0,0 +1,942 @@
/* ecc.c - TinyCrypt implementation of common ECC functions */
/*
* Copyright (c) 2014, Kenneth MacKay
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Copyright (C) 2017 by Intel Corporation, All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <tinycrypt/ecc.h>
#include <tinycrypt/ecc_platform_specific.h>
#include <string.h>
/* IMPORTANT: Make sure a cryptographically-secure PRNG is set and the platform
* has access to enough entropy in order to feed the PRNG regularly. */
#if default_RNG_defined
static uECC_RNG_Function g_rng_function = &default_CSPRNG;
#else
static uECC_RNG_Function g_rng_function = 0;
#endif
void uECC_set_rng(uECC_RNG_Function rng_function)
{
g_rng_function = rng_function;
}
uECC_RNG_Function uECC_get_rng(void)
{
return g_rng_function;
}
int uECC_curve_private_key_size(uECC_Curve curve)
{
return BITS_TO_BYTES(curve->num_n_bits);
}
int uECC_curve_public_key_size(uECC_Curve curve)
{
return 2 * curve->num_bytes;
}
void uECC_vli_clear(uECC_word_t *vli, wordcount_t num_words)
{
wordcount_t i;
for (i = 0; i < num_words; ++i) {
vli[i] = 0;
}
}
uECC_word_t uECC_vli_isZero(const uECC_word_t *vli, wordcount_t num_words)
{
uECC_word_t bits = 0;
wordcount_t i;
for (i = 0; i < num_words; ++i) {
bits |= vli[i];
}
return (bits == 0);
}
uECC_word_t uECC_vli_testBit(const uECC_word_t *vli, bitcount_t bit)
{
return (vli[bit >> uECC_WORD_BITS_SHIFT] &
((uECC_word_t)1 << (bit & uECC_WORD_BITS_MASK)));
}
/* Counts the number of words in vli. */
static wordcount_t vli_numDigits(const uECC_word_t *vli,
const wordcount_t max_words)
{
wordcount_t i;
/* Search from the end until we find a non-zero digit. We do it in reverse
* because we expect that most digits will be nonzero. */
for (i = max_words - 1; i >= 0 && vli[i] == 0; --i) {
}
return (i + 1);
}
bitcount_t uECC_vli_numBits(const uECC_word_t *vli,
const wordcount_t max_words)
{
uECC_word_t i;
uECC_word_t digit;
wordcount_t num_digits = vli_numDigits(vli, max_words);
if (num_digits == 0) {
return 0;
}
digit = vli[num_digits - 1];
for (i = 0; digit; ++i) {
digit >>= 1;
}
return (((bitcount_t)(num_digits - 1) << uECC_WORD_BITS_SHIFT) + i);
}
void uECC_vli_set(uECC_word_t *dest, const uECC_word_t *src,
wordcount_t num_words)
{
wordcount_t i;
for (i = 0; i < num_words; ++i) {
dest[i] = src[i];
}
}
cmpresult_t uECC_vli_cmp_unsafe(const uECC_word_t *left,
const uECC_word_t *right,
wordcount_t num_words)
{
wordcount_t i;
for (i = num_words - 1; i >= 0; --i) {
if (left[i] > right[i]) {
return 1;
} else if (left[i] < right[i]) {
return -1;
}
}
return 0;
}
uECC_word_t uECC_vli_equal(const uECC_word_t *left, const uECC_word_t *right,
wordcount_t num_words)
{
uECC_word_t diff = 0;
wordcount_t i;
for (i = num_words - 1; i >= 0; --i) {
diff |= (left[i] ^ right[i]);
}
return !(diff == 0);
}
uECC_word_t cond_set(uECC_word_t p_true, uECC_word_t p_false, unsigned int cond)
{
return (p_true*(cond)) | (p_false*(!cond));
}
/* Computes result = left - right, returning borrow, in constant time.
* Can modify in place. */
uECC_word_t uECC_vli_sub(uECC_word_t *result, const uECC_word_t *left,
const uECC_word_t *right, wordcount_t num_words)
{
uECC_word_t borrow = 0;
wordcount_t i;
for (i = 0; i < num_words; ++i) {
uECC_word_t diff = left[i] - right[i] - borrow;
uECC_word_t val = (diff > left[i]);
borrow = cond_set(val, borrow, (diff != left[i]));
result[i] = diff;
}
return borrow;
}
/* Computes result = left + right, returning carry, in constant time.
* Can modify in place. */
static uECC_word_t uECC_vli_add(uECC_word_t *result, const uECC_word_t *left,
const uECC_word_t *right, wordcount_t num_words)
{
uECC_word_t carry = 0;
wordcount_t i;
for (i = 0; i < num_words; ++i) {
uECC_word_t sum = left[i] + right[i] + carry;
uECC_word_t val = (sum < left[i]);
carry = cond_set(val, carry, (sum != left[i]));
result[i] = sum;
}
return carry;
}
cmpresult_t uECC_vli_cmp(const uECC_word_t *left, const uECC_word_t *right,
wordcount_t num_words)
{
uECC_word_t tmp[NUM_ECC_WORDS];
uECC_word_t neg = !!uECC_vli_sub(tmp, left, right, num_words);
uECC_word_t equal = uECC_vli_isZero(tmp, num_words);
return (!equal - 2 * neg);
}
/* Computes vli = vli >> 1. */
static void uECC_vli_rshift1(uECC_word_t *vli, wordcount_t num_words)
{
uECC_word_t *end = vli;
uECC_word_t carry = 0;
vli += num_words;
while (vli-- > end) {
uECC_word_t temp = *vli;
*vli = (temp >> 1) | carry;
carry = temp << (uECC_WORD_BITS - 1);
}
}
static void muladd(uECC_word_t a, uECC_word_t b, uECC_word_t *r0,
uECC_word_t *r1, uECC_word_t *r2)
{
uECC_dword_t p = (uECC_dword_t)a * b;
uECC_dword_t r01 = ((uECC_dword_t)(*r1) << uECC_WORD_BITS) | *r0;
r01 += p;
*r2 += (r01 < p);
*r1 = r01 >> uECC_WORD_BITS;
*r0 = (uECC_word_t)r01;
}
/* Computes result = left * right. Result must be 2 * num_words long. */
static void uECC_vli_mult(uECC_word_t *result, const uECC_word_t *left,
const uECC_word_t *right, wordcount_t num_words)
{
uECC_word_t r0 = 0;
uECC_word_t r1 = 0;
uECC_word_t r2 = 0;
wordcount_t i, k;
/* Compute each digit of result in sequence, maintaining the carries. */
for (k = 0; k < num_words; ++k) {
for (i = 0; i <= k; ++i) {
muladd(left[i], right[k - i], &r0, &r1, &r2);
}
result[k] = r0;
r0 = r1;
r1 = r2;
r2 = 0;
}
for (k = num_words; k < num_words * 2 - 1; ++k) {
for (i = (k + 1) - num_words; i < num_words; ++i) {
muladd(left[i], right[k - i], &r0, &r1, &r2);
}
result[k] = r0;
r0 = r1;
r1 = r2;
r2 = 0;
}
result[num_words * 2 - 1] = r0;
}
void uECC_vli_modAdd(uECC_word_t *result, const uECC_word_t *left,
const uECC_word_t *right, const uECC_word_t *mod,
wordcount_t num_words)
{
uECC_word_t carry = uECC_vli_add(result, left, right, num_words);
if (carry || uECC_vli_cmp_unsafe(mod, result, num_words) != 1) {
/* result > mod (result = mod + remainder), so subtract mod to get
* remainder. */
uECC_vli_sub(result, result, mod, num_words);
}
}
void uECC_vli_modSub(uECC_word_t *result, const uECC_word_t *left,
const uECC_word_t *right, const uECC_word_t *mod,
wordcount_t num_words)
{
uECC_word_t l_borrow = uECC_vli_sub(result, left, right, num_words);
if (l_borrow) {
/* In this case, result == -diff == (max int) - diff. Since -x % d == d - x,
* we can get the correct result from result + mod (with overflow). */
uECC_vli_add(result, result, mod, num_words);
}
}
/* Computes result = product % mod, where product is 2N words long. */
/* Currently only designed to work for curve_p or curve_n. */
void uECC_vli_mmod(uECC_word_t *result, uECC_word_t *product,
const uECC_word_t *mod, wordcount_t num_words)
{
uECC_word_t mod_multiple[2 * NUM_ECC_WORDS];
uECC_word_t tmp[2 * NUM_ECC_WORDS];
uECC_word_t *v[2] = {tmp, product};
uECC_word_t index;
/* Shift mod so its highest set bit is at the maximum position. */
bitcount_t shift = (num_words * 2 * uECC_WORD_BITS) -
uECC_vli_numBits(mod, num_words);
wordcount_t word_shift = shift / uECC_WORD_BITS;
wordcount_t bit_shift = shift % uECC_WORD_BITS;
uECC_word_t carry = 0;
uECC_vli_clear(mod_multiple, word_shift);
if (bit_shift > 0) {
for(index = 0; index < (uECC_word_t)num_words; ++index) {
mod_multiple[word_shift + index] = (mod[index] << bit_shift) | carry;
carry = mod[index] >> (uECC_WORD_BITS - bit_shift);
}
} else {
uECC_vli_set(mod_multiple + word_shift, mod, num_words);
}
for (index = 1; shift >= 0; --shift) {
uECC_word_t borrow = 0;
wordcount_t i;
for (i = 0; i < num_words * 2; ++i) {
uECC_word_t diff = v[index][i] - mod_multiple[i] - borrow;
if (diff != v[index][i]) {
borrow = (diff > v[index][i]);
}
v[1 - index][i] = diff;
}
/* Swap the index if there was no borrow */
index = !(index ^ borrow);
uECC_vli_rshift1(mod_multiple, num_words);
mod_multiple[num_words - 1] |= mod_multiple[num_words] <<
(uECC_WORD_BITS - 1);
uECC_vli_rshift1(mod_multiple + num_words, num_words);
}
uECC_vli_set(result, v[index], num_words);
}
void uECC_vli_modMult(uECC_word_t *result, const uECC_word_t *left,
const uECC_word_t *right, const uECC_word_t *mod,
wordcount_t num_words)
{
uECC_word_t product[2 * NUM_ECC_WORDS];
uECC_vli_mult(product, left, right, num_words);
uECC_vli_mmod(result, product, mod, num_words);
}
void uECC_vli_modMult_fast(uECC_word_t *result, const uECC_word_t *left,
const uECC_word_t *right, uECC_Curve curve)
{
uECC_word_t product[2 * NUM_ECC_WORDS];
uECC_vli_mult(product, left, right, curve->num_words);
curve->mmod_fast(result, product);
}
static void uECC_vli_modSquare_fast(uECC_word_t *result,
const uECC_word_t *left,
uECC_Curve curve)
{
uECC_vli_modMult_fast(result, left, left, curve);
}
#define EVEN(vli) (!(vli[0] & 1))
static void vli_modInv_update(uECC_word_t *uv,
const uECC_word_t *mod,
wordcount_t num_words)
{
uECC_word_t carry = 0;
if (!EVEN(uv)) {
carry = uECC_vli_add(uv, uv, mod, num_words);
}
uECC_vli_rshift1(uv, num_words);
if (carry) {
uv[num_words - 1] |= HIGH_BIT_SET;
}
}
void uECC_vli_modInv(uECC_word_t *result, const uECC_word_t *input,
const uECC_word_t *mod, wordcount_t num_words)
{
uECC_word_t a[NUM_ECC_WORDS], b[NUM_ECC_WORDS];
uECC_word_t u[NUM_ECC_WORDS], v[NUM_ECC_WORDS];
cmpresult_t cmpResult;
if (uECC_vli_isZero(input, num_words)) {
uECC_vli_clear(result, num_words);
return;
}
uECC_vli_set(a, input, num_words);
uECC_vli_set(b, mod, num_words);
uECC_vli_clear(u, num_words);
u[0] = 1;
uECC_vli_clear(v, num_words);
while ((cmpResult = uECC_vli_cmp_unsafe(a, b, num_words)) != 0) {
if (EVEN(a)) {
uECC_vli_rshift1(a, num_words);
vli_modInv_update(u, mod, num_words);
} else if (EVEN(b)) {
uECC_vli_rshift1(b, num_words);
vli_modInv_update(v, mod, num_words);
} else if (cmpResult > 0) {
uECC_vli_sub(a, a, b, num_words);
uECC_vli_rshift1(a, num_words);
if (uECC_vli_cmp_unsafe(u, v, num_words) < 0) {
uECC_vli_add(u, u, mod, num_words);
}
uECC_vli_sub(u, u, v, num_words);
vli_modInv_update(u, mod, num_words);
} else {
uECC_vli_sub(b, b, a, num_words);
uECC_vli_rshift1(b, num_words);
if (uECC_vli_cmp_unsafe(v, u, num_words) < 0) {
uECC_vli_add(v, v, mod, num_words);
}
uECC_vli_sub(v, v, u, num_words);
vli_modInv_update(v, mod, num_words);
}
}
uECC_vli_set(result, u, num_words);
}
/* ------ Point operations ------ */
void double_jacobian_default(uECC_word_t * X1, uECC_word_t * Y1,
uECC_word_t * Z1, uECC_Curve curve)
{
/* t1 = X, t2 = Y, t3 = Z */
uECC_word_t t4[NUM_ECC_WORDS];
uECC_word_t t5[NUM_ECC_WORDS];
wordcount_t num_words = curve->num_words;
if (uECC_vli_isZero(Z1, num_words)) {
return;
}
uECC_vli_modSquare_fast(t4, Y1, curve); /* t4 = y1^2 */
uECC_vli_modMult_fast(t5, X1, t4, curve); /* t5 = x1*y1^2 = A */
uECC_vli_modSquare_fast(t4, t4, curve); /* t4 = y1^4 */
uECC_vli_modMult_fast(Y1, Y1, Z1, curve); /* t2 = y1*z1 = z3 */
uECC_vli_modSquare_fast(Z1, Z1, curve); /* t3 = z1^2 */
uECC_vli_modAdd(X1, X1, Z1, curve->p, num_words); /* t1 = x1 + z1^2 */
uECC_vli_modAdd(Z1, Z1, Z1, curve->p, num_words); /* t3 = 2*z1^2 */
uECC_vli_modSub(Z1, X1, Z1, curve->p, num_words); /* t3 = x1 - z1^2 */
uECC_vli_modMult_fast(X1, X1, Z1, curve); /* t1 = x1^2 - z1^4 */
uECC_vli_modAdd(Z1, X1, X1, curve->p, num_words); /* t3 = 2*(x1^2 - z1^4) */
uECC_vli_modAdd(X1, X1, Z1, curve->p, num_words); /* t1 = 3*(x1^2 - z1^4) */
if (uECC_vli_testBit(X1, 0)) {
uECC_word_t l_carry = uECC_vli_add(X1, X1, curve->p, num_words);
uECC_vli_rshift1(X1, num_words);
X1[num_words - 1] |= l_carry << (uECC_WORD_BITS - 1);
} else {
uECC_vli_rshift1(X1, num_words);
}
/* t1 = 3/2*(x1^2 - z1^4) = B */
uECC_vli_modSquare_fast(Z1, X1, curve); /* t3 = B^2 */
uECC_vli_modSub(Z1, Z1, t5, curve->p, num_words); /* t3 = B^2 - A */
uECC_vli_modSub(Z1, Z1, t5, curve->p, num_words); /* t3 = B^2 - 2A = x3 */
uECC_vli_modSub(t5, t5, Z1, curve->p, num_words); /* t5 = A - x3 */
uECC_vli_modMult_fast(X1, X1, t5, curve); /* t1 = B * (A - x3) */
/* t4 = B * (A - x3) - y1^4 = y3: */
uECC_vli_modSub(t4, X1, t4, curve->p, num_words);
uECC_vli_set(X1, Z1, num_words);
uECC_vli_set(Z1, Y1, num_words);
uECC_vli_set(Y1, t4, num_words);
}
void x_side_default(uECC_word_t *result,
const uECC_word_t *x,
uECC_Curve curve)
{
uECC_word_t _3[NUM_ECC_WORDS] = {3}; /* -a = 3 */
wordcount_t num_words = curve->num_words;
uECC_vli_modSquare_fast(result, x, curve); /* r = x^2 */
uECC_vli_modSub(result, result, _3, curve->p, num_words); /* r = x^2 - 3 */
uECC_vli_modMult_fast(result, result, x, curve); /* r = x^3 - 3x */
/* r = x^3 - 3x + b: */
uECC_vli_modAdd(result, result, curve->b, curve->p, num_words);
}
uECC_Curve uECC_secp256r1(void)
{
return &curve_secp256r1;
}
void vli_mmod_fast_secp256r1(unsigned int *result, unsigned int*product)
{
unsigned int tmp[NUM_ECC_WORDS];
int carry;
/* t */
uECC_vli_set(result, product, NUM_ECC_WORDS);
/* s1 */
tmp[0] = tmp[1] = tmp[2] = 0;
tmp[3] = product[11];
tmp[4] = product[12];
tmp[5] = product[13];
tmp[6] = product[14];
tmp[7] = product[15];
carry = uECC_vli_add(tmp, tmp, tmp, NUM_ECC_WORDS);
carry += uECC_vli_add(result, result, tmp, NUM_ECC_WORDS);
/* s2 */
tmp[3] = product[12];
tmp[4] = product[13];
tmp[5] = product[14];
tmp[6] = product[15];
tmp[7] = 0;
carry += uECC_vli_add(tmp, tmp, tmp, NUM_ECC_WORDS);
carry += uECC_vli_add(result, result, tmp, NUM_ECC_WORDS);
/* s3 */
tmp[0] = product[8];
tmp[1] = product[9];
tmp[2] = product[10];
tmp[3] = tmp[4] = tmp[5] = 0;
tmp[6] = product[14];
tmp[7] = product[15];
carry += uECC_vli_add(result, result, tmp, NUM_ECC_WORDS);
/* s4 */
tmp[0] = product[9];
tmp[1] = product[10];
tmp[2] = product[11];
tmp[3] = product[13];
tmp[4] = product[14];
tmp[5] = product[15];
tmp[6] = product[13];
tmp[7] = product[8];
carry += uECC_vli_add(result, result, tmp, NUM_ECC_WORDS);
/* d1 */
tmp[0] = product[11];
tmp[1] = product[12];
tmp[2] = product[13];
tmp[3] = tmp[4] = tmp[5] = 0;
tmp[6] = product[8];
tmp[7] = product[10];
carry -= uECC_vli_sub(result, result, tmp, NUM_ECC_WORDS);
/* d2 */
tmp[0] = product[12];
tmp[1] = product[13];
tmp[2] = product[14];
tmp[3] = product[15];
tmp[4] = tmp[5] = 0;
tmp[6] = product[9];
tmp[7] = product[11];
carry -= uECC_vli_sub(result, result, tmp, NUM_ECC_WORDS);
/* d3 */
tmp[0] = product[13];
tmp[1] = product[14];
tmp[2] = product[15];
tmp[3] = product[8];
tmp[4] = product[9];
tmp[5] = product[10];
tmp[6] = 0;
tmp[7] = product[12];
carry -= uECC_vli_sub(result, result, tmp, NUM_ECC_WORDS);
/* d4 */
tmp[0] = product[14];
tmp[1] = product[15];
tmp[2] = 0;
tmp[3] = product[9];
tmp[4] = product[10];
tmp[5] = product[11];
tmp[6] = 0;
tmp[7] = product[13];
carry -= uECC_vli_sub(result, result, tmp, NUM_ECC_WORDS);
if (carry < 0) {
do {
carry += uECC_vli_add(result, result, curve_secp256r1.p, NUM_ECC_WORDS);
}
while (carry < 0);
} else {
while (carry ||
uECC_vli_cmp_unsafe(curve_secp256r1.p, result, NUM_ECC_WORDS) != 1) {
carry -= uECC_vli_sub(result, result, curve_secp256r1.p, NUM_ECC_WORDS);
}
}
}
uECC_word_t EccPoint_isZero(const uECC_word_t *point, uECC_Curve curve)
{
return uECC_vli_isZero(point, curve->num_words * 2);
}
void apply_z(uECC_word_t * X1, uECC_word_t * Y1, const uECC_word_t * const Z,
uECC_Curve curve)
{
uECC_word_t t1[NUM_ECC_WORDS];
uECC_vli_modSquare_fast(t1, Z, curve); /* z^2 */
uECC_vli_modMult_fast(X1, X1, t1, curve); /* x1 * z^2 */
uECC_vli_modMult_fast(t1, t1, Z, curve); /* z^3 */
uECC_vli_modMult_fast(Y1, Y1, t1, curve); /* y1 * z^3 */
}
/* P = (x1, y1) => 2P, (x2, y2) => P' */
static void XYcZ_initial_double(uECC_word_t * X1, uECC_word_t * Y1,
uECC_word_t * X2, uECC_word_t * Y2,
const uECC_word_t * const initial_Z,
uECC_Curve curve)
{
uECC_word_t z[NUM_ECC_WORDS];
wordcount_t num_words = curve->num_words;
if (initial_Z) {
uECC_vli_set(z, initial_Z, num_words);
} else {
uECC_vli_clear(z, num_words);
z[0] = 1;
}
uECC_vli_set(X2, X1, num_words);
uECC_vli_set(Y2, Y1, num_words);
apply_z(X1, Y1, z, curve);
curve->double_jacobian(X1, Y1, z, curve);
apply_z(X2, Y2, z, curve);
}
void XYcZ_add(uECC_word_t * X1, uECC_word_t * Y1,
uECC_word_t * X2, uECC_word_t * Y2,
uECC_Curve curve)
{
/* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */
uECC_word_t t5[NUM_ECC_WORDS];
wordcount_t num_words = curve->num_words;
uECC_vli_modSub(t5, X2, X1, curve->p, num_words); /* t5 = x2 - x1 */
uECC_vli_modSquare_fast(t5, t5, curve); /* t5 = (x2 - x1)^2 = A */
uECC_vli_modMult_fast(X1, X1, t5, curve); /* t1 = x1*A = B */
uECC_vli_modMult_fast(X2, X2, t5, curve); /* t3 = x2*A = C */
uECC_vli_modSub(Y2, Y2, Y1, curve->p, num_words); /* t4 = y2 - y1 */
uECC_vli_modSquare_fast(t5, Y2, curve); /* t5 = (y2 - y1)^2 = D */
uECC_vli_modSub(t5, t5, X1, curve->p, num_words); /* t5 = D - B */
uECC_vli_modSub(t5, t5, X2, curve->p, num_words); /* t5 = D - B - C = x3 */
uECC_vli_modSub(X2, X2, X1, curve->p, num_words); /* t3 = C - B */
uECC_vli_modMult_fast(Y1, Y1, X2, curve); /* t2 = y1*(C - B) */
uECC_vli_modSub(X2, X1, t5, curve->p, num_words); /* t3 = B - x3 */
uECC_vli_modMult_fast(Y2, Y2, X2, curve); /* t4 = (y2 - y1)*(B - x3) */
uECC_vli_modSub(Y2, Y2, Y1, curve->p, num_words); /* t4 = y3 */
uECC_vli_set(X2, t5, num_words);
}
/* Input P = (x1, y1, Z), Q = (x2, y2, Z)
Output P + Q = (x3, y3, Z3), P - Q = (x3', y3', Z3)
or P => P - Q, Q => P + Q
*/
static void XYcZ_addC(uECC_word_t * X1, uECC_word_t * Y1,
uECC_word_t * X2, uECC_word_t * Y2,
uECC_Curve curve)
{
/* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */
uECC_word_t t5[NUM_ECC_WORDS];
uECC_word_t t6[NUM_ECC_WORDS];
uECC_word_t t7[NUM_ECC_WORDS];
wordcount_t num_words = curve->num_words;
uECC_vli_modSub(t5, X2, X1, curve->p, num_words); /* t5 = x2 - x1 */
uECC_vli_modSquare_fast(t5, t5, curve); /* t5 = (x2 - x1)^2 = A */
uECC_vli_modMult_fast(X1, X1, t5, curve); /* t1 = x1*A = B */
uECC_vli_modMult_fast(X2, X2, t5, curve); /* t3 = x2*A = C */
uECC_vli_modAdd(t5, Y2, Y1, curve->p, num_words); /* t5 = y2 + y1 */
uECC_vli_modSub(Y2, Y2, Y1, curve->p, num_words); /* t4 = y2 - y1 */
uECC_vli_modSub(t6, X2, X1, curve->p, num_words); /* t6 = C - B */
uECC_vli_modMult_fast(Y1, Y1, t6, curve); /* t2 = y1 * (C - B) = E */
uECC_vli_modAdd(t6, X1, X2, curve->p, num_words); /* t6 = B + C */
uECC_vli_modSquare_fast(X2, Y2, curve); /* t3 = (y2 - y1)^2 = D */
uECC_vli_modSub(X2, X2, t6, curve->p, num_words); /* t3 = D - (B + C) = x3 */
uECC_vli_modSub(t7, X1, X2, curve->p, num_words); /* t7 = B - x3 */
uECC_vli_modMult_fast(Y2, Y2, t7, curve); /* t4 = (y2 - y1)*(B - x3) */
/* t4 = (y2 - y1)*(B - x3) - E = y3: */
uECC_vli_modSub(Y2, Y2, Y1, curve->p, num_words);
uECC_vli_modSquare_fast(t7, t5, curve); /* t7 = (y2 + y1)^2 = F */
uECC_vli_modSub(t7, t7, t6, curve->p, num_words); /* t7 = F - (B + C) = x3' */
uECC_vli_modSub(t6, t7, X1, curve->p, num_words); /* t6 = x3' - B */
uECC_vli_modMult_fast(t6, t6, t5, curve); /* t6 = (y2+y1)*(x3' - B) */
/* t2 = (y2+y1)*(x3' - B) - E = y3': */
uECC_vli_modSub(Y1, t6, Y1, curve->p, num_words);
uECC_vli_set(X1, t7, num_words);
}
void EccPoint_mult(uECC_word_t * result, const uECC_word_t * point,
const uECC_word_t * scalar,
const uECC_word_t * initial_Z,
bitcount_t num_bits, uECC_Curve curve)
{
/* R0 and R1 */
uECC_word_t Rx[2][NUM_ECC_WORDS];
uECC_word_t Ry[2][NUM_ECC_WORDS];
uECC_word_t z[NUM_ECC_WORDS];
bitcount_t i;
uECC_word_t nb;
wordcount_t num_words = curve->num_words;
uECC_vli_set(Rx[1], point, num_words);
uECC_vli_set(Ry[1], point + num_words, num_words);
XYcZ_initial_double(Rx[1], Ry[1], Rx[0], Ry[0], initial_Z, curve);
for (i = num_bits - 2; i > 0; --i) {
nb = !uECC_vli_testBit(scalar, i);
XYcZ_addC(Rx[1 - nb], Ry[1 - nb], Rx[nb], Ry[nb], curve);
XYcZ_add(Rx[nb], Ry[nb], Rx[1 - nb], Ry[1 - nb], curve);
}
nb = !uECC_vli_testBit(scalar, 0);
XYcZ_addC(Rx[1 - nb], Ry[1 - nb], Rx[nb], Ry[nb], curve);
/* Find final 1/Z value. */
uECC_vli_modSub(z, Rx[1], Rx[0], curve->p, num_words); /* X1 - X0 */
uECC_vli_modMult_fast(z, z, Ry[1 - nb], curve); /* Yb * (X1 - X0) */
uECC_vli_modMult_fast(z, z, point, curve); /* xP * Yb * (X1 - X0) */
uECC_vli_modInv(z, z, curve->p, num_words); /* 1 / (xP * Yb * (X1 - X0))*/
/* yP / (xP * Yb * (X1 - X0)) */
uECC_vli_modMult_fast(z, z, point + num_words, curve);
/* Xb * yP / (xP * Yb * (X1 - X0)) */
uECC_vli_modMult_fast(z, z, Rx[1 - nb], curve);
/* End 1/Z calculation */
XYcZ_add(Rx[nb], Ry[nb], Rx[1 - nb], Ry[1 - nb], curve);
apply_z(Rx[0], Ry[0], z, curve);
uECC_vli_set(result, Rx[0], num_words);
uECC_vli_set(result + num_words, Ry[0], num_words);
}
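/* Illustrative note on the ladder above (not compiled): at the top of each
 * iteration R0 and R1 hold kP and (k+1)P for the scalar prefix processed so
 * far and share a common, implicit Z coordinate; the XYcZ_addC/XYcZ_add pair
 * advances both points with co-Z formulas only, and the true Z is recovered
 * once at the end from the known affine coordinates of the input point. */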
uECC_word_t regularize_k(const uECC_word_t * const k, uECC_word_t *k0,
uECC_word_t *k1, uECC_Curve curve)
{
wordcount_t num_n_words = BITS_TO_WORDS(curve->num_n_bits);
bitcount_t num_n_bits = curve->num_n_bits;
uECC_word_t carry = uECC_vli_add(k0, k, curve->n, num_n_words) ||
(num_n_bits < ((bitcount_t)num_n_words * uECC_WORD_SIZE * 8) &&
uECC_vli_testBit(k0, num_n_bits));
uECC_vli_add(k1, k0, curve->n, num_n_words);
return carry;
}
uECC_word_t EccPoint_compute_public_key(uECC_word_t *result,
uECC_word_t *private_key,
uECC_Curve curve)
{
uECC_word_t tmp1[NUM_ECC_WORDS];
uECC_word_t tmp2[NUM_ECC_WORDS];
uECC_word_t *p2[2] = {tmp1, tmp2};
uECC_word_t carry;
/* Regularize the bitcount for the private key so that attackers cannot
* use a side channel attack to learn the number of leading zeros. */
carry = regularize_k(private_key, tmp1, tmp2, curve);
EccPoint_mult(result, curve->G, p2[!carry], 0, curve->num_n_bits + 1, curve);
if (EccPoint_isZero(result, curve)) {
return 0;
}
return 1;
}
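/* Illustrative note on regularize_k() (not compiled): it derives k0 = k + n
 * and k1 = k + 2n, both congruent to k modulo n, and 'carry' records whether
 * k + n already spilled into bit num_n_bits. The caller multiplies by k0 when
 * carry is set and by k1 otherwise, so the ladder always runs over a scalar
 * of exactly num_n_bits + 1 bits, independent of how many leading zero bits
 * the private key has. */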
/* Converts an integer in uECC native format to big-endian bytes. */
void uECC_vli_nativeToBytes(uint8_t *bytes, int num_bytes,
const unsigned int *native)
{
wordcount_t i;
for (i = 0; i < num_bytes; ++i) {
unsigned b = num_bytes - 1 - i;
bytes[i] = native[b / uECC_WORD_SIZE] >> (8 * (b % uECC_WORD_SIZE));
}
}
/* Converts big-endian bytes to an integer in uECC native format. */
void uECC_vli_bytesToNative(unsigned int *native, const uint8_t *bytes,
int num_bytes)
{
wordcount_t i;
uECC_vli_clear(native, (num_bytes + (uECC_WORD_SIZE - 1)) / uECC_WORD_SIZE);
for (i = 0; i < num_bytes; ++i) {
unsigned b = num_bytes - 1 - i;
native[b / uECC_WORD_SIZE] |=
(uECC_word_t)bytes[i] << (8 * (b % uECC_WORD_SIZE));
}
}
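/* Illustrative example (not compiled): with 32-bit words and num_bytes == 8,
 * the big-endian byte string 01 02 03 04 05 06 07 08 maps to
 * native[1] = 0x01020304 and native[0] = 0x05060708; uECC_vli_nativeToBytes()
 * performs the exact inverse mapping. */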
int uECC_generate_random_int(uECC_word_t *random, const uECC_word_t *top,
wordcount_t num_words)
{
uECC_word_t mask = (uECC_word_t)-1;
uECC_word_t tries;
bitcount_t num_bits = uECC_vli_numBits(top, num_words);
if (!g_rng_function) {
return 0;
}
for (tries = 0; tries < uECC_RNG_MAX_TRIES; ++tries) {
if (!g_rng_function((uint8_t *)random, num_words * uECC_WORD_SIZE)) {
return 0;
}
random[num_words - 1] &=
mask >> ((bitcount_t)(num_words * uECC_WORD_SIZE * 8 - num_bits));
if (!uECC_vli_isZero(random, num_words) &&
uECC_vli_cmp(top, random, num_words) == 1) {
return 1;
}
}
return 0;
}
int uECC_valid_point(const uECC_word_t *point, uECC_Curve curve)
{
uECC_word_t tmp1[NUM_ECC_WORDS];
uECC_word_t tmp2[NUM_ECC_WORDS];
wordcount_t num_words = curve->num_words;
/* The point at infinity is invalid. */
if (EccPoint_isZero(point, curve)) {
return -1;
}
/* x and y must be smaller than p. */
if (uECC_vli_cmp_unsafe(curve->p, point, num_words) != 1 ||
uECC_vli_cmp_unsafe(curve->p, point + num_words, num_words) != 1) {
return -2;
}
uECC_vli_modSquare_fast(tmp1, point + num_words, curve);
curve->x_side(tmp2, point, curve); /* tmp2 = x^3 + ax + b */
/* Make sure that y^2 == x^3 + ax + b */
if (uECC_vli_equal(tmp1, tmp2, num_words) != 0)
return -3;
return 0;
}
int uECC_valid_public_key(const uint8_t *public_key, uECC_Curve curve)
{
uECC_word_t _public[NUM_ECC_WORDS * 2];
uECC_vli_bytesToNative(_public, public_key, curve->num_bytes);
uECC_vli_bytesToNative(
_public + curve->num_words,
public_key + curve->num_bytes,
curve->num_bytes);
if (uECC_vli_cmp_unsafe(_public, curve->G, NUM_ECC_WORDS * 2) == 0) {
return -4;
}
return uECC_valid_point(_public, curve);
}
int uECC_compute_public_key(const uint8_t *private_key, uint8_t *public_key,
uECC_Curve curve)
{
uECC_word_t _private[NUM_ECC_WORDS];
uECC_word_t _public[NUM_ECC_WORDS * 2];
uECC_vli_bytesToNative(
_private,
private_key,
BITS_TO_BYTES(curve->num_n_bits));
/* Make sure the private key is in the range [1, n-1]. */
if (uECC_vli_isZero(_private, BITS_TO_WORDS(curve->num_n_bits))) {
return 0;
}
if (uECC_vli_cmp(curve->n, _private, BITS_TO_WORDS(curve->num_n_bits)) != 1) {
return 0;
}
/* Compute public key. */
if (!EccPoint_compute_public_key(_public, _private, curve)) {
return 0;
}
uECC_vli_nativeToBytes(public_key, curve->num_bytes, _public);
uECC_vli_nativeToBytes(
public_key +
curve->num_bytes, curve->num_bytes, _public + curve->num_words);
return 1;
}
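/* Minimal usage sketch for the public-key helpers above (kept out of the
 * build with #if 0). It assumes NIST P-256 via uECC_secp256r1() and a
 * 32-byte private key 'priv'; error handling is reduced to the return codes
 * shown, and the function name is illustrative only. */
#if 0
static int example_derive_and_check(const uint8_t priv[32])
{
	uint8_t pub[64]; /* X || Y, 32 bytes each for P-256 */
	uECC_Curve curve = uECC_secp256r1();

	if (!uECC_compute_public_key(priv, pub, curve)) {
		return -1; /* private key was 0, >= n, or gave the point at infinity */
	}
	/* uECC_valid_public_key() returns 0 for a point that is on the curve */
	return uECC_valid_public_key(pub, curve);
}
#endif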

View file

@ -0,0 +1,200 @@
/* ec_dh.c - TinyCrypt implementation of EC-DH */
/*
* Copyright (c) 2014, Kenneth MacKay
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright (C) 2017 by Intel Corporation, All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <tinycrypt/constants.h>
#include <tinycrypt/ecc.h>
#include <tinycrypt/ecc_dh.h>
#include <string.h>
#if default_RNG_defined
static uECC_RNG_Function g_rng_function = &default_CSPRNG;
#else
static uECC_RNG_Function g_rng_function = 0;
#endif
int uECC_make_key_with_d(uint8_t *public_key, uint8_t *private_key,
unsigned int *d, uECC_Curve curve)
{
uECC_word_t _private[NUM_ECC_WORDS];
uECC_word_t _public[NUM_ECC_WORDS * 2];
/* This function is designed for test purposes only (such as validating NIST
 * test vectors), since it uses a caller-provided value for d instead of
 * generating it uniformly at random. */
memcpy (_private, d, NUM_ECC_BYTES);
/* Computing public-key from private: */
if (EccPoint_compute_public_key(_public, _private, curve)) {
/* Converting buffers to correct bit order: */
uECC_vli_nativeToBytes(private_key,
BITS_TO_BYTES(curve->num_n_bits),
_private);
uECC_vli_nativeToBytes(public_key,
curve->num_bytes,
_public);
uECC_vli_nativeToBytes(public_key + curve->num_bytes,
curve->num_bytes,
_public + curve->num_words);
/* erasing temporary buffer used to store secret: */
memset(_private, 0, NUM_ECC_BYTES);
return 1;
}
return 0;
}
int uECC_make_key(uint8_t *public_key, uint8_t *private_key, uECC_Curve curve)
{
uECC_word_t _random[NUM_ECC_WORDS * 2];
uECC_word_t _private[NUM_ECC_WORDS];
uECC_word_t _public[NUM_ECC_WORDS * 2];
uECC_word_t tries;
for (tries = 0; tries < uECC_RNG_MAX_TRIES; ++tries) {
/* Generating _private uniformly at random: */
uECC_RNG_Function rng_function = uECC_get_rng();
if (!rng_function ||
!rng_function((uint8_t *)_random, 2 * NUM_ECC_WORDS*uECC_WORD_SIZE)) {
return 0;
}
/* computing modular reduction of _random (see FIPS 186.4 B.4.1): */
uECC_vli_mmod(_private, _random, curve->n, BITS_TO_WORDS(curve->num_n_bits));
/* Computing public-key from private: */
if (EccPoint_compute_public_key(_public, _private, curve)) {
/* Converting buffers to correct bit order: */
uECC_vli_nativeToBytes(private_key,
BITS_TO_BYTES(curve->num_n_bits),
_private);
uECC_vli_nativeToBytes(public_key,
curve->num_bytes,
_public);
uECC_vli_nativeToBytes(public_key + curve->num_bytes,
curve->num_bytes,
_public + curve->num_words);
/* erasing temporary buffer that stored secret: */
memset(_private, 0, NUM_ECC_BYTES);
return 1;
}
}
return 0;
}
int uECC_shared_secret(const uint8_t *public_key, const uint8_t *private_key,
uint8_t *secret, uECC_Curve curve)
{
uECC_word_t _public[NUM_ECC_WORDS * 2];
uECC_word_t _private[NUM_ECC_WORDS];
uECC_word_t tmp[NUM_ECC_WORDS];
uECC_word_t *p2[2] = {_private, tmp};
uECC_word_t *initial_Z = 0;
uECC_word_t carry;
wordcount_t num_words = curve->num_words;
wordcount_t num_bytes = curve->num_bytes;
int r;
/* Converting buffers to correct bit order: */
uECC_vli_bytesToNative(_private,
private_key,
BITS_TO_BYTES(curve->num_n_bits));
uECC_vli_bytesToNative(_public,
public_key,
num_bytes);
uECC_vli_bytesToNative(_public + num_words,
public_key + num_bytes,
num_bytes);
/* Regularize the bitcount for the private key so that attackers cannot use a
* side channel attack to learn the number of leading zeros. */
carry = regularize_k(_private, _private, tmp, curve);
/* If an RNG function was specified, try to get a random initial Z value to
* improve protection against side-channel attacks. */
if (g_rng_function) {
if (!uECC_generate_random_int(p2[carry], curve->p, num_words)) {
r = 0;
goto clear_and_out;
}
initial_Z = p2[carry];
}
EccPoint_mult(_public, _public, p2[!carry], initial_Z, curve->num_n_bits + 1,
curve);
uECC_vli_nativeToBytes(secret, num_bytes, _public);
r = !EccPoint_isZero(_public, curve);
clear_and_out:
/* erasing temporary buffer used to store secret: */
memset(p2, 0, sizeof(p2));
/*__asm volatile("" :: "g"(p2) : "memory");*/
memset(tmp, 0, sizeof(tmp));
/*__asm volatile("" :: "g"(tmp) : "memory");*/
memset(_private, 0, sizeof(_private));
/*__asm volatile("" :: "g"(_private) : "memory");*/
return r;
}
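/* Minimal ECDH usage sketch for the functions above (kept out of the build
 * with #if 0). It assumes an RNG has already been installed (see
 * uECC_platform_specific.c) and uses P-256 buffer sizes; names are
 * illustrative only. */
#if 0
static int example_ecdh(void)
{
	uECC_Curve curve = uECC_secp256r1();
	uint8_t pub_a[64], priv_a[32];
	uint8_t pub_b[64], priv_b[32];
	uint8_t secret_a[32], secret_b[32];

	if (!uECC_make_key(pub_a, priv_a, curve) ||
	    !uECC_make_key(pub_b, priv_b, curve)) {
		return -1; /* RNG missing or key generation failed */
	}
	/* Each side combines its own private key with the peer's public key */
	if (!uECC_shared_secret(pub_b, priv_a, secret_a, curve) ||
	    !uECC_shared_secret(pub_a, priv_b, secret_b, curve)) {
		return -1;
	}
	/* secret_a and secret_b now hold the same 32-byte shared X coordinate */
	return 0;
}
#endif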

View file

@ -0,0 +1,295 @@
/* ec_dsa.c - TinyCrypt implementation of EC-DSA */
/* Copyright (c) 2014, Kenneth MacKay
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.*/
/*
* Copyright (C) 2017 by Intel Corporation, All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <tinycrypt/constants.h>
#include <tinycrypt/ecc.h>
#include <tinycrypt/ecc_dsa.h>
#if default_RNG_defined
static uECC_RNG_Function g_rng_function = &default_CSPRNG;
#else
static uECC_RNG_Function g_rng_function = 0;
#endif
static void bits2int(uECC_word_t *native, const uint8_t *bits,
unsigned bits_size, uECC_Curve curve)
{
unsigned num_n_bytes = BITS_TO_BYTES(curve->num_n_bits);
unsigned num_n_words = BITS_TO_WORDS(curve->num_n_bits);
int shift;
uECC_word_t carry;
uECC_word_t *ptr;
if (bits_size > num_n_bytes) {
bits_size = num_n_bytes;
}
uECC_vli_clear(native, num_n_words);
uECC_vli_bytesToNative(native, bits, bits_size);
if (bits_size * 8 <= (unsigned)curve->num_n_bits) {
return;
}
shift = bits_size * 8 - curve->num_n_bits;
carry = 0;
ptr = native + num_n_words;
while (ptr-- > native) {
uECC_word_t temp = *ptr;
*ptr = (temp >> shift) | carry;
carry = temp << (uECC_WORD_BITS - shift);
}
/* Reduce mod curve_n */
if (uECC_vli_cmp_unsafe(curve->n, native, num_n_words) != 1) {
uECC_vli_sub(native, native, curve->n, num_n_words);
}
}
int uECC_sign_with_k(const uint8_t *private_key, const uint8_t *message_hash,
unsigned hash_size, uECC_word_t *k, uint8_t *signature,
uECC_Curve curve)
{
uECC_word_t tmp[NUM_ECC_WORDS];
uECC_word_t s[NUM_ECC_WORDS];
uECC_word_t *k2[2] = {tmp, s};
uECC_word_t p[NUM_ECC_WORDS * 2];
uECC_word_t carry;
wordcount_t num_words = curve->num_words;
wordcount_t num_n_words = BITS_TO_WORDS(curve->num_n_bits);
bitcount_t num_n_bits = curve->num_n_bits;
/* Make sure 0 < k < curve_n */
if (uECC_vli_isZero(k, num_words) ||
uECC_vli_cmp(curve->n, k, num_n_words) != 1) {
return 0;
}
carry = regularize_k(k, tmp, s, curve);
EccPoint_mult(p, curve->G, k2[!carry], 0, num_n_bits + 1, curve);
if (uECC_vli_isZero(p, num_words)) {
return 0;
}
/* If an RNG function was specified, get a random number
to prevent side channel analysis of k. */
if (!g_rng_function) {
uECC_vli_clear(tmp, num_n_words);
tmp[0] = 1;
}
else if (!uECC_generate_random_int(tmp, curve->n, num_n_words)) {
return 0;
}
/* Prevent side channel analysis of uECC_vli_modInv() to determine
bits of k / the private key by premultiplying by a random number */
uECC_vli_modMult(k, k, tmp, curve->n, num_n_words); /* k' = rand * k */
uECC_vli_modInv(k, k, curve->n, num_n_words); /* k = 1 / k' */
uECC_vli_modMult(k, k, tmp, curve->n, num_n_words); /* k = 1 / k */
uECC_vli_nativeToBytes(signature, curve->num_bytes, p); /* store r */
/* tmp = d: */
uECC_vli_bytesToNative(tmp, private_key, BITS_TO_BYTES(curve->num_n_bits));
s[num_n_words - 1] = 0;
uECC_vli_set(s, p, num_words);
uECC_vli_modMult(s, tmp, s, curve->n, num_n_words); /* s = r*d */
bits2int(tmp, message_hash, hash_size, curve);
uECC_vli_modAdd(s, tmp, s, curve->n, num_n_words); /* s = e + r*d */
uECC_vli_modMult(s, s, k, curve->n, num_n_words); /* s = (e + r*d) / k */
if (uECC_vli_numBits(s, num_n_words) > (bitcount_t)curve->num_bytes * 8) {
return 0;
}
uECC_vli_nativeToBytes(signature + curve->num_bytes, curve->num_bytes, s);
return 1;
}
int uECC_sign(const uint8_t *private_key, const uint8_t *message_hash,
unsigned hash_size, uint8_t *signature, uECC_Curve curve)
{
uECC_word_t _random[2*NUM_ECC_WORDS];
uECC_word_t k[NUM_ECC_WORDS];
uECC_word_t tries;
for (tries = 0; tries < uECC_RNG_MAX_TRIES; ++tries) {
/* Generating _random uniformly at random: */
uECC_RNG_Function rng_function = uECC_get_rng();
if (!rng_function ||
!rng_function((uint8_t *)_random, 2*NUM_ECC_WORDS*uECC_WORD_SIZE)) {
return 0;
}
/* computing k as modular reduction of _random (see FIPS 186.4 B.5.1): */
uECC_vli_mmod(k, _random, curve->n, BITS_TO_WORDS(curve->num_n_bits));
if (uECC_sign_with_k(private_key, message_hash, hash_size, k, signature,
curve)) {
return 1;
}
}
return 0;
}
static bitcount_t smax(bitcount_t a, bitcount_t b)
{
return (a > b ? a : b);
}
int uECC_verify(const uint8_t *public_key, const uint8_t *message_hash,
unsigned hash_size, const uint8_t *signature,
uECC_Curve curve)
{
uECC_word_t u1[NUM_ECC_WORDS], u2[NUM_ECC_WORDS];
uECC_word_t z[NUM_ECC_WORDS];
uECC_word_t sum[NUM_ECC_WORDS * 2];
uECC_word_t rx[NUM_ECC_WORDS];
uECC_word_t ry[NUM_ECC_WORDS];
uECC_word_t tx[NUM_ECC_WORDS];
uECC_word_t ty[NUM_ECC_WORDS];
uECC_word_t tz[NUM_ECC_WORDS];
const uECC_word_t *points[4];
const uECC_word_t *point;
bitcount_t num_bits;
bitcount_t i;
uECC_word_t _public[NUM_ECC_WORDS * 2];
uECC_word_t r[NUM_ECC_WORDS], s[NUM_ECC_WORDS];
wordcount_t num_words = curve->num_words;
wordcount_t num_n_words = BITS_TO_WORDS(curve->num_n_bits);
rx[num_n_words - 1] = 0;
r[num_n_words - 1] = 0;
s[num_n_words - 1] = 0;
uECC_vli_bytesToNative(_public, public_key, curve->num_bytes);
uECC_vli_bytesToNative(_public + num_words, public_key + curve->num_bytes,
curve->num_bytes);
uECC_vli_bytesToNative(r, signature, curve->num_bytes);
uECC_vli_bytesToNative(s, signature + curve->num_bytes, curve->num_bytes);
/* r, s must not be 0. */
if (uECC_vli_isZero(r, num_words) || uECC_vli_isZero(s, num_words)) {
return 0;
}
/* r, s must be < n. */
if (uECC_vli_cmp_unsafe(curve->n, r, num_n_words) != 1 ||
uECC_vli_cmp_unsafe(curve->n, s, num_n_words) != 1) {
return 0;
}
/* Calculate u1 and u2. */
uECC_vli_modInv(z, s, curve->n, num_n_words); /* z = 1/s */
u1[num_n_words - 1] = 0;
bits2int(u1, message_hash, hash_size, curve);
uECC_vli_modMult(u1, u1, z, curve->n, num_n_words); /* u1 = e/s */
uECC_vli_modMult(u2, r, z, curve->n, num_n_words); /* u2 = r/s */
/* Calculate sum = G + Q. */
uECC_vli_set(sum, _public, num_words);
uECC_vli_set(sum + num_words, _public + num_words, num_words);
uECC_vli_set(tx, curve->G, num_words);
uECC_vli_set(ty, curve->G + num_words, num_words);
uECC_vli_modSub(z, sum, tx, curve->p, num_words); /* z = x2 - x1 */
XYcZ_add(tx, ty, sum, sum + num_words, curve);
uECC_vli_modInv(z, z, curve->p, num_words); /* z = 1/z */
apply_z(sum, sum + num_words, z, curve);
/* Use Shamir's trick to calculate u1*G + u2*Q */
points[0] = 0;
points[1] = curve->G;
points[2] = _public;
points[3] = sum;
num_bits = smax(uECC_vli_numBits(u1, num_n_words),
uECC_vli_numBits(u2, num_n_words));
point = points[(!!uECC_vli_testBit(u1, num_bits - 1)) |
((!!uECC_vli_testBit(u2, num_bits - 1)) << 1)];
uECC_vli_set(rx, point, num_words);
uECC_vli_set(ry, point + num_words, num_words);
uECC_vli_clear(z, num_words);
z[0] = 1;
for (i = num_bits - 2; i >= 0; --i) {
uECC_word_t index;
curve->double_jacobian(rx, ry, z, curve);
index = (!!uECC_vli_testBit(u1, i)) | ((!!uECC_vli_testBit(u2, i)) << 1);
point = points[index];
if (point) {
uECC_vli_set(tx, point, num_words);
uECC_vli_set(ty, point + num_words, num_words);
apply_z(tx, ty, z, curve);
uECC_vli_modSub(tz, rx, tx, curve->p, num_words); /* Z = x2 - x1 */
XYcZ_add(tx, ty, rx, ry, curve);
uECC_vli_modMult_fast(z, z, tz, curve);
}
}
uECC_vli_modInv(z, z, curve->p, num_words); /* Z = 1/Z */
apply_z(rx, ry, z, curve);
/* v = x1 (mod n) */
if (uECC_vli_cmp_unsafe(curve->n, rx, num_n_words) != 1) {
uECC_vli_sub(rx, rx, curve->n, num_n_words);
}
/* Accept only if v == r. */
return (int)(uECC_vli_equal(rx, r, num_words) == 0);
}
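/* Minimal ECDSA usage sketch for uECC_sign()/uECC_verify() above (kept out
 * of the build with #if 0). It assumes a SHA-256 digest of the message has
 * already been computed into 'hash' and that an RNG is installed; the
 * signature is the raw 64-byte r || s encoding used by this file. */
#if 0
static int example_sign_verify(const uint8_t priv[32], const uint8_t pub[64],
			       const uint8_t hash[32])
{
	uint8_t sig[64];
	uECC_Curve curve = uECC_secp256r1();

	if (!uECC_sign(priv, hash, 32, sig, curve)) {
		return -1; /* RNG failure or no usable k after all retries */
	}
	/* uECC_verify() returns 1 when v == r, i.e. the signature matches */
	return uECC_verify(pub, hash, 32, sig, curve) ? 0 : -1;
}
#endif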

View file

@ -0,0 +1,105 @@
/* uECC_platform_specific.c - Implementation of platform specific functions*/
/* Copyright (c) 2014, Kenneth MacKay
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.*/
/*
* Copyright (C) 2017 by Intel Corporation, All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* uECC_platform_specific.c -- Implementation of platform specific functions
*/
#if defined(unix) || defined(__linux__) || defined(__unix__) || \
defined(__unix) | (defined(__APPLE__) && defined(__MACH__)) || \
defined(uECC_POSIX)
/* Some POSIX-like system with /dev/urandom or /dev/random. */
#include <sys/types.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdint.h>
#ifndef O_CLOEXEC
#define O_CLOEXEC 0
#endif
int default_CSPRNG(uint8_t *dest, unsigned int size) {
/* input sanity check: */
if (dest == (uint8_t *) 0 || (size <= 0))
return 0;
int fd = open("/dev/urandom", O_RDONLY | O_CLOEXEC);
if (fd == -1) {
fd = open("/dev/random", O_RDONLY | O_CLOEXEC);
if (fd == -1) {
return 0;
}
}
char *ptr = (char *)dest;
size_t left = (size_t) size;
while (left > 0) {
ssize_t bytes_read = read(fd, ptr, left);
if (bytes_read <= 0) { // read failed
close(fd);
return 0;
}
left -= bytes_read;
ptr += bytes_read;
}
close(fd);
return 1;
}
#endif /* platform */
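/* Usage note (illustrative, kept out of the build with #if 0): upstream
 * TinyCrypt declares uECC_set_rng() in tinycrypt/ecc.h; on POSIX-like targets
 * the default_CSPRNG() above can be registered once at start-up so key
 * generation and signing have an entropy source. The sketch below assumes
 * that declaration is available in this build. */
#if 0
#include <tinycrypt/ecc.h>

static void example_install_rng(void)
{
	uECC_set_rng(&default_CSPRNG);
}
#endif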

View file

@ -0,0 +1,148 @@
/* hmac.c - TinyCrypt implementation of the HMAC algorithm */
/*
* Copyright (C) 2017 by Intel Corporation, All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <tinycrypt/hmac.h>
#include <tinycrypt/constants.h>
#include <tinycrypt/utils.h>
static void rekey(uint8_t *key, const uint8_t *new_key, unsigned int key_size)
{
const uint8_t inner_pad = (uint8_t) 0x36;
const uint8_t outer_pad = (uint8_t) 0x5c;
unsigned int i;
for (i = 0; i < key_size; ++i) {
key[i] = inner_pad ^ new_key[i];
key[i + TC_SHA256_BLOCK_SIZE] = outer_pad ^ new_key[i];
}
for (; i < TC_SHA256_BLOCK_SIZE; ++i) {
key[i] = inner_pad; key[i + TC_SHA256_BLOCK_SIZE] = outer_pad;
}
}
int tc_hmac_set_key(TCHmacState_t ctx, const uint8_t *key,
unsigned int key_size)
{
/* input sanity check: */
if (ctx == (TCHmacState_t) 0 ||
key == (const uint8_t *) 0 ||
key_size == 0) {
return TC_CRYPTO_FAIL;
}
const uint8_t dummy_key[key_size];
struct tc_hmac_state_struct dummy_state;
if (key_size <= TC_SHA256_BLOCK_SIZE) {
/*
* The next three lines consist of dummy calls just to avoid
* certain timing attacks. Without these dummy calls,
* adversaries would be able to learn whether the key_size is
* greater than TC_SHA256_BLOCK_SIZE by measuring the time
* consumed in this process.
*/
(void)tc_sha256_init(&dummy_state.hash_state);
(void)tc_sha256_update(&dummy_state.hash_state,
dummy_key,
key_size);
(void)tc_sha256_final(&dummy_state.key[TC_SHA256_DIGEST_SIZE],
&dummy_state.hash_state);
/* Actual code for when key_size <= TC_SHA256_BLOCK_SIZE: */
rekey(ctx->key, key, key_size);
} else {
(void)tc_sha256_init(&ctx->hash_state);
(void)tc_sha256_update(&ctx->hash_state, key, key_size);
(void)tc_sha256_final(&ctx->key[TC_SHA256_DIGEST_SIZE],
&ctx->hash_state);
rekey(ctx->key,
&ctx->key[TC_SHA256_DIGEST_SIZE],
TC_SHA256_DIGEST_SIZE);
}
return TC_CRYPTO_SUCCESS;
}
int tc_hmac_init(TCHmacState_t ctx)
{
/* input sanity check: */
if (ctx == (TCHmacState_t) 0) {
return TC_CRYPTO_FAIL;
}
(void) tc_sha256_init(&ctx->hash_state);
(void) tc_sha256_update(&ctx->hash_state, ctx->key, TC_SHA256_BLOCK_SIZE);
return TC_CRYPTO_SUCCESS;
}
int tc_hmac_update(TCHmacState_t ctx,
const void *data,
unsigned int data_length)
{
/* input sanity check: */
if (ctx == (TCHmacState_t) 0) {
return TC_CRYPTO_FAIL;
}
(void)tc_sha256_update(&ctx->hash_state, data, data_length);
return TC_CRYPTO_SUCCESS;
}
int tc_hmac_final(uint8_t *tag, unsigned int taglen, TCHmacState_t ctx)
{
/* input sanity check: */
if (tag == (uint8_t *) 0 ||
taglen != TC_SHA256_DIGEST_SIZE ||
ctx == (TCHmacState_t) 0) {
return TC_CRYPTO_FAIL;
}
(void) tc_sha256_final(tag, &ctx->hash_state);
(void)tc_sha256_init(&ctx->hash_state);
(void)tc_sha256_update(&ctx->hash_state,
&ctx->key[TC_SHA256_BLOCK_SIZE],
TC_SHA256_BLOCK_SIZE);
(void)tc_sha256_update(&ctx->hash_state, tag, TC_SHA256_DIGEST_SIZE);
(void)tc_sha256_final(tag, &ctx->hash_state);
/* destroy the current state */
_set(ctx, 0, sizeof(*ctx));
return TC_CRYPTO_SUCCESS;
}
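/* Minimal HMAC-SHA256 usage sketch for the API above (kept out of the build
 * with #if 0). Key, message and tag buffers are caller-provided; the tag is
 * always TC_SHA256_DIGEST_SIZE (32) bytes. Names are illustrative only. */
#if 0
static int example_hmac(const uint8_t *key, unsigned int key_len,
			const uint8_t *msg, unsigned int msg_len,
			uint8_t tag[TC_SHA256_DIGEST_SIZE])
{
	struct tc_hmac_state_struct h;

	if (tc_hmac_set_key(&h, key, key_len) != TC_CRYPTO_SUCCESS ||
	    tc_hmac_init(&h) != TC_CRYPTO_SUCCESS ||
	    tc_hmac_update(&h, msg, msg_len) != TC_CRYPTO_SUCCESS ||
	    tc_hmac_final(tag, TC_SHA256_DIGEST_SIZE, &h) != TC_CRYPTO_SUCCESS) {
		return TC_CRYPTO_FAIL;
	}
	return TC_CRYPTO_SUCCESS;
}
#endif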

View file

@ -0,0 +1,212 @@
/* hmac_prng.c - TinyCrypt implementation of HMAC-PRNG */
/*
* Copyright (C) 2017 by Intel Corporation, All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <tinycrypt/hmac_prng.h>
#include <tinycrypt/hmac.h>
#include <tinycrypt/constants.h>
#include <tinycrypt/utils.h>
/*
* min bytes in the seed string.
* MIN_SLEN*8 must be at least the expected security level.
*/
static const unsigned int MIN_SLEN = 32;
/*
* max bytes in the seed string;
* SP800-90A specifies a maximum of 2^35 bits (i.e., 2^32 bytes).
*/
static const unsigned int MAX_SLEN = UINT32_MAX;
/*
* max bytes in the personalization string;
* SP800-90A specifies a maximum of 2^35 bits (i.e., 2^32 bytes).
*/
static const unsigned int MAX_PLEN = UINT32_MAX;
/*
* max bytes in the additional_info string;
* SP800-90A specifies a maximum of 2^35 bits (i.e., 2^32 bytes).
*/
static const unsigned int MAX_ALEN = UINT32_MAX;
/*
* max number of generates between re-seeds;
* TinyCrypt accepts up to (2^32 - 1) which is the maximal value of
* a 32-bit unsigned int variable, while SP800-90A specifies a maximum of 2^48.
*/
static const unsigned int MAX_GENS = UINT32_MAX;
/*
* maximum bytes per generate call;
* SP800-90A specifies a maximum up to 2^19.
*/
static const unsigned int MAX_OUT = (1 << 19);
/*
* Assumes: prng != NULL, e != NULL, len >= 0.
*/
static void update(TCHmacPrng_t prng, const uint8_t *e, unsigned int len)
{
const uint8_t separator0 = 0x00;
const uint8_t separator1 = 0x01;
/* use current state, e and separator 0 to compute a new prng key: */
(void)tc_hmac_init(&prng->h);
(void)tc_hmac_update(&prng->h, prng->v, sizeof(prng->v));
(void)tc_hmac_update(&prng->h, &separator0, sizeof(separator0));
(void)tc_hmac_update(&prng->h, e, len);
(void)tc_hmac_final(prng->key, sizeof(prng->key), &prng->h);
/* configure the new prng key into the prng's instance of hmac */
(void)tc_hmac_set_key(&prng->h, prng->key, sizeof(prng->key));
/* use the new key to compute a new state variable v */
(void)tc_hmac_init(&prng->h);
(void)tc_hmac_update(&prng->h, prng->v, sizeof(prng->v));
(void)tc_hmac_final(prng->v, sizeof(prng->v), &prng->h);
/* use current state, e and separator 1 to compute a new prng key: */
(void)tc_hmac_init(&prng->h);
(void)tc_hmac_update(&prng->h, prng->v, sizeof(prng->v));
(void)tc_hmac_update(&prng->h, &separator1, sizeof(separator1));
(void)tc_hmac_update(&prng->h, e, len);
(void)tc_hmac_final(prng->key, sizeof(prng->key), &prng->h);
/* configure the new prng key into the prng's instance of hmac */
(void)tc_hmac_set_key(&prng->h, prng->key, sizeof(prng->key));
/* use the new key to compute a new state variable v */
(void)tc_hmac_init(&prng->h);
(void)tc_hmac_update(&prng->h, prng->v, sizeof(prng->v));
(void)tc_hmac_final(prng->v, sizeof(prng->v), &prng->h);
}
int tc_hmac_prng_init(TCHmacPrng_t prng,
const uint8_t *personalization,
unsigned int plen)
{
/* input sanity check: */
if (prng == (TCHmacPrng_t) 0 ||
personalization == (uint8_t *) 0 ||
plen > MAX_PLEN) {
return TC_CRYPTO_FAIL;
}
/* put the generator into a known state: */
_set(prng->key, 0x00, sizeof(prng->key));
_set(prng->v, 0x01, sizeof(prng->v));
tc_hmac_set_key(&prng->h, prng->key, sizeof(prng->key));
/* update assumes SOME key has been configured into HMAC */
update(prng, personalization, plen);
/* force a reseed before allowing tc_hmac_prng_generate to succeed: */
prng->countdown = 0;
return TC_CRYPTO_SUCCESS;
}
int tc_hmac_prng_reseed(TCHmacPrng_t prng,
const uint8_t *seed,
unsigned int seedlen,
const uint8_t *additional_input,
unsigned int additionallen)
{
/* input sanity check: */
if (prng == (TCHmacPrng_t) 0 ||
seed == (const uint8_t *) 0 ||
seedlen < MIN_SLEN ||
seedlen > MAX_SLEN) {
return TC_CRYPTO_FAIL;
}
if (additional_input != (const uint8_t *) 0) {
/*
* Abort if additional_input is provided but has inappropriate
* length
*/
if (additionallen == 0 ||
additionallen > MAX_ALEN) {
return TC_CRYPTO_FAIL;
} else {
/* call update for the seed and additional_input */
update(prng, seed, seedlen);
update(prng, additional_input, additionallen);
}
} else {
/* call update only for the seed */
update(prng, seed, seedlen);
}
/* ... and enable hmac_prng_generate */
prng->countdown = MAX_GENS;
return TC_CRYPTO_SUCCESS;
}
int tc_hmac_prng_generate(uint8_t *out, unsigned int outlen, TCHmacPrng_t prng)
{
unsigned int bufferlen;
/* input sanity check: */
if (out == (uint8_t *) 0 ||
prng == (TCHmacPrng_t) 0 ||
outlen == 0 ||
outlen > MAX_OUT) {
return TC_CRYPTO_FAIL;
} else if (prng->countdown == 0) {
return TC_HMAC_PRNG_RESEED_REQ;
}
prng->countdown--;
while (outlen != 0) {
/* operate HMAC in OFB mode to create "random" outputs */
(void)tc_hmac_init(&prng->h);
(void)tc_hmac_update(&prng->h, prng->v, sizeof(prng->v));
(void)tc_hmac_final(prng->v, sizeof(prng->v), &prng->h);
bufferlen = (TC_SHA256_DIGEST_SIZE > outlen) ?
outlen : TC_SHA256_DIGEST_SIZE;
(void)_copy(out, bufferlen, prng->v, bufferlen);
out += bufferlen;
outlen = (outlen > TC_SHA256_DIGEST_SIZE) ?
(outlen - TC_SHA256_DIGEST_SIZE) : 0;
}
/* block future PRNG compromises from revealing past state */
update(prng, prng->v, TC_SHA256_DIGEST_SIZE);
return TC_CRYPTO_SUCCESS;
}
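/* Minimal HMAC-PRNG usage sketch for the API above (kept out of the build
 * with #if 0). It assumes the caller supplies a seed of at least MIN_SLEN
 * (32) bytes from a real entropy source; the personalization string and
 * function name are illustrative only. */
#if 0
static int example_prng(const uint8_t *seed, unsigned int seed_len,
			uint8_t *out, unsigned int out_len)
{
	struct tc_hmac_prng_struct prng;
	const uint8_t personalization[] = "example-device-id";

	if (tc_hmac_prng_init(&prng, personalization,
			      sizeof(personalization)) != TC_CRYPTO_SUCCESS) {
		return TC_CRYPTO_FAIL;
	}
	/* generate() returns TC_HMAC_PRNG_RESEED_REQ until a reseed happens */
	if (tc_hmac_prng_reseed(&prng, seed, seed_len, 0, 0) != TC_CRYPTO_SUCCESS) {
		return TC_CRYPTO_FAIL;
	}
	return tc_hmac_prng_generate(out, out_len, &prng);
}
#endif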

View file

@ -0,0 +1,217 @@
/* sha256.c - TinyCrypt SHA-256 crypto hash algorithm implementation */
/*
* Copyright (C) 2017 by Intel Corporation, All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <tinycrypt/sha256.h>
#include <tinycrypt/constants.h>
#include <tinycrypt/utils.h>
static void compress(unsigned int *iv, const uint8_t *data);
int tc_sha256_init(TCSha256State_t s)
{
/* input sanity check: */
if (s == (TCSha256State_t) 0) {
return TC_CRYPTO_FAIL;
}
/*
* Setting the initial state values.
* These values correspond to the first 32 bits of the fractional parts
* of the square roots of the first 8 primes: 2, 3, 5, 7, 11, 13, 17
* and 19.
*/
_set((uint8_t *) s, 0x00, sizeof(*s));
s->iv[0] = 0x6a09e667;
s->iv[1] = 0xbb67ae85;
s->iv[2] = 0x3c6ef372;
s->iv[3] = 0xa54ff53a;
s->iv[4] = 0x510e527f;
s->iv[5] = 0x9b05688c;
s->iv[6] = 0x1f83d9ab;
s->iv[7] = 0x5be0cd19;
return TC_CRYPTO_SUCCESS;
}
int tc_sha256_update(TCSha256State_t s, const uint8_t *data, size_t datalen)
{
/* input sanity check: */
if (s == (TCSha256State_t) 0 ||
data == (void *) 0) {
return TC_CRYPTO_FAIL;
} else if (datalen == 0) {
return TC_CRYPTO_SUCCESS;
}
while (datalen-- > 0) {
s->leftover[s->leftover_offset++] = *(data++);
if (s->leftover_offset >= TC_SHA256_BLOCK_SIZE) {
compress(s->iv, s->leftover);
s->leftover_offset = 0;
s->bits_hashed += (TC_SHA256_BLOCK_SIZE << 3);
}
}
return TC_CRYPTO_SUCCESS;
}
int tc_sha256_final(uint8_t *digest, TCSha256State_t s)
{
unsigned int i;
/* input sanity check: */
if (digest == (uint8_t *) 0 ||
s == (TCSha256State_t) 0) {
return TC_CRYPTO_FAIL;
}
s->bits_hashed += (s->leftover_offset << 3);
s->leftover[s->leftover_offset++] = 0x80; /* always room for one byte */
if (s->leftover_offset > (sizeof(s->leftover) - 8)) {
/* there is not room for all the padding in this block */
_set(s->leftover + s->leftover_offset, 0x00,
sizeof(s->leftover) - s->leftover_offset);
compress(s->iv, s->leftover);
s->leftover_offset = 0;
}
/* add the padding and the length in big-endian format */
_set(s->leftover + s->leftover_offset, 0x00,
sizeof(s->leftover) - 8 - s->leftover_offset);
s->leftover[sizeof(s->leftover) - 1] = (uint8_t)(s->bits_hashed);
s->leftover[sizeof(s->leftover) - 2] = (uint8_t)(s->bits_hashed >> 8);
s->leftover[sizeof(s->leftover) - 3] = (uint8_t)(s->bits_hashed >> 16);
s->leftover[sizeof(s->leftover) - 4] = (uint8_t)(s->bits_hashed >> 24);
s->leftover[sizeof(s->leftover) - 5] = (uint8_t)(s->bits_hashed >> 32);
s->leftover[sizeof(s->leftover) - 6] = (uint8_t)(s->bits_hashed >> 40);
s->leftover[sizeof(s->leftover) - 7] = (uint8_t)(s->bits_hashed >> 48);
s->leftover[sizeof(s->leftover) - 8] = (uint8_t)(s->bits_hashed >> 56);
/* hash the padding and length */
compress(s->iv, s->leftover);
/* copy the iv out to digest */
for (i = 0; i < TC_SHA256_STATE_BLOCKS; ++i) {
unsigned int t = *((unsigned int *) &s->iv[i]);
*digest++ = (uint8_t)(t >> 24);
*digest++ = (uint8_t)(t >> 16);
*digest++ = (uint8_t)(t >> 8);
*digest++ = (uint8_t)(t);
}
/* destroy the current state */
_set(s, 0, sizeof(*s));
return TC_CRYPTO_SUCCESS;
}
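/* Minimal SHA-256 usage sketch for the API above (kept out of the build with
 * #if 0): one init/update/final pass over a caller-provided buffer. The
 * function name is illustrative only. */
#if 0
static int example_sha256(const uint8_t *data, size_t len,
			  uint8_t digest[TC_SHA256_DIGEST_SIZE])
{
	struct tc_sha256_state_struct s;

	if (tc_sha256_init(&s) != TC_CRYPTO_SUCCESS ||
	    tc_sha256_update(&s, data, len) != TC_CRYPTO_SUCCESS ||
	    tc_sha256_final(digest, &s) != TC_CRYPTO_SUCCESS) {
		return TC_CRYPTO_FAIL;
	}
	return TC_CRYPTO_SUCCESS;
}
#endif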
/*
* Initializing SHA-256 Hash constant words K.
* These values correspond to the first 32 bits of the fractional parts of the
* cube roots of the first 64 primes between 2 and 311.
*/
static const unsigned int k256[64] = {
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1,
0x923f82a4, 0xab1c5ed5, 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, 0xe49b69c1, 0xefbe4786,
0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147,
0x06ca6351, 0x14292967, 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, 0xa2bfe8a1, 0xa81a664b,
0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a,
0x5b9cca4f, 0x682e6ff3, 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
};
static inline unsigned int ROTR(unsigned int a, unsigned int n)
{
return (((a) >> n) | ((a) << (32 - n)));
}
#define Sigma0(a)(ROTR((a), 2) ^ ROTR((a), 13) ^ ROTR((a), 22))
#define Sigma1(a)(ROTR((a), 6) ^ ROTR((a), 11) ^ ROTR((a), 25))
#define sigma0(a)(ROTR((a), 7) ^ ROTR((a), 18) ^ ((a) >> 3))
#define sigma1(a)(ROTR((a), 17) ^ ROTR((a), 19) ^ ((a) >> 10))
#define Ch(a, b, c)(((a) & (b)) ^ ((~(a)) & (c)))
#define Maj(a, b, c)(((a) & (b)) ^ ((a) & (c)) ^ ((b) & (c)))
static inline unsigned int BigEndian(const uint8_t **c)
{
unsigned int n = 0;
n = (((unsigned int)(*((*c)++))) << 24);
n |= ((unsigned int)(*((*c)++)) << 16);
n |= ((unsigned int)(*((*c)++)) << 8);
n |= ((unsigned int)(*((*c)++)));
return n;
}
static void compress(unsigned int *iv, const uint8_t *data)
{
unsigned int a, b, c, d, e, f, g, h;
unsigned int s0, s1;
unsigned int t1, t2;
unsigned int work_space[16];
unsigned int n;
unsigned int i;
a = iv[0]; b = iv[1]; c = iv[2]; d = iv[3];
e = iv[4]; f = iv[5]; g = iv[6]; h = iv[7];
for (i = 0; i < 16; ++i) {
n = BigEndian(&data);
t1 = work_space[i] = n;
t1 += h + Sigma1(e) + Ch(e, f, g) + k256[i];
t2 = Sigma0(a) + Maj(a, b, c);
h = g; g = f; f = e; e = d + t1;
d = c; c = b; b = a; a = t1 + t2;
}
for ( ; i < 64; ++i) {
s0 = work_space[(i+1)&0x0f];
s0 = sigma0(s0);
s1 = work_space[(i+14)&0x0f];
s1 = sigma1(s1);
t1 = work_space[i&0xf] += s0 + s1 + work_space[(i+9)&0xf];
t1 += h + Sigma1(e) + Ch(e, f, g) + k256[i];
t2 = Sigma0(a) + Maj(a, b, c);
h = g; g = f; f = e; e = d + t1;
d = c; c = b; b = a; a = t1 + t2;
}
iv[0] += a; iv[1] += b; iv[2] += c; iv[3] += d;
iv[4] += e; iv[5] += f; iv[6] += g; iv[7] += h;
}

View file

@ -0,0 +1,74 @@
/* utils.c - TinyCrypt platform-dependent run-time operations */
/*
* Copyright (C) 2017 by Intel Corporation, All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <tinycrypt/utils.h>
#include <tinycrypt/constants.h>
#include <string.h>
#define MASK_TWENTY_SEVEN 0x1b
unsigned int _copy(uint8_t *to, unsigned int to_len,
const uint8_t *from, unsigned int from_len)
{
if (from_len <= to_len) {
(void)memcpy(to, from, from_len);
return from_len;
} else {
return TC_CRYPTO_FAIL;
}
}
void _set(void *to, uint8_t val, unsigned int len)
{
(void)memset(to, val, len);
}
/*
 * Doubles a byte in GF(2^8): left-shift by one, then XOR with 0x1b
 * (MASK_TWENTY_SEVEN) when the high bit was set.
*/
uint8_t _double_byte(uint8_t a)
{
return ((a<<1) ^ ((a>>7) * MASK_TWENTY_SEVEN));
}
int _compare(const uint8_t *a, const uint8_t *b, size_t size)
{
const uint8_t *tempa = a;
const uint8_t *tempb = b;
uint8_t result = 0;
for (unsigned int i = 0; i < size; i++) {
result |= tempa[i] ^ tempb[i];
}
return result;
}
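Because _compare() accumulates the XOR of every byte instead of returning at the first mismatch, its run time depends only on size, which is what makes it suitable for verifying MACs. A small sketch of a caller, with illustrative names:

#include <tinycrypt/utils.h>

/* Returns 1 when two 16-byte tags match, 0 otherwise, without leaking
 * the position of the first mismatch through timing. */
static int tag_matches(const uint8_t expected[16], const uint8_t received[16])
{
	return _compare(expected, received, 16) == 0;
}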

View file

@ -0,0 +1,161 @@
/*
* Copyright (C) 2015-2017 Alibaba Group Holding Limited
*/
#include <zephyr.h>
#include <common/log.h>
#include "errno.h"
static struct k_thread work_q_thread;
static BT_STACK_NOINIT(work_q_stack, CONFIG_BT_WORK_QUEUE_STACK_SIZE);
static struct k_work_q g_work_queue_main;
static void k_work_submit_to_queue(struct k_work_q *work_q,
struct k_work *work)
{
if (!atomic_test_and_set_bit(work->flags, K_WORK_STATE_PENDING)) {
k_fifo_put(&work_q->fifo, work);
}
}
static void work_queue_thread(void *p1, void *p2, void *p3)
{
struct k_work *work;
	UNUSED(p1);
	UNUSED(p2);
	UNUSED(p3);
while (1) {
work = k_fifo_get(&g_work_queue_main.fifo, K_FOREVER);
if (atomic_test_and_clear_bit(work->flags, K_WORK_STATE_PENDING)) {
work->handler(work);
}
k_yield();
}
}
int k_work_q_start(void)
{
k_fifo_init(&g_work_queue_main.fifo);
return k_thread_create(&work_q_thread, work_q_stack,
K_THREAD_STACK_SIZEOF(work_q_stack),
work_queue_thread, NULL, NULL, NULL, CONFIG_BT_WORK_QUEUE_PRIO, 0, K_NO_WAIT);
}
int k_work_init(struct k_work *work, k_work_handler_t handler)
{
ASSERT(work, "work is NULL");
atomic_clear_bit(work->flags, K_WORK_STATE_PENDING);
work->handler = handler;
return 0;
}
void k_work_submit(struct k_work *work)
{
k_work_submit_to_queue(&g_work_queue_main, work);
}
static void work_timeout(void *timer, void *args)
{
struct k_delayed_work *w = (struct k_delayed_work *)args;
/* submit work to workqueue */
k_timer_stop(&w->timer);
k_work_submit_to_queue(w->work_q, &w->work);
/* detach from workqueue, for cancel to return appropriate status */
w->work_q = NULL;
}
void k_delayed_work_init(struct k_delayed_work *work, k_work_handler_t handler)
{
ASSERT(work, "delay work is NULL");
k_work_init(&work->work, handler);
k_timer_init(&work->timer, work_timeout, work);
work->work_q = NULL;
}
static int k_delayed_work_submit_to_queue(struct k_work_q *work_q,
struct k_delayed_work *work,
uint32_t delay)
{
int key = irq_lock();
int err;
/* Work cannot be active in multiple queues */
if (work->work_q && work->work_q != work_q) {
err = -EADDRINUSE;
goto done;
}
/* Cancel if work has been submitted */
if (work->work_q == work_q) {
err = k_delayed_work_cancel(work);
if (err < 0) {
goto done;
}
}
/* Attach workqueue so the timeout callback can submit it */
work->work_q = work_q;
if (!delay) {
		/* Submit work immediately if the requested delay is zero */
k_work_submit_to_queue(work_q, &work->work);
work->work_q = NULL;
} else {
/* Add timeout */
k_timer_start(&work->timer, delay);
}
err = 0;
done:
irq_unlock(key);
return err;
}
int k_delayed_work_submit(struct k_delayed_work *work, uint32_t delay)
{
return k_delayed_work_submit_to_queue(&g_work_queue_main, work, delay);
}
int k_delayed_work_cancel(struct k_delayed_work *work)
{
int err = 0;
int key = irq_lock();
if (atomic_test_bit(work->work.flags, K_WORK_STATE_PENDING)) {
err = -EINPROGRESS;
goto exit;
}
if (!work->work_q) {
err = -EINVAL;
goto exit;
}
k_timer_stop(&work->timer);
work->work_q = NULL;
exit:
irq_unlock(key);
return err;
}
s32_t k_delayed_work_remaining_get(struct k_delayed_work *work)
{
int32_t remain;
k_timer_t *timer;
if (work == NULL) {
return 0;
}
timer = &work->timer;
remain = timer->timeout - (aos_now_ms() - timer->start_ms);
if (remain < 0) {
remain = 0;
}
return remain;
}
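A minimal usage sketch of the work-queue API implemented above; the handler and the 100 ms delay are illustrative, and k_work_q_start() is assumed to have been called during stack initialization:

static struct k_work         io_work;
static struct k_delayed_work retry_work;

static void io_work_handler(struct k_work *work)
{
	/* runs in the work_queue_thread context */
}

static void example_init(void)
{
	k_work_init(&io_work, io_work_handler);
	k_delayed_work_init(&retry_work, io_work_handler);

	k_work_submit(&io_work);                 /* run as soon as possible */
	k_delayed_work_submit(&retry_work, 100); /* run after roughly 100 ms */
}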

View file

@ -0,0 +1,39 @@
zephyr_library()
zephyr_library_sources(
util/mem.c
util/memq.c
util/mayfly.c
util/util.c
hal/nrf5/cntr.c
hal/nrf5/rand.c
hal/nrf5/ecb.c
hal/nrf5/radio.c
ticker/ticker.c
ll_sw/ctrl.c
ll_sw/crypto.c
ll_sw/ll.c
ll_sw/ll_filter.c
hci/hci_driver.c
hci/hci.c
)
zephyr_library_sources_ifdef(CONFIG_BT_BROADCASTER ll_sw/ll_adv.c)
zephyr_library_sources_ifdef(CONFIG_BT_OBSERVER ll_sw/ll_scan.c)
zephyr_library_sources_ifdef(CONFIG_BT_CENTRAL ll_sw/ll_master.c)
zephyr_library_sources_ifdef(CONFIG_BT_CTLR_DTM ll_sw/ll_test.c)
zephyr_library_include_directories(
.
util
hal
ticker
ll
include
)
zephyr_library_compile_options_ifdef(
CONFIG_BT_CTLR_FAST_ENC
-Ofast
)
zephyr_library_link_libraries(subsys__bluetooth)

View file

@ -0,0 +1,494 @@
# Kconfig - Bluetooth Controller configuration options
#
# Copyright (c) 2016-2017 Nordic Semiconductor ASA
#
# SPDX-License-Identifier: Apache-2.0
#
comment "BLE Controller support"
config BT_CTLR
bool "Bluetooth Controller"
select BT_RECV_IS_RX_THREAD
help
Enables support for SoC native controller implementations.
if BT_CTLR
choice
prompt "Bluetooth Link Layer Selection"
default BT_LL_SW
help
Select the Bluetooth Link Layer to compile.
config BT_LL_SW
bool "Use the software-based BLE Link Layer"
help
Use Zephyr software BLE Link Layer implementation.
endchoice
comment "BLE Controller configuration"
config BT_CTLR_HCI_VS_BUILD_INFO
string "Zephyr HCI VS Build Info string"
default ""
depends on BT_HCI_VS_EXT
help
User-defined string that will be returned by the Zephyr VS Read Build
Information command after the Zephyr version and build time. When
setting this to a value different from an empty string, a space
character is required at the beginning to separate it from the
already included information.
config BT_CTLR_DUP_FILTER_LEN
prompt "Number of addresses in the scan duplicate filter"
int
depends on BT_OBSERVER
default 16
help
Set the number of unique BLE addresses that can be filtered as
duplicates while scanning.
config BT_CTLR_RX_BUFFERS
prompt "Number of Rx buffers"
int
	default 6 if BT_HCI_RAW
	default 1
range 1 18
help
	  Set the number of Rx PDUs to be buffered in the controller. At a
	  7.5 ms connection interval on the 2M PHY, a maximum of 18 packets
	  with an L2CAP payload size of 1 byte can be received.
config BT_CTLR_TX_BUFFERS
prompt "Number of Tx buffers"
int
	default 7 if BT_HCI_RAW
	default 2
range 1 19
help
	  Set the number of Tx PDUs to be queued for transmission in the
	  controller. At a 7.5 ms connection interval on the 2M PHY, a maximum
	  of 19 packets can be enqueued, of which 18 packets with an L2CAP
	  payload size of 1 byte can be acknowledged.
config BT_CTLR_TX_BUFFER_SIZE
prompt "Tx buffer size"
int
range 27 16384
default 27
help
Size of the Tx buffers and the value returned in HCI LE Read Buffer
	  Size command response. If this size is greater than the effective PDU
	  size, the controller will perform fragmentation before transmitting
	  the packet on air.
Maximum is set to 16384 due to implementation limitations (use of
u16_t for size/length variables).
config BT_CTLR_COMPANY_ID
prompt "Company Id"
hex
default 0xFFFF
range 0x0000 0xFFFF
help
Set the Company Id that will be used in VERSION_IND PDU.
config BT_CTLR_SUBVERSION_NUMBER
prompt "Subversion Number"
hex
default 0xFFFF
range 0x0000 0xFFFF
help
Set the Subversion Number that will be used in VERSION_IND PDU.
config BT_CTLR_RX_PRIO_STACK_SIZE
int
default 448
config BT_CTLR_RX_PRIO
# Hidden option for Controller's Co-Operative high priority Rx thread
# priority.
int
default 6
comment "BLE Controller features"
if BT_CONN
config BT_CTLR_LE_ENC
bool
depends on !BT_CTLR_DATA_LENGTH_CLEAR && !BT_CTLR_PHY_2M_NRF
default y
# Enable support for Bluetooth v4.0 LE Encryption feature in the
# Controller.
config BT_CTLR_CONN_PARAM_REQ
bool "Connection Parameter Request"
default y
help
Enable support for Bluetooth v4.1 Connection Parameter Request feature
in the Controller.
config BT_CTLR_LE_PING
bool "LE Ping"
default y
help
Enable support for Bluetooth v4.1 LE Ping feature in the Controller.
config BT_CTLR_PRIVACY
bool "LE Controller-based Privacy"
depends on !SOC_SERIES_NRF51X
default y
select BT_RPA
help
Enable support for Bluetooth v4.2 LE Controller-based Privacy feature
in the Controller.
config BT_CTLR_RL_SIZE
prompt "LE Controller-based Privacy Resolving List size"
depends on BT_CTLR_PRIVACY
int
default 8
range 1 8 if SOC_FAMILY_NRF5
help
Set the size of the Resolving List for LE Controller-based Privacy.
On nRF5x-based controllers, the hardware imposes a limit of 8 devices.
config BT_CTLR_EXT_SCAN_FP
bool "LE Extended Scanner Filter Policies"
default y
help
Enable support for Bluetooth v4.2 LE Extended Scanner Filter Policies
in the Controller.
config BT_CTLR_DATA_LENGTH
bool "Data Length Update"
default y if SOC_SERIES_NRF52X
help
Enable support for Bluetooth v4.2 LE Data Length Update procedure in
the Controller.
config BT_CTLR_DATA_LENGTH_MAX
prompt "Maximum data length supported"
depends on BT_CTLR_DATA_LENGTH
int
default 27
range 27 251 if SOC_SERIES_NRF52X || BT_CTLR_DATA_LENGTH_CLEAR
range 27 27
help
Set the maximum data length of PDU supported in the Controller.
config BT_CTLR_PHY
bool "PHY Update"
default y if SOC_SERIES_NRF52X
help
Enable support for Bluetooth 5.0 PHY Update Procedure in the
Controller.
endif # BT_CONN
config BT_CTLR_CHAN_SEL_2
bool "Channel Selection Algorithm #2"
default y
help
Enable support for Bluetooth 5.0 LE Channel Selection Algorithm #2 in
the Controller.
config BT_CTLR_MIN_USED_CHAN
bool "Minimum Number of Used Channels"
default y
help
Enable support for Bluetooth 5.0 Minimum Number of Used Channels
Procedure in the Controller.
config BT_CTLR_ADV_EXT
bool "LE Advertising Extensions"
select BT_CTLR_SCAN_REQ_NOTIFY
select BT_CTLR_CHAN_SEL_2
default y
help
Enable support for Bluetooth 5.0 LE Advertising Extensions in the
Controller.
config BT_CTLR_DTM
bool
help
Enable support for Direct Test Mode in the Controller.
config BT_CTLR_DTM_HCI
bool "Direct Test Mode over HCI"
select BT_CTLR_DTM
help
Enable support for Direct Test Mode over the HCI transport.
config BT_CTLR_ADVANCED_FEATURES
bool "Show advanced features"
help
Makes advanced features visible to controller developers.
menu "Advanced features"
visible if BT_CTLR_ADVANCED_FEATURES
config BT_CTLR_DATA_LENGTH_CLEAR
bool "Data Length Support (Cleartext only)"
depends on BT_CTLR_DATA_LENGTH && SOC_SERIES_NRF51X
help
Enable support for Bluetooth v4.2 LE Data Length Update procedure, up to
251 byte cleartext payloads in the Controller. Encrypted connections
are not supported.
if BT_CTLR_PHY
config BT_CTLR_PHY_2M
bool "2Mbps PHY Support"
depends on !SOC_SERIES_NRF51X || BT_CTLR_PHY_2M_NRF
default y
help
Enable support for Bluetooth 5.0 2Mbps PHY in the Controller.
config BT_CTLR_PHY_2M_NRF
bool "2Mbps Nordic Semiconductor PHY Support (Cleartext only)"
depends on SOC_SERIES_NRF51X
select BT_CTLR_PHY_2M
help
Enable support for Nordic Semiconductor proprietary 2Mbps PHY in the
Controller. Encrypted connections are not supported.
config BT_CTLR_PHY_CODED
bool "Coded PHY Support"
depends on SOC_NRF52840
default y
help
Enable support for Bluetooth 5.0 Coded PHY in the Controller.
endif # BT_CTLR_PHY
config BT_CTLR_WORKER_PRIO
prompt "Radio and Ticker's Worker IRQ priority"
int
range 0 3 if SOC_SERIES_NRF51X
range 0 6 if SOC_SERIES_NRF52X
default 0
help
The interrupt priority for event preparation and radio IRQ. This value
shall be less than or equal to the Ticker's Job priority value.
config BT_CTLR_JOB_PRIO
prompt "Ticker's JOB IRQ priority"
int
range BT_CTLR_WORKER_PRIO 3 if SOC_SERIES_NRF51X
range BT_CTLR_WORKER_PRIO 6 if SOC_SERIES_NRF52X
default 0
help
The interrupt priority for Ticker's Job (SWI4) IRQ. This value shall
be greater than or equal to the Ticker's Worker IRQ priority value.
config BT_CTLR_XTAL_ADVANCED
bool "Advanced event preparation"
default y
help
Enables advanced event preparation offset ahead of radio tx/rx, taking
into account predictive processing time requirements in preparation to
the event, like control procedure handling and CPU execution speeds.
	  The crystal oscillator is kept running between closely spaced
	  consecutive radio events to reduce the number of crystal settling
	  periods and their associated current consumption.
	  This feature maximizes radio utilization in an average role event
	  timeslice when events are closely spaced, by using a reduced offset
	  between preparation and the radio event.
	  If this feature is disabled, the controller uses a constant offset
	  between preparation and the radio event and toggles the crystal
	  oscillator between closely spaced radio events, leading to higher
	  average current due to the increased number of crystal settling
	  periods.
config BT_CTLR_XTAL_THRESHOLD
prompt "Crystal shutdown threshold in uS"
depends on BT_CTLR_XTAL_ADVANCED
int
default 5168
help
	  Configure the optimal delta in microseconds between two consecutive
	  radio events below which the (active clock) crystal will be retained.
	  This value is board dependent. The default of 5168 is based on a crude
	  calculation of nRF51 current consumption versus high-frequency crystal
	  startup time.
config BT_CTLR_SCHED_ADVANCED
bool "Advanced scheduling"
depends on (BT_MAX_CONN != 0)
	default n if BT_PERIPHERAL && !BT_CENTRAL
	default y
help
Enable non-overlapping placement of observer, initiator and master
roles in timespace. Uses window offset in connection updates and uses
connection parameter request in slave role to negotiate
non-overlapping placement with active master roles to avoid slave
roles drifting into active master roles in the local controller.
This feature maximizes the average data transmission amongst active
concurrent master and slave connections while other observer,
initiator, master or slave roles are active in the local controller.
Disabling this feature will lead to overlapping role in timespace
leading to skipped events amongst active roles.
config BT_CTLR_RADIO_ENABLE_FAST
bool "Use tTXEN/RXEN,FAST ramp-up"
depends on SOC_SERIES_NRF52X
default y
help
Enable use of fast radio ramp-up mode.
config BT_CTLR_TIFS_HW
bool "H/w Accelerated tIFS Trx switching"
depends on !BT_CTLR_RADIO_ENABLE_FAST
default y
help
Enable use of hardware accelerated tIFS Trx switching.
if BT_CONN
config BT_CTLR_FAST_ENC
bool "Fast Encryption Setup"
depends on BT_CTLR_LE_ENC
help
Enable connection encryption setup in 3 connection intervals.
Peripheral will respond to Encryption Request with Encryption Response
in the same connection interval, and also, will respond with Start
Encryption Response PDU in the 3rd connection interval, hence
completing encryption setup in 3 connection intervals. Encrypted data
would be transmitted as fast as in 3rd connection interval from the
connection establishment.
Maximum CPU time in Radio ISR will increase if this feature is
selected.
config BT_CTLR_CONN_RSSI
bool "Connection RSSI"
help
Enable connection RSSI measurement.
endif # BT_CONN
config BT_CTLR_ADV_INDICATION
bool "Advertisement indications"
help
Generate events indicating on air advertisement events.
config BT_CTLR_SCAN_REQ_NOTIFY
bool "Scan Request Notifications"
help
Generate events notifying the on air scan requests received.
config BT_CTLR_SCAN_REQ_RSSI
bool "Measure Scan Request RSSI"
depends on BT_CTLR_SCAN_REQ_NOTIFY
help
Measure RSSI of the on air scan requests received.
endmenu
comment "BLE Controller hardware configuration"
menuconfig BT_CTLR_GPIO_PA
bool "Power Amplifier GPIO interface"
depends on !SOC_SERIES_NRF51X
help
Enable GPIO interface to a Power Amplifier. This allows hardware
designs using PA to let the Controller toggle their state based on
radio activity.
if BT_CTLR_GPIO_PA
config BT_CTLR_GPIO_PA_PIN
prompt "Power Amplifier GPIO pin number"
int
help
GPIO Pin number connected to a Power Amplifier.
config BT_CTLR_GPIO_PA_POL_INV
bool "Inverted polarity for the PA pin"
help
Enable inverted polarity (active low) for the PA pin.
config BT_CTLR_GPIO_PA_OFFSET
prompt "Time from PA ON to Tx ready"
int
default 5
range 0 10
help
Time before Tx ready to turn on PA.
endif # BT_CTLR_GPIO_PA
menuconfig BT_CTLR_GPIO_LNA
bool "Low Noise Amplifier GPIO interface"
depends on !SOC_SERIES_NRF51X
help
Enable GPIO interface to a Low Noise Amplifier. This allows hardware
designs using LNAs to let the Controller toggle their state based on
radio activity.
if BT_CTLR_GPIO_LNA
config BT_CTLR_GPIO_LNA_PIN
prompt "Low Noise Amplifier GPIO pin number"
int
help
GPIO Pin number connected to a Low Noise Amplifier.
config BT_CTLR_GPIO_LNA_POL_INV
bool "Inverted polarity for the LNA pin"
help
Enable inverted polarity (active low) for the LNA pin.
config BT_CTLR_GPIO_LNA_OFFSET
prompt "Time from LNA ON to Rx ready"
int
default 5
range 0 10
help
Time before Rx ready to turn on LNA.
endif # BT_CTLR_GPIO_LNA
config BT_CTLR_PA_LNA_GPIOTE_CHAN
# Hidden "nRF5 GPIO PA/LNA GPIOTE Channel"
depends on SOC_FAMILY_NRF5 && (BT_CTLR_GPIO_PA || BT_CTLR_GPIO_LNA)
int
default 3
help
Select the nRF5 GPIOTE channel to use for PA/LNA GPIO feature.
comment "BLE Controller debug configuration"
config BT_CTLR_ASSERT_HANDLER
bool "Bluetooth Controller Assertion Handler"
depends on BT_HCI_RAW
help
This option enables an application-defined sink for the
controller assertion mechanism. This must be defined in
	  application code as void bt_ctlr_assert_handle(char *file, u32_t line)
and will be invoked whenever the controller code encounters
an unrecoverable error.
config BT_CTLR_PROFILE_ISR
bool "Profile radio ISR"
help
	  Turn on measurement of radio ISR latency and CPU usage, and generation
	  of a controller event carrying this profiling data. The event contains
	  the current, minimum and maximum ISR entry latencies, and the current,
	  minimum and maximum ISR CPU use in microseconds.
config BT_CTLR_DEBUG_PINS
bool "Bluetooth Controller Debug Pins"
depends on BOARD_NRF51_PCA10028 || BOARD_NRF52_PCA10040 || BOARD_NRF52840_PCA10056
help
Turn on debug GPIO toggling for the BLE Controller. This is useful
when debugging with a logic analyzer or profiling certain sections of
the code. When enabled, pins P0.16 to P0.25 are taken over exclusively
by the controller and cannot be used outside of it.
endif # BT_CTLR

View file

@ -0,0 +1,46 @@
NAME := controller
$(NAME)_TYPE := kernel
$(NAME)_MBINS_TYPE := kernel
GLOBAL_INCLUDES += .
$(NAME)_INCLUDES += ../common \
. \
hal/nrf5 \
hci \
include \
ll_sw \
ticker \
util
$(NAME)_SOURCES += hal/nrf5/cntr.c \
hal/nrf5/ecb.c \
hal/nrf5/radio.c \
hal/nrf5/rand.c \
hci/hci.c \
hci/hci_driver.c \
ll_sw/ctrl.c \
ll_sw/ll.c \
ll_sw/ll_adv.c \
ll_sw/ll_filter.c \
ll_sw/ll_master.c \
ll_sw/ll_scan.c \
ll_sw/crypto.c \
ticker/ticker.c \
util/mayfly.c \
util/mem.c \
util/memq.c \
util/util.c \
hal/device.c \
../common/irq_manage.c
GLOBAL_DEFINES += CONFIG_BT_OBSERVER
GLOBAL_DEFINES += CONFIG_BT_BROADCASTER
GLOBAL_DEFINES += CONFIG_DEVICE_POWER_MANAGEMENT
GLOBAL_DEFINES += CONFIG_BT_CTLR_LE_ENC
#GLOBAL_DEFINES += CONFIG_BT_CTLR_PHY_2M
GLOBAL_DEFINES += CONFIG_BT_CTLR_MIN_USED_CHAN
GLOBAL_DEFINES += CONFIG_BT_CTLR_PHY
GLOBAL_DEFINES += CONFIG_BT_CTLR_CHAN_SEL_2
GLOBAL_DEFINES += CONFIG_BT_CTLR_CONN_PARAM_REQ

View file

@ -0,0 +1,19 @@
/*
* Copyright (c) 2016 Nordic Semiconductor ASA
* Copyright (c) 2016 Vinayak Kariappa Chettimada
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef _CCM_H_
#define _CCM_H_
struct ccm {
u8_t key[16];
u64_t counter;
u8_t direction:1;
u8_t resv1:7;
u8_t iv[8];
} __packed;
#endif /* _CCM_H_ */

View file

@ -0,0 +1,19 @@
/*
* Copyright (c) 2016 Nordic Semiconductor ASA
* Copyright (c) 2016 Vinayak Kariappa Chettimada
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef _CNTR_H_
#define _CNTR_H_
#include <net/buf.h>
void cntr_init(void);
u32_t cntr_start(void);
u32_t cntr_stop(void);
u32_t cntr_cnt_get(void);
void cntr_cmp_set(u8_t cmp, u32_t value);
#endif /* _CNTR_H_ */

View file

@ -0,0 +1,21 @@
/*
* Copyright (c) 2016 Nordic Semiconductor ASA
* Copyright (c) 2016 Vinayak Kariappa Chettimada
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef _CPU_H_
#define _CPU_H_
#include "nrf.h"
static inline void cpu_sleep(void)
{
__WFE();
__SEV();
__WFE();
}
#endif /* _CPU_H_ */

View file

@ -0,0 +1,23 @@
/*
* Copyright (c) 2016 Nordic Semiconductor ASA
* Copyright (c) 2016 Vinayak Kariappa Chettimada
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef _HAL_DEBUG_H_
#define _HAL_DEBUG_H_
#ifdef CONFIG_BT_CTLR_ASSERT_HANDLER
void bt_ctlr_assert_handle(char *file, u32_t line);
#define LL_ASSERT(cond) if (!(cond)) { \
bt_ctlr_assert_handle(__FILE__, \
__LINE__); \
}
#else
#define LL_ASSERT(cond) BT_ASSERT(cond)
#endif
#include "nrf5/debug.h"
#endif /* _HAL_DEBUG_H_ */
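When CONFIG_BT_CTLR_ASSERT_HANDLER is enabled, the application must provide the sink declared above; otherwise LL_ASSERT falls back to BT_ASSERT. A minimal sketch of such a handler, with an illustrative log call:

#include "common/log.h"

void bt_ctlr_assert_handle(char *file, u32_t line)
{
	BT_ERR("Controller assert at %s:%u", file, line);
	for (;;) {
		/* halt here so the failure location is not lost */
	}
}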

View file

@ -0,0 +1,137 @@
/*
* Copyright (c) 2015-2016 Intel Corporation.
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <errno.h>
#include <string.h>
#include <device.h>
#include <misc/util.h>
#include <atomic.h>
#include "linker-defs.h"
extern struct device __device_init_start[];
extern struct device __device_PRE_KERNEL_1_start[];
extern struct device __device_PRE_KERNEL_2_start[];
extern struct device __device_POST_KERNEL_start[];
extern struct device __device_APPLICATION_start[];
extern struct device __device_init_end[];
static struct device *config_levels[] = {
__device_PRE_KERNEL_1_start,
__device_PRE_KERNEL_2_start,
__device_POST_KERNEL_start,
__device_APPLICATION_start,
/* End marker */
__device_init_end,
};
#ifdef CONFIG_DEVICE_POWER_MANAGEMENT
extern u32_t __device_busy_start[];
extern u32_t __device_busy_end[];
#define DEVICE_BUSY_SIZE (__device_busy_end - __device_busy_start)
#endif
/**
* @brief Execute all the device initialization functions at a given level
*
* @details Invokes the initialization routine for each device object
* created by the DEVICE_INIT() macro using the specified level.
* The linker script places the device objects in memory in the order
* they need to be invoked, with symbols indicating where one level leaves
* off and the next one begins.
*
* @param level init level to run.
*/
void _sys_device_do_config_level(int level)
{
struct device *info;
for (info = config_levels[level]; info < config_levels[level+1];
info++) {
struct device_config *device = info->config;
device->init(info);
_k_object_init(info);
}
}
struct device *device_get_binding(const char *name)
{
struct device *info;
for (info = __device_init_start; info != __device_init_end; info++) {
if (!info->driver_api) {
continue;
}
if (name == info->config->name) {
return info;
}
if (!strcmp(name, info->config->name)) {
return info;
}
}
return NULL;
}
#ifdef CONFIG_DEVICE_POWER_MANAGEMENT
int device_pm_control_nop(struct device *unused_device,
u32_t unused_ctrl_command, void *unused_context)
{
return 0;
}
void device_list_get(struct device **device_list, int *device_count)
{
*device_list = __device_init_start;
*device_count = __device_init_end - __device_init_start;
}
int device_any_busy_check(void)
{
int i = 0;
for (i = 0; i < DEVICE_BUSY_SIZE; i++) {
if (__device_busy_start[i] != 0) {
return -EBUSY;
}
}
return 0;
}
int device_busy_check(struct device *chk_dev)
{
if (atomic_test_bit((const atomic_t *)__device_busy_start,
(chk_dev - __device_init_start))) {
return -EBUSY;
}
return 0;
}
#endif
void device_busy_set(struct device *busy_dev)
{
#ifdef CONFIG_DEVICE_POWER_MANAGEMENT
atomic_set_bit((atomic_t *) __device_busy_start,
(busy_dev - __device_init_start));
#else
ARG_UNUSED(busy_dev);
#endif
}
void device_busy_clear(struct device *busy_dev)
{
#ifdef CONFIG_DEVICE_POWER_MANAGEMENT
atomic_clear_bit((atomic_t *) __device_busy_start,
(busy_dev - __device_init_start));
#else
ARG_UNUSED(busy_dev);
#endif
}
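device_get_binding() above first tries a cheap pointer comparison (drivers commonly share the same string literal) before falling back to strcmp(). A hedged usage sketch; the device name is illustrative:

#include <device.h>
#include <errno.h>

static struct device *uart_dev;

static int example_setup(void)
{
	uart_dev = device_get_binding("UART_0"); /* name used when the device was defined */
	if (!uart_dev) {
		return -ENODEV;                  /* driver not present or not yet initialized */
	}
	return 0;
}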

View file

@ -0,0 +1,34 @@
/*
* Copyright (c) 2016 Nordic Semiconductor ASA
* Copyright (c) 2016 Vinayak Kariappa Chettimada
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef _ECB_H_
#define _ECB_H_
typedef void (*ecb_fp) (u32_t status, u8_t *cipher_be, void *context);
struct ecb {
u8_t in_key_be[16];
u8_t in_clear_text_be[16];
u8_t out_cipher_text_be[16];
/* if not null reverse copy into in_key_be */
u8_t *in_key_le;
/* if not null reverse copy into in_clear_text_be */
u8_t *in_clear_text_le;
ecb_fp fp_ecb;
void *context;
};
void ecb_encrypt_be(u8_t const *const key_be, u8_t const *const clear_text_be,
u8_t * const cipher_text_be);
void ecb_encrypt(u8_t const *const key_le, u8_t const *const clear_text_le,
u8_t * const cipher_text_le, u8_t * const cipher_text_be);
u32_t ecb_encrypt_nonblocking(struct ecb *ecb);
void isr_ecb(void *param);
u32_t ecb_ut(void);
#endif /* _ECB_H_ */

View file

@ -0,0 +1,62 @@
/*
* Copyright (c) 2016 Nordic Semiconductor ASA
* Copyright (c) 2016 Vinayak Kariappa Chettimada
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <soc.h>
#include "hal/cntr.h"
#include "common/log.h"
#include "hal/debug.h"
#ifndef NRF_RTC
#define NRF_RTC NRF_RTC0
#endif
static u8_t _refcount;
void cntr_init(void)
{
NRF_RTC->PRESCALER = 0;
NRF_RTC->EVTENSET = (RTC_EVTENSET_COMPARE0_Msk |
RTC_EVTENSET_COMPARE1_Msk);
NRF_RTC->INTENSET = (RTC_INTENSET_COMPARE0_Msk |
RTC_INTENSET_COMPARE1_Msk);
}
u32_t cntr_start(void)
{
if (_refcount++) {
return 1;
}
NRF_RTC->TASKS_START = 1;
return 0;
}
u32_t cntr_stop(void)
{
LL_ASSERT(_refcount);
if (--_refcount) {
return 1;
}
NRF_RTC->TASKS_STOP = 1;
return 0;
}
u32_t cntr_cnt_get(void)
{
return NRF_RTC->COUNTER;
}
void cntr_cmp_set(u8_t cmp, u32_t value)
{
NRF_RTC->CC[cmp] = value;
}

View file

@ -0,0 +1,251 @@
/*
* Copyright (c) 2016-2017 Nordic Semiconductor ASA
* Copyright (c) 2016 Vinayak Kariappa Chettimada
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef _DEBUG_H_
#define _DEBUG_H_
#include "nrf.h"
#ifdef CONFIG_BT_CTLR_DEBUG_PINS
#if defined(CONFIG_BOARD_NRF52840_PCA10056)
#define DEBUG_PORT NRF_P1
#define DEBUG_PIN0 BIT(1)
#define DEBUG_PIN1 BIT(2)
#define DEBUG_PIN2 BIT(3)
#define DEBUG_PIN3 BIT(4)
#define DEBUG_PIN4 BIT(5)
#define DEBUG_PIN5 BIT(6)
#define DEBUG_PIN6 BIT(7)
#define DEBUG_PIN7 BIT(8)
#define DEBUG_PIN8 BIT(10)
#define DEBUG_PIN9 BIT(11)
#elif defined(CONFIG_BOARD_NRF52_PCA10040)
#define DEBUG_PORT NRF_GPIO
#define DEBUG_PIN0 BIT(11)
#define DEBUG_PIN1 BIT(12)
#define DEBUG_PIN2 BIT(13)
#define DEBUG_PIN3 BIT(14)
#define DEBUG_PIN4 BIT(15)
#define DEBUG_PIN5 BIT(16)
#define DEBUG_PIN6 BIT(17)
#define DEBUG_PIN7 BIT(18)
#define DEBUG_PIN8 BIT(19)
#define DEBUG_PIN9 BIT(20)
#elif defined(CONFIG_BOARD_NRF51_PCA10028)
#define DEBUG_PORT NRF_GPIO
#define DEBUG_PIN0 BIT(12)
#define DEBUG_PIN1 BIT(13)
#define DEBUG_PIN2 BIT(14)
#define DEBUG_PIN3 BIT(15)
#define DEBUG_PIN4 BIT(16)
#define DEBUG_PIN5 BIT(17)
#define DEBUG_PIN6 BIT(18)
#define DEBUG_PIN7 BIT(19)
#define DEBUG_PIN8 BIT(20)
#define DEBUG_PIN9 BIT(23)
#else
#error BT_CTLR_DEBUG_PINS not supported on this board.
#endif
#define DEBUG_PIN_MASK (DEBUG_PIN0 | DEBUG_PIN1 | DEBUG_PIN2 | DEBUG_PIN3 | \
DEBUG_PIN4 | DEBUG_PIN5 | DEBUG_PIN6 | DEBUG_PIN7 | \
DEBUG_PIN8 | DEBUG_PIN9)
#define DEBUG_CLOSE_MASK (DEBUG_PIN3 | DEBUG_PIN4 | DEBUG_PIN5 | DEBUG_PIN6)
/* below are some interesting macros referenced by controller
* which can be defined to SoC's GPIO toggle to observe/debug the
* controller's runtime behavior.
*/
#define DEBUG_INIT() do { \
DEBUG_PORT->DIRSET = DEBUG_PIN_MASK; \
DEBUG_PORT->OUTCLR = DEBUG_PIN_MASK; } \
while (0)
#define DEBUG_CPU_SLEEP(flag) do { \
if (flag) { \
DEBUG_PORT->OUTSET = DEBUG_PIN0; \
DEBUG_PORT->OUTCLR = DEBUG_PIN0; } \
else { \
DEBUG_PORT->OUTCLR = DEBUG_PIN0; \
DEBUG_PORT->OUTSET = DEBUG_PIN0; } \
} while (0)
#define DEBUG_TICKER_ISR(flag) do { \
if (flag) { \
DEBUG_PORT->OUTCLR = DEBUG_PIN1; \
DEBUG_PORT->OUTSET = DEBUG_PIN1; } \
else { \
DEBUG_PORT->OUTSET = DEBUG_PIN1; \
DEBUG_PORT->OUTCLR = DEBUG_PIN1; } \
} while (0)
#define DEBUG_TICKER_TASK(flag) do { \
if (flag) { \
DEBUG_PORT->OUTCLR = DEBUG_PIN1; \
DEBUG_PORT->OUTSET = DEBUG_PIN1; } \
else { \
DEBUG_PORT->OUTSET = DEBUG_PIN1; \
DEBUG_PORT->OUTCLR = DEBUG_PIN1; } \
} while (0)
#define DEBUG_TICKER_JOB(flag) do { \
if (flag) { \
DEBUG_PORT->OUTCLR = DEBUG_PIN2; \
DEBUG_PORT->OUTSET = DEBUG_PIN2; } \
else { \
DEBUG_PORT->OUTSET = DEBUG_PIN2; \
DEBUG_PORT->OUTCLR = DEBUG_PIN2; } \
} while (0)
#define DEBUG_RADIO_ISR(flag) do { \
if (flag) { \
DEBUG_PORT->OUTCLR = DEBUG_PIN7; \
DEBUG_PORT->OUTSET = DEBUG_PIN7; } \
else { \
DEBUG_PORT->OUTSET = DEBUG_PIN7; \
DEBUG_PORT->OUTCLR = DEBUG_PIN7; } \
} while (0)
#define DEBUG_RADIO_XTAL(flag) do { \
if (flag) { \
DEBUG_PORT->OUTCLR = DEBUG_PIN8; \
DEBUG_PORT->OUTSET = DEBUG_PIN8; } \
else { \
DEBUG_PORT->OUTSET = DEBUG_PIN8; \
DEBUG_PORT->OUTCLR = DEBUG_PIN8; } \
} while (0)
#define DEBUG_RADIO_ACTIVE(flag) do { \
if (flag) { \
DEBUG_PORT->OUTCLR = DEBUG_PIN9; \
DEBUG_PORT->OUTSET = DEBUG_PIN9; } \
else { \
DEBUG_PORT->OUTSET = DEBUG_PIN9; \
DEBUG_PORT->OUTCLR = DEBUG_PIN9; } \
} while (0)
#define DEBUG_RADIO_CLOSE(flag) do { \
if (flag) { \
DEBUG_PORT->OUTCLR = 0x00000000; \
DEBUG_PORT->OUTSET = 0x00000000; } \
else { \
DEBUG_PORT->OUTCLR = DEBUG_CLOSE_MASK; } \
} while (0)
#define DEBUG_RADIO_PREPARE_A(flag) do { \
if (flag) { \
DEBUG_PORT->OUTCLR = DEBUG_PIN3; \
DEBUG_PORT->OUTSET = DEBUG_PIN3; } \
else { \
DEBUG_PORT->OUTCLR = DEBUG_PIN3; \
DEBUG_PORT->OUTSET = DEBUG_PIN3; } \
} while (0)
#define DEBUG_RADIO_START_A(flag) do { \
if (flag) { \
DEBUG_PORT->OUTCLR = DEBUG_PIN3; \
DEBUG_PORT->OUTSET = DEBUG_PIN3; } \
else { \
DEBUG_PORT->OUTCLR = DEBUG_PIN3; \
DEBUG_PORT->OUTSET = DEBUG_PIN3; } \
} while (0)
#define DEBUG_RADIO_PREPARE_S(flag) do { \
if (flag) { \
DEBUG_PORT->OUTCLR = DEBUG_PIN4; \
DEBUG_PORT->OUTSET = DEBUG_PIN4; } \
else { \
DEBUG_PORT->OUTCLR = DEBUG_PIN4; \
DEBUG_PORT->OUTSET = DEBUG_PIN4; } \
} while (0)
#define DEBUG_RADIO_START_S(flag) do { \
if (flag) { \
DEBUG_PORT->OUTCLR = DEBUG_PIN4; \
DEBUG_PORT->OUTSET = DEBUG_PIN4; } \
else { \
DEBUG_PORT->OUTCLR = DEBUG_PIN4; \
DEBUG_PORT->OUTSET = DEBUG_PIN4; } \
} while (0)
#define DEBUG_RADIO_PREPARE_O(flag) do { \
if (flag) { \
DEBUG_PORT->OUTCLR = DEBUG_PIN5; \
DEBUG_PORT->OUTSET = DEBUG_PIN5; } \
else { \
DEBUG_PORT->OUTCLR = DEBUG_PIN5; \
DEBUG_PORT->OUTSET = DEBUG_PIN5; } \
} while (0)
#define DEBUG_RADIO_START_O(flag) do { \
if (flag) { \
DEBUG_PORT->OUTCLR = DEBUG_PIN5; \
DEBUG_PORT->OUTSET = DEBUG_PIN5; } \
else { \
DEBUG_PORT->OUTCLR = DEBUG_PIN5; \
DEBUG_PORT->OUTSET = DEBUG_PIN5; } \
} while (0)
#define DEBUG_RADIO_PREPARE_M(flag) do { \
if (flag) { \
DEBUG_PORT->OUTCLR = DEBUG_PIN6; \
DEBUG_PORT->OUTSET = DEBUG_PIN6; } \
else { \
DEBUG_PORT->OUTCLR = DEBUG_PIN6; \
DEBUG_PORT->OUTSET = DEBUG_PIN6; } \
} while (0)
#define DEBUG_RADIO_START_M(flag) do { \
if (flag) { \
DEBUG_PORT->OUTCLR = DEBUG_PIN6; \
DEBUG_PORT->OUTSET = DEBUG_PIN6; } \
else { \
DEBUG_PORT->OUTCLR = DEBUG_PIN6; \
DEBUG_PORT->OUTSET = DEBUG_PIN6; } \
} while (0)
#else
#define DEBUG_INIT()
#define DEBUG_CPU_SLEEP(flag)
#define DEBUG_TICKER_ISR(flag)
#define DEBUG_TICKER_TASK(flag)
#define DEBUG_TICKER_JOB(flag)
#define DEBUG_RADIO_ISR(flag)
#define DEBUG_RADIO_HCTO(flag)
#define DEBUG_RADIO_XTAL(flag)
#define DEBUG_RADIO_ACTIVE(flag)
#define DEBUG_RADIO_CLOSE(flag)
#define DEBUG_RADIO_PREPARE_A(flag)
#define DEBUG_RADIO_START_A(flag)
#define DEBUG_RADIO_PREPARE_S(flag)
#define DEBUG_RADIO_START_S(flag)
#define DEBUG_RADIO_PREPARE_O(flag)
#define DEBUG_RADIO_START_O(flag)
#define DEBUG_RADIO_PREPARE_M(flag)
#define DEBUG_RADIO_START_M(flag)
#endif /* CONFIG_BT_CTLR_DEBUG_PINS */
#endif /* _DEBUG_H_ */

View file

@ -0,0 +1,197 @@
/*
* Copyright (c) 2016 Nordic Semiconductor ASA
* Copyright (c) 2016 Vinayak Kariappa Chettimada
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <string.h>
#include <soc.h>
//#include <arch/arm/cortex_m/cmsis.h>
#include "util/mem.h"
#include "hal/ecb.h"
#include "nrf.h"
#include "common/log.h"
#include "hal/debug.h"
struct ecb_param {
u8_t key[16];
u8_t clear_text[16];
u8_t cipher_text[16];
} __packed;
static void do_ecb(struct ecb_param *ecb)
{
do {
NRF_ECB->TASKS_STOPECB = 1;
NRF_ECB->ECBDATAPTR = (u32_t)ecb;
NRF_ECB->EVENTS_ENDECB = 0;
NRF_ECB->EVENTS_ERRORECB = 0;
NRF_ECB->TASKS_STARTECB = 1;
while ((NRF_ECB->EVENTS_ENDECB == 0) &&
(NRF_ECB->EVENTS_ERRORECB == 0) &&
(NRF_ECB->ECBDATAPTR != 0)) {
/*__WFE();*/
}
NRF_ECB->TASKS_STOPECB = 1;
} while ((NRF_ECB->EVENTS_ERRORECB != 0) || (NRF_ECB->ECBDATAPTR == 0));
NRF_ECB->ECBDATAPTR = 0;
}
void ecb_encrypt_be(u8_t const *const key_be, u8_t const *const clear_text_be,
u8_t * const cipher_text_be)
{
struct ecb_param ecb;
memcpy(&ecb.key[0], key_be, sizeof(ecb.key));
memcpy(&ecb.clear_text[0], clear_text_be, sizeof(ecb.clear_text));
do_ecb(&ecb);
memcpy(cipher_text_be, &ecb.cipher_text[0], sizeof(ecb.cipher_text));
}
void ecb_encrypt(u8_t const *const key_le, u8_t const *const clear_text_le,
u8_t * const cipher_text_le, u8_t * const cipher_text_be)
{
struct ecb_param ecb;
mem_rcopy(&ecb.key[0], key_le, sizeof(ecb.key));
mem_rcopy(&ecb.clear_text[0], clear_text_le, sizeof(ecb.clear_text));
do_ecb(&ecb);
if (cipher_text_le) {
mem_rcopy(cipher_text_le, &ecb.cipher_text[0],
sizeof(ecb.cipher_text));
}
if (cipher_text_be) {
memcpy(cipher_text_be, &ecb.cipher_text[0],
sizeof(ecb.cipher_text));
}
}
u32_t ecb_encrypt_nonblocking(struct ecb *ecb)
{
/* prepare to be used in a BE AES h/w */
if (ecb->in_key_le) {
mem_rcopy(&ecb->in_key_be[0], ecb->in_key_le,
sizeof(ecb->in_key_be));
}
if (ecb->in_clear_text_le) {
mem_rcopy(&ecb->in_clear_text_be[0],
ecb->in_clear_text_le,
sizeof(ecb->in_clear_text_be));
}
/* setup the encryption h/w */
NRF_ECB->ECBDATAPTR = (u32_t)ecb;
NRF_ECB->EVENTS_ENDECB = 0;
NRF_ECB->EVENTS_ERRORECB = 0;
NRF_ECB->INTENSET = ECB_INTENSET_ERRORECB_Msk | ECB_INTENSET_ENDECB_Msk;
/* enable interrupt */
NVIC_ClearPendingIRQ(ECB_IRQn);
irq_enable(ECB_IRQn);
/* start the encryption h/w */
NRF_ECB->TASKS_STARTECB = 1;
return 0;
}
static void ecb_cleanup(void)
{
/* stop h/w */
NRF_ECB->TASKS_STOPECB = 1;
/* cleanup interrupt */
irq_disable(ECB_IRQn);
}
void isr_ecb(void *param)
{
ARG_UNUSED(param);
if (NRF_ECB->EVENTS_ERRORECB) {
struct ecb *ecb = (struct ecb *)NRF_ECB->ECBDATAPTR;
ecb_cleanup();
ecb->fp_ecb(1, NULL, ecb->context);
}
else if (NRF_ECB->EVENTS_ENDECB) {
struct ecb *ecb = (struct ecb *)NRF_ECB->ECBDATAPTR;
ecb_cleanup();
ecb->fp_ecb(0, &ecb->out_cipher_text_be[0],
ecb->context);
}
else {
LL_ASSERT(0);
}
}
struct ecb_ut_context {
u32_t volatile done;
u32_t status;
u8_t cipher_text[16];
};
static void ecb_cb(u32_t status, u8_t *cipher_be, void *context)
{
struct ecb_ut_context *ecb_ut_context =
(struct ecb_ut_context *)context;
ecb_ut_context->done = 1;
ecb_ut_context->status = status;
if (!status) {
mem_rcopy(ecb_ut_context->cipher_text, cipher_be,
sizeof(ecb_ut_context->cipher_text));
}
}
u32_t ecb_ut(void)
{
u8_t key[16] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88,
0x99, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
u8_t clear_text[16] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
0x88, 0x99, 0x00, 0x11, 0x22, 0x33, 0x44,
0x55 };
u8_t cipher_text[16];
u32_t status = 0;
struct ecb ecb;
struct ecb_ut_context context;
ecb_encrypt(key, clear_text, cipher_text, NULL);
context.done = 0;
ecb.in_key_le = key;
ecb.in_clear_text_le = clear_text;
ecb.fp_ecb = ecb_cb;
ecb.context = &context;
status = ecb_encrypt_nonblocking(&ecb);
do {
__WFE();
__SEV();
__WFE();
} while (!context.done);
if (context.status != 0) {
return context.status;
}
status = memcmp(cipher_text, context.cipher_text, sizeof(cipher_text));
if (status) {
return status;
}
return status;
}

File diff suppressed because it is too large

View file

@ -0,0 +1,209 @@
/*
* Copyright (c) 2016 Nordic Semiconductor ASA
* Copyright (c) 2016 Vinayak Kariappa Chettimada
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <soc.h>
#include <debug.h>
#include "hal/rand.h"
#include <errno.h>
#include "common/log.h"
#include "hal/debug.h"
struct rand {
u8_t count;
u8_t threshold;
u8_t first;
u8_t last;
u8_t rand[1];
};
static struct rand *rng_isr;
static struct rand *rng_thr;
static void init(struct rand **rng, u8_t *context, u8_t len, u8_t threshold)
{
struct rand *p;
LL_ASSERT(len > (offsetof(struct rand, rand) + threshold));
*rng = (struct rand *)context;
p = *rng;
p->count = len - offsetof(struct rand, rand);
p->threshold = threshold;
p->first = p->last = 0;
if (!rng_isr || !rng_thr) {
NRF_RNG->CONFIG = RNG_CONFIG_DERCEN_Msk;
NRF_RNG->EVENTS_VALRDY = 0;
NRF_RNG->INTENSET = RNG_INTENSET_VALRDY_Msk;
NRF_RNG->TASKS_START = 1;
}
}
void rand_init(u8_t *context, u8_t context_len, u8_t threshold)
{
init(&rng_thr, context, context_len, threshold);
}
void rand_isr_init(u8_t *context, u8_t context_len, u8_t threshold)
{
init(&rng_isr, context, context_len, threshold);
}
static size_t get(struct rand *rng, size_t octets, u8_t *rand)
{
u8_t first, last, remaining;
LL_ASSERT(rng);
first = rng->first;
last = rng->last;
if (first <= last) {
u8_t *d, *s;
u8_t avail;
d = &rand[octets];
s = &rng->rand[first];
avail = last - first;
if (octets < avail) {
remaining = avail - octets;
avail = octets;
} else {
remaining = 0;
}
first += avail;
octets -= avail;
while (avail--) {
*(--d) = *s++;
}
rng->first = first;
} else {
u8_t *d, *s;
u8_t avail;
d = &rand[octets];
s = &rng->rand[first];
avail = rng->count - first;
if (octets < avail) {
remaining = avail + last - octets;
avail = octets;
first += avail;
} else {
remaining = last;
first = 0;
}
octets -= avail;
while (avail--) {
*(--d) = *s++;
}
if (octets && last) {
s = &rng->rand[0];
if (octets < last) {
remaining = last - octets;
last = octets;
} else {
remaining = 0;
}
first = last;
octets -= last;
while (last--) {
*(--d) = *s++;
}
}
rng->first = first;
}
if (remaining < rng->threshold) {
NRF_RNG->TASKS_START = 1;
}
return octets;
}
size_t rand_get(size_t octets, u8_t *rand)
{
return get(rng_thr, octets, rand);
}
size_t rand_isr_get(size_t octets, u8_t *rand)
{
return get(rng_isr, octets, rand);
}
static int isr(struct rand *rng, bool store)
{
u8_t last;
if (!rng) {
return -ENOBUFS;
}
last = rng->last + 1;
if (last == rng->count) {
last = 0;
}
if (last == rng->first) {
		/* this condition should not happen, but due to a possible race a
		 * new value could be generated before the NRF_RNG task is stopped.
*/
return -ENOBUFS;
}
if (!store) {
return -EBUSY;
}
rng->rand[rng->last] = NRF_RNG->VALUE;
rng->last = last;
last = rng->last + 1;
if (last == rng->count) {
last = 0;
}
if (last == rng->first) {
return 0;
}
return -EBUSY;
}
void isr_rand(void *param)
{
ARG_UNUSED(param);
if (NRF_RNG->EVENTS_VALRDY) {
int ret;
ret = isr(rng_isr, true);
if (ret != -EBUSY) {
ret = isr(rng_thr, (ret == -ENOBUFS));
}
NRF_RNG->EVENTS_VALRDY = 0;
if (ret != -EBUSY) {
NRF_RNG->TASKS_STOP = 1;
}
}
}
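The driver above is fed by isr_rand() from the RNG interrupt; callers first hand it a backing buffer with rand_init()/rand_isr_init() and then drain bytes with rand_get()/rand_isr_get(). A minimal sketch based on the implementation above; the pool size, threshold and busy-wait are illustrative:

static u8_t rng_pool[12];   /* struct rand header plus ring storage */

static void rng_example(void)
{
	u8_t nonce[4];

	/* hand the thread-context pool to the driver; refill below 4 bytes */
	rand_init(rng_pool, sizeof(rng_pool), 4);

	/* rand_get() returns the number of octets it could not provide yet;
	 * 0 means the request was fully served from the ring buffer. */
	while (rand_get(sizeof(nonce), nonce)) {
		/* wait for isr_rand() to top the pool up */
	}
}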

View file

@ -0,0 +1,101 @@
/*
* Copyright (c) 2016 Nordic Semiconductor ASA
* Copyright (c) 2016 Vinayak Kariappa Chettimada
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef _RADIO_H_
#define _RADIO_H_
typedef void (*radio_isr_fp) (void);
void isr_radio(void);
void radio_isr_set(radio_isr_fp fp_radio_isr);
void radio_setup(void);
void radio_reset(void);
void radio_phy_set(u8_t phy, u8_t flags);
void radio_tx_power_set(u32_t power);
void radio_freq_chan_set(u32_t chan);
void radio_whiten_iv_set(u32_t iv);
void radio_aa_set(u8_t *aa);
void radio_pkt_configure(u8_t bits_len, u8_t max_len, u8_t flags);
void radio_pkt_rx_set(void *rx_packet);
void radio_pkt_tx_set(void *tx_packet);
u32_t radio_tx_ready_delay_get(u8_t phy, u8_t flags);
u32_t radio_tx_chain_delay_get(u8_t phy, u8_t flags);
u32_t radio_rx_ready_delay_get(u8_t phy);
u32_t radio_rx_chain_delay_get(u8_t phy, u8_t flags);
void radio_rx_enable(void);
void radio_tx_enable(void);
void radio_disable(void);
void radio_status_reset(void);
u32_t radio_is_ready(void);
u32_t radio_is_done(void);
u32_t radio_has_disabled(void);
u32_t radio_is_idle(void);
void radio_crc_configure(u32_t polynomial, u32_t iv);
u32_t radio_crc_is_valid(void);
void *radio_pkt_empty_get(void);
void *radio_pkt_scratch_get(void);
void radio_switch_complete_and_rx(u8_t phy_rx);
void radio_switch_complete_and_tx(u8_t phy_rx, u8_t flags_rx, u8_t phy_tx,
u8_t flags_tx);
void radio_switch_complete_and_disable(void);
void radio_rssi_measure(void);
u32_t radio_rssi_get(void);
void radio_rssi_status_reset(void);
u32_t radio_rssi_is_ready(void);
void radio_filter_configure(u8_t bitmask_enable, u8_t bitmask_addr_type,
u8_t *bdaddr);
void radio_filter_disable(void);
void radio_filter_status_reset(void);
u32_t radio_filter_has_match(void);
u32_t radio_filter_match_get(void);
void radio_bc_configure(u32_t n);
void radio_bc_status_reset(void);
u32_t radio_bc_has_match(void);
void radio_tmr_status_reset(void);
void radio_tmr_tifs_set(u32_t tifs);
u32_t radio_tmr_start(u8_t trx, u32_t ticks_start, u32_t remainder);
void radio_tmr_start_us(u8_t trx, u32_t us);
u32_t radio_tmr_start_now(u8_t trx);
void radio_tmr_stop(void);
void radio_tmr_hcto_configure(u32_t hcto);
void radio_tmr_aa_capture(void);
u32_t radio_tmr_aa_get(void);
void radio_tmr_aa_save(u32_t aa);
u32_t radio_tmr_aa_restore(void);
u32_t radio_tmr_ready_get(void);
void radio_tmr_end_capture(void);
u32_t radio_tmr_end_get(void);
void radio_tmr_sample(void);
u32_t radio_tmr_sample_get(void);
void radio_gpio_pa_setup(void);
void radio_gpio_lna_setup(void);
void radio_gpio_lna_on(void);
void radio_gpio_lna_off(void);
void radio_gpio_pa_lna_enable(u32_t trx_us);
void radio_gpio_pa_lna_disable(void);
void *radio_ccm_rx_pkt_set(struct ccm *ccm, u8_t phy, void *pkt);
void *radio_ccm_tx_pkt_set(struct ccm *ccm, void *pkt);
u32_t radio_ccm_is_done(void);
u32_t radio_ccm_mic_is_valid(void);
void radio_ar_configure(u32_t nirk, void *irk);
u32_t radio_ar_match_get(void);
void radio_ar_status_reset(void);
u32_t radio_ar_has_match(void);
#endif

View file

@ -0,0 +1,20 @@
/*
* Copyright (c) 2016 Nordic Semiconductor ASA
* Copyright (c) 2016 Vinayak Kariappa Chettimada
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef _RAND_H_
#define _RAND_H_
#include <net/buf.h>
void rand_init(u8_t *context, u8_t context_len, u8_t threshold);
void rand_isr_init(u8_t *context, u8_t context_len, u8_t threshold);
size_t rand_get(size_t octets, u8_t *rand);
size_t rand_isr_get(size_t octets, u8_t *rand);
void isr_rand(void *param);
#endif /* _RAND_H_ */

File diff suppressed because it is too large

View file

@ -0,0 +1,479 @@
/*
* Copyright (c) 2016 Nordic Semiconductor ASA
* Copyright (c) 2016 Vinayak Kariappa Chettimada
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <errno.h>
#include <stddef.h>
#include <string.h>
#include <zephyr.h>
#include <soc.h>
//#include <init.h>
#include <device.h>
#include <clock_control.h>
#include <atomic.h>
#include <misc/util.h>
#include <misc/stack.h>
#include <misc/byteorder.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>
#include <drivers/bluetooth/hci_driver.h>
#ifdef CONFIG_CLOCK_CONTROL_NRF5
#include <drivers/clock_control/nrf5_clock_control.h>
#endif
#define BT_DBG_ENABLED IS_ENABLED(CONFIG_BT_DEBUG_HCI_DRIVER)
#include "common/log.h"
#include "util/util.h"
#include "hal/ccm.h"
#include "hal/radio.h"
#include "ll_sw/pdu.h"
#include "ll_sw/ctrl.h"
#include "ll.h"
#include "hci_internal.h"
#include "init.h"
#include "hal/debug.h"
#define NODE_RX(_node) CONTAINER_OF(_node, struct radio_pdu_node_rx, \
hdr.onion.node)
static K_SEM_DEFINE(sem_prio_recv, 0, UINT_MAX);
static K_FIFO_DEFINE(recv_fifo);
struct k_thread prio_recv_thread_data;
static BT_STACK_NOINIT(prio_recv_thread_stack,
CONFIG_BT_CTLR_RX_PRIO_STACK_SIZE);
struct k_thread recv_thread_data;
static BT_STACK_NOINIT(recv_thread_stack, CONFIG_BT_RX_STACK_SIZE);
#if defined(CONFIG_INIT_STACKS)
static u32_t prio_ts;
static u32_t rx_ts;
#endif
#if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
static struct k_poll_signal hbuf_signal =
K_POLL_SIGNAL_INITIALIZER(hbuf_signal);
static sys_slist_t hbuf_pend;
static s32_t hbuf_count;
#endif
static void prio_recv_thread(void *p1, void *p2, void *p3)
{
while (1) {
struct radio_pdu_node_rx *node_rx;
u8_t num_cmplt;
u16_t handle;
while ((num_cmplt = radio_rx_get(&node_rx, &handle))) {
#if defined(CONFIG_BT_CONN)
struct net_buf *buf;
buf = bt_buf_get_rx(BT_BUF_EVT, K_FOREVER);
hci_num_cmplt_encode(buf, handle, num_cmplt);
BT_DBG("Num Complete: 0x%04x:%u", handle, num_cmplt);
bt_recv_prio(buf);
k_yield();
#endif
}
if (node_rx) {
radio_rx_dequeue();
BT_DBG("RX node enqueue");
k_fifo_put(&recv_fifo, node_rx);
continue;
}
BT_DBG("sem take...");
k_sem_take(&sem_prio_recv, K_FOREVER);
BT_DBG("sem taken");
#if defined(CONFIG_INIT_STACKS)
if (k_uptime_get_32() - prio_ts > K_SECONDS(5)) {
STACK_ANALYZE("prio recv thread stack",
prio_recv_thread_stack);
prio_ts = k_uptime_get_32();
}
#endif
}
}
static inline struct net_buf *encode_node(struct radio_pdu_node_rx *node_rx,
s8_t class)
{
struct net_buf *buf = NULL;
/* Check if we need to generate an HCI event or ACL data */
switch (class) {
case HCI_CLASS_EVT_DISCARDABLE:
case HCI_CLASS_EVT_REQUIRED:
case HCI_CLASS_EVT_CONNECTION:
if (class == HCI_CLASS_EVT_DISCARDABLE) {
buf = bt_buf_get_rx(BT_BUF_EVT, K_NO_WAIT);
} else {
buf = bt_buf_get_rx(BT_BUF_EVT, K_FOREVER);
}
if (buf) {
hci_evt_encode(node_rx, buf);
}
break;
#if defined(CONFIG_BT_CONN)
case HCI_CLASS_ACL_DATA:
/* generate ACL data */
buf = bt_buf_get_rx(BT_BUF_ACL_IN, K_FOREVER);
hci_acl_encode(node_rx, buf);
break;
#endif
default:
LL_ASSERT(0);
break;
}
radio_rx_fc_set(node_rx->hdr.handle, 0);
node_rx->hdr.onion.next = 0;
radio_rx_mem_release(&node_rx);
return buf;
}
static inline struct net_buf *process_node(struct radio_pdu_node_rx *node_rx)
{
s8_t class = hci_get_class(node_rx);
struct net_buf *buf = NULL;
#if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
if (hbuf_count != -1) {
bool pend = !sys_slist_is_empty(&hbuf_pend);
/* controller to host flow control enabled */
switch (class) {
case HCI_CLASS_EVT_DISCARDABLE:
case HCI_CLASS_EVT_REQUIRED:
break;
case HCI_CLASS_EVT_CONNECTION:
/* for conn-related events, only pend is relevant */
hbuf_count = 1;
/* fallthrough */
case HCI_CLASS_ACL_DATA:
if (pend || !hbuf_count) {
sys_slist_append(&hbuf_pend,
&node_rx->hdr.onion.node);
BT_DBG("FC: Queuing item: %d", class);
return NULL;
}
break;
default:
LL_ASSERT(0);
break;
}
}
#endif
/* process regular node from radio */
buf = encode_node(node_rx, class);
return buf;
}
#if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
static inline struct net_buf *process_hbuf(struct radio_pdu_node_rx *n)
{
/* shadow total count in case of preemption */
struct radio_pdu_node_rx *node_rx = NULL;
s32_t hbuf_total = hci_hbuf_total;
struct net_buf *buf = NULL;
sys_snode_t *node = NULL;
s8_t class;
int reset;
reset = atomic_test_and_clear_bit(&hci_state_mask, HCI_STATE_BIT_RESET);
if (reset) {
/* flush queue, no need to free, the LL has already done it */
sys_slist_init(&hbuf_pend);
}
if (hbuf_total <= 0) {
hbuf_count = -1;
return NULL;
}
/* available host buffers */
hbuf_count = hbuf_total - (hci_hbuf_sent - hci_hbuf_acked);
/* host acked ACL packets, try to dequeue from hbuf */
node = sys_slist_peek_head(&hbuf_pend);
if (!node) {
return NULL;
}
/* Return early if this iteration already has a node to process */
node_rx = NODE_RX(node);
class = hci_get_class(node_rx);
if (n) {
if (class == HCI_CLASS_EVT_CONNECTION ||
(class == HCI_CLASS_ACL_DATA && hbuf_count)) {
/* node to process later, schedule an iteration */
BT_DBG("FC: signalling");
k_poll_signal(&hbuf_signal, 0x0);
}
return NULL;
}
switch (class) {
case HCI_CLASS_EVT_CONNECTION:
BT_DBG("FC: dequeueing event");
(void) sys_slist_get(&hbuf_pend);
break;
case HCI_CLASS_ACL_DATA:
if (hbuf_count) {
BT_DBG("FC: dequeueing ACL data");
(void) sys_slist_get(&hbuf_pend);
} else {
/* no buffers, HCI will signal */
node = NULL;
}
break;
case HCI_CLASS_EVT_DISCARDABLE:
case HCI_CLASS_EVT_REQUIRED:
default:
LL_ASSERT(0);
break;
}
if (node) {
buf = encode_node(node_rx, class);
/* Update host buffers after encoding */
hbuf_count = hbuf_total - (hci_hbuf_sent - hci_hbuf_acked);
/* next node */
node = sys_slist_peek_head(&hbuf_pend);
if (node) {
node_rx = NODE_RX(node);
class = hci_get_class(node_rx);
if (class == HCI_CLASS_EVT_CONNECTION ||
(class == HCI_CLASS_ACL_DATA && hbuf_count)) {
/* more to process, schedule an
* iteration
*/
BT_DBG("FC: signalling");
k_poll_signal(&hbuf_signal, 0x0);
}
}
}
return buf;
}
#endif
static void recv_thread(void *p1, void *p2, void *p3)
{
#if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
/* @todo: check if the events structure really needs to be static */
static struct k_poll_event events[2] = {
K_POLL_EVENT_STATIC_INITIALIZER(K_POLL_TYPE_SIGNAL,
K_POLL_MODE_NOTIFY_ONLY,
&hbuf_signal, 0),
K_POLL_EVENT_STATIC_INITIALIZER(K_POLL_TYPE_FIFO_DATA_AVAILABLE,
K_POLL_MODE_NOTIFY_ONLY,
&recv_fifo, 0),
};
#endif
while (1) {
struct radio_pdu_node_rx *node_rx = NULL;
struct net_buf *buf = NULL;
BT_DBG("blocking");
#if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
int err;
err = k_poll(events, 2, K_FOREVER);
LL_ASSERT(err == 0);
if (events[0].state == K_POLL_STATE_SIGNALED) {
events[0].signal->signaled = 0;
} else if (events[1].state ==
K_POLL_STATE_FIFO_DATA_AVAILABLE) {
node_rx = k_fifo_get(events[1].fifo, 0);
}
events[0].state = K_POLL_STATE_NOT_READY;
events[1].state = K_POLL_STATE_NOT_READY;
/* process host buffers first if any */
buf = process_hbuf(node_rx);
#else
node_rx = k_fifo_get(&recv_fifo, K_FOREVER);
#endif
BT_DBG("unblocked");
if (node_rx && !buf) {
/* process regular node from radio */
buf = process_node(node_rx);
}
if (buf) {
if (buf->len) {
BT_DBG("Packet in: type:%u len:%u",
bt_buf_get_type(buf), buf->len);
bt_recv(buf);
} else {
net_buf_unref(buf);
}
}
k_yield();
#if defined(CONFIG_INIT_STACKS)
if (k_uptime_get_32() - rx_ts > K_SECONDS(5)) {
STACK_ANALYZE("recv thread stack", recv_thread_stack);
rx_ts = k_uptime_get_32();
}
#endif
}
}
static int cmd_handle(struct net_buf *buf)
{
struct net_buf *evt;
evt = hci_cmd_handle(buf);
if (evt) {
BT_DBG("Replying with event of %u bytes", evt->len);
bt_recv_prio(evt);
}
return 0;
}
#if defined(CONFIG_BT_CONN)
static int acl_handle(struct net_buf *buf)
{
struct net_buf *evt;
int err;
err = hci_acl_handle(buf, &evt);
if (evt) {
BT_DBG("Replying with event of %u bytes", evt->len);
bt_recv_prio(evt);
}
return err;
}
#endif /* CONFIG_BT_CONN */
static int hci_driver_send(struct net_buf *buf)
{
u8_t type;
int err;
BT_DBG("enter");
if (!buf->len) {
BT_ERR("Empty HCI packet");
return -EINVAL;
}
type = bt_buf_get_type(buf);
switch (type) {
#if defined(CONFIG_BT_CONN)
case BT_BUF_ACL_OUT:
err = acl_handle(buf);
break;
#endif /* CONFIG_BT_CONN */
case BT_BUF_CMD:
err = cmd_handle(buf);
break;
default:
BT_ERR("Unknown HCI type %u", type);
return -EINVAL;
}
if (!err) {
net_buf_unref(buf);
}
BT_DBG("exit: %d", err);
return err;
}
static int hci_driver_open(void)
{
u32_t err;
DEBUG_INIT();
k_sem_init(&sem_prio_recv, 0, UINT_MAX);
err = ll_init(&sem_prio_recv);
if (err) {
BT_ERR("LL initialization failed: %u", err);
return err;
}
#if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
hci_init(&hbuf_signal);
#else
hci_init(NULL);
#endif
k_fifo_init(&recv_fifo);
k_thread_create(&prio_recv_thread_data, prio_recv_thread_stack,
K_THREAD_STACK_SIZEOF(prio_recv_thread_stack),
prio_recv_thread, NULL, NULL, NULL,
K_PRIO_COOP(CONFIG_BT_CTLR_RX_PRIO), 0, K_NO_WAIT);
k_thread_create(&recv_thread_data, recv_thread_stack,
K_THREAD_STACK_SIZEOF(recv_thread_stack),
recv_thread, NULL, NULL, NULL,
K_PRIO_COOP(CONFIG_BT_RX_PRIO), 0, K_NO_WAIT);
BT_DBG("Success.");
return 0;
}
static const struct bt_hci_driver drv = {
.name = "Controller",
.bus = BT_HCI_DRIVER_BUS_VIRTUAL,
.open = hci_driver_open,
.send = hci_driver_send,
};
static int _hci_driver_init(struct device *unused)
{
ARG_UNUSED(unused);
bt_hci_driver_register(&drv);
return 0;
}
int hci_driver_init(void)
{
bt_hci_driver_register(&drv);
return 0;
}
SYS_INIT(_hci_driver_init, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE);

View file

@ -0,0 +1,46 @@
/*
* Copyright (c) 2016 Nordic Semiconductor ASA
* Copyright (c) 2016 Vinayak Kariappa Chettimada
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef _HCI_CONTROLLER_H_
#define _HCI_CONTROLLER_H_
#if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
extern s32_t hci_hbuf_total;
extern u32_t hci_hbuf_sent;
extern u32_t hci_hbuf_acked;
extern atomic_t hci_state_mask;
#define HCI_STATE_BIT_RESET 0
#endif
#define HCI_CLASS_EVT_REQUIRED 0
#define HCI_CLASS_EVT_DISCARDABLE 1
#define HCI_CLASS_EVT_CONNECTION 2
#define HCI_CLASS_ACL_DATA 3
#if defined(CONFIG_SOC_FAMILY_NRF5)
#define BT_HCI_VS_HW_PLAT BT_HCI_VS_HW_PLAT_NORDIC
#if defined(CONFIG_SOC_SERIES_NRF51X)
#define BT_HCI_VS_HW_VAR BT_HCI_VS_HW_VAR_NORDIC_NRF51X
#elif defined(CONFIG_SOC_SERIES_NRF52X)
#define BT_HCI_VS_HW_VAR BT_HCI_VS_HW_VAR_NORDIC_NRF52X
#endif
#else
#define BT_HCI_VS_HW_PLAT 0
#define BT_HCI_VS_HW_VAR 0
#endif /* CONFIG_SOC_FAMILY_NRF5 */
void hci_init(struct k_poll_signal *signal_host_buf);
struct net_buf *hci_cmd_handle(struct net_buf *cmd);
void hci_evt_encode(struct radio_pdu_node_rx *node_rx, struct net_buf *buf);
s8_t hci_get_class(struct radio_pdu_node_rx *node_rx);
#if defined(CONFIG_BT_CONN)
int hci_acl_handle(struct net_buf *acl, struct net_buf **evt);
void hci_acl_encode(struct radio_pdu_node_rx *node_rx, struct net_buf *buf);
void hci_num_cmplt_encode(struct net_buf *buf, u16_t handle, u8_t num);
#endif
#endif /* _HCI_CONTROLLER_H_ */

View file

@ -0,0 +1,235 @@
/*
* Copyright (c) 2013-2014, Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
/*
* DESCRIPTION
* Platform independent, commonly used macros and defines related to linker
* script.
*
* This file may be included by:
* - Linker script files: for linker section declarations
* - C files: for external declaration of address or size of linker section
* - Assembly files: for external declaration of address or size of linker
* section
*/
#ifndef _LINKERDEFS_H
#define _LINKERDEFS_H
#include <toolchain.h>
#include <linker/sections.h>
#include <misc/util.h>
/* include platform dependent linker-defs */
#ifdef CONFIG_X86
/* Nothing yet to include */
#elif defined(CONFIG_ARM)
/* Nothing yet to include */
#elif defined(CONFIG_ARC)
/* Nothing yet to include */
#elif defined(CONFIG_NIOS2)
/* Nothing yet to include */
#elif defined(CONFIG_RISCV32)
/* Nothing yet to include */
#elif defined(CONFIG_XTENSA)
/* Nothing yet to include */
#else
#error Arch not supported.
#endif
#ifdef _LINKER
/*
* Space for storing per device busy bitmap. Since we do not know beforehand
* the number of devices, we go through the below mechanism to allocate the
* required space.
*/
#ifdef CONFIG_DEVICE_POWER_MANAGEMENT
#define DEVICE_COUNT \
((__device_init_end - __device_init_start) / _DEVICE_STRUCT_SIZE)
#define DEV_BUSY_SZ (((DEVICE_COUNT + 31) / 32) * 4)
#define DEVICE_BUSY_BITFIELD() \
FILL(0x00) ; \
__device_busy_start = .; \
. = . + DEV_BUSY_SZ; \
__device_busy_end = .;
#else
#define DEVICE_BUSY_BITFIELD()
#endif
/*
* generate a symbol to mark the start of the device initialization objects for
* the specified level, then link all of those objects (sorted by priority);
* ensure the objects aren't discarded if there is no direct reference to them
*/
#define DEVICE_INIT_LEVEL(level) \
__device_##level##_start = .; \
KEEP(*(SORT(.init_##level[0-9]))); \
KEEP(*(SORT(.init_##level[1-9][0-9]))); \
/*
* link in device initialization objects for all devices that are automatically
* initialized by the kernel; the objects are sorted in the order they will be
* initialized (i.e. ordered by level, sorted by priority within a level)
*/
#define DEVICE_INIT_SECTIONS() \
__device_init_start = .; \
DEVICE_INIT_LEVEL(PRE_KERNEL_1) \
DEVICE_INIT_LEVEL(PRE_KERNEL_2) \
DEVICE_INIT_LEVEL(POST_KERNEL) \
DEVICE_INIT_LEVEL(APPLICATION) \
__device_init_end = .; \
DEVICE_BUSY_BITFIELD() \
/* define a section for undefined device initialization levels */
#define DEVICE_INIT_UNDEFINED_SECTION() \
KEEP(*(SORT(.init_[_A-Z0-9]*))) \
/*
* link in shell initialization objects for all modules that use the shell;
* their shell commands are automatically initialized by the kernel.
*/
#define SHELL_INIT_SECTIONS() \
__shell_cmd_start = .; \
KEEP(*(".shell_*")); \
__shell_cmd_end = .;
#ifdef CONFIG_APPLICATION_MEMORY
#ifndef NUM_KERNEL_OBJECT_FILES
#error "Expected NUM_KERNEL_OBJECT_FILES to be defined"
#elif NUM_KERNEL_OBJECT_FILES > 19
#error "Max supported kernel objects is 19."
/* TODO: Using the preprocessor to do this was a mistake. Rewrite to
scale better. e.g. by aggregating the kernel objects into two
archives like KBuild did.*/
#endif
#define X(i, j) KERNEL_OBJECT_FILE_##i (j)
#define Y(i, j) *KERNEL_OBJECT_FILE_##i
#define KERNEL_INPUT_SECTION(sect) \
UTIL_LISTIFY(NUM_KERNEL_OBJECT_FILES, X, sect)
#define APP_INPUT_SECTION(sect) \
*(EXCLUDE_FILE (UTIL_LISTIFY(NUM_KERNEL_OBJECT_FILES, Y, ~)) sect)
#else
#define KERNEL_INPUT_SECTION(sect) *(sect)
#define APP_INPUT_SECTION(sect) *(sect)
#endif
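/*
 * Illustrative expansion (assumption, not verified against the UTIL_LISTIFY
 * implementation): with NUM_KERNEL_OBJECT_FILES = 2,
 * KERNEL_INPUT_SECTION(.data) expands to
 * KERNEL_OBJECT_FILE_0 (.data) KERNEL_OBJECT_FILE_1 (.data), while
 * APP_INPUT_SECTION(.data) matches .data from every other object file, so
 * kernel and application globals land in separate memory regions.
 */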
#ifdef CONFIG_X86 /* LINKER FILES: defines used by linker script */
/* Should be moved to linker-common-defs.h */
#if defined(CONFIG_XIP)
#define ROMABLE_REGION ROM
#else
#define ROMABLE_REGION RAM
#endif
#endif
/*
* If image is loaded via kexec Linux system call, then program
* headers need to be page aligned.
* This can be done by section page aligning.
*/
#ifdef CONFIG_BOOTLOADER_KEXEC
#define KEXEC_PGALIGN_PAD(x) . = ALIGN(x);
#else
#define KEXEC_PGALIGN_PAD(x)
#endif
#elif defined(_ASMLANGUAGE)
/* Assembly FILES: declaration defined by the linker script */
GDATA(__bss_start)
GDATA(__bss_num_words)
#ifdef CONFIG_XIP
GDATA(__data_rom_start)
GDATA(__data_ram_start)
GDATA(__data_num_words)
#endif
#else /* ! _ASMLANGUAGE */
#include <zephyr/types.h>
#ifdef CONFIG_APPLICATION_MEMORY
/* Memory owned by the application. Start and end will be aligned for memory
* management/protection hardware for the target architecture.
* The policy for this memory will be to configure all of it as user thread
* accessible. It consists of all non-kernel globals.
*/
extern char __app_ram_start[];
extern char __app_ram_end[];
extern char __app_ram_size[];
#endif
/* Memory owned by the kernel. Start and end will be aligned for memory
* management/protection hardware for the target architecture.
*
* Consists of all kernel-side globals, all kernel objects, all thread stacks,
* and all currently unused RAM. If CONFIG_APPLICATION_MEMORY is not enabled,
* has all globals, not just kernel side.
*
* Except for the stack of the currently executing thread, none of this memory
* is normally accessible to user threads unless specifically granted at
* runtime.
*/
extern char __kernel_ram_start[];
extern char __kernel_ram_end[];
extern char __kernel_ram_size[];
/* Used by _bss_zero or arch-specific implementation */
extern char __bss_start[];
extern char __bss_end[];
#ifdef CONFIG_APPLICATION_MEMORY
extern char __app_bss_start[];
extern char __app_bss_end[];
#endif
/* Used by _data_copy() or arch-specific implementation */
#ifdef CONFIG_XIP
extern char __data_rom_start[];
extern char __data_ram_start[];
extern char __data_ram_end[];
#ifdef CONFIG_APPLICATION_MEMORY
extern char __app_data_rom_start[];
extern char __app_data_ram_start[];
extern char __app_data_ram_end[];
#endif /* CONFIG_APPLICATION_MEMORY */
#endif /* CONFIG_XIP */
/* Includes text and rodata */
extern char _image_rom_start[];
extern char _image_rom_end[];
extern char _image_rom_size[];
/* data, bss, noinit */
extern char _image_ram_start[];
extern char _image_ram_end[];
extern char _image_text_start[];
extern char _image_text_end[];
extern char _image_rodata_start[];
extern char _image_rodata_end[];
extern char _vector_start[];
extern char _vector_end[];
/* end address of image, used by newlib for the heap */
extern char _end[];
#endif /* ! _ASMLANGUAGE */
#endif /* _LINKERDEFS_H */

View file

@@ -0,0 +1,98 @@
/*
* Copyright (c) 2016 Nordic Semiconductor ASA
* Copyright (c) 2016 Vinayak Kariappa Chettimada
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef _LL_H_
#define _LL_H_
int ll_init(struct k_sem *sem_rx);
void ll_reset(void);
void ll_radio_state_abort(void);
u32_t ll_radio_state_is_idle(void);
u8_t *ll_addr_get(u8_t addr_type, u8_t *p_bdaddr);
void ll_addr_set(u8_t addr_type, u8_t const *const p_bdaddr);
#if defined(CONFIG_BT_CTLR_ADV_EXT)
u32_t ll_adv_params_set(u8_t handle, u16_t evt_prop, u32_t interval,
u8_t adv_type, u8_t own_addr_type,
u8_t direct_addr_type, u8_t const *const direct_addr,
u8_t chan_map, u8_t filter_policy, u8_t *tx_pwr,
u8_t phy_p, u8_t skip, u8_t phy_s, u8_t sid, u8_t sreq);
#else /* !CONFIG_BT_CTLR_ADV_EXT */
u32_t ll_adv_params_set(u16_t interval, u8_t adv_type,
u8_t own_addr_type, u8_t direct_addr_type,
u8_t const *const direct_addr, u8_t chan_map,
u8_t filter_policy);
#endif /* !CONFIG_BT_CTLR_ADV_EXT */
void ll_adv_data_set(u8_t len, u8_t const *const p_data);
void ll_scan_data_set(u8_t len, u8_t const *const p_data);
u32_t ll_adv_enable(u8_t enable);
u32_t ll_scan_params_set(u8_t type, u16_t interval, u16_t window,
u8_t own_addr_type, u8_t filter_policy);
u32_t ll_scan_enable(u8_t enable);
u32_t ll_wl_size_get(void);
u32_t ll_wl_clear(void);
u32_t ll_wl_add(bt_addr_le_t *addr);
u32_t ll_wl_remove(bt_addr_le_t *addr);
void ll_rl_id_addr_get(u8_t rl_idx, u8_t *id_addr_type, u8_t *id_addr);
u32_t ll_rl_size_get(void);
u32_t ll_rl_clear(void);
u32_t ll_rl_add(bt_addr_le_t *id_addr, const u8_t pirk[16],
const u8_t lirk[16]);
u32_t ll_rl_remove(bt_addr_le_t *id_addr);
void ll_rl_crpa_set(u8_t id_addr_type, u8_t *id_addr, u8_t rl_idx, u8_t *crpa);
u32_t ll_rl_crpa_get(bt_addr_le_t *id_addr, bt_addr_t *crpa);
u32_t ll_rl_lrpa_get(bt_addr_le_t *id_addr, bt_addr_t *lrpa);
u32_t ll_rl_enable(u8_t enable);
void ll_rl_timeout_set(u16_t timeout);
u32_t ll_priv_mode_set(bt_addr_le_t *id_addr, u8_t mode);
u32_t ll_create_connection(u16_t scan_interval, u16_t scan_window,
u8_t filter_policy, u8_t peer_addr_type,
u8_t *p_peer_addr, u8_t own_addr_type,
u16_t interval, u16_t latency,
u16_t timeout);
u32_t ll_connect_disable(void);
u32_t ll_conn_update(u16_t handle, u8_t cmd, u8_t status,
u16_t interval, u16_t latency,
u16_t timeout);
u32_t ll_chm_update(u8_t *chm);
u32_t ll_chm_get(u16_t handle, u8_t *chm);
u32_t ll_enc_req_send(u16_t handle, u8_t *rand, u8_t *ediv,
u8_t *ltk);
u32_t ll_start_enc_req_send(u16_t handle, u8_t err_code,
u8_t const *const ltk);
u32_t ll_feature_req_send(u16_t handle);
u32_t ll_version_ind_send(u16_t handle);
u32_t ll_terminate_ind_send(u16_t handle, u8_t reason);
void ll_timeslice_ticker_id_get(u8_t * const instance_index, u8_t * const user_id);
u32_t ll_rssi_get(u16_t handle, u8_t *rssi);
u32_t ll_tx_power_level_get(u16_t handle, u8_t type, s8_t *tx_power_level);
void ll_tx_power_get(s8_t *min, s8_t *max);
#if defined(CONFIG_BT_CTLR_LE_PING)
u32_t ll_apto_get(u16_t handle, u16_t *apto);
u32_t ll_apto_set(u16_t handle, u16_t apto);
#endif /* CONFIG_BT_CTLR_LE_PING */
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
u32_t ll_length_req_send(u16_t handle, u16_t tx_octets, u16_t tx_time);
void ll_length_default_get(u16_t *max_tx_octets, u16_t *max_tx_time);
u32_t ll_length_default_set(u16_t max_tx_octets, u16_t max_tx_time);
void ll_length_max_get(u16_t *max_tx_octets, u16_t *max_tx_time,
u16_t *max_rx_octets, u16_t *max_rx_time);
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
#if defined(CONFIG_BT_CTLR_PHY)
u32_t ll_phy_get(u16_t handle, u8_t *tx, u8_t *rx);
u32_t ll_phy_default_set(u8_t tx, u8_t rx);
u32_t ll_phy_req_send(u16_t handle, u8_t tx, u8_t flags, u8_t rx);
#endif /* CONFIG_BT_CTLR_PHY */
#endif /* _LL_H_ */

View file

@@ -0,0 +1,59 @@
/*
* Copyright (c) 2016-2017 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <soc.h>
#include "zephyr.h"
#define BT_DBG_ENABLED IS_ENABLED(CONFIG_BT_DEBUG_HCI_DRIVER)
#include "common/log.h"
#include "hal/cpu.h"
#include "hal/rand.h"
#include "hal/ecb.h"
#include "kport.h"
K_MUTEX_DEFINE(mutex_rand);
struct k_mutex mutex_rand;
int bt_rand_c(void *buf, size_t len)
{
while (len) {
k_mutex_lock(&mutex_rand, K_FOREVER);
len = rand_get(len, buf);
k_mutex_unlock(&mutex_rand);
if (len) {
cpu_sleep();
}
}
return 0;
}
int bt_encrypt_le_c(const u8_t key[16], const u8_t plaintext[16],
u8_t enc_data[16])
{
BT_DBG("key %s plaintext %s", bt_hex(key, 16), bt_hex(plaintext, 16));
ecb_encrypt(key, plaintext, enc_data, NULL);
BT_DBG("enc_data %s", bt_hex(enc_data, 16));
return 0;
}
int bt_encrypt_be_c(const u8_t key[16], const u8_t plaintext[16],
u8_t enc_data[16])
{
BT_DBG("key %s plaintext %s", bt_hex(key, 16), bt_hex(plaintext, 16));
ecb_encrypt_be(key, plaintext, enc_data);
BT_DBG("enc_data %s", bt_hex(enc_data, 16));
return 0;
}
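/*
 * Illustrative usage only (not part of the original file, compiled out):
 * draw a few random octets and run one 16-byte block encryption with the
 * helpers above. The key/plaintext values below are arbitrary.
 */
#if 0
static void crypto_usage_example(void)
{
	u8_t key[16] = { 0x01 };	/* arbitrary sample key */
	u8_t plaintext[16] = { 0x02 };	/* arbitrary sample block */
	u8_t enc_data[16];
	u8_t nonce[8];

	/* Loops (with cpu_sleep()) until all requested octets are available */
	bt_rand_c(nonce, sizeof(nonce));

	/* One block encryption using the little-endian helper */
	bt_encrypt_le_c(key, plaintext, enc_data);
}
#endif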

File diff suppressed because it is too large

View file

@@ -0,0 +1,384 @@
/*
* Copyright (c) 2016 Nordic Semiconductor ASA
* Copyright (c) 2016 Vinayak Kariappa Chettimada
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef _CTRL_H_
#define _CTRL_H_
/*****************************************************************************
* Zephyr Kconfig defined
****************************************************************************/
#ifdef CONFIG_BT_MAX_CONN
#define RADIO_CONNECTION_CONTEXT_MAX CONFIG_BT_MAX_CONN
#else
#define RADIO_CONNECTION_CONTEXT_MAX 0
#endif
#ifdef CONFIG_BT_CTLR_RX_BUFFERS
#define RADIO_PACKET_COUNT_RX_MAX CONFIG_BT_CTLR_RX_BUFFERS
#endif
#ifdef CONFIG_BT_CTLR_TX_BUFFERS
#define RADIO_PACKET_COUNT_TX_MAX CONFIG_BT_CTLR_TX_BUFFERS
#endif
#ifdef CONFIG_BT_CTLR_TX_BUFFER_SIZE
#define RADIO_PACKET_TX_DATA_SIZE CONFIG_BT_CTLR_TX_BUFFER_SIZE
#endif
#define BIT64(n) (1ULL << (n))
#if defined(CONFIG_BT_CTLR_LE_ENC)
#define RADIO_BLE_FEAT_BIT_ENC BIT64(BT_LE_FEAT_BIT_ENC)
#else /* !CONFIG_BT_CTLR_LE_ENC */
#define RADIO_BLE_FEAT_BIT_ENC 0
#endif /* !CONFIG_BT_CTLR_LE_ENC */
#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
#define RADIO_BLE_FEAT_BIT_CONN_PARAM_REQ BIT64(BT_LE_FEAT_BIT_CONN_PARAM_REQ)
#else /* !CONFIG_BT_CTLR_CONN_PARAM_REQ */
#define RADIO_BLE_FEAT_BIT_CONN_PARAM_REQ 0
#endif /* !CONFIG_BT_CTLR_CONN_PARAM_REQ */
#if defined(CONFIG_BT_CTLR_LE_PING)
#define RADIO_BLE_FEAT_BIT_PING BIT64(BT_LE_FEAT_BIT_PING)
#else /* !CONFIG_BT_CTLR_LE_PING */
#define RADIO_BLE_FEAT_BIT_PING 0
#endif /* !CONFIG_BT_CTLR_LE_PING */
#if defined(CONFIG_BT_CTLR_DATA_LENGTH_MAX)
#define RADIO_BLE_FEAT_BIT_DLE BIT64(BT_LE_FEAT_BIT_DLE)
#define RADIO_LL_LENGTH_OCTETS_RX_MAX CONFIG_BT_CTLR_DATA_LENGTH_MAX
#else
#define RADIO_BLE_FEAT_BIT_DLE 0
#define RADIO_LL_LENGTH_OCTETS_RX_MAX 27
#endif /* CONFIG_BT_CTLR_DATA_LENGTH_MAX */
#if defined(CONFIG_BT_CTLR_PRIVACY)
#define RADIO_BLE_FEAT_BIT_PRIVACY BIT64(BT_LE_FEAT_BIT_PRIVACY)
#else /* !CONFIG_BT_CTLR_PRIVACY */
#define RADIO_BLE_FEAT_BIT_PRIVACY 0
#endif /* !CONFIG_BT_CTLR_PRIVACY */
#if defined(CONFIG_BT_CTLR_EXT_SCAN_FP)
#define RADIO_BLE_FEAT_BIT_EXT_SCAN BIT64(BT_LE_FEAT_BIT_EXT_SCAN)
#else /* !CONFIG_BT_CTLR_EXT_SCAN_FP */
#define RADIO_BLE_FEAT_BIT_EXT_SCAN 0
#endif /* !CONFIG_BT_CTLR_EXT_SCAN_FP */
#if defined(CONFIG_BT_CTLR_CHAN_SEL_2)
#define RADIO_BLE_FEAT_BIT_CHAN_SEL_2 BIT64(BT_LE_FEAT_BIT_CHAN_SEL_ALGO_2)
#else /* !CONFIG_BT_CTLR_CHAN_SEL_2 */
#define RADIO_BLE_FEAT_BIT_CHAN_SEL_2 0
#endif /* !CONFIG_BT_CTLR_CHAN_SEL_2 */
#if defined(CONFIG_BT_CTLR_MIN_USED_CHAN)
#define RADIO_BLE_FEAT_BIT_MIN_USED_CHAN \
BIT64(BT_LE_FEAT_BIT_MIN_USED_CHAN_PROC)
#else /* !CONFIG_BT_CTLR_MIN_USED_CHAN */
#define RADIO_BLE_FEAT_BIT_MIN_USED_CHAN 0
#endif /* !CONFIG_BT_CTLR_MIN_USED_CHAN */
#if defined(CONFIG_BT_CTLR_PHY_2M)
#define RADIO_BLE_FEAT_BIT_PHY_2M BIT64(BT_LE_FEAT_BIT_PHY_2M)
#else /* !CONFIG_BT_CTLR_PHY_2M */
#define RADIO_BLE_FEAT_BIT_PHY_2M 0
#endif /* !CONFIG_BT_CTLR_PHY_2M */
#if defined(CONFIG_BT_CTLR_PHY_CODED)
#define RADIO_BLE_FEAT_BIT_PHY_CODED BIT64(BT_LE_FEAT_BIT_PHY_CODED)
#else /* !CONFIG_BT_CTLR_PHY_CODED */
#define RADIO_BLE_FEAT_BIT_PHY_CODED 0
#endif /* !CONFIG_BT_CTLR_PHY_CODED */
/*****************************************************************************
* Timer Resources (Controller defined)
****************************************************************************/
#define RADIO_TICKER_ID_EVENT 0
#define RADIO_TICKER_ID_MARKER_0 1
#define RADIO_TICKER_ID_PRE_EMPT 2
#define RADIO_TICKER_ID_ADV_STOP 3
#define RADIO_TICKER_ID_SCAN_STOP 4
#define RADIO_TICKER_ID_ADV 5
#define RADIO_TICKER_ID_SCAN 6
#define RADIO_TICKER_ID_FIRST_CONNECTION 7
#define RADIO_TICKER_INSTANCE_ID_RADIO 0
#define RADIO_TICKER_INSTANCE_ID_APP 1
#define RADIO_TICKER_USERS 3
#define RADIO_TICKER_USER_ID_WORKER MAYFLY_CALL_ID_0
#define RADIO_TICKER_USER_ID_JOB MAYFLY_CALL_ID_1
#define RADIO_TICKER_USER_ID_APP MAYFLY_CALL_ID_PROGRAM
#define RADIO_TICKER_USER_WORKER_OPS (7 + 1)
#define RADIO_TICKER_USER_JOB_OPS (2 + 1)
#define RADIO_TICKER_USER_APP_OPS (1 + 1)
#define RADIO_TICKER_USER_OPS (RADIO_TICKER_USER_WORKER_OPS \
+ RADIO_TICKER_USER_JOB_OPS \
+ RADIO_TICKER_USER_APP_OPS \
)
#define RADIO_TICKER_NODES (RADIO_TICKER_ID_FIRST_CONNECTION \
+ RADIO_CONNECTION_CONTEXT_MAX \
)
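/*
 * Worked example (illustrative only): with CONFIG_BT_MAX_CONN = 2,
 * RADIO_CONNECTION_CONTEXT_MAX = 2, so RADIO_TICKER_NODES =
 * RADIO_TICKER_ID_FIRST_CONNECTION + 2 = 9 ticker nodes in total.
 */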
/*****************************************************************************
* Controller Interface Defines
****************************************************************************/
#define RADIO_BLE_VERSION_NUMBER BT_HCI_VERSION_5_0
#if defined(CONFIG_BT_CTLR_COMPANY_ID)
#define RADIO_BLE_COMPANY_ID CONFIG_BT_CTLR_COMPANY_ID
#else
#define RADIO_BLE_COMPANY_ID 0xFFFF
#endif
#if defined(CONFIG_BT_CTLR_SUBVERSION_NUMBER)
#define RADIO_BLE_SUB_VERSION_NUMBER \
CONFIG_BT_CTLR_SUBVERSION_NUMBER
#else
#define RADIO_BLE_SUB_VERSION_NUMBER 0xFFFF
#endif
#define RADIO_BLE_FEAT_BIT_MASK 0x1FFFF
#define RADIO_BLE_FEAT_BIT_MASK_VALID 0x1CF2F
#define RADIO_BLE_FEAT (RADIO_BLE_FEAT_BIT_ENC | \
RADIO_BLE_FEAT_BIT_CONN_PARAM_REQ | \
BIT(BT_LE_FEAT_BIT_EXT_REJ_IND) | \
BIT(BT_LE_FEAT_BIT_SLAVE_FEAT_REQ) | \
RADIO_BLE_FEAT_BIT_PING | \
RADIO_BLE_FEAT_BIT_DLE | \
RADIO_BLE_FEAT_BIT_PRIVACY | \
RADIO_BLE_FEAT_BIT_EXT_SCAN | \
RADIO_BLE_FEAT_BIT_PHY_2M | \
RADIO_BLE_FEAT_BIT_PHY_CODED | \
RADIO_BLE_FEAT_BIT_CHAN_SEL_2 | \
RADIO_BLE_FEAT_BIT_MIN_USED_CHAN)
#if defined(CONFIG_BT_CTLR_WORKER_PRIO)
#define RADIO_TICKER_USER_ID_WORKER_PRIO CONFIG_BT_CTLR_WORKER_PRIO
#else
#define RADIO_TICKER_USER_ID_WORKER_PRIO 0
#endif
#if defined(CONFIG_BT_CTLR_JOB_PRIO)
#define RADIO_TICKER_USER_ID_JOB_PRIO CONFIG_BT_CTLR_JOB_PRIO
#else
#define RADIO_TICKER_USER_ID_JOB_PRIO 0
#endif
/*****************************************************************************
* Controller Reference Defines (compile time override-able)
****************************************************************************/
/* Minimum LL Payload support (Don't change). */
#define RADIO_LL_LENGTH_OCTETS_RX_MIN 27
/* Maximum LL Payload support (27 to 251). */
#ifndef RADIO_LL_LENGTH_OCTETS_RX_MAX
#define RADIO_LL_LENGTH_OCTETS_RX_MAX 251
#endif
/* Implementation default L2CAP MTU */
#ifndef RADIO_L2CAP_MTU_MAX
#define RADIO_L2CAP_MTU_MAX (RADIO_LL_LENGTH_OCTETS_RX_MAX - 4)
#endif
/* Maximise L2CAP MTU to LL data PDU size */
#if (RADIO_L2CAP_MTU_MAX < (RADIO_LL_LENGTH_OCTETS_RX_MAX - 4))
#undef RADIO_L2CAP_MTU_MAX
#define RADIO_L2CAP_MTU_MAX (RADIO_LL_LENGTH_OCTETS_RX_MAX - 4)
#endif
/* Maximum LL PDU Receive pool size. */
#ifndef RADIO_PACKET_COUNT_RX_MAX
#define RADIO_PACKET_COUNT_RX ((RADIO_L2CAP_MTU_MAX + \
RADIO_LL_LENGTH_OCTETS_RX_MAX \
+ 3) \
/ \
RADIO_LL_LENGTH_OCTETS_RX_MAX \
)
#define RADIO_PACKET_COUNT_RX_MAX (RADIO_PACKET_COUNT_RX + \
((RADIO_CONNECTION_CONTEXT_MAX - 1) * \
(RADIO_PACKET_COUNT_RX - 1)) \
)
#endif /* RADIO_PACKET_COUNT_RX_MAX */
/* Maximum LL PDU Transmit pool size and application tx count. */
#ifndef RADIO_PACKET_COUNT_TX_MAX
#define RADIO_PACKET_COUNT_APP_TX_MAX (RADIO_CONNECTION_CONTEXT_MAX)
#define RADIO_PACKET_COUNT_TX_MAX (RADIO_PACKET_COUNT_RX_MAX + \
RADIO_PACKET_COUNT_APP_TX_MAX \
)
#else
#define RADIO_PACKET_COUNT_APP_TX_MAX (RADIO_PACKET_COUNT_TX_MAX)
#endif
/* Tx Data Size */
#if !defined(RADIO_PACKET_TX_DATA_SIZE) || \
(RADIO_PACKET_TX_DATA_SIZE < RADIO_LL_LENGTH_OCTETS_RX_MIN)
#define RADIO_PACKET_TX_DATA_SIZE RADIO_LL_LENGTH_OCTETS_RX_MIN
#endif
/*****************************************************************************
* Controller Interface Structures
****************************************************************************/
struct radio_adv_data {
u8_t data[DOUBLE_BUFFER_SIZE][PDU_AC_SIZE_MAX];
u8_t first;
u8_t last;
};
struct radio_pdu_node_tx {
void *next;
u8_t pdu_data[1];
};
enum radio_pdu_node_rx_type {
NODE_RX_TYPE_NONE,
NODE_RX_TYPE_DC_PDU,
NODE_RX_TYPE_REPORT,
#if defined(CONFIG_BT_CTLR_ADV_EXT)
NODE_RX_TYPE_EXT_1M_REPORT,
NODE_RX_TYPE_EXT_CODED_REPORT,
#endif /* CONFIG_BT_CTLR_ADV_EXT */
#if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY)
NODE_RX_TYPE_SCAN_REQ,
#endif /* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY */
NODE_RX_TYPE_CONNECTION,
NODE_RX_TYPE_TERMINATE,
NODE_RX_TYPE_CONN_UPDATE,
NODE_RX_TYPE_ENC_REFRESH,
#if defined(CONFIG_BT_CTLR_LE_PING)
NODE_RX_TYPE_APTO,
#endif /* CONFIG_BT_CTLR_LE_PING */
NODE_RX_TYPE_CHAN_SEL_ALGO,
#if defined(CONFIG_BT_CTLR_PHY)
NODE_RX_TYPE_PHY_UPDATE,
#endif /* CONFIG_BT_CTLR_PHY */
#if defined(CONFIG_BT_CTLR_CONN_RSSI)
NODE_RX_TYPE_RSSI,
#endif /* CONFIG_BT_CTLR_CONN_RSSI */
#if defined(CONFIG_BT_CTLR_PROFILE_ISR)
NODE_RX_TYPE_PROFILE,
#endif /* CONFIG_BT_CTLR_PROFILE_ISR */
#if defined(CONFIG_BT_CTLR_ADV_INDICATION)
NODE_RX_TYPE_ADV_INDICATION,
#endif /* CONFIG_BT_CTLR_ADV_INDICATION */
};
struct radio_le_conn_cmplt {
u8_t status;
u8_t role;
u8_t peer_addr_type;
u8_t peer_addr[BDADDR_SIZE];
#if defined(CONFIG_BT_CTLR_PRIVACY)
u8_t peer_rpa[BDADDR_SIZE];
u8_t own_addr_type;
u8_t own_addr[BDADDR_SIZE];
#endif /* CONFIG_BT_CTLR_PRIVACY */
u16_t interval;
u16_t latency;
u16_t timeout;
u8_t mca;
} __packed;
struct radio_le_conn_update_cmplt {
u8_t status;
u16_t interval;
u16_t latency;
u16_t timeout;
} __packed;
struct radio_le_chan_sel_algo {
u8_t chan_sel_algo;
} __packed;
struct radio_le_phy_upd_cmplt {
u8_t status;
u8_t tx;
u8_t rx;
} __packed;
struct radio_pdu_node_rx_hdr {
union {
sys_snode_t node; /* used by slist */
void *next; /* used also by k_fifo once pulled */
void *link;
u8_t packet_release_last;
} onion;
enum radio_pdu_node_rx_type type;
u16_t handle;
};
struct radio_pdu_node_rx {
struct radio_pdu_node_rx_hdr hdr;
u8_t pdu_data[1];
};
/*****************************************************************************
* Controller Interface Functions
****************************************************************************/
/* Downstream */
u32_t radio_init(void *hf_clock, u8_t sca, u8_t connection_count_max,
u8_t rx_count_max, u8_t tx_count_max,
u16_t packet_data_octets_max,
u16_t packet_tx_data_size, u8_t *mem_radio,
u16_t mem_size);
struct device *radio_hf_clock_get(void);
void radio_ticks_active_to_start_set(u32_t ticks_active_to_start);
/* Downstream - Advertiser */
struct radio_adv_data *radio_adv_data_get(void);
struct radio_adv_data *radio_scan_data_get(void);
#if defined(CONFIG_BT_CTLR_ADV_EXT)
u32_t radio_adv_enable(u8_t phy_p, u16_t interval, u8_t chan_map,
u8_t filter_policy, u8_t rl_idx);
#else /* !CONFIG_BT_CTLR_ADV_EXT */
u32_t radio_adv_enable(u16_t interval, u8_t chan_map, u8_t filter_policy,
u8_t rl_idx);
#endif /* !CONFIG_BT_CTLR_ADV_EXT */
u32_t radio_adv_disable(void);
u32_t radio_adv_is_enabled(void);
u32_t radio_adv_filter_pol_get(void);
/* Downstream - Scanner */
u32_t radio_scan_enable(u8_t type, u8_t init_addr_type, u8_t *init_addr,
u16_t interval, u16_t window, u8_t filter_policy,
u8_t rpa_gen, u8_t rl_idx);
u32_t radio_scan_disable(void);
u32_t radio_scan_is_enabled(void);
u32_t radio_scan_filter_pol_get(void);
u32_t radio_connect_enable(u8_t adv_addr_type, u8_t *adv_addr,
u16_t interval, u16_t latency,
u16_t timeout);
/* Upstream */
u8_t radio_rx_get(struct radio_pdu_node_rx **radio_pdu_node_rx,
u16_t *handle);
void radio_rx_dequeue(void);
void radio_rx_mem_release(struct radio_pdu_node_rx **radio_pdu_node_rx);
u8_t radio_rx_fc_set(u16_t handle, u8_t fc);
u8_t radio_rx_fc_get(u16_t *handle);
struct radio_pdu_node_tx *radio_tx_mem_acquire(void);
void radio_tx_mem_release(struct radio_pdu_node_tx *pdu_data_node_tx);
u32_t radio_tx_mem_enqueue(u16_t handle,
struct radio_pdu_node_tx *pdu_data_node_tx);
/* Callbacks */
extern void radio_active_callback(u8_t active);
extern void radio_event_callback(void);
extern void ll_adv_scan_state_cb(u8_t bm);
#endif

View file

@@ -0,0 +1,352 @@
/*
* Copyright (c) 2016 Nordic Semiconductor ASA
* Copyright (c) 2016 Vinayak Kariappa Chettimada
*
* SPDX-License-Identifier: Apache-2.0
*/
enum llcp {
LLCP_NONE,
LLCP_CONN_UPD,
LLCP_CHAN_MAP,
#if defined(CONFIG_BT_CTLR_LE_ENC)
LLCP_ENCRYPTION,
#endif /* CONFIG_BT_CTLR_LE_ENC */
LLCP_FEATURE_EXCHANGE,
LLCP_VERSION_EXCHANGE,
/* LLCP_TERMINATE, */
LLCP_CONNECTION_PARAM_REQ,
#if defined(CONFIG_BT_CTLR_LE_PING)
LLCP_PING,
#endif /* CONFIG_BT_CTLR_LE_PING */
#if defined(CONFIG_BT_CTLR_PHY)
LLCP_PHY_UPD,
#endif /* CONFIG_BT_CTLR_PHY */
};
struct shdr {
u32_t ticks_xtal_to_start;
u32_t ticks_active_to_start;
u32_t ticks_preempt_to_start;
u32_t ticks_slot;
};
struct connection {
struct shdr hdr;
u8_t access_addr[4];
u8_t crc_init[3];
u8_t data_chan_map[5];
u8_t chm_update;
u8_t data_chan_count:6;
u8_t data_chan_sel:1;
u8_t role:1;
union {
struct {
u8_t data_chan_hop;
u8_t data_chan_use;
};
u16_t data_chan_id;
};
u16_t handle;
u16_t event_counter;
u16_t conn_interval;
u16_t latency;
u16_t latency_prepare;
u16_t latency_event;
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
u16_t default_tx_octets;
u16_t max_tx_octets;
u16_t max_rx_octets;
#if defined(CONFIG_BT_CTLR_PHY)
u16_t default_tx_time;
u16_t max_tx_time;
u16_t max_rx_time;
#endif /* CONFIG_BT_CTLR_PHY */
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
#if defined(CONFIG_BT_CTLR_PHY)
u8_t phy_pref_tx:3;
u8_t phy_tx:3;
u8_t phy_pref_flags:1;
u8_t phy_flags:1;
u8_t phy_tx_time:3;
u8_t phy_pref_rx:3;
u8_t phy_rx:3;
#endif /* CONFIG_BT_CTLR_PHY */
u16_t connect_expire;
u16_t supervision_reload;
u16_t supervision_expire;
u16_t procedure_reload;
u16_t procedure_expire;
#if defined(CONFIG_BT_CTLR_LE_PING)
u16_t appto_reload;
u16_t appto_expire;
u16_t apto_reload;
u16_t apto_expire;
#endif /* CONFIG_BT_CTLR_LE_PING */
union {
struct {
u8_t reserved:5;
u8_t fex_valid:1;
} common;
struct {
u8_t terminate_ack:1;
u8_t rfu:4;
u8_t fex_valid:1;
} master;
struct {
u8_t latency_enabled:1;
u8_t latency_cancel:1;
u8_t sca:3;
u8_t fex_valid:1;
u32_t window_widening_periodic_us;
u32_t window_widening_max_us;
u32_t window_widening_prepare_us;
u32_t window_widening_event_us;
u32_t window_size_prepare_us;
u32_t window_size_event_us;
u32_t force;
u32_t ticks_to_offset;
} slave;
};
u8_t llcp_req;
u8_t llcp_ack;
enum llcp llcp_type;
union {
struct {
enum {
LLCP_CUI_STATE_INPROG,
LLCP_CUI_STATE_USE,
LLCP_CUI_STATE_SELECT
} state:2 __packed;
u8_t is_internal:1;
u16_t interval;
u16_t latency;
u16_t timeout;
u16_t instant;
u32_t win_offset_us;
u8_t win_size;
u16_t *pdu_win_offset;
u32_t ticks_anchor;
} conn_upd;
struct {
u8_t initiate;
u8_t chm[5];
u16_t instant;
} chan_map;
#if defined(CONFIG_BT_CTLR_PHY)
struct {
u8_t initiate:1;
u8_t cmd:1;
u8_t tx:3;
u8_t rx:3;
u16_t instant;
} phy_upd_ind;
#endif /* CONFIG_BT_CTLR_PHY */
struct {
u8_t initiate;
u8_t error_code;
u8_t rand[8];
u8_t ediv[2];
u8_t ltk[16];
u8_t skd[16];
} encryption;
} llcp;
u32_t llcp_features;
struct {
u8_t tx:1;
u8_t rx:1;
u8_t version_number;
u16_t company_id;
u16_t sub_version_number;
} llcp_version;
struct {
u8_t req;
u8_t ack;
u8_t reason_own;
u8_t reason_peer;
struct {
struct radio_pdu_node_rx_hdr hdr;
u8_t reason;
} radio_pdu_node_rx;
} llcp_terminate;
#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
struct {
u8_t req;
u8_t ack;
enum {
LLCP_CPR_STATE_REQ,
LLCP_CPR_STATE_RSP,
LLCP_CPR_STATE_APP_REQ,
LLCP_CPR_STATE_APP_WAIT,
LLCP_CPR_STATE_RSP_WAIT,
LLCP_CPR_STATE_UPD
} state:3 __packed;
u8_t cmd:1;
u8_t status;
u16_t interval;
u16_t latency;
u16_t timeout;
u8_t preferred_periodicity;
u16_t reference_conn_event_count;
u16_t offset0;
u16_t offset1;
u16_t offset2;
u16_t offset3;
u16_t offset4;
u16_t offset5;
u16_t *pdu_win_offset0;
u32_t ticks_ref;
u32_t ticks_to_offset_next;
} llcp_conn_param;
#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
struct {
u8_t req;
u8_t ack;
u8_t state:2;
#define LLCP_LENGTH_STATE_REQ 0
#define LLCP_LENGTH_STATE_ACK_WAIT 1
#define LLCP_LENGTH_STATE_RSP_WAIT 2
#define LLCP_LENGTH_STATE_RESIZE 3
u16_t rx_octets;
u16_t tx_octets;
#if defined(CONFIG_BT_CTLR_PHY)
u16_t rx_time;
u16_t tx_time;
#endif /* CONFIG_BT_CTLR_PHY */
} llcp_length;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
#if defined(CONFIG_BT_CTLR_PHY)
struct {
u8_t req;
u8_t ack;
u8_t state:2;
#define LLCP_PHY_STATE_REQ 0
#define LLCP_PHY_STATE_ACK_WAIT 1
#define LLCP_PHY_STATE_RSP_WAIT 2
#define LLCP_PHY_STATE_UPD 3
u8_t tx:3;
u8_t rx:3;
u8_t flags:1;
u8_t cmd:1;
} llcp_phy;
#endif /* CONFIG_BT_CTLR_PHY */
u8_t sn:1;
u8_t nesn:1;
u8_t pause_rx:1;
u8_t pause_tx:1;
u8_t enc_rx:1;
u8_t enc_tx:1;
u8_t refresh:1;
u8_t empty:1;
struct ccm ccm_rx;
struct ccm ccm_tx;
struct radio_pdu_node_tx *pkt_tx_head;
struct radio_pdu_node_tx *pkt_tx_ctrl;
struct radio_pdu_node_tx *pkt_tx_ctrl_last;
struct radio_pdu_node_tx *pkt_tx_data;
struct radio_pdu_node_tx *pkt_tx_last;
u8_t packet_tx_head_len;
u8_t packet_tx_head_offset;
#if defined(CONFIG_BT_CTLR_CONN_RSSI)
u8_t rssi_latest;
u8_t rssi_reported;
u8_t rssi_sample_count;
#endif /* CONFIG_BT_CTLR_CONN_RSSI */
};
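/* Size of one connection context, rounded up by MROUND() (memory alignment
 * rounding assumed).
 */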
#define CONNECTION_T_SIZE MROUND(sizeof(struct connection))
struct pdu_data_q_tx {
u16_t handle;
struct radio_pdu_node_tx *node_tx;
};
/* Extra bytes for enqueued rx_node metadata: rssi (always), plus resolving
* list index and directed adv report (when privacy or extended scanner
* filter policies are enabled).
* Note: to simplify the code, both extra bytes are allocated even if only
* one of the two options is selected.
*/
#if defined(CONFIG_BT_CTLR_PRIVACY) || defined(CONFIG_BT_CTLR_EXT_SCAN_FP)
#define PDU_AC_SIZE_EXTRA 3
#else
#define PDU_AC_SIZE_EXTRA 1
#endif /* CONFIG_BT_CTLR_PRIVACY || CONFIG_BT_CTLR_EXT_SCAN_FP */
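/*
 * Illustrative arithmetic (not from the original sources): with privacy or
 * the extended scanner filter policy enabled, PDU_AC_SIZE_EXTRA is 3 (RSSI
 * plus the two optional metadata bytes), so PACKET_RX_DATA_SIZE_MIN below
 * rounds up offsetof(struct radio_pdu_node_rx, pdu_data) + PDU_AC_SIZE_MAX
 * + 3; otherwise only the single RSSI byte is reserved.
 */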
/* Minimum Rx Data allocation size */
#define PACKET_RX_DATA_SIZE_MIN \
MROUND(offsetof(struct radio_pdu_node_rx, pdu_data) + \
(PDU_AC_SIZE_MAX + PDU_AC_SIZE_EXTRA))
/* Minimum Tx Ctrl allocation size */
#define PACKET_TX_CTRL_SIZE_MIN \
MROUND(offsetof(struct radio_pdu_node_tx, pdu_data) + \
offsetof(struct pdu_data, payload) + 27)
/** @todo fix starvation when ctrl rx in radio ISR
* for multiple connections needs to tx back to peer.
*/
#define PACKET_MEM_COUNT_TX_CTRL 2
#define LL_MEM_CONN (sizeof(struct connection) * RADIO_CONNECTION_CONTEXT_MAX)
#define LL_MEM_RXQ (sizeof(void *) * (RADIO_PACKET_COUNT_RX_MAX + 4))
#define LL_MEM_TXQ (sizeof(struct pdu_data_q_tx) * \
(RADIO_PACKET_COUNT_TX_MAX + 2))
#define LL_MEM_RX_POOL_SZ (MROUND(offsetof(struct radio_pdu_node_rx,\
pdu_data) + ((\
(PDU_AC_SIZE_MAX + PDU_AC_SIZE_EXTRA) < \
(offsetof(struct pdu_data, payload) + \
RADIO_LL_LENGTH_OCTETS_RX_MAX)) ? \
(offsetof(struct pdu_data, payload) + \
RADIO_LL_LENGTH_OCTETS_RX_MAX) \
: \
(PDU_AC_SIZE_MAX + PDU_AC_SIZE_EXTRA))) * \
(RADIO_PACKET_COUNT_RX_MAX + 3))
#define LL_MEM_RX_LINK_POOL (sizeof(void *) * 2 * ((RADIO_PACKET_COUNT_RX_MAX +\
4) + RADIO_CONNECTION_CONTEXT_MAX))
#define LL_MEM_TX_CTRL_POOL (PACKET_TX_CTRL_SIZE_MIN * PACKET_MEM_COUNT_TX_CTRL)
#define LL_MEM_TX_DATA_POOL ((MROUND(offsetof( \
struct radio_pdu_node_tx, pdu_data) + \
offsetof(struct pdu_data, payload) + \
RADIO_PACKET_TX_DATA_SIZE)) \
* (RADIO_PACKET_COUNT_TX_MAX + 1))
#define LL_MEM_TOTAL (LL_MEM_CONN + LL_MEM_RXQ + (LL_MEM_TXQ * 2) + \
LL_MEM_RX_POOL_SZ + \
LL_MEM_RX_LINK_POOL + LL_MEM_TX_CTRL_POOL + LL_MEM_TX_DATA_POOL)

View file

@@ -0,0 +1,333 @@
/*
* Copyright (c) 2016-2017 Nordic Semiconductor ASA
* Copyright (c) 2016 Vinayak Kariappa Chettimada
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr/types.h>
#include <string.h>
#include "errno.h"
#include <soc.h>
#include <device.h>
#include <clock_control.h>
#ifdef CONFIG_CLOCK_CONTROL_NRF5
#include <drivers/clock_control/nrf5_clock_control.h>
#endif
#include <bluetooth/hci.h>
#define BT_DBG_ENABLED IS_ENABLED(CONFIG_BT_DEBUG_HCI_DRIVER)
#include "common/log.h"
#include "hal/cpu.h"
#include "hal/cntr.h"
#include "hal/rand.h"
#include "hal/ccm.h"
#include "hal/radio.h"
#include "hal/debug.h"
#include "util/util.h"
#include "util/mem.h"
#include "util/memq.h"
#include "util/mayfly.h"
#include "ticker/ticker.h"
#include "pdu.h"
#include "ctrl.h"
#include "ctrl_internal.h"
#include "ll.h"
#include "ll_filter.h"
#include "irq.h"
#include <arch_isr.h>
#include "kport.h"
/* Global singletons */
/* memory for storing Random number */
#define RAND_THREAD_THRESHOLD 4 /* at least access address */
#define RAND_ISR_THRESHOLD 12 /* at least encryption div. and iv */
static u8_t MALIGN(4) rand_context[4 + RAND_THREAD_THRESHOLD + 1];
static u8_t MALIGN(4) rand_isr_context[4 + RAND_ISR_THRESHOLD + 1];
#if defined(CONFIG_SOC_FLASH_NRF5_RADIO_SYNC)
#define FLASH_TICKER_NODES 1 /* No. of tickers reserved for flashing */
#define FLASH_TICKER_USER_APP_OPS 1 /* No. of additional ticker operations */
#else
#define FLASH_TICKER_NODES 0
#define FLASH_TICKER_USER_APP_OPS 0
#endif
#define TICKER_NODES (RADIO_TICKER_NODES + FLASH_TICKER_NODES)
#define TICKER_USER_APP_OPS (RADIO_TICKER_USER_APP_OPS + \
FLASH_TICKER_USER_APP_OPS)
#define TICKER_USER_OPS (RADIO_TICKER_USER_OPS + \
FLASH_TICKER_USER_APP_OPS)
/* memory for ticker nodes/instances */
static u8_t MALIGN(4) _ticker_nodes[TICKER_NODES][TICKER_NODE_T_SIZE];
/* memory for users/contexts operating on ticker module */
static u8_t MALIGN(4) _ticker_users[MAYFLY_CALLER_COUNT][TICKER_USER_T_SIZE];
/* memory for user/context simultaneous API operations */
static u8_t MALIGN(4) _ticker_user_ops[TICKER_USER_OPS][TICKER_USER_OP_T_SIZE];
/* memory for Bluetooth Controller (buffers, queues etc.) */
static u8_t MALIGN(4) _radio[LL_MEM_TOTAL];
static struct k_sem *sem_recv;
static struct {
u8_t pub_addr[BDADDR_SIZE];
u8_t rnd_addr[BDADDR_SIZE];
} _ll_context;
void mayfly_enable_cb(u8_t caller_id, u8_t callee_id, u8_t enable)
{
(void)caller_id;
LL_ASSERT(callee_id == MAYFLY_CALL_ID_1);
if (enable) {
irq_enable(SWI4_IRQn);
} else {
irq_disable(SWI4_IRQn);
}
}
u32_t mayfly_is_enabled(u8_t caller_id, u8_t callee_id)
{
(void)caller_id;
if (callee_id == MAYFLY_CALL_ID_0) {
return irq_is_enabled(RTC0_IRQn);
} else if (callee_id == MAYFLY_CALL_ID_1) {
return irq_is_enabled(SWI4_IRQn);
}
LL_ASSERT(0);
return 0;
}
u32_t mayfly_prio_is_equal(u8_t caller_id, u8_t callee_id)
{
#if (RADIO_TICKER_USER_ID_WORKER_PRIO == RADIO_TICKER_USER_ID_JOB_PRIO)
return (caller_id == callee_id) ||
((caller_id == MAYFLY_CALL_ID_0) &&
(callee_id == MAYFLY_CALL_ID_1)) ||
((caller_id == MAYFLY_CALL_ID_1) &&
(callee_id == MAYFLY_CALL_ID_0));
#else
return caller_id == callee_id;
#endif
}
void mayfly_pend(u8_t caller_id, u8_t callee_id)
{
(void)caller_id;
switch (callee_id) {
case MAYFLY_CALL_ID_0:
NVIC_SetPendingIRQ(RTC0_IRQn);
break;
case MAYFLY_CALL_ID_1:
NVIC_SetPendingIRQ(SWI4_IRQn);
break;
case MAYFLY_CALL_ID_PROGRAM:
default:
LL_ASSERT(0);
break;
}
}
void radio_active_callback(u8_t active)
{
}
void radio_event_callback(void)
{
k_sem_give(sem_recv);
}
/*
ISR_DIRECT_DECLARE(radio_nrf5_isr)
{
isr_radio();
ISR_DIRECT_PM();
return 1;
}
*/
void RADIO_IRQHandler(void *arg)
{
krhino_intrpt_enter();
isr_radio();
krhino_intrpt_exit();
}
void RTC0_IRQHandler(void *arg)
{
u32_t compare0, compare1;
krhino_intrpt_enter();
/* store interested events */
compare0 = NRF_RTC0->EVENTS_COMPARE[0];
compare1 = NRF_RTC0->EVENTS_COMPARE[1];
/* On compare0 run ticker worker instance0 */
if (compare0) {
NRF_RTC0->EVENTS_COMPARE[0] = 0;
ticker_trigger(0);
}
/* On compare1 run ticker worker instance1 */
if (compare1) {
NRF_RTC0->EVENTS_COMPARE[1] = 0;
ticker_trigger(1);
}
mayfly_run(MAYFLY_CALL_ID_0);
krhino_intrpt_exit();
}
void RNG_IRQHandler(void *arg)
{
krhino_intrpt_enter();
isr_rand(arg);
krhino_intrpt_exit();
}
void SWI4_EGU4_IRQHandler(void *arg)
{
krhino_intrpt_enter();
mayfly_run(MAYFLY_CALL_ID_1);
krhino_intrpt_exit();
}
int ll_init(struct k_sem *sem_rx)
{
struct device *clk_k32;
struct device *clk_m16;
u32_t err;
sem_recv = sem_rx;
extern struct k_mutex mutex_rand;
k_mutex_init(&mutex_rand);
/* TODO: bind and use RNG driver */
rand_init(rand_context, sizeof(rand_context), RAND_THREAD_THRESHOLD);
rand_isr_init(rand_isr_context, sizeof(rand_isr_context),
RAND_ISR_THRESHOLD);
clk_k32 = device_get_binding(CONFIG_CLOCK_CONTROL_NRF5_K32SRC_DRV_NAME);
if (!clk_k32) {
return -ENODEV;
}
clock_control_on(clk_k32, (void *)CLOCK_CONTROL_NRF5_K32SRC);
/* TODO: bind and use counter driver */
cntr_init();
mayfly_init();
_ticker_users[MAYFLY_CALL_ID_0][0] = RADIO_TICKER_USER_WORKER_OPS;
_ticker_users[MAYFLY_CALL_ID_1][0] = RADIO_TICKER_USER_JOB_OPS;
_ticker_users[MAYFLY_CALL_ID_2][0] = 0;
_ticker_users[MAYFLY_CALL_ID_PROGRAM][0] = TICKER_USER_APP_OPS;
ticker_init(RADIO_TICKER_INSTANCE_ID_RADIO, TICKER_NODES,
&_ticker_nodes[0], MAYFLY_CALLER_COUNT, &_ticker_users[0],
TICKER_USER_OPS, &_ticker_user_ops[0]);
clk_m16 = device_get_binding(CONFIG_CLOCK_CONTROL_NRF5_M16SRC_DRV_NAME);
if (!clk_m16) {
return -ENODEV;
}
err = radio_init(clk_m16, CLOCK_CONTROL_NRF5_K32SRC_ACCURACY,
RADIO_CONNECTION_CONTEXT_MAX,
RADIO_PACKET_COUNT_RX_MAX,
RADIO_PACKET_COUNT_TX_MAX,
RADIO_LL_LENGTH_OCTETS_RX_MAX,
RADIO_PACKET_TX_DATA_SIZE, &_radio[0], sizeof(_radio));
if (err) {
BT_ERR("Required RAM size: %d, supplied: %u.", err,
sizeof(_radio));
return -ENOMEM;
}
ll_filter_reset(true);
IRQ_DIRECT_CONNECT(NRF5_IRQ_RADIO_IRQn, CONFIG_BT_CTLR_WORKER_PRIO,
RADIO_IRQHandler, 0);
IRQ_CONNECT(NRF5_IRQ_RTC0_IRQn, CONFIG_BT_CTLR_WORKER_PRIO,
RTC0_IRQHandler, NULL, 0);
IRQ_CONNECT(NRF5_IRQ_SWI4_IRQn, CONFIG_BT_CTLR_JOB_PRIO, SWI4_EGU4_IRQHandler,
NULL, 0);
IRQ_CONNECT(NRF5_IRQ_RNG_IRQn, 1, RNG_IRQHandler, NULL, 0);
irq_enable(NRF5_IRQ_RADIO_IRQn);
irq_enable(NRF5_IRQ_RTC0_IRQn);
irq_enable(NRF5_IRQ_SWI4_IRQn);
irq_enable(NRF5_IRQ_RNG_IRQn);
return 0;
}
void ll_timeslice_ticker_id_get(u8_t * const instance_index, u8_t * const user_id)
{
*user_id = (TICKER_NODES - FLASH_TICKER_NODES); /* The last index of the total tickers, reserved for flashing */
*instance_index = RADIO_TICKER_INSTANCE_ID_RADIO;
}
u8_t *ll_addr_get(u8_t addr_type, u8_t *bdaddr)
{
if (addr_type > 1) {
return NULL;
}
if (addr_type) {
if (bdaddr) {
memcpy(bdaddr, _ll_context.rnd_addr, BDADDR_SIZE);
}
return _ll_context.rnd_addr;
}
if (bdaddr) {
memcpy(bdaddr, _ll_context.pub_addr, BDADDR_SIZE);
}
return _ll_context.pub_addr;
}
void ll_addr_set(u8_t addr_type, u8_t const *const bdaddr)
{
if (addr_type) {
memcpy(_ll_context.rnd_addr, bdaddr, BDADDR_SIZE);
} else {
memcpy(_ll_context.pub_addr, bdaddr, BDADDR_SIZE);
}
}

View file

@@ -0,0 +1,425 @@
/*
* Copyright (c) 2016-2017 Nordic Semiconductor ASA
* Copyright (c) 2016 Vinayak Kariappa Chettimada
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <string.h>
#include <zephyr.h>
#include <bluetooth/hci.h>
#include "util/util.h"
#include "pdu.h"
#include "ctrl.h"
#include "ll.h"
#include "hal/debug.h"
#include "ll_filter.h"
#include "ll_adv.h"
static struct ll_adv_set ll_adv;
struct ll_adv_set *ll_adv_set_get(void)
{
return &ll_adv;
}
#if defined(CONFIG_BT_CTLR_ADV_EXT)
u32_t ll_adv_params_set(u8_t handle, u16_t evt_prop, u32_t interval,
u8_t adv_type, u8_t own_addr_type,
u8_t direct_addr_type, u8_t const *const direct_addr,
u8_t chan_map, u8_t filter_policy, u8_t *tx_pwr,
u8_t phy_p, u8_t skip, u8_t phy_s, u8_t sid, u8_t sreq)
{
u8_t const pdu_adv_type[] = {PDU_ADV_TYPE_ADV_IND,
PDU_ADV_TYPE_DIRECT_IND,
PDU_ADV_TYPE_SCAN_IND,
PDU_ADV_TYPE_NONCONN_IND,
PDU_ADV_TYPE_DIRECT_IND,
PDU_ADV_TYPE_EXT_IND};
#else /* !CONFIG_BT_CTLR_ADV_EXT */
u32_t ll_adv_params_set(u16_t interval, u8_t adv_type,
u8_t own_addr_type, u8_t direct_addr_type,
u8_t const *const direct_addr, u8_t chan_map,
u8_t filter_policy)
{
u8_t const pdu_adv_type[] = {PDU_ADV_TYPE_ADV_IND,
PDU_ADV_TYPE_DIRECT_IND,
PDU_ADV_TYPE_SCAN_IND,
PDU_ADV_TYPE_NONCONN_IND,
PDU_ADV_TYPE_DIRECT_IND};
#endif /* !CONFIG_BT_CTLR_ADV_EXT */
struct radio_adv_data *radio_adv_data;
struct pdu_adv *pdu;
if (radio_adv_is_enabled()) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
#if defined(CONFIG_BT_CTLR_ADV_EXT)
/* TODO: check and fail (0x12, invalid HCI cmd param) if invalid
* evt_prop bits.
*/
ll_adv.phy_p = BIT(0);
/* extended */
if (adv_type > 0x04) {
/* legacy */
if (evt_prop & BIT(4)) {
u8_t const leg_adv_type[] = { 0x03, 0x04, 0x02, 0x00};
adv_type = leg_adv_type[evt_prop & 0x03];
/* high duty cycle directed */
if (evt_prop & BIT(3)) {
adv_type = 0x01;
}
} else {
/* - Connectable and scannable not allowed;
* - High duty cycle directed connectable not allowed
*/
if (((evt_prop & 0x03) == 0x03) ||
((evt_prop & 0x0C) == 0x0C)) {
return 0x12; /* invalid HCI cmd param */
}
adv_type = 0x05; /* PDU_ADV_TYPE_EXT_IND */
ll_adv.phy_p = phy_p;
}
}
#endif /* CONFIG_BT_CTLR_ADV_EXT */
/* remember params so that set adv/scan data and adv enable
* interface can correctly update adv/scan data in the
* double buffer between caller and controller context.
*/
/* Set interval for Undirected or Low Duty Cycle Directed Advertising */
if (adv_type != 0x01) {
ll_adv.interval = interval;
} else {
ll_adv.interval = 0;
}
ll_adv.chan_map = chan_map;
ll_adv.filter_policy = filter_policy;
/* update the "current" primary adv data */
radio_adv_data = radio_adv_data_get();
pdu = (struct pdu_adv *)&radio_adv_data->data[radio_adv_data->last][0];
pdu->type = pdu_adv_type[adv_type];
pdu->rfu = 0;
if (IS_ENABLED(CONFIG_BT_CTLR_CHAN_SEL_2) &&
((pdu->type == PDU_ADV_TYPE_ADV_IND) ||
(pdu->type == PDU_ADV_TYPE_DIRECT_IND))) {
pdu->chan_sel = 1;
} else {
pdu->chan_sel = 0;
}
#if defined(CONFIG_BT_CTLR_PRIVACY)
ll_adv.own_addr_type = own_addr_type;
if (ll_adv.own_addr_type == BT_ADDR_LE_PUBLIC_ID ||
ll_adv.own_addr_type == BT_ADDR_LE_RANDOM_ID) {
ll_adv.id_addr_type = direct_addr_type;
memcpy(&ll_adv.id_addr, direct_addr, BDADDR_SIZE);
}
#endif /* CONFIG_BT_CTLR_PRIVACY */
pdu->tx_addr = own_addr_type & 0x1;
pdu->rx_addr = 0;
if (pdu->type == PDU_ADV_TYPE_DIRECT_IND) {
pdu->rx_addr = direct_addr_type;
memcpy(&pdu->payload.direct_ind.tgt_addr[0], direct_addr,
BDADDR_SIZE);
pdu->len = sizeof(struct pdu_adv_payload_direct_ind);
#if defined(CONFIG_BT_CTLR_ADV_EXT)
} else if (pdu->type == PDU_ADV_TYPE_EXT_IND) {
struct pdu_adv_payload_com_ext_adv *p;
struct ext_adv_hdr *h;
u8_t *ptr;
u8_t len;
p = (void *)&pdu->payload.adv_ext_ind;
h = (void *)p->ext_hdr_adi_adv_data;
ptr = (u8_t *)h + sizeof(*h);
/* No ACAD and no AdvData */
p->ext_hdr_len = 0;
p->adv_mode = evt_prop & 0x03;
/* Zero-init header flags */
*(u8_t *)h = 0;
/* AdvA flag */
if (!(evt_prop & BIT(5)) && !p->adv_mode && (phy_p != BIT(2))) {
/* TODO: optional on 1M */
h->adv_addr = 1;
/* NOTE: AdvA is filled at enable */
ptr += BDADDR_SIZE;
}
/* TODO: TargetA flag */
/* TODO: ADI flag */
/* TODO: AuxPtr flag */
/* TODO: SyncInfo flag */
/* Tx Power flag */
if (evt_prop & BIT(6)) {
h->tx_pwr = 1;
ptr++;
}
/* Calc primary PDU len */
len = ptr - (u8_t *)p;
if (len > (offsetof(struct pdu_adv_payload_com_ext_adv,
ext_hdr_adi_adv_data) + sizeof(*h))) {
p->ext_hdr_len = len -
offsetof(struct pdu_adv_payload_com_ext_adv,
ext_hdr_adi_adv_data);
pdu->len = len;
} else {
pdu->len = offsetof(struct pdu_adv_payload_com_ext_adv,
ext_hdr_adi_adv_data);
}
/* Start filling primary PDU payload based on flags */
/* TODO: AdvData */
/* TODO: ACAD */
/* Tx Power */
if (h->tx_pwr) {
u8_t _tx_pwr;
_tx_pwr = 0;
if (tx_pwr) {
if (*tx_pwr != 0x7F) {
_tx_pwr = *tx_pwr;
} else {
*tx_pwr = _tx_pwr;
}
}
ptr--;
*ptr = _tx_pwr;
}
/* TODO: SyncInfo */
/* TODO: AuxPtr */
/* TODO: ADI */
/* NOTE: TargetA, filled at enable and RPA timeout */
/* NOTE: AdvA, filled at enable and RPA timeout */
#endif /* CONFIG_BT_CTLR_ADV_EXT */
} else if (pdu->len == 0) {
pdu->len = BDADDR_SIZE;
}
/* update the current scan data */
radio_adv_data = radio_scan_data_get();
pdu = (struct pdu_adv *)&radio_adv_data->data[radio_adv_data->last][0];
pdu->type = PDU_ADV_TYPE_SCAN_RSP;
pdu->rfu = 0;
pdu->chan_sel = 0;
pdu->tx_addr = own_addr_type & 0x1;
pdu->rx_addr = 0;
if (pdu->len == 0) {
pdu->len = BDADDR_SIZE;
}
return 0;
}
void ll_adv_data_set(u8_t len, u8_t const *const data)
{
struct radio_adv_data *radio_adv_data;
struct pdu_adv *prev;
struct pdu_adv *pdu;
u8_t last;
/* Don't update data if directed or extended advertising is in use. */
radio_adv_data = radio_adv_data_get();
prev = (struct pdu_adv *)&radio_adv_data->data[radio_adv_data->last][0];
if ((prev->type == PDU_ADV_TYPE_DIRECT_IND) ||
(IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT) &&
(prev->type == PDU_ADV_TYPE_EXT_IND))) {
/* TODO: remember data, to be used if type is changed using
* parameter set function ll_adv_params_set afterwards.
*/
return;
}
/* use the last index in double buffer, */
if (radio_adv_data->first == radio_adv_data->last) {
last = radio_adv_data->last + 1;
if (last == DOUBLE_BUFFER_SIZE) {
last = 0;
}
} else {
last = radio_adv_data->last;
}
/* update adv pdu fields. */
pdu = (struct pdu_adv *)&radio_adv_data->data[last][0];
pdu->type = prev->type;
pdu->rfu = 0;
if (IS_ENABLED(CONFIG_BT_CTLR_CHAN_SEL_2)) {
pdu->chan_sel = prev->chan_sel;
} else {
pdu->chan_sel = 0;
}
pdu->tx_addr = prev->tx_addr;
pdu->rx_addr = prev->rx_addr;
memcpy(&pdu->payload.adv_ind.addr[0],
&prev->payload.adv_ind.addr[0], BDADDR_SIZE);
memcpy(&pdu->payload.adv_ind.data[0], data, len);
pdu->len = BDADDR_SIZE + len;
/* commit the update so controller picks it. */
radio_adv_data->last = last;
}
void ll_scan_data_set(u8_t len, u8_t const *const data)
{
struct radio_adv_data *radio_scan_data;
struct pdu_adv *prev;
struct pdu_adv *pdu;
u8_t last;
/* use the last index in double buffer, */
radio_scan_data = radio_scan_data_get();
if (radio_scan_data->first == radio_scan_data->last) {
last = radio_scan_data->last + 1;
if (last == DOUBLE_BUFFER_SIZE) {
last = 0;
}
} else {
last = radio_scan_data->last;
}
/* update scan pdu fields. */
prev = (struct pdu_adv *)
&radio_scan_data->data[radio_scan_data->last][0];
pdu = (struct pdu_adv *)&radio_scan_data->data[last][0];
pdu->type = PDU_ADV_TYPE_SCAN_RSP;
pdu->rfu = 0;
pdu->chan_sel = 0;
pdu->tx_addr = prev->tx_addr;
pdu->rx_addr = 0;
pdu->len = BDADDR_SIZE + len;
memcpy(&pdu->payload.scan_rsp.addr[0],
&prev->payload.scan_rsp.addr[0], BDADDR_SIZE);
memcpy(&pdu->payload.scan_rsp.data[0], data, len);
/* commit the update so controller picks it. */
radio_scan_data->last = last;
}
u32_t ll_adv_enable(u8_t enable)
{
struct radio_adv_data *radio_scan_data;
struct radio_adv_data *radio_adv_data;
u8_t rl_idx = FILTER_IDX_NONE;
struct pdu_adv *pdu_scan;
struct pdu_adv *pdu_adv;
u32_t status;
if (!enable) {
return radio_adv_disable();
} else if (radio_adv_is_enabled()) {
return 0;
}
/* TODO: move the addr remembered into controller
* this way when implementing Privacy 1.2, generated
* new resolvable addresses can be used instantly.
*/
/* remember addr to use and also update the addr in
* both adv and scan response PDUs.
*/
radio_adv_data = radio_adv_data_get();
radio_scan_data = radio_scan_data_get();
pdu_adv = (struct pdu_adv *)&radio_adv_data->data
[radio_adv_data->last][0];
pdu_scan = (struct pdu_adv *)&radio_scan_data->data
[radio_scan_data->last][0];
if (0) {
#if defined(CONFIG_BT_CTLR_ADV_EXT)
} else if (pdu_adv->type == PDU_ADV_TYPE_EXT_IND) {
struct pdu_adv_payload_com_ext_adv *p;
struct ext_adv_hdr *h;
u8_t *ptr;
p = (void *)&pdu_adv->payload.adv_ext_ind;
h = (void *)p->ext_hdr_adi_adv_data;
ptr = (u8_t *)h + sizeof(*h);
/* AdvA, fill here at enable */
if (h->adv_addr) {
memcpy(ptr, ll_addr_get(pdu_adv->tx_addr, NULL),
BDADDR_SIZE);
}
/* TODO: TargetA, fill here at enable */
#endif /* CONFIG_BT_CTLR_ADV_EXT */
} else {
bool priv = false;
#if defined(CONFIG_BT_CTLR_PRIVACY)
/* Prepare whitelist and optionally resolving list */
ll_filters_adv_update(ll_adv.filter_policy);
if (ll_adv.own_addr_type == BT_ADDR_LE_PUBLIC_ID ||
ll_adv.own_addr_type == BT_ADDR_LE_RANDOM_ID) {
/* Look up the resolving list */
rl_idx = ll_rl_find(ll_adv.id_addr_type,
ll_adv.id_addr, NULL);
if (rl_idx != FILTER_IDX_NONE) {
/* Generate RPAs if required */
ll_rl_rpa_update(false);
}
ll_rl_pdu_adv_update(rl_idx, pdu_adv);
ll_rl_pdu_adv_update(rl_idx, pdu_scan);
priv = true;
}
#endif /* CONFIG_BT_CTLR_PRIVACY */
if (!priv) {
memcpy(&pdu_adv->payload.adv_ind.addr[0],
ll_addr_get(pdu_adv->tx_addr, NULL), BDADDR_SIZE);
memcpy(&pdu_scan->payload.scan_rsp.addr[0],
ll_addr_get(pdu_adv->tx_addr, NULL), BDADDR_SIZE);
}
}
#if defined(CONFIG_BT_CTLR_ADV_EXT)
status = radio_adv_enable(ll_adv.phy_p, ll_adv.interval,
ll_adv.chan_map, ll_adv.filter_policy,
rl_idx);
#else /* !CONFIG_BT_CTLR_ADV_EXT */
status = radio_adv_enable(ll_adv.interval, ll_adv.chan_map,
ll_adv.filter_policy, rl_idx);
#endif /* !CONFIG_BT_CTLR_ADV_EXT */
return status;
}
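/*
 * Illustrative call sequence only (not part of the original file, compiled
 * out). It assumes the legacy, non-CONFIG_BT_CTLR_ADV_EXT prototypes; the
 * parameter values are arbitrary examples.
 */
#if 0
static void ll_adv_usage_example(void)
{
	/* Flags AD structure: LE General Discoverable, BR/EDR not supported */
	static const u8_t ad[] = { 0x02, 0x01, 0x06 };

	/* 100 ms interval (0x00A0 * 0.625 ms), connectable undirected
	 * advertising, public own address, all three advertising channels,
	 * no filter policy.
	 */
	ll_adv_params_set(0x00A0, 0x00, 0x00, 0x00, NULL, 0x07, 0x00);
	ll_adv_data_set(sizeof(ad), ad);
	ll_adv_enable(1);
}
#endif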

View file

@@ -0,0 +1,25 @@
/*
* Copyright (c) 2017 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
struct ll_adv_set {
u8_t chan_map:3;
u8_t filter_policy:2;
#if defined(CONFIG_BT_CTLR_PRIVACY)
u8_t own_addr_type:2;
u8_t id_addr_type:1;
u8_t rl_idx;
u8_t id_addr[BDADDR_SIZE];
#endif /* CONFIG_BT_CTLR_PRIVACY */
#if defined(CONFIG_BT_CTLR_ADV_EXT)
u8_t phy_p:3;
u32_t interval;
#else /* !CONFIG_BT_CTLR_ADV_EXT */
u16_t interval;
#endif /* !CONFIG_BT_CTLR_ADV_EXT */
};
struct ll_adv_set *ll_adv_set_get(void);

View file

@@ -0,0 +1,938 @@
/*
* Copyright (c) 2017 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <string.h>
#include <zephyr.h>
#include <misc/byteorder.h>
#include <bluetooth/hci.h>
#include "util/util.h"
#include "util/mem.h"
#include "pdu.h"
#include "ctrl.h"
#include "ll.h"
#include "ll_adv.h"
#include "ll_filter.h"
#define ADDR_TYPE_ANON 0xFF
#define BT_DBG_ENABLED IS_ENABLED(CONFIG_BT_DEBUG_HCI_DRIVER)
#include "common/log.h"
#include "hal/debug.h"
#include "pdu.h"
/* Hardware whitelist */
static struct ll_filter wl_filter;
u8_t wl_anon;
#if defined(CONFIG_BT_CTLR_PRIVACY)
#include "common/rpa.h"
/* Whitelist peer list */
static struct {
u8_t taken:1;
u8_t id_addr_type:1;
u8_t rl_idx;
bt_addr_t id_addr;
} wl[WL_SIZE];
static u8_t rl_enable;
static struct rl_dev {
u8_t taken:1;
u8_t rpas_ready:1;
u8_t pirk:1;
u8_t lirk:1;
u8_t dev:1;
u8_t wl:1;
u8_t id_addr_type:1;
bt_addr_t id_addr;
u8_t local_irk[16];
u8_t pirk_idx;
bt_addr_t curr_rpa;
bt_addr_t peer_rpa;
bt_addr_t *local_rpa;
} rl[CONFIG_BT_CTLR_RL_SIZE];
static u8_t peer_irks[CONFIG_BT_CTLR_RL_SIZE][16];
static u8_t peer_irk_rl_ids[CONFIG_BT_CTLR_RL_SIZE];
static u8_t peer_irk_count;
static bt_addr_t local_rpas[CONFIG_BT_CTLR_RL_SIZE];
BUILD_ASSERT(ARRAY_SIZE(wl) < FILTER_IDX_NONE);
BUILD_ASSERT(ARRAY_SIZE(rl) < FILTER_IDX_NONE);
/* Hardware filter for the resolving list */
static struct ll_filter rl_filter;
#define DEFAULT_RPA_TIMEOUT_MS (900 * 1000)
u32_t rpa_timeout_ms;
s64_t rpa_last_ms;
struct k_delayed_work rpa_work;
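/* True when entry i of 'list' is in use, its stored address type matches the
 * LSB of 'type' and its 6-byte identity address matches 'addr'.
 */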
#define LIST_MATCH(list, i, type, addr) (list[i].taken && \
(list[i].id_addr_type == (type & 0x1)) && \
!memcmp(list[i].id_addr.val, addr, BDADDR_SIZE))
static void wl_clear(void)
{
for (int i = 0; i < WL_SIZE; i++) {
wl[i].taken = 0;
}
}
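/* Return the whitelist index matching (addr_type, addr), or FILTER_IDX_NONE.
 * When 'free' is non-NULL it is also set to the first unused slot, if any.
 */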
static u8_t wl_find(u8_t addr_type, u8_t *addr, u8_t *free)
{
int i;
if (free) {
*free = FILTER_IDX_NONE;
}
for (i = 0; i < WL_SIZE; i++) {
if (LIST_MATCH(wl, i, addr_type, addr)) {
return i;
} else if (free && !wl[i].taken && (*free == FILTER_IDX_NONE)) {
*free = i;
}
}
return FILTER_IDX_NONE;
}
static u32_t wl_add(bt_addr_le_t *id_addr)
{
u8_t i, j;
i = wl_find(id_addr->type, id_addr->a.val, &j);
/* Duplicate check */
if (i < ARRAY_SIZE(wl)) {
return BT_HCI_ERR_INVALID_PARAM;
} else if (j >= ARRAY_SIZE(wl)) {
return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
}
i = j;
wl[i].id_addr_type = id_addr->type & 0x1;
bt_addr_copy(&wl[i].id_addr, &id_addr->a);
/* Get index to Resolving List if applicable */
j = ll_rl_find(id_addr->type, id_addr->a.val, NULL);
if (j < ARRAY_SIZE(rl)) {
wl[i].rl_idx = j;
rl[j].wl = 1;
} else {
wl[i].rl_idx = FILTER_IDX_NONE;
}
wl[i].taken = 1;
return 0;
}
static u32_t wl_remove(bt_addr_le_t *id_addr)
{
/* find the device and mark it as empty */
u8_t i = wl_find(id_addr->type, id_addr->a.val, NULL);
if (i < ARRAY_SIZE(wl)) {
u8_t j = wl[i].rl_idx;
if (j < ARRAY_SIZE(rl)) {
rl[j].wl = 0;
}
wl[i].taken = 0;
return 0;
}
return BT_HCI_ERR_UNKNOWN_CONN_ID;
}
#endif /* CONFIG_BT_CTLR_PRIVACY */
static void filter_clear(struct ll_filter *filter)
{
filter->enable_bitmask = 0;
filter->addr_type_bitmask = 0;
}
static void filter_insert(struct ll_filter *filter, int index, u8_t addr_type,
u8_t *bdaddr)
{
filter->enable_bitmask |= BIT(index);
filter->addr_type_bitmask |= ((addr_type & 0x01) << index);
memcpy(&filter->bdaddr[index][0], bdaddr, BDADDR_SIZE);
}
#if !defined(CONFIG_BT_CTLR_PRIVACY)
static u32_t filter_add(struct ll_filter *filter, u8_t addr_type, u8_t *bdaddr)
{
int index;
if (filter->enable_bitmask == 0xFF) {
return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
}
for (index = 0;
(filter->enable_bitmask & BIT(index));
index++) {
}
filter_insert(filter, index, addr_type, bdaddr);
return 0;
}
static u32_t filter_remove(struct ll_filter *filter, u8_t addr_type,
u8_t *bdaddr)
{
int index;
if (!filter->enable_bitmask) {
return BT_HCI_ERR_INVALID_PARAM;
}
index = 8;
while (index--) {
if ((filter->enable_bitmask & BIT(index)) &&
(((filter->addr_type_bitmask >> index) & 0x01) ==
(addr_type & 0x01)) &&
!memcmp(filter->bdaddr[index], bdaddr, BDADDR_SIZE)) {
filter->enable_bitmask &= ~BIT(index);
filter->addr_type_bitmask &= ~BIT(index);
return 0;
}
}
return BT_HCI_ERR_INVALID_PARAM;
}
#endif
#if defined(CONFIG_BT_CTLR_PRIVACY)
bt_addr_t *ctrl_lrpa_get(u8_t rl_idx)
{
if ((rl_idx >= ARRAY_SIZE(rl)) || !rl[rl_idx].lirk ||
!rl[rl_idx].rpas_ready) {
return NULL;
}
return rl[rl_idx].local_rpa;
}
u8_t *ctrl_irks_get(u8_t *count)
{
*count = peer_irk_count;
return (u8_t *)peer_irks;
}
u8_t ctrl_rl_idx(bool whitelist, u8_t devmatch_id)
{
u8_t i;
if (whitelist) {
LL_ASSERT(devmatch_id < ARRAY_SIZE(wl));
LL_ASSERT(wl[devmatch_id].taken);
i = wl[devmatch_id].rl_idx;
} else {
LL_ASSERT(devmatch_id < ARRAY_SIZE(rl));
i = devmatch_id;
LL_ASSERT(rl[i].taken);
}
return i;
}
u8_t ctrl_rl_irk_idx(u8_t irkmatch_id)
{
u8_t i;
LL_ASSERT(irkmatch_id < peer_irk_count);
i = peer_irk_rl_ids[irkmatch_id];
LL_ASSERT(i < CONFIG_BT_CTLR_RL_SIZE);
LL_ASSERT(rl[i].taken);
return i;
}
bool ctrl_irk_whitelisted(u8_t rl_idx)
{
if (rl_idx >= ARRAY_SIZE(rl)) {
return false;
}
LL_ASSERT(rl[rl_idx].taken);
return rl[rl_idx].wl;
}
#endif
struct ll_filter *ctrl_filter_get(bool whitelist)
{
#if defined(CONFIG_BT_CTLR_PRIVACY)
if (whitelist) {
return &wl_filter;
}
return &rl_filter;
#else
LL_ASSERT(whitelist);
return &wl_filter;
#endif
}
u32_t ll_wl_size_get(void)
{
return WL_SIZE;
}
u32_t ll_wl_clear(void)
{
if (radio_adv_filter_pol_get() || (radio_scan_filter_pol_get() & 0x1)) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
#if defined(CONFIG_BT_CTLR_PRIVACY)
wl_clear();
#else
filter_clear(&wl_filter);
#endif /* CONFIG_BT_CTLR_PRIVACY */
wl_anon = 0;
return 0;
}
u32_t ll_wl_add(bt_addr_le_t *addr)
{
if (radio_adv_filter_pol_get() || (radio_scan_filter_pol_get() & 0x1)) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
if (addr->type == ADDR_TYPE_ANON) {
wl_anon = 1;
return 0;
}
#if defined(CONFIG_BT_CTLR_PRIVACY)
return wl_add(addr);
#else
return filter_add(&wl_filter, addr->type, addr->a.val);
#endif /* CONFIG_BT_CTLR_PRIVACY */
}
u32_t ll_wl_remove(bt_addr_le_t *addr)
{
if (radio_adv_filter_pol_get() || (radio_scan_filter_pol_get() & 0x1)) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
if (addr->type == ADDR_TYPE_ANON) {
wl_anon = 0;
return 0;
}
#if defined(CONFIG_BT_CTLR_PRIVACY)
return wl_remove(addr);
#else
return filter_remove(&wl_filter, addr->type, addr->a.val);
#endif /* CONFIG_BT_CTLR_PRIVACY */
}
#if defined(CONFIG_BT_CTLR_PRIVACY)
static void filter_wl_update(void)
{
u8_t i;
/* Populate filter from wl peers */
filter_clear(&wl_filter);
for (i = 0; i < WL_SIZE; i++) {
u8_t j;
if (!wl[i].taken) {
continue;
}
j = wl[i].rl_idx;
if (!rl_enable || j >= ARRAY_SIZE(rl) || !rl[j].pirk ||
rl[j].dev) {
filter_insert(&wl_filter, i, wl[i].id_addr_type,
wl[i].id_addr.val);
}
}
}
static void filter_rl_update(void)
{
u8_t i;
/* No whitelist: populate filter from rl peers */
filter_clear(&rl_filter);
for (i = 0; i < CONFIG_BT_CTLR_RL_SIZE; i++) {
if (rl[i].taken) {
filter_insert(&rl_filter, i, rl[i].id_addr_type,
rl[i].id_addr.val);
}
}
}
void ll_filters_adv_update(u8_t adv_fp)
{
/* enabling advertising */
if (adv_fp && !(radio_scan_filter_pol_get() & 0x1)) {
/* whitelist not in use, update whitelist */
filter_wl_update();
}
if (rl_enable && !radio_scan_is_enabled()) {
/* rl not in use, update resolving list LUT */
filter_rl_update();
}
}
void ll_filters_scan_update(u8_t scan_fp)
{
/* enabling scanning */
if ((scan_fp & 0x1) && !radio_adv_filter_pol_get()) {
/* whitelist not in use, update whitelist */
filter_wl_update();
}
if (rl_enable && !radio_adv_is_enabled()) {
/* rl not in use, update resolving list LUT */
filter_rl_update();
}
}
u8_t ll_rl_find(u8_t id_addr_type, u8_t *id_addr, u8_t *free)
{
u8_t i;
if (free) {
*free = FILTER_IDX_NONE;
}
for (i = 0; i < CONFIG_BT_CTLR_RL_SIZE; i++) {
if (LIST_MATCH(rl, i, id_addr_type, id_addr)) {
return i;
} else if (free && !rl[i].taken && (*free == FILTER_IDX_NONE)) {
*free = i;
}
}
return FILTER_IDX_NONE;
}
bool ctrl_rl_idx_allowed(u8_t irkmatch_ok, u8_t rl_idx)
{
/* If AR is disabled or we don't know the device or we matched an IRK
* then we're all set.
*/
if (!rl_enable || rl_idx >= ARRAY_SIZE(rl) || irkmatch_ok) {
return true;
}
LL_ASSERT(rl_idx < CONFIG_BT_CTLR_RL_SIZE);
LL_ASSERT(rl[rl_idx].taken);
return !rl[rl_idx].pirk || rl[rl_idx].dev;
}
void ll_rl_id_addr_get(u8_t rl_idx, u8_t *id_addr_type, u8_t *id_addr)
{
LL_ASSERT(rl_idx < CONFIG_BT_CTLR_RL_SIZE);
LL_ASSERT(rl[rl_idx].taken);
*id_addr_type = rl[rl_idx].id_addr_type;
memcpy(id_addr, rl[rl_idx].id_addr.val, BDADDR_SIZE);
}
bool ctrl_rl_addr_allowed(u8_t id_addr_type, u8_t *id_addr, u8_t *rl_idx)
{
u8_t i, j;
/* If AR is disabled or we matched an IRK then we're all set. No hw
* filters are used in this case.
*/
if (!rl_enable || *rl_idx != FILTER_IDX_NONE) {
return true;
}
for (i = 0; i < CONFIG_BT_CTLR_RL_SIZE; i++) {
if (rl[i].taken && (rl[i].id_addr_type == id_addr_type)) {
u8_t *addr = rl[i].id_addr.val;
for (j = 0; j < BDADDR_SIZE; j++) {
if (addr[j] != id_addr[j]) {
break;
}
}
if (j == BDADDR_SIZE) {
*rl_idx = i;
return !rl[i].pirk || rl[i].dev;
}
}
}
return true;
}
bool ctrl_rl_addr_resolve(u8_t id_addr_type, u8_t *id_addr, u8_t rl_idx)
{
/* Unable to resolve if AR is disabled, no RL entry or no local IRK */
if (!rl_enable || rl_idx >= ARRAY_SIZE(rl) || !rl[rl_idx].lirk) {
return false;
}
if ((id_addr_type != 0) && ((id_addr[5] & 0xc0) == 0x40)) {
return bt_rpa_irk_matches(rl[rl_idx].local_irk,
(bt_addr_t *)id_addr);
}
return false;
}
bool ctrl_rl_enabled(void)
{
return rl_enable;
}
#if defined(CONFIG_BT_BROADCASTER)
void ll_rl_pdu_adv_update(u8_t idx, struct pdu_adv *pdu)
{
u8_t *adva = pdu->type == PDU_ADV_TYPE_SCAN_RSP ?
&pdu->payload.scan_rsp.addr[0] :
&pdu->payload.adv_ind.addr[0];
struct ll_adv_set *ll_adv = ll_adv_set_get();
/* AdvA */
if (idx < ARRAY_SIZE(rl) && rl[idx].lirk) {
LL_ASSERT(rl[idx].rpas_ready);
pdu->tx_addr = 1;
memcpy(adva, rl[idx].local_rpa->val, BDADDR_SIZE);
} else {
pdu->tx_addr = ll_adv->own_addr_type & 0x1;
ll_addr_get(ll_adv->own_addr_type & 0x1, adva);
}
/* TargetA */
if (pdu->type == PDU_ADV_TYPE_DIRECT_IND) {
if (idx < ARRAY_SIZE(rl) && rl[idx].pirk) {
pdu->rx_addr = 1;
memcpy(&pdu->payload.direct_ind.tgt_addr[0],
rl[idx].peer_rpa.val, BDADDR_SIZE);
} else {
pdu->rx_addr = ll_adv->id_addr_type;
memcpy(&pdu->payload.direct_ind.tgt_addr[0],
ll_adv->id_addr, BDADDR_SIZE);
}
}
}
static void rpa_adv_refresh(void)
{
struct radio_adv_data *radio_adv_data;
struct ll_adv_set *ll_adv;
struct pdu_adv *prev;
struct pdu_adv *pdu;
u8_t last;
u8_t idx;
ll_adv = ll_adv_set_get();
if (ll_adv->own_addr_type != BT_ADDR_LE_PUBLIC_ID &&
ll_adv->own_addr_type != BT_ADDR_LE_RANDOM_ID) {
return;
}
radio_adv_data = radio_adv_data_get();
prev = (struct pdu_adv *)&radio_adv_data->data[radio_adv_data->last][0];
/* use the last index in double buffer, */
if (radio_adv_data->first == radio_adv_data->last) {
last = radio_adv_data->last + 1;
if (last == DOUBLE_BUFFER_SIZE) {
last = 0;
}
} else {
last = radio_adv_data->last;
}
/* update adv pdu fields. */
pdu = (struct pdu_adv *)&radio_adv_data->data[last][0];
pdu->type = prev->type;
pdu->rfu = 0;
if (IS_ENABLED(CONFIG_BT_CTLR_CHAN_SEL_2)) {
pdu->chan_sel = prev->chan_sel;
} else {
pdu->chan_sel = 0;
}
idx = ll_rl_find(ll_adv->id_addr_type, ll_adv->id_addr, NULL);
LL_ASSERT(idx < ARRAY_SIZE(rl));
ll_rl_pdu_adv_update(idx, pdu);
memcpy(&pdu->payload.adv_ind.data[0], &prev->payload.adv_ind.data[0],
prev->len - BDADDR_SIZE);
pdu->len = prev->len;
/* commit the update so controller picks it. */
radio_adv_data->last = last;
}
#endif
static void rl_clear(void)
{
for (u8_t i = 0; i < CONFIG_BT_CTLR_RL_SIZE; i++) {
rl[i].taken = 0;
}
peer_irk_count = 0;
}
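/* Filter/resolving list contents may only be modified while both
 * advertising and scanning are disabled. Returns 0 when the operation
 * must be disallowed, non-zero (including -1 when check_ar is set and
 * address resolution is off) when it may proceed.
 */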
static int rl_access_check(bool check_ar)
{
if (check_ar) {
/* If address resolution is disabled, allow immediately */
if (!rl_enable) {
return -1;
}
}
return (radio_adv_is_enabled() || radio_scan_is_enabled()) ? 0 : 1;
}
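/* Regenerate resolvable private addresses: all taken entries on
 * timeout, on first run, or when the RPA timeout period has elapsed;
 * otherwise only entries whose RPAs are not yet ready. On timeout,
 * also refresh the advertising PDU if advertising is active.
 */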
void ll_rl_rpa_update(bool timeout)
{
u8_t i;
int err;
s64_t now = k_uptime_get();
bool all = timeout || (rpa_last_ms == -1) ||
(now - rpa_last_ms >= rpa_timeout_ms);
BT_DBG("");
for (i = 0; i < CONFIG_BT_CTLR_RL_SIZE; i++) {
if ((rl[i].taken) && (all || !rl[i].rpas_ready)) {
if (rl[i].pirk) {
u8_t irk[16];
/* TODO: move this swap to the driver level */
sys_memcpy_swap(irk, peer_irks[rl[i].pirk_idx],
16);
err = bt_rpa_create(irk, &rl[i].peer_rpa);
LL_ASSERT(!err);
}
if (rl[i].lirk) {
bt_addr_t rpa;
err = bt_rpa_create(rl[i].local_irk, &rpa);
LL_ASSERT(!err);
/* pointer read/write assumed to be atomic
* so that if ISR fires the local_rpa pointer
* will always point to a valid full RPA
*/
rl[i].local_rpa = &rpa;
bt_addr_copy(&local_rpas[i], &rpa);
rl[i].local_rpa = &local_rpas[i];
}
rl[i].rpas_ready = 1;
}
}
if (all) {
rpa_last_ms = now;
}
if (timeout) {
#if defined(CONFIG_BT_BROADCASTER)
if (radio_adv_is_enabled()) {
rpa_adv_refresh();
}
#endif
}
}
static void rpa_timeout(struct k_work *work)
{
ll_rl_rpa_update(true);
k_delayed_work_submit(&rpa_work, rpa_timeout_ms);
}
static void rpa_refresh_start(void)
{
if (!rl_enable) {
return;
}
BT_DBG("");
k_delayed_work_submit(&rpa_work, rpa_timeout_ms);
}
static void rpa_refresh_stop(void)
{
if (!rl_enable) {
return;
}
k_delayed_work_cancel(&rpa_work);
}
void ll_adv_scan_state_cb(u8_t bm)
{
if (bm) {
rpa_refresh_start();
} else {
rpa_refresh_stop();
}
}
u32_t ll_rl_size_get(void)
{
return CONFIG_BT_CTLR_RL_SIZE;
}
u32_t ll_rl_clear(void)
{
if (!rl_access_check(false)) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
rl_clear();
return 0;
}
u32_t ll_rl_add(bt_addr_le_t *id_addr, const u8_t pirk[16],
const u8_t lirk[16])
{
u8_t i, j;
if (!rl_access_check(false)) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
i = ll_rl_find(id_addr->type, id_addr->a.val, &j);
/* Duplicate check */
if (i < ARRAY_SIZE(rl)) {
return BT_HCI_ERR_INVALID_PARAM;
} else if (j >= ARRAY_SIZE(rl)) {
return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
}
/* Device not found but empty slot found */
i = j;
bt_addr_copy(&rl[i].id_addr, &id_addr->a);
rl[i].id_addr_type = id_addr->type & 0x1;
rl[i].pirk = mem_nz((u8_t *)pirk, 16);
rl[i].lirk = mem_nz((u8_t *)lirk, 16);
if (rl[i].pirk) {
/* cross-reference */
rl[i].pirk_idx = peer_irk_count;
peer_irk_rl_ids[peer_irk_count] = i;
/* AAR requires big-endian IRKs */
sys_memcpy_swap(peer_irks[peer_irk_count++], pirk, 16);
}
if (rl[i].lirk) {
memcpy(rl[i].local_irk, lirk, 16);
rl[i].local_rpa = NULL;
}
memset(rl[i].curr_rpa.val, 0x00, sizeof(rl[i].curr_rpa));
rl[i].rpas_ready = 0;
/* Default to Network Privacy */
rl[i].dev = 0;
/* Add reference to a whitelist entry */
j = wl_find(id_addr->type, id_addr->a.val, NULL);
if (j < ARRAY_SIZE(wl)) {
wl[j].rl_idx = i;
rl[i].wl = 1;
} else {
rl[i].wl = 0;
}
rl[i].taken = 1;
return 0;
}
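/* HCI LE Remove Device From Resolving List: free the entry, compact
 * the peer IRK array by moving the last IRK into the freed slot, and
 * drop any whitelist cross-reference.
 */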
u32_t ll_rl_remove(bt_addr_le_t *id_addr)
{
u8_t i;
if (!rl_access_check(false)) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
/* find the device and mark it as empty */
i = ll_rl_find(id_addr->type, id_addr->a.val, NULL);
if (i < ARRAY_SIZE(rl)) {
u8_t j, k;
if (rl[i].pirk) {
/* Swap with last item */
u8_t pi = rl[i].pirk_idx, pj = peer_irk_count - 1;
if (pj && pi != pj) {
memcpy(peer_irks[pi], peer_irks[pj], 16);
for (k = 0;
k < CONFIG_BT_CTLR_RL_SIZE;
k++) {
if (rl[k].taken && rl[k].pirk &&
rl[k].pirk_idx == pj) {
rl[k].pirk_idx = pi;
peer_irk_rl_ids[pi] = k;
break;
}
}
}
peer_irk_count--;
}
/* Check if referenced by a whitelist entry */
j = wl_find(id_addr->type, id_addr->a.val, NULL);
if (j < ARRAY_SIZE(wl)) {
wl[j].rl_idx = FILTER_IDX_NONE;
}
rl[i].taken = 0;
return 0;
}
return BT_HCI_ERR_UNKNOWN_CONN_ID;
}
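/* Record the latest peer RPA seen for a resolving list entry, looked
 * up either by identity address or directly by rl_idx; ignored unless
 * crpa is a resolvable private address.
 */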
void ll_rl_crpa_set(u8_t id_addr_type, u8_t *id_addr, u8_t rl_idx, u8_t *crpa)
{
if ((crpa[5] & 0xc0) == 0x40) {
if (id_addr) {
/* find the device and return its RPA */
rl_idx = ll_rl_find(id_addr_type, id_addr, NULL);
}
if (rl_idx < ARRAY_SIZE(rl) && rl[rl_idx].taken) {
memcpy(rl[rl_idx].curr_rpa.val, crpa,
sizeof(bt_addr_t));
}
}
}
u32_t ll_rl_crpa_get(bt_addr_le_t *id_addr, bt_addr_t *crpa)
{
u8_t i;
/* find the device and return its RPA */
i = ll_rl_find(id_addr->type, id_addr->a.val, NULL);
if (i < ARRAY_SIZE(rl) &&
mem_nz(rl[i].curr_rpa.val, sizeof(rl[i].curr_rpa.val))) {
bt_addr_copy(crpa, &rl[i].curr_rpa);
return 0;
}
return BT_HCI_ERR_UNKNOWN_CONN_ID;
}
u32_t ll_rl_lrpa_get(bt_addr_le_t *id_addr, bt_addr_t *lrpa)
{
u8_t i;
/* find the device and return the local RPA */
i = ll_rl_find(id_addr->type, id_addr->a.val, NULL);
if (i < ARRAY_SIZE(rl)) {
bt_addr_copy(lrpa, rl[i].local_rpa);
return 0;
}
return BT_HCI_ERR_UNKNOWN_CONN_ID;
}
u32_t ll_rl_enable(u8_t enable)
{
if (!rl_access_check(false)) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
switch (enable) {
case BT_HCI_ADDR_RES_DISABLE:
rl_enable = 0;
break;
case BT_HCI_ADDR_RES_ENABLE:
rl_enable = 1;
break;
default:
return BT_HCI_ERR_INVALID_PARAM;
}
return 0;
}
void ll_rl_timeout_set(u16_t timeout)
{
rpa_timeout_ms = timeout * 1000;
}
u32_t ll_priv_mode_set(bt_addr_le_t *id_addr, u8_t mode)
{
u8_t i;
if (!rl_access_check(false)) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
/* find the device and update its privacy mode */
i = ll_rl_find(id_addr->type, id_addr->a.val, NULL);
if (i < ARRAY_SIZE(rl)) {
switch (mode) {
case BT_HCI_LE_PRIVACY_MODE_NETWORK:
rl[i].dev = 0;
break;
case BT_HCI_LE_PRIVACY_MODE_DEVICE:
rl[i].dev = 1;
break;
default:
return BT_HCI_ERR_INVALID_PARAM;
}
} else {
return BT_HCI_ERR_UNKNOWN_CONN_ID;
}
return 0;
}
#endif /* CONFIG_BT_CTLR_PRIVACY */
void ll_filter_reset(bool init)
{
wl_anon = 0;
#if defined(CONFIG_BT_CTLR_PRIVACY)
wl_clear();
rl_enable = 0;
rpa_timeout_ms = DEFAULT_RPA_TIMEOUT_MS;
rpa_last_ms = -1;
rl_clear();
if (init) {
k_delayed_work_init(&rpa_work, rpa_timeout);
} else {
k_delayed_work_cancel(&rpa_work);
}
#else
filter_clear(&wl_filter);
#endif /* CONFIG_BT_CTLR_PRIVACY */
}

View file

@ -0,0 +1,35 @@
/*
* Copyright (c) 2017 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#define WL_SIZE 8
#define FILTER_IDX_NONE 0xFF
struct ll_filter {
u8_t enable_bitmask;
u8_t addr_type_bitmask;
u8_t bdaddr[WL_SIZE][BDADDR_SIZE];
};
void ll_filter_reset(bool init);
void ll_filters_adv_update(u8_t adv_fp);
void ll_filters_scan_update(u8_t scan_fp);
struct ll_filter *ctrl_filter_get(bool whitelist);
bt_addr_t *ctrl_lrpa_get(u8_t rl_idx);
u8_t *ctrl_irks_get(u8_t *count);
u8_t ctrl_rl_idx(bool whitelist, u8_t devmatch_id);
u8_t ctrl_rl_irk_idx(u8_t irkmatch_id);
bool ctrl_irk_whitelisted(u8_t rl_idx);
bool ctrl_rl_enabled(void);
void ll_rl_rpa_update(bool timeout);
u8_t ll_rl_find(u8_t id_addr_type, u8_t *id_addr, u8_t *free);
bool ctrl_rl_addr_allowed(u8_t id_addr_type, u8_t *id_addr, u8_t *rl_idx);
bool ctrl_rl_addr_resolve(u8_t id_addr_type, u8_t *id_addr, u8_t rl_idx);
bool ctrl_rl_idx_allowed(u8_t irkmatch_ok, u8_t rl_idx);
void ll_rl_pdu_adv_update(u8_t idx, struct pdu_adv *pdu);

View file

@ -0,0 +1,60 @@
/*
* Copyright (c) 2016-2017 Nordic Semiconductor ASA
* Copyright (c) 2016 Vinayak Kariappa Chettimada
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr.h>
#include <bluetooth/hci.h>
#include "util/util.h"
#include "pdu.h"
#include "ctrl.h"
#include "ll.h"
#include "ll_filter.h"
u32_t ll_create_connection(u16_t scan_interval, u16_t scan_window,
u8_t filter_policy, u8_t peer_addr_type,
u8_t *peer_addr, u8_t own_addr_type,
u16_t interval, u16_t latency,
u16_t timeout)
{
u32_t status;
u8_t rpa_gen = 0;
u8_t rl_idx = FILTER_IDX_NONE;
if (radio_scan_is_enabled()) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
status = radio_connect_enable(peer_addr_type, peer_addr, interval,
latency, timeout);
if (status) {
return status;
}
#if defined(CONFIG_BT_CTLR_PRIVACY)
ll_filters_scan_update(filter_policy);
if (!filter_policy && ctrl_rl_enabled()) {
/* Look up the resolving list */
rl_idx = ll_rl_find(peer_addr_type, peer_addr, NULL);
}
if (own_addr_type == BT_ADDR_LE_PUBLIC_ID ||
own_addr_type == BT_ADDR_LE_RANDOM_ID) {
/* Generate RPAs if required */
ll_rl_rpa_update(false);
own_addr_type &= 0x1;
rpa_gen = 1;
}
#endif
return radio_scan_enable(0, own_addr_type,
ll_addr_get(own_addr_type, NULL),
scan_interval, scan_window,
filter_policy, rpa_gen, rl_idx);
}

View file

@ -0,0 +1,91 @@
/*
* Copyright (c) 2016-2017 Nordic Semiconductor ASA
* Copyright (c) 2016 Vinayak Kariappa Chettimada
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr.h>
#include <bluetooth/hci.h>
#include "util/util.h"
#include "pdu.h"
#include "ctrl.h"
#include "ll.h"
#include "ll_filter.h"
static struct {
u16_t interval;
u16_t window;
#if defined(CONFIG_BT_CTLR_ADV_EXT)
u8_t type:4;
#else /* !CONFIG_BT_CTLR_ADV_EXT */
u8_t type:1;
#endif /* !CONFIG_BT_CTLR_ADV_EXT */
u8_t own_addr_type:2;
u8_t filter_policy:2;
} ll_scan;
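/* HCI LE Set Scan Parameters: cache the requested parameters; they
 * are applied when scanning is enabled. Disallowed while scanning is
 * active.
 */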
u32_t ll_scan_params_set(u8_t type, u16_t interval, u16_t window,
u8_t own_addr_type, u8_t filter_policy)
{
if (radio_scan_is_enabled()) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
/* type value:
* 0000b - legacy 1M passive
* 0001b - legacy 1M active
* 0010b - Ext. 1M passive
* 0011b - Ext. 1M active
* 0100b - invalid
* 0101b - invalid
* 0110b - invalid
* 0111b - invalid
* 1000b - Ext. Coded passive
* 1001b - Ext. Coded active
*/
ll_scan.type = type;
ll_scan.interval = interval;
ll_scan.window = window;
ll_scan.own_addr_type = own_addr_type;
ll_scan.filter_policy = filter_policy;
return 0;
}
u32_t ll_scan_enable(u8_t enable)
{
u32_t status;
u8_t rpa_gen = 0;
if (!enable) {
return radio_scan_disable();
} else if (radio_scan_is_enabled()) {
/* Duplicate filtering is processed in the HCI layer */
return 0;
}
#if defined(CONFIG_BT_CTLR_PRIVACY)
ll_filters_scan_update(ll_scan.filter_policy);
if ((ll_scan.type & 0x1) &&
(ll_scan.own_addr_type == BT_ADDR_LE_PUBLIC_ID ||
ll_scan.own_addr_type == BT_ADDR_LE_RANDOM_ID)) {
/* Generate RPAs if required */
ll_rl_rpa_update(false);
rpa_gen = 1;
}
#endif
status = radio_scan_enable(ll_scan.type, ll_scan.own_addr_type & 0x1,
ll_addr_get(ll_scan.own_addr_type & 0x1,
NULL),
ll_scan.interval, ll_scan.window,
ll_scan.filter_policy, rpa_gen,
FILTER_IDX_NONE);
return status;
}

View file

@ -0,0 +1,345 @@
/*
* Copyright (c) 2017 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stddef.h>
#include <string.h>
#include <toolchain.h>
#include <zephyr/types.h>
#include <soc.h>
#include <clock_control.h>
#include "hal/cpu.h"
#include "hal/cntr.h"
#include "hal/ccm.h"
#include "hal/radio.h"
#include "util/util.h"
#include "ll_sw/pdu.h"
#include "ll_sw/ctrl.h"
#include "ll_test.h"
#define CNTR_MIN_DELTA 3
static const u32_t test_sync_word = 0x71764129;
static u8_t test_phy;
static u8_t test_phy_flags;
static u16_t test_num_rx;
static bool started;
/* NOTE: The PRBS9 sequence used as packet payload.
* The bytes in the sequence are in the right order, but the bits of each byte
* in the array are reverse from that found by running the PRBS9 algorithm. This
* is done to transmit MSbit first on air.
*/
static const u8_t prbs9[] = {
0xFF, 0xC1, 0xFB, 0xE8, 0x4C, 0x90, 0x72, 0x8B,
0xE7, 0xB3, 0x51, 0x89, 0x63, 0xAB, 0x23, 0x23,
0x02, 0x84, 0x18, 0x72, 0xAA, 0x61, 0x2F, 0x3B,
0x51, 0xA8, 0xE5, 0x37, 0x49, 0xFB, 0xC9, 0xCA,
0x0C, 0x18, 0x53, 0x2C, 0xFD, 0x45, 0xE3, 0x9A,
0xE6, 0xF1, 0x5D, 0xB0, 0xB6, 0x1B, 0xB4, 0xBE,
0x2A, 0x50, 0xEA, 0xE9, 0x0E, 0x9C, 0x4B, 0x5E,
0x57, 0x24, 0xCC, 0xA1, 0xB7, 0x59, 0xB8, 0x87,
0xFF, 0xE0, 0x7D, 0x74, 0x26, 0x48, 0xB9, 0xC5,
0xF3, 0xD9, 0xA8, 0xC4, 0xB1, 0xD5, 0x91, 0x11,
0x01, 0x42, 0x0C, 0x39, 0xD5, 0xB0, 0x97, 0x9D,
0x28, 0xD4, 0xF2, 0x9B, 0xA4, 0xFD, 0x64, 0x65,
0x06, 0x8C, 0x29, 0x96, 0xFE, 0xA2, 0x71, 0x4D,
0xF3, 0xF8, 0x2E, 0x58, 0xDB, 0x0D, 0x5A, 0x5F,
0x15, 0x28, 0xF5, 0x74, 0x07, 0xCE, 0x25, 0xAF,
0x2B, 0x12, 0xE6, 0xD0, 0xDB, 0x2C, 0xDC, 0xC3,
0x7F, 0xF0, 0x3E, 0x3A, 0x13, 0xA4, 0xDC, 0xE2,
0xF9, 0x6C, 0x54, 0xE2, 0xD8, 0xEA, 0xC8, 0x88,
0x00, 0x21, 0x86, 0x9C, 0x6A, 0xD8, 0xCB, 0x4E,
0x14, 0x6A, 0xF9, 0x4D, 0xD2, 0x7E, 0xB2, 0x32,
0x03, 0xC6, 0x14, 0x4B, 0x7F, 0xD1, 0xB8, 0xA6,
0x79, 0x7C, 0x17, 0xAC, 0xED, 0x06, 0xAD, 0xAF,
0x0A, 0x94, 0x7A, 0xBA, 0x03, 0xE7, 0x92, 0xD7,
0x15, 0x09, 0x73, 0xE8, 0x6D, 0x16, 0xEE, 0xE1,
0x3F, 0x78, 0x1F, 0x9D, 0x09, 0x52, 0x6E, 0xF1,
0x7C, 0x36, 0x2A, 0x71, 0x6C, 0x75, 0x64, 0x44,
0x80, 0x10, 0x43, 0x4E, 0x35, 0xEC, 0x65, 0x27,
0x0A, 0xB5, 0xFC, 0x26, 0x69, 0x3F, 0x59, 0x99,
0x01, 0x63, 0x8A, 0xA5, 0xBF, 0x68, 0x5C, 0xD3,
0x3C, 0xBE, 0x0B, 0xD6, 0x76, 0x83, 0xD6, 0x57,
0x05, 0x4A, 0x3D, 0xDD, 0x81, 0x73, 0xC9, 0xEB,
0x8A, 0x84, 0x39, 0xF4, 0x36, 0x0B, 0xF7};
/* TODO: fill correct prbs15 */
static const u8_t prbs15[255] = { 0x00, };
static u8_t tx_req;
static u8_t volatile tx_ack;
static void isr_tx(void)
{
u32_t l, i, s, t;
/* Clear radio status and events */
radio_status_reset();
radio_tmr_status_reset();
#if defined(CONFIG_BT_CTLR_GPIO_PA_PIN)
radio_gpio_pa_lna_disable();
#endif /* CONFIG_BT_CTLR_GPIO_PA_PIN */
/* Exit if no new Tx is requested (test ended); acknowledge and return */
if (((tx_req - tx_ack) & 0x01) == 0) {
tx_ack = tx_req;
return;
}
/* LE Test Packet Interval */
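/* Packet interval per the LE test mode formula:
 * I(L) = ceil((L + 249) / 625) * 625 us, with L the packet air time
 * in us; adding 624 before the integer division implements the
 * ceiling.
 */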
l = radio_tmr_end_get() - radio_tmr_ready_get();
i = ((l + 249 + 624) / 625) * 625;
t = radio_tmr_end_get() - l + i;
t -= radio_tx_ready_delay_get(test_phy, test_phy_flags);
/* Set timer capture in the future. */
radio_tmr_sample();
s = radio_tmr_sample_get();
while (t < s) {
t += 625;
}
/* Setup next Tx */
radio_switch_complete_and_disable();
radio_tmr_start_us(1, t);
radio_tmr_aa_capture();
radio_tmr_end_capture();
/* TODO: check for probable stale timer capture being set */
#if defined(CONFIG_BT_CTLR_GPIO_PA_PIN)
radio_gpio_pa_setup();
radio_gpio_pa_lna_enable(t + radio_tx_ready_delay_get(test_phy,
test_phy_flags) -
CONFIG_BT_CTLR_GPIO_PA_OFFSET);
#endif /* CONFIG_BT_CTLR_GPIO_PA_PIN */
}
static void isr_rx(void)
{
u8_t crc_ok = 0;
u8_t trx_done;
/* Read radio status and events */
trx_done = radio_is_done();
if (trx_done) {
crc_ok = radio_crc_is_valid();
}
/* Clear radio status and events */
radio_status_reset();
radio_tmr_status_reset();
/* Exit if radio disabled */
if (!trx_done) {
return;
}
/* Setup next Rx */
radio_switch_complete_and_rx(test_phy);
/* Count Rx-ed packets */
if (crc_ok) {
test_num_rx++;
}
}
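/* Common Direct Test Mode setup: start the coarse timer and HF clock,
 * reset the radio, install the Tx/Rx ISR, and configure PHY, access
 * address, CRC and packet parameters. Fails if a test is already
 * running.
 */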
static u32_t init(u8_t chan, u8_t phy, void (*isr)(void))
{
struct device *hf_clock;
if (started) {
return 1;
}
/* start coarse timer */
cntr_start();
/* Setup resources required by Radio */
hf_clock = radio_hf_clock_get();
clock_control_on(hf_clock, (void *)1); /* start clock, blocking. */
/* Reset Radio h/w */
radio_reset();
radio_isr_set(isr);
/* Store value needed in Tx/Rx ISR */
if (phy < 0x04) {
test_phy = BIT(phy - 1);
test_phy_flags = 1;
} else {
test_phy = BIT(2);
test_phy_flags = 0;
}
/* Setup Radio in Tx/Rx */
/* NOTE: No whitening in test mode. */
radio_phy_set(test_phy, test_phy_flags);
radio_tmr_tifs_set(150);
radio_tx_power_set(0);
radio_freq_chan_set((chan << 1) + 2);
radio_aa_set((u8_t *)&test_sync_word);
radio_crc_configure(0x65b, 0x555555);
radio_pkt_configure(8, 255, (test_phy << 1));
return 0;
}
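/* HCI LE Transmitter Test: validate the payload type and PHY, fill
 * the scratch PDU with the requested pattern, and schedule the first
 * transmission; subsequent packets are chained from isr_tx().
 */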
u32_t ll_test_tx(u8_t chan, u8_t len, u8_t type, u8_t phy)
{
u32_t start_us;
u8_t *payload;
u8_t *pdu;
u32_t err;
if ((type > 0x07) || !phy || (phy > 0x04)) {
return 1;
}
err = init(chan, phy, isr_tx);
if (err) {
return err;
}
tx_req++;
pdu = radio_pkt_scratch_get();
payload = &pdu[2];
switch (type) {
case 0x00:
memcpy(payload, prbs9, len);
break;
case 0x01:
memset(payload, 0x0f, len);
break;
case 0x02:
memset(payload, 0x55, len);
break;
case 0x03:
memcpy(payload, prbs15, len);
break;
case 0x04:
memset(payload, 0xff, len);
break;
case 0x05:
memset(payload, 0x00, len);
break;
case 0x06:
memset(payload, 0xf0, len);
break;
case 0x07:
memset(payload, 0xaa, len);
break;
}
pdu[0] = type;
pdu[1] = len;
radio_pkt_tx_set(pdu);
radio_switch_complete_and_disable();
start_us = radio_tmr_start(1, cntr_cnt_get() + CNTR_MIN_DELTA, 0);
radio_tmr_aa_capture();
radio_tmr_end_capture();
#if defined(CONFIG_BT_CTLR_GPIO_PA_PIN)
radio_gpio_pa_setup();
radio_gpio_pa_lna_enable(start_us +
radio_tx_ready_delay_get(test_phy,
test_phy_flags) -
CONFIG_BT_CTLR_GPIO_PA_OFFSET);
#else /* !CONFIG_BT_CTLR_GPIO_PA_PIN */
ARG_UNUSED(start_us);
#endif /* !CONFIG_BT_CTLR_GPIO_PA_PIN */
started = true;
return 0;
}
u32_t ll_test_rx(u8_t chan, u8_t phy, u8_t mod_idx)
{
u32_t err;
if (!phy || (phy > 0x03)) {
return 1;
}
err = init(chan, phy, isr_rx);
if (err) {
return err;
}
radio_pkt_rx_set(radio_pkt_scratch_get());
radio_switch_complete_and_rx(test_phy);
radio_tmr_start(0, cntr_cnt_get() + CNTR_MIN_DELTA, 0);
#if defined(CONFIG_BT_CTLR_GPIO_LNA_PIN)
radio_gpio_lna_on();
#endif /* CONFIG_BT_CTLR_GPIO_LNA_PIN */
started = true;
return 0;
}
u32_t ll_test_end(u16_t *num_rx)
{
struct device *hf_clock;
u8_t ack;
if (!started) {
return 1;
}
/* Return packets Rx-ed/Completed */
*num_rx = test_num_rx;
test_num_rx = 0;
/* Disable Radio, if in Rx test */
ack = tx_ack;
if (tx_req == ack) {
radio_disable();
} else {
/* Wait for Tx to complete */
tx_req = ack + 2;
while (tx_req != tx_ack) {
cpu_sleep();
}
}
/* Stop packet timer */
radio_tmr_stop();
/* Release resources acquired for Radio */
hf_clock = radio_hf_clock_get();
clock_control_off(hf_clock, NULL);
/* Stop coarse timer */
cntr_stop();
#if defined(CONFIG_BT_CTLR_GPIO_LNA_PIN)
radio_gpio_lna_off();
#endif /* CONFIG_BT_CTLR_GPIO_LNA_PIN */
started = false;
return 0;
}
