19 changed files with 87 additions and 1496 deletions
@ -1,7 +0,0 @@
|
||||
#include "u_async.h" |
||||
#include <stdio.h> |
||||
|
||||
int main() { |
||||
printf("socket_node size: %zu bytes\n", sizeof(struct socket_node)); |
||||
return 0; |
||||
} |
||||
@ -1,155 +0,0 @@
|
||||
// timeout_heap.c |
||||
|
||||
#include "timeout_heap.h" |
||||
#include "debug_config.h" |
||||
#include <stdlib.h> |
||||
#include <stdio.h> // For potential error printing, optional |
||||
|
||||
// Helper macros for 1-based indices |
||||
#define PARENT(i) ((i) / 2) |
||||
#define LEFT_CHILD(i) (2 * (i)) |
||||
#define RIGHT_CHILD(i) (2 * (i) + 1) |
||||
|
||||
TimeoutHeap *timeout_heap_create(size_t initial_capacity) { |
||||
TimeoutHeap *h = malloc(sizeof(TimeoutHeap)); |
||||
if (!h) return NULL; |
||||
h->heap = malloc(sizeof(TimeoutEntry) * initial_capacity); |
||||
if (!h->heap) { |
||||
free(h); |
||||
return NULL; |
||||
} |
||||
h->size = 0; |
||||
h->capacity = initial_capacity; |
||||
h->freed_count = 0; |
||||
h->user_data = NULL; |
||||
h->free_callback = NULL; |
||||
return h; |
||||
} |
||||
|
||||
|
||||
/*
 * Destroy the heap and drain any entries still queued.
 * If a free callback was registered via timeout_heap_set_free_callback(),
 * each remaining entry's payload is released through it; without a
 * callback, payload ownership stays with the caller (the original code
 * never invoked the callback here, leaking payloads for callers that
 * did not pre-drain the heap).
 */
void timeout_heap_destroy(TimeoutHeap *h) {
    if (!h) return;

    while (h->size > 0) {
        TimeoutEntry entry;
        if (timeout_heap_pop(h, &entry) != 0) break;
        if (h->free_callback) {
            h->free_callback(h->user_data, entry.data);
        }
    }

    free(h->heap);
    free(h);
}
||||
/* Sift the entry at 1-based index `i` up until its parent expires no later. */
static void bubble_up(TimeoutHeap *h, size_t i) {
    while (i > 1) {
        size_t parent = PARENT(i);
        if (h->heap[parent - 1].expiration <= h->heap[i - 1].expiration) {
            break; /* heap property restored */
        }
        TimeoutEntry tmp = h->heap[i - 1];
        h->heap[i - 1] = h->heap[parent - 1];
        h->heap[parent - 1] = tmp;
        i = parent;
    }
}
||||
|
||||
/*
 * Insert an entry; grows storage geometrically when full.
 * Returns 0 on success, -1 on allocation failure (heap unchanged).
 */
int timeout_heap_push(TimeoutHeap *h, TimeoutTime expiration, void *data) {
    if (h->size == h->capacity) {
        size_t grown = h->capacity ? h->capacity * 2 : 1;
        TimeoutEntry *resized = realloc(h->heap, grown * sizeof(TimeoutEntry));
        if (!resized) return -1; /* old buffer still valid */
        h->heap = resized;
        h->capacity = grown;
    }

    /* Append at the tail (0-based slot), then restore heap order. */
    size_t slot = h->size++;
    h->heap[slot].expiration = expiration;
    h->heap[slot].data = data;
    h->heap[slot].deleted = 0;

    bubble_up(h, slot + 1); /* bubble_up expects a 1-based index */
    return 0;
}
||||
|
||||
/* Sift the entry at 1-based index `i` down below any earlier-expiring child. */
static void heapify_down(TimeoutHeap *h, size_t i) {
    for (;;) {
        size_t best = i;
        size_t left = LEFT_CHILD(i);
        size_t right = RIGHT_CHILD(i);

        if (left <= h->size &&
            h->heap[left - 1].expiration < h->heap[best - 1].expiration) {
            best = left;
        }
        if (right <= h->size &&
            h->heap[right - 1].expiration < h->heap[best - 1].expiration) {
            best = right;
        }
        if (best == i) {
            break; /* neither child expires earlier */
        }

        TimeoutEntry tmp = h->heap[best - 1];
        h->heap[best - 1] = h->heap[i - 1];
        h->heap[i - 1] = tmp;
        i = best;
    }
}
||||
|
||||
/* Drop the root entry: move the last element to the top and sift it down. */
static void remove_root(TimeoutHeap *h) {
    if (h->size == 0) return;

    h->heap[0] = h->heap[--h->size];

    if (h->size > 0) {
        heapify_down(h, 1); /* 1-based root */
    }
}
||||
|
||||
/*
 * Report the earliest live entry without removing it.
 * Lazily discards root entries marked deleted by timeout_heap_cancel(),
 * releasing each discarded payload through the registered free callback
 * so cancelled timers do not leak (the original code dropped them
 * without freeing, and kept a dead loop variable `i` that was never
 * incremented).
 * Returns 0 on success, -1 if no live entry remains.
 */
int timeout_heap_peek(TimeoutHeap *h, TimeoutEntry *out) {
    if (h->size == 0) return -1;

    while (h->size > 0 && h->heap[0].deleted) {
        void *payload = h->heap[0].data;
        remove_root(h);
        if (h->free_callback) {
            h->free_callback(h->user_data, payload);
        }
    }
    if (h->size == 0) return -1;

    *out = h->heap[0];
    return 0;
}
||||
|
||||
/*
 * Remove and return the earliest live entry.
 * Root entries marked deleted (lazy cancellation) are discarded first;
 * their payloads are released through the registered free callback,
 * completing the "free happens lazily in the heap" contract that
 * timeout_heap_cancel() relies on (previously these payloads leaked).
 * Returns 0 on success, -1 if no live entry remains.
 */
int timeout_heap_pop(TimeoutHeap *h, TimeoutEntry *out) {
    if (h->size == 0) return -1;

    while (h->size > 0 && h->heap[0].deleted) {
        void *payload = h->heap[0].data;
        remove_root(h);
        if (h->free_callback) {
            h->free_callback(h->user_data, payload);
        }
    }
    if (h->size == 0) return -1;

    *out = h->heap[0];
    remove_root(h);
    return 0;
}
||||
|
||||
/*
 * Mark the entry matching (expiration, data) for lazy removal.
 * Linear scan; returns 0 if found and marked, -1 otherwise.
 */
int timeout_heap_cancel(TimeoutHeap *h, TimeoutTime expiration, void *data) {
    for (size_t i = 0; i < h->size; i++) {
        TimeoutEntry *e = &h->heap[i];
        if (e->expiration == expiration && e->data == data) {
            e->deleted = 1;
            return 0;
        }
    }
    return -1; // Not found
}
||||
|
||||
/* Register how entry payloads are released (callback may be NULL). */
void timeout_heap_set_free_callback(TimeoutHeap *h, void* user_data, void (*callback)(void* user_data, void* data)) {
    if (!h) return;

    h->user_data = user_data;
    h->free_callback = callback;
}
||||
@ -1,793 +0,0 @@
|
||||
// uasync.c |
||||
|
||||
#include "u_async.h" |
||||
#include "debug_config.h" |
||||
#include <stdio.h> |
||||
#include <string.h> |
||||
#include <stdlib.h> |
||||
#include <unistd.h> |
||||
#include <errno.h> |
||||
#include <poll.h> |
||||
#include <limits.h> |
||||
#include <fcntl.h> |
||||
|
||||
|
||||
|
||||
// Timeout node with safe cancellation |
||||
struct timeout_node { |
||||
void* arg; |
||||
timeout_callback_t callback; |
||||
uint64_t expiration_ms; // absolute expiration time in milliseconds |
||||
struct UASYNC* ua; // Pointer back to uasync instance for counter updates |
||||
int cancelled; // Cancellation flag |
||||
}; |
||||
|
||||
// Socket node with array-based storage |
||||
struct socket_node { |
||||
int fd; |
||||
socket_callback_t read_cbk; |
||||
socket_callback_t write_cbk; |
||||
socket_callback_t except_cbk; |
||||
void* user_data; |
||||
int active; // 1 if socket is active, 0 if freed (for reuse) |
||||
}; |
||||
|
||||
// Slot-based socket registry: O(1) add/remove/lookup keyed by fd.
struct socket_array {
    struct socket_node* sockets;  // slot storage
    int* fd_to_index;             // fd -> slot index, -1 if unregistered
    int* index_to_fd;             // slot index -> fd, -1 if slot is free
    int capacity;                 // allocated slots (also sizes the fd maps)
    int count;                    // number of active slots
    int max_fd;                   // highest fd seen, for bounds checking
};
||||
|
||||
static struct socket_array* socket_array_create(int initial_capacity); |
||||
static void socket_array_destroy(struct socket_array* sa); |
||||
static int socket_array_add(struct socket_array* sa, int fd, socket_callback_t read_cbk, socket_callback_t write_cbk, socket_callback_t except_cbk, void* user_data); |
||||
static int socket_array_remove(struct socket_array* sa, int fd); |
||||
static struct socket_node* socket_array_get(struct socket_array* sa, int fd); |
||||
|
||||
// No global instance - each module must use its own struct UASYNC instance |
||||
|
||||
// Array-based socket management implementation |
||||
static struct socket_array* socket_array_create(int initial_capacity) { |
||||
if (initial_capacity < 4) initial_capacity = 4; // Minimum capacity |
||||
|
||||
struct socket_array* sa = malloc(sizeof(struct socket_array)); |
||||
if (!sa) return NULL; |
||||
|
||||
sa->sockets = calloc(initial_capacity, sizeof(struct socket_node)); |
||||
sa->fd_to_index = calloc(initial_capacity, sizeof(int)); |
||||
sa->index_to_fd = calloc(initial_capacity, sizeof(int)); |
||||
|
||||
if (!sa->sockets || !sa->fd_to_index || !sa->index_to_fd) { |
||||
free(sa->sockets); |
||||
free(sa->fd_to_index); |
||||
free(sa->index_to_fd); |
||||
free(sa); |
||||
return NULL; |
||||
} |
||||
|
||||
// Initialize mapping arrays to -1 (invalid) |
||||
for (int i = 0; i < initial_capacity; i++) { |
||||
sa->fd_to_index[i] = -1; |
||||
sa->index_to_fd[i] = -1; |
||||
sa->sockets[i].fd = -1; |
||||
sa->sockets[i].active = 0; |
||||
} |
||||
|
||||
sa->capacity = initial_capacity; |
||||
sa->count = 0; |
||||
sa->max_fd = -1; |
||||
|
||||
return sa; |
||||
} |
||||
|
||||
static void socket_array_destroy(struct socket_array* sa) { |
||||
if (!sa) return; |
||||
|
||||
free(sa->sockets); |
||||
free(sa->fd_to_index); |
||||
free(sa->index_to_fd); |
||||
free(sa); |
||||
} |
||||
|
||||
/*
 * Register `fd` with its callbacks.  Grows the registry if fd exceeds
 * the current capacity.  Returns the slot index on success, -1 on
 * invalid fd, duplicate registration, or allocation failure.
 *
 * BUGFIX: the original grow path issued all three reallocs and, on a
 * partial failure, freed the blocks that *had* been reallocated while
 * the struct still pointed at old blocks already released by realloc —
 * leaving sa->sockets / sa->fd_to_index / sa->index_to_fd dangling.
 * Each successful realloc is now committed back to the struct
 * immediately; `capacity` (and slot initialization) changes only after
 * all three arrays have grown, so the registry stays consistent even
 * if a later realloc fails.
 */
static int socket_array_add(struct socket_array* sa, int fd, socket_callback_t read_cbk, socket_callback_t write_cbk, socket_callback_t except_cbk, void* user_data) {
    if (!sa || fd < 0 || fd >= FD_SETSIZE) return -1;

    if (fd >= sa->capacity) {
        int new_capacity = sa->capacity * 2;
        if (fd >= new_capacity) new_capacity = fd + 16; /* ensure room for fd */

        struct socket_node* new_sockets =
            realloc(sa->sockets, new_capacity * sizeof(struct socket_node));
        if (!new_sockets) return -1;
        sa->sockets = new_sockets;

        int* new_fd_to_index = realloc(sa->fd_to_index, new_capacity * sizeof(int));
        if (!new_fd_to_index) return -1;
        sa->fd_to_index = new_fd_to_index;

        int* new_index_to_fd = realloc(sa->index_to_fd, new_capacity * sizeof(int));
        if (!new_index_to_fd) return -1;
        sa->index_to_fd = new_index_to_fd;

        /* All three grew: initialize the fresh tail and publish capacity. */
        for (int i = sa->capacity; i < new_capacity; i++) {
            sa->fd_to_index[i] = -1;
            sa->index_to_fd[i] = -1;
            sa->sockets[i].fd = -1;
            sa->sockets[i].active = 0;
        }
        sa->capacity = new_capacity;
    }

    if (sa->fd_to_index[fd] != -1) return -1; /* fd already registered */

    /* Find the first free slot. */
    int index = -1;
    for (int i = 0; i < sa->capacity; i++) {
        if (!sa->sockets[i].active) {
            index = i;
            break;
        }
    }
    if (index == -1) return -1; /* no free slot (cannot normally happen) */

    sa->sockets[index].fd = fd;
    sa->sockets[index].read_cbk = read_cbk;
    sa->sockets[index].write_cbk = write_cbk;
    sa->sockets[index].except_cbk = except_cbk;
    sa->sockets[index].user_data = user_data;
    sa->sockets[index].active = 1;

    sa->fd_to_index[fd] = index;
    sa->index_to_fd[index] = fd;
    sa->count++;

    if (fd > sa->max_fd) sa->max_fd = fd;

    return index;
}
||||
|
||||
static int socket_array_remove(struct socket_array* sa, int fd) { |
||||
if (!sa || fd < 0 || fd >= sa->capacity) return -1; |
||||
|
||||
int index = sa->fd_to_index[fd]; |
||||
if (index == -1 || !sa->sockets[index].active) return -1; // FD not found |
||||
|
||||
// Mark as inactive |
||||
sa->sockets[index].active = 0; |
||||
sa->sockets[index].fd = -1; |
||||
sa->fd_to_index[fd] = -1; |
||||
sa->index_to_fd[index] = -1; |
||||
sa->count--; |
||||
|
||||
return 0; |
||||
} |
||||
|
||||
static struct socket_node* socket_array_get(struct socket_array* sa, int fd) { |
||||
if (!sa || fd < 0 || fd >= sa->capacity) return NULL; |
||||
|
||||
int index = sa->fd_to_index[fd]; |
||||
if (index == -1 || !sa->sockets[index].active) return NULL; |
||||
|
||||
return &sa->sockets[index]; |
||||
} |
||||
|
||||
// Callback to free timeout node and update counters |
||||
static void timeout_node_free_callback(void* user_data, void* data) { |
||||
struct UASYNC* ua = (struct UASYNC*)user_data; |
||||
struct timeout_node* node = (struct timeout_node*)data; |
||||
(void)node; // Not used directly, but keep for consistency |
||||
ua->timer_free_count++; |
||||
free(data); |
||||
} |
||||
|
||||
// Helper to get current time |
||||
static void get_current_time(struct timeval* tv) { |
||||
gettimeofday(tv, NULL); |
||||
} |
||||
|
||||
|
||||
|
||||
// Drain wakeup pipe - read all available bytes |
||||
static void drain_wakeup_pipe(struct UASYNC* ua) { |
||||
if (!ua || !ua->wakeup_initialized) return; |
||||
|
||||
char buf[64]; |
||||
while (1) { |
||||
ssize_t n = read(ua->wakeup_pipe[0], buf, sizeof(buf)); |
||||
if (n <= 0) break; |
||||
} |
||||
} |
||||
|
||||
// Advance *tv by dt timebase units (1 unit = 0.1 ms = 100 us),
// normalizing tv_usec back into [0, 1000000).
static void timeval_add_tb(struct timeval* tv, int dt) {
    long extra_usec = (long)(dt % 10000) * 100;
    tv->tv_usec += extra_usec;
    tv->tv_sec += dt / 10000 + tv->tv_usec / 1000000;
    tv->tv_usec %= 1000000;
}
||||
|
||||
// Flatten a timeval to whole milliseconds (sub-ms remainder truncated).
static uint64_t timeval_to_ms(const struct timeval* tv) {
    uint64_t ms = (uint64_t)tv->tv_sec * 1000ULL;
    ms += (uint64_t)tv->tv_usec / 1000ULL;
    return ms;
}
||||
|
||||
|
||||
|
||||
// Simplified timeout handling without reference counting |
||||
|
||||
// Process expired timeouts with safe cancellation |
||||
static void process_timeouts(struct UASYNC* ua) { |
||||
if (!ua || !ua->timeout_heap) return; |
||||
|
||||
struct timeval now_tv; |
||||
get_current_time(&now_tv); |
||||
uint64_t now_ms = timeval_to_ms(&now_tv); |
||||
|
||||
while (1) { |
||||
TimeoutEntry entry; |
||||
if (timeout_heap_peek(ua->timeout_heap, &entry) != 0) break; |
||||
if (entry.expiration > now_ms) break; |
||||
|
||||
// Pop the expired timeout |
||||
timeout_heap_pop(ua->timeout_heap, &entry); |
||||
struct timeout_node* node = (struct timeout_node*)entry.data; |
||||
|
||||
if (node && node->callback && !node->cancelled) { |
||||
// Execute callback only if not cancelled |
||||
node->callback(node->arg); |
||||
} |
||||
|
||||
// Always free the node after processing |
||||
if (node && node->ua) { |
||||
node->ua->timer_free_count++; |
||||
} |
||||
free(node); |
||||
} |
||||
} |
||||
|
||||
// Compute time to next timeout |
||||
static void get_next_timeout(struct UASYNC* ua, struct timeval* tv) { |
||||
if (!ua || !ua->timeout_heap) { |
||||
tv->tv_sec = 0; |
||||
tv->tv_usec = 0; |
||||
return; |
||||
} |
||||
|
||||
TimeoutEntry entry; |
||||
if (timeout_heap_peek(ua->timeout_heap, &entry) != 0) { |
||||
tv->tv_sec = 0; |
||||
tv->tv_usec = 0; |
||||
return; |
||||
} |
||||
|
||||
struct timeval now_tv; |
||||
get_current_time(&now_tv); |
||||
uint64_t now_ms = timeval_to_ms(&now_tv); |
||||
|
||||
if (entry.expiration <= now_ms) { |
||||
tv->tv_sec = 0; |
||||
tv->tv_usec = 0; |
||||
return; |
||||
} |
||||
|
||||
uint64_t delta_ms = entry.expiration - now_ms; |
||||
tv->tv_sec = delta_ms / 1000; |
||||
tv->tv_usec = (delta_ms % 1000) * 1000; |
||||
} |
||||
|
||||
|
||||
|
||||
// Instance version |
||||
void* uasync_set_timeout(struct UASYNC* ua, int timeout_tb, void* arg, timeout_callback_t callback) { |
||||
if (!ua || timeout_tb < 0 || !callback) return NULL; |
||||
if (!ua->timeout_heap) return NULL; |
||||
|
||||
DEBUG_DEBUG(DEBUG_CATEGORY_TIMERS, "uasync_set_timeout: timeout=%d.%d ms, arg=%p, callback=%p", timeout_tb/10, timeout_tb%10, arg, callback); |
||||
|
||||
struct timeout_node* node = malloc(sizeof(struct timeout_node)); |
||||
if (!node) { |
||||
DEBUG_ERROR(DEBUG_CATEGORY_TIMERS, "uasync_set_timeout: failed to allocate node"); |
||||
return NULL; |
||||
} |
||||
ua->timer_alloc_count++; |
||||
|
||||
node->arg = arg; |
||||
node->callback = callback; |
||||
node->ua = ua; |
||||
node->cancelled = 0; |
||||
|
||||
// Calculate expiration time in milliseconds |
||||
struct timeval now; |
||||
get_current_time(&now); |
||||
timeval_add_tb(&now, timeout_tb); |
||||
node->expiration_ms = timeval_to_ms(&now); |
||||
|
||||
// Add to heap |
||||
if (timeout_heap_push(ua->timeout_heap, node->expiration_ms, node) != 0) { |
||||
DEBUG_ERROR(DEBUG_CATEGORY_TIMERS, "uasync_set_timeout: failed to push to heap"); |
||||
free(node); |
||||
ua->timer_free_count++; // Balance the alloc counter |
||||
return NULL; |
||||
} |
||||
|
||||
return node; |
||||
} |
||||
|
||||
|
||||
|
||||
// Instance version |
||||
err_t uasync_cancel_timeout(struct UASYNC* ua, void* t_id) { |
||||
if (!ua || !t_id || !ua->timeout_heap) { |
||||
DEBUG_ERROR(DEBUG_CATEGORY_TIMERS, "uasync_cancel_timeout: invalid parameters ua=%p, t_id=%p, heap=%p", |
||||
ua, t_id, ua ? ua->timeout_heap : NULL); |
||||
return ERR_FAIL; |
||||
} |
||||
|
||||
struct timeout_node* node = (struct timeout_node*)t_id; |
||||
|
||||
// Try to cancel from heap first |
||||
if (timeout_heap_cancel(ua->timeout_heap, node->expiration_ms, node) == 0) { |
||||
// Successfully marked as deleted - free will happen lazily in heap |
||||
node->cancelled = 1; |
||||
node->callback = NULL; |
||||
DEBUG_DEBUG(DEBUG_CATEGORY_TIMERS, "uasync_cancel_timeout: successfully cancelled timer %p from heap", node); |
||||
return ERR_OK; |
||||
} |
||||
|
||||
DEBUG_DEBUG(DEBUG_CATEGORY_TIMERS, "uasync_cancel_timeout: not found in heap: ua=%p, t_id=%p, node=%p, expires=%llu ms", |
||||
ua, t_id, node, (unsigned long long)node->expiration_ms); |
||||
|
||||
// If not found in heap, it may have already expired or been invalid |
||||
return ERR_FAIL; |
||||
} |
||||
|
||||
|
||||
// Instance version |
||||
void* uasync_add_socket(struct UASYNC* ua, int fd, socket_callback_t read_cbk, socket_callback_t write_cbk, socket_callback_t except_cbk, void* user_data) { |
||||
if (!ua || fd < 0 || fd >= FD_SETSIZE) return NULL; // Bounds check |
||||
|
||||
int index = socket_array_add(ua->sockets, fd, read_cbk, write_cbk, except_cbk, user_data); |
||||
if (index < 0) return NULL; |
||||
|
||||
ua->socket_alloc_count++; |
||||
|
||||
// Return pointer to the socket node (same as before for API compatibility) |
||||
return &ua->sockets->sockets[index]; |
||||
} |
||||
|
||||
|
||||
|
||||
// Instance version |
||||
err_t uasync_remove_socket(struct UASYNC* ua, void* s_id) { |
||||
if (!ua || !s_id) return ERR_FAIL; |
||||
|
||||
struct socket_node* node = (struct socket_node*)s_id; |
||||
if (node->fd < 0) return ERR_FAIL; // Invalid node |
||||
|
||||
int result = socket_array_remove(ua->sockets, node->fd); |
||||
if (result != 0) return ERR_FAIL; |
||||
|
||||
ua->socket_free_count++; |
||||
return ERR_OK; |
||||
} |
||||
|
||||
|
||||
|
||||
// Run the event loop forever; each iteration blocks in poll().
void uasync_mainloop(struct UASYNC* ua) {
    for (;;) {
        uasync_poll(ua, -1); /* infinite timeout */
    }
}
||||
|
||||
// Instance version |
||||
void uasync_poll(struct UASYNC* ua, int timeout_tb) { |
||||
if (!ua) return; |
||||
|
||||
DEBUG_DEBUG(DEBUG_CATEGORY_ETCP, "async: mainloop: event waiting"); |
||||
|
||||
/* Process expired timeouts */ |
||||
process_timeouts(ua); |
||||
|
||||
/* Compute timeout for poll in milliseconds */ |
||||
int timeout_ms = -1; // infinite by default |
||||
|
||||
// Get next timeout from heap |
||||
struct timeval tv; |
||||
get_next_timeout(ua, &tv); |
||||
|
||||
if (tv.tv_sec > 0 || tv.tv_usec > 0 || (ua->timeout_heap && ua->timeout_heap->size > 0)) { |
||||
// Convert timeval to milliseconds, cap at INT_MAX |
||||
uint64_t ms = (uint64_t)tv.tv_sec * 1000ULL + (uint64_t)tv.tv_usec / 1000ULL; |
||||
if (ms > INT_MAX) ms = INT_MAX; |
||||
timeout_ms = (int)ms; |
||||
} |
||||
|
||||
/* If timeout_tb >= 0, compute timeout as min(timeout_tb, existing timer) */ |
||||
if (timeout_tb >= 0) { |
||||
// Convert timebase (0.1 ms) to milliseconds |
||||
int user_timeout_ms = timeout_tb / 10; |
||||
if (timeout_tb % 10 != 0) user_timeout_ms++; // round up |
||||
|
||||
if (timeout_ms < 0 || user_timeout_ms < timeout_ms) { |
||||
timeout_ms = user_timeout_ms; |
||||
} |
||||
} |
||||
|
||||
/* Build pollfd array from socket array - O(1) per socket */ |
||||
int socket_count = ua->sockets ? ua->sockets->count : 0; |
||||
int wakeup_fd_present = ua->wakeup_initialized ? 1 : 0; |
||||
int total_fds = socket_count + wakeup_fd_present; |
||||
|
||||
if (total_fds == 0) { |
||||
/* No sockets and no wakeup fd, just wait for timeout */ |
||||
if (timeout_ms >= 0) { |
||||
/* usleep would be better but we just call poll with empty set */ |
||||
poll(NULL, 0, timeout_ms); |
||||
} else { |
||||
/* Infinite timeout with no sockets - should not happen in practice */ |
||||
return; |
||||
} |
||||
/* Check timeouts again after sleep */ |
||||
process_timeouts(ua); |
||||
return; |
||||
} |
||||
|
||||
struct pollfd* fds = malloc(total_fds * sizeof(struct pollfd)); |
||||
struct socket_node** nodes = NULL; |
||||
if (socket_count > 0) { |
||||
nodes = malloc(socket_count * sizeof(struct socket_node*)); |
||||
} |
||||
if (!fds || (socket_count > 0 && !nodes)) { |
||||
free(fds); |
||||
free(nodes); |
||||
return; /* out of memory */ |
||||
} |
||||
|
||||
/* Fill arrays */ |
||||
int idx = 0; |
||||
|
||||
/* Add wakeup fd first if present */ |
||||
if (wakeup_fd_present) { |
||||
fds[idx].fd = ua->wakeup_pipe[0]; |
||||
fds[idx].events = POLLIN; |
||||
fds[idx].revents = 0; |
||||
idx++; |
||||
} |
||||
|
||||
/* Add socket fds using efficient array traversal */ |
||||
int node_idx = 0; |
||||
for (int i = 0; i < ua->sockets->capacity && node_idx < socket_count; i++) { |
||||
if (ua->sockets->sockets[i].active) { |
||||
struct socket_node* cur = &ua->sockets->sockets[i]; |
||||
fds[idx].fd = cur->fd; |
||||
fds[idx].events = 0; |
||||
fds[idx].revents = 0; |
||||
|
||||
if (cur->read_cbk) fds[idx].events |= POLLIN; |
||||
if (cur->write_cbk) fds[idx].events |= POLLOUT; |
||||
if (cur->except_cbk) fds[idx].events |= POLLPRI; |
||||
|
||||
if (nodes) { |
||||
nodes[node_idx] = cur; |
||||
} |
||||
idx++; |
||||
node_idx++; |
||||
} |
||||
} |
||||
|
||||
/* Call poll */ |
||||
int ret = poll(fds, total_fds, timeout_ms); |
||||
if (ret < 0) { |
||||
if (errno == EINTR) { |
||||
free(fds); |
||||
free(nodes); |
||||
return; |
||||
} |
||||
perror("poll"); |
||||
free(fds); |
||||
free(nodes); |
||||
return; |
||||
} |
||||
|
||||
/* Process timeouts that may have expired during poll */ |
||||
process_timeouts(ua); |
||||
|
||||
/* Process socket events */ |
||||
if (ret > 0) { |
||||
for (int i = 0; i < total_fds; i++) { |
||||
if (fds[i].revents == 0) continue; |
||||
|
||||
/* Handle wakeup fd separately */ |
||||
if (wakeup_fd_present && i == 0) { |
||||
if (fds[i].revents & POLLIN) { |
||||
drain_wakeup_pipe(ua); |
||||
} |
||||
continue; |
||||
} |
||||
|
||||
/* Socket event */ |
||||
int socket_idx = i - wakeup_fd_present; |
||||
struct socket_node* node = nodes[socket_idx]; |
||||
|
||||
/* Check for error conditions first */ |
||||
if (fds[i].revents & (POLLERR | POLLHUP | POLLNVAL)) { |
||||
/* Treat as exceptional condition */ |
||||
if (node->except_cbk) { |
||||
node->except_cbk(node->fd, node->user_data); |
||||
} |
||||
} |
||||
|
||||
/* Exceptional data (out-of-band) */ |
||||
if (fds[i].revents & POLLPRI) { |
||||
if (node->except_cbk) { |
||||
node->except_cbk(node->fd, node->user_data); |
||||
} |
||||
} |
||||
|
||||
/* Read readiness */ |
||||
if (fds[i].revents & POLLIN) { |
||||
if (node->read_cbk) { |
||||
node->read_cbk(node->fd, node->user_data); |
||||
} |
||||
} |
||||
|
||||
/* Write readiness */ |
||||
if (fds[i].revents & POLLOUT) { |
||||
if (node->write_cbk) { |
||||
node->write_cbk(node->fd, node->user_data); |
||||
} |
||||
} |
||||
} |
||||
} |
||||
|
||||
free(fds); |
||||
free(nodes); |
||||
} |
||||
|
||||
|
||||
|
||||
// ========== Instance management functions ========== |
||||
|
||||
struct UASYNC* uasync_create(void) { |
||||
|
||||
struct UASYNC* ua = malloc(sizeof(struct UASYNC)); |
||||
if (!ua) return NULL; |
||||
|
||||
memset(ua, 0, sizeof(struct UASYNC)); |
||||
ua->wakeup_pipe[0] = -1; |
||||
ua->wakeup_pipe[1] = -1; |
||||
ua->wakeup_initialized = 0; |
||||
|
||||
// Create wakeup pipe |
||||
if (pipe(ua->wakeup_pipe) < 0) { |
||||
DEBUG_WARN(DEBUG_CATEGORY_UASYNC, "Failed to create wakeup pipe: %s", strerror(errno)); |
||||
// Continue without wakeup mechanism |
||||
ua->wakeup_pipe[0] = -1; |
||||
ua->wakeup_pipe[1] = -1; |
||||
} else { |
||||
ua->wakeup_initialized = 1; |
||||
// Set non-blocking on read end to avoid blocking if pipe is full |
||||
int flags = fcntl(ua->wakeup_pipe[0], F_GETFL, 0); |
||||
if (flags >= 0) { |
||||
fcntl(ua->wakeup_pipe[0], F_SETFL, flags | O_NONBLOCK); |
||||
} |
||||
} |
||||
|
||||
ua->sockets = socket_array_create(16); |
||||
if (!ua->sockets) { |
||||
if (ua->wakeup_initialized) { |
||||
close(ua->wakeup_pipe[0]); |
||||
close(ua->wakeup_pipe[1]); |
||||
} |
||||
free(ua); |
||||
return NULL; |
||||
} |
||||
|
||||
ua->timeout_heap = timeout_heap_create(16); |
||||
if (!ua->timeout_heap) { |
||||
socket_array_destroy(ua->sockets); |
||||
if (ua->wakeup_initialized) { |
||||
close(ua->wakeup_pipe[0]); |
||||
close(ua->wakeup_pipe[1]); |
||||
} |
||||
free(ua); |
||||
return NULL; |
||||
} |
||||
|
||||
// Set callback to free timeout nodes and update counters |
||||
timeout_heap_set_free_callback(ua->timeout_heap, ua, timeout_node_free_callback); |
||||
|
||||
return ua; |
||||
} |
||||
|
||||
// Print all resources for debugging |
||||
void uasync_print_resources(struct UASYNC* ua, const char* prefix) { |
||||
if (!ua) { |
||||
printf("%s: NULL uasync instance\n", prefix); |
||||
return; |
||||
} |
||||
|
||||
printf("\n🔍 %s: UASYNC Resource Report for %p\n", prefix, ua); |
||||
printf(" Timer Statistics: allocated=%zu, freed=%zu, active=%zd\n", |
||||
ua->timer_alloc_count, ua->timer_free_count, |
||||
(ssize_t)(ua->timer_alloc_count - ua->timer_free_count)); |
||||
printf(" Socket Statistics: allocated=%zu, freed=%zu, active=%zd\n", |
||||
ua->socket_alloc_count, ua->socket_free_count, |
||||
(ssize_t)(ua->socket_alloc_count - ua->socket_free_count)); |
||||
|
||||
// Показать активные таймеры |
||||
if (ua->timeout_heap) { |
||||
size_t active_timers = 0; |
||||
// Безопасное чтение без извлечения - просто итерируем по массиву |
||||
for (size_t i = 0; i < ua->timeout_heap->size; i++) { |
||||
if (!ua->timeout_heap->heap[i].deleted) { |
||||
active_timers++; |
||||
struct timeout_node* node = (struct timeout_node*)ua->timeout_heap->heap[i].data; |
||||
printf(" Timer: node=%p, expires=%llu ms, cancelled=%d\n", |
||||
node, (unsigned long long)ua->timeout_heap->heap[i].expiration, node->cancelled); |
||||
} |
||||
} |
||||
printf(" Active timers in heap: %zu\n", active_timers); |
||||
} |
||||
|
||||
// Показать активные сокеты |
||||
if (ua->sockets) { |
||||
int active_sockets = 0; |
||||
printf(" Socket array capacity: %d, active: %d\n", |
||||
ua->sockets->capacity, ua->sockets->count); |
||||
for (int i = 0; i < ua->sockets->capacity; i++) { |
||||
if (ua->sockets->sockets[i].active) { |
||||
active_sockets++; |
||||
printf(" Socket: fd=%d, active=%d\n", |
||||
ua->sockets->sockets[i].fd, |
||||
ua->sockets->sockets[i].active); |
||||
} |
||||
} |
||||
printf(" Total active sockets: %d\n", active_sockets); |
||||
} |
||||
|
||||
printf("🔚 %s: End of resource report\n\n", prefix); |
||||
} |
||||
|
||||
void uasync_destroy(struct UASYNC* ua, int close_fds) { |
||||
if (!ua) return; |
||||
|
||||
DEBUG_DEBUG(DEBUG_CATEGORY_TIMERS, "uasync_destroy: starting cleanup for ua=%p", ua); |
||||
|
||||
// Диагностика ресурсов перед очисткой |
||||
uasync_print_resources(ua, "BEFORE_DESTROY"); |
||||
|
||||
// Check for potential memory leaks |
||||
if (ua->timer_alloc_count != ua->timer_free_count || ua->socket_alloc_count != ua->socket_free_count) { |
||||
DEBUG_ERROR(DEBUG_CATEGORY_MEMORY, "Memory leaks detected before cleanup: timers %zu/%zu, sockets %zu/%zu", |
||||
ua->timer_alloc_count, ua->timer_free_count, ua->socket_alloc_count, ua->socket_free_count); |
||||
DEBUG_ERROR(DEBUG_CATEGORY_TIMERS, "Timer leak: allocated=%zu, freed=%zu, diff=%zd", |
||||
ua->timer_alloc_count, ua->timer_free_count, |
||||
(ssize_t)(ua->timer_alloc_count - ua->timer_free_count)); |
||||
DEBUG_ERROR(DEBUG_CATEGORY_TIMERS, "Socket leak: allocated=%zu, freed=%zu, diff=%zd", |
||||
ua->socket_alloc_count, ua->socket_free_count, |
||||
(ssize_t)(ua->socket_alloc_count - ua->socket_free_count)); |
||||
// Continue cleanup, will abort after if leaks remain |
||||
} |
||||
|
||||
// Free all remaining timeouts |
||||
if (ua->timeout_heap) { |
||||
size_t freed_count = 0; |
||||
while (1) { |
||||
TimeoutEntry entry; |
||||
if (timeout_heap_pop(ua->timeout_heap, &entry) != 0) break; |
||||
struct timeout_node* node = (struct timeout_node*)entry.data; |
||||
|
||||
// Free all timer nodes (avoid double-free bug) |
||||
if (node) { |
||||
ua->timer_free_count++; |
||||
free(node); |
||||
} |
||||
} |
||||
timeout_heap_destroy(ua->timeout_heap); |
||||
} |
||||
|
||||
// Free all socket nodes using array approach |
||||
if (ua->sockets) { |
||||
// Count and free all active sockets |
||||
int freed_count = 0; |
||||
for (int i = 0; i < ua->sockets->capacity; i++) { |
||||
if (ua->sockets->sockets[i].active) { |
||||
if (close_fds && ua->sockets->sockets[i].fd >= 0) { |
||||
close(ua->sockets->sockets[i].fd); |
||||
} |
||||
ua->socket_free_count++; |
||||
freed_count++; |
||||
} |
||||
} |
||||
DEBUG_DEBUG(DEBUG_CATEGORY_MEMORY, "Freed %d socket nodes in destroy", freed_count); |
||||
socket_array_destroy(ua->sockets); |
||||
} |
||||
|
||||
// Close wakeup pipe |
||||
if (ua->wakeup_initialized) { |
||||
close(ua->wakeup_pipe[0]); |
||||
close(ua->wakeup_pipe[1]); |
||||
} |
||||
|
||||
// Final leak check |
||||
if (ua->timer_alloc_count != ua->timer_free_count || ua->socket_alloc_count != ua->socket_free_count) { |
||||
DEBUG_ERROR(DEBUG_CATEGORY_MEMORY, "Memory leaks detected after cleanup: timers %zu/%zu, sockets %zu/%zu", |
||||
ua->timer_alloc_count, ua->timer_free_count, ua->socket_alloc_count, ua->socket_free_count); |
||||
DEBUG_ERROR(DEBUG_CATEGORY_TIMERS, "FINAL Timer leak: allocated=%zu, freed=%zu, diff=%zd", |
||||
ua->timer_alloc_count, ua->timer_free_count, |
||||
(ssize_t)(ua->timer_alloc_count - ua->timer_free_count)); |
||||
DEBUG_ERROR(DEBUG_CATEGORY_TIMERS, "FINAL Socket leak: allocated=%zu, freed=%zu, diff=%zd", |
||||
ua->socket_alloc_count, ua->socket_free_count, |
||||
(ssize_t)(ua->socket_alloc_count - ua->socket_free_count)); |
||||
abort(); |
||||
} |
||||
|
||||
DEBUG_DEBUG(DEBUG_CATEGORY_TIMERS, "uasync_destroy: completed successfully for ua=%p", ua); |
||||
free(ua); |
||||
} |
||||
|
||||
void uasync_init_instance(struct UASYNC* ua) { |
||||
if (!ua) return; |
||||
|
||||
// Initialize socket array if not present |
||||
if (!ua->sockets) { |
||||
ua->sockets = socket_array_create(16); |
||||
} |
||||
|
||||
if (!ua->timeout_heap) { |
||||
ua->timeout_heap = timeout_heap_create(16); |
||||
if (ua->timeout_heap) { |
||||
timeout_heap_set_free_callback(ua->timeout_heap, ua, timeout_node_free_callback); |
||||
} |
||||
} |
||||
} |
||||
|
||||
// Debug statistics |
||||
void uasync_get_stats(struct UASYNC* ua, size_t* timer_alloc, size_t* timer_free, size_t* socket_alloc, size_t* socket_free) { |
||||
if (!ua) return; |
||||
if (timer_alloc) *timer_alloc = ua->timer_alloc_count; |
||||
if (timer_free) *timer_free = ua->timer_free_count; |
||||
if (socket_alloc) *socket_alloc = ua->socket_alloc_count; |
||||
if (socket_free) *socket_free = ua->socket_free_count; |
||||
} |
||||
|
||||
// Get global instance for backward compatibility |
||||
|
||||
// Wakeup mechanism |
||||
int uasync_wakeup(struct UASYNC* ua) { |
||||
if (!ua || !ua->wakeup_initialized) return -1; |
||||
|
||||
char byte = 0; |
||||
ssize_t ret = write(ua->wakeup_pipe[1], &byte, 1); |
||||
if (ret != 1) { |
||||
// Don't print error from signal handler |
||||
return -1; |
||||
} |
||||
return 0; |
||||
} |
||||
|
||||
int uasync_get_wakeup_fd(struct UASYNC* ua) { |
||||
if (!ua || !ua->wakeup_initialized) return -1; |
||||
return ua->wakeup_pipe[1]; |
||||
} |
||||
|
||||
/* Lookup socket by file descriptor - returns current pointer even after realloc */ |
||||
int uasync_lookup_socket(struct UASYNC* ua, int fd, void** socket_id) { |
||||
if (!ua || !ua->sockets || !socket_id || fd < 0 || fd >= FD_SETSIZE) { |
||||
return -1; |
||||
} |
||||
|
||||
*socket_id = socket_array_get(ua->sockets, fd); |
||||
return (*socket_id != NULL) ? 0 : -1; |
||||
} |
||||
@ -1,391 +0,0 @@
|
||||
/* sc_lib.c - Secure Channel library implementation using TinyCrypt */ |
||||
|
||||
#include "secure_channel.h" |
||||
#include "../tinycrypt/lib/include/tinycrypt/ecc.h" |
||||
#include "../tinycrypt/lib/include/tinycrypt/ecc_dh.h" |
||||
#include "../tinycrypt/lib/include/tinycrypt/aes.h" |
||||
#include "../tinycrypt/lib/include/tinycrypt/ccm_mode.h" |
||||
#include "../tinycrypt/lib/include/tinycrypt/constants.h" |
||||
#include "../tinycrypt/lib/include/tinycrypt/ecc_platform_specific.h" |
||||
#include "../tinycrypt/lib/include/tinycrypt/sha256.h" |
||||
#include <string.h> |
||||
#include <stddef.h> |
||||
#include <sys/types.h> |
||||
#include <unistd.h> |
||||
#include <sys/time.h> |
||||
#include <stdio.h> |
||||
|
||||
// Simple debug macros |
||||
#define DEBUG_CATEGORY_CRYPTO 1 |
||||
#define DEBUG_ERROR(category, fmt, ...) fprintf(stderr, "ERROR: " fmt "\n", ##__VA_ARGS__) |
||||
#define DEBUG_INFO(category, fmt, ...) fprintf(stdout, "INFO: " fmt "\n", ##__VA_ARGS__) |
||||
#include <stdio.h> |
||||
#include <fcntl.h> |
||||
#include "crc32.h" |
||||
|
||||
/* Lazily-initialized handle to the secp256r1 curve; set on first use
 * by the functions below. */
static const struct uECC_Curve_t *curve = NULL;
/* Eight bytes read once from /dev/urandom; mixed into nonce hashing. */
static uint8_t sc_urandom_seed[8] = {0};
/* Nonzero once sc_urandom_seed has been fully populated. */
static int sc_urandom_initialized = 0;
||||
|
||||
static void sc_init_random_seed(void) |
||||
{ |
||||
int fd = open("/dev/urandom", O_RDONLY); |
||||
if (fd >= 0) { |
||||
ssize_t ret = read(fd, sc_urandom_seed, 8); |
||||
close(fd); |
||||
if (ret == 8) { |
||||
sc_urandom_initialized = 1; |
||||
} |
||||
} |
||||
} |
||||
|
||||
|
||||
/* RNG callback for uECC: fill dest with `size` random bytes.
 * Reads /dev/urandom in a loop (the original did a single read() and
 * rejected short reads outright; this version retries on partial reads
 * and EINTR), then XORs in the PID and current microtime as
 * defense-in-depth entropy mixing.
 * Returns 1 on success, 0 on failure (uECC RNG convention). */
static int sc_rng(uint8_t *dest, unsigned size)
{
    int fd = open("/dev/urandom", O_RDONLY);
    if (fd < 0) {
        return 0;
    }

    size_t off = 0;
    while (off < size) {
        ssize_t ret = read(fd, dest + off, size - off);
        if (ret < 0) {
            if (errno == EINTR) {
                continue; /* interrupted by a signal: retry */
            }
            close(fd);
            return 0;
        }
        if (ret == 0) { /* unexpected EOF on /dev/urandom */
            close(fd);
            return 0;
        }
        off += (size_t)ret;
    }
    close(fd);

    /* Mix in PID and microtime for additional entropy. */
    pid_t pid = getpid();
    struct timeval tv;
    gettimeofday(&tv, NULL);

    for (unsigned i = 0; i < size; i++) {
        dest[i] ^= (uint8_t)((pid >> (i % (sizeof(pid) * 8))) & 0xFF);
        dest[i] ^= (uint8_t)((tv.tv_sec >> (i % (sizeof(tv.tv_sec) * 8))) & 0xFF);
        dest[i] ^= (uint8_t)((tv.tv_usec >> (i % (sizeof(tv.tv_usec) * 8))) & 0xFF);
    }

    return 1;
}
||||
|
||||
static int sc_validate_key(const uint8_t *public_key) |
||||
{ |
||||
if (!curve) { |
||||
curve = uECC_secp256r1(); |
||||
} |
||||
int result = uECC_valid_public_key(public_key, curve); |
||||
DEBUG_INFO(DEBUG_CATEGORY_CRYPTO, "sc_validate_key: uECC_valid_public_key returned %d", result); |
||||
return result; |
||||
} |
||||
|
||||
sc_status_t sc_generate_keypair(struct SC_MYKEYS *pk) |
||||
{ |
||||
if (!pk) { |
||||
return SC_ERR_INVALID_ARG; |
||||
} |
||||
|
||||
if (!curve) { |
||||
curve = uECC_secp256r1(); |
||||
} |
||||
|
||||
/* Set custom RNG function */ |
||||
uECC_set_rng(sc_rng); |
||||
|
||||
if (!uECC_make_key(pk->public_key, pk->private_key, curve)) { |
||||
return SC_ERR_CRYPTO; |
||||
} |
||||
return SC_OK; |
||||
} |
||||
|
||||
/* Decode one hex digit; returns its value 0-15, or -1 for a non-hex char. */
static int hex_nibble(char c) {
    if (c >= '0' && c <= '9') return c - '0';
    if (c >= 'a' && c <= 'f') return c - 'a' + 10;
    if (c >= 'A' && c <= 'F') return c - 'A' + 10;
    return -1;
}

/* Convert a hex string to binary.
 * hex_str must be exactly binary_len * 2 hex digits. Unlike the original
 * sscanf("%2x") approach, embedded whitespace and sign characters are
 * rejected, so only strict hex input is accepted.
 * Returns 0 on success, -1 on any invalid argument or character. */
static int hex_to_binary(const char *hex_str, uint8_t *binary, size_t binary_len) {
    if (!hex_str || !binary || strlen(hex_str) != binary_len * 2) return -1;

    for (size_t i = 0; i < binary_len; i++) {
        int hi = hex_nibble(hex_str[i * 2]);
        int lo = hex_nibble(hex_str[i * 2 + 1]);
        if (hi < 0 || lo < 0) return -1;
        binary[i] = (uint8_t)((hi << 4) | lo);
    }
    return 0;
}
||||
|
||||
sc_status_t sc_init_local_keys(struct SC_MYKEYS *mykeys, const char *public_key, const char *private_key) { |
||||
if (!mykeys || !public_key || !private_key) { |
||||
DEBUG_ERROR(DEBUG_CATEGORY_CRYPTO, "sc_init_local_keys: invalid arguments"); |
||||
return SC_ERR_INVALID_ARG; |
||||
} |
||||
|
||||
if (!curve) { |
||||
curve = uECC_secp256r1(); |
||||
} |
||||
|
||||
DEBUG_INFO(DEBUG_CATEGORY_CRYPTO, "sc_init_local_keys: public_key len=%zu, private_key len=%zu", |
||||
strlen(public_key), strlen(private_key)); |
||||
|
||||
/* Convert hex to binary first */ |
||||
if (hex_to_binary(public_key, mykeys->public_key, SC_PUBKEY_SIZE)) { |
||||
DEBUG_ERROR(DEBUG_CATEGORY_CRYPTO, "sc_init_local_keys: failed to convert public key from hex"); |
||||
return SC_ERR_INVALID_ARG; |
||||
} |
||||
if (hex_to_binary(private_key, mykeys->private_key, SC_PRIVKEY_SIZE)) { |
||||
DEBUG_ERROR(DEBUG_CATEGORY_CRYPTO, "sc_init_local_keys: failed to convert private key from hex"); |
||||
return SC_ERR_INVALID_ARG; |
||||
} |
||||
|
||||
/* Validate the converted binary public key */ |
||||
if (sc_validate_key(mykeys->public_key) != 0) { |
||||
DEBUG_ERROR(DEBUG_CATEGORY_CRYPTO, "sc_init_local_keys: public key validation failed"); |
||||
return SC_ERR_INVALID_ARG; |
||||
} |
||||
|
||||
DEBUG_INFO(DEBUG_CATEGORY_CRYPTO, "sc_init_local_keys: keys initialized successfully"); |
||||
return SC_OK; |
||||
} |
||||
|
||||
/* Bind a key store to a secure-channel context and reset its state.
 * All counters and session flags start at zero; the peer key and the
 * session itself are established later via sc_set_peer_public_key().
 * Returns SC_OK, or SC_ERR_INVALID_ARG on NULL arguments (the original
 * dereferenced ctx unchecked, unlike every sibling function here). */
sc_status_t sc_init_ctx(sc_context_t *ctx, struct SC_MYKEYS *mykeys) {
    if (!ctx || !mykeys) {
        return SC_ERR_INVALID_ARG;
    }

    ctx->pk = mykeys;
    ctx->initialized = 1;
    ctx->peer_key_set = 0;
    ctx->session_ready = 0;
    ctx->tx_counter = 0;
    ctx->rx_counter = 0;

    return SC_OK;
}
||||
|
||||
/* Install the peer's public key and derive the session key via ECDH.
 * mode != 0: peer_public_key_h is a hex string of SC_PUBKEY_SIZE*2 chars;
 * mode == 0: peer_public_key_h points at SC_PUBKEY_SIZE raw bytes.
 * On success the context is marked session_ready.
 * Returns SC_OK, SC_ERR_INVALID_ARG, SC_ERR_NOT_INITIALIZED, or
 * SC_ERR_CRYPTO. */
sc_status_t sc_set_peer_public_key(sc_context_t *ctx, const char *peer_public_key_h, int mode) {
    uint8_t shared_secret[SC_SHARED_SECRET_SIZE];
    uint8_t peer_public_key[SC_PUBKEY_SIZE];

    /* Validate arguments BEFORE touching the key material: the original
     * decoded/copied peer_public_key_h before checking ctx, and never
     * checked peer_public_key_h itself, crashing on NULL input. */
    if (!ctx || !peer_public_key_h) {
        DEBUG_ERROR(DEBUG_CATEGORY_CRYPTO, "sc_set_peer_public_key: invalid ctx");
        return SC_ERR_INVALID_ARG;
    }

    if (!ctx->initialized) {
        DEBUG_ERROR(DEBUG_CATEGORY_CRYPTO, "sc_set_peer_public_key: ctx not initialized");
        return SC_ERR_NOT_INITIALIZED;
    }

    if (mode) {
        if (hex_to_binary(peer_public_key_h, peer_public_key, SC_PUBKEY_SIZE)) {
            DEBUG_ERROR(DEBUG_CATEGORY_CRYPTO, "sc_set_peer_public_key: invalid hex key format");
            return SC_ERR_INVALID_ARG;
        }
    } else {
        memcpy(peer_public_key, peer_public_key_h, SC_PUBKEY_SIZE);
    }

    if (!curve) {
        curve = uECC_secp256r1();
    }

    /* Validate peer public key lies on the curve. */
    if (sc_validate_key(peer_public_key) != 0) {
        DEBUG_ERROR(DEBUG_CATEGORY_CRYPTO, "sc_set_peer_public_key: invalid key");
        return SC_ERR_INVALID_ARG;
    }

    /* Compute shared secret using ECDH. */
    if (!ctx->pk) {
        DEBUG_ERROR(DEBUG_CATEGORY_CRYPTO, "sc_set_peer_public_key: no private key");
        return SC_ERR_NOT_INITIALIZED;
    }
    if (!uECC_shared_secret(peer_public_key, ctx->pk->private_key,
                            shared_secret, curve)) {
        DEBUG_ERROR(DEBUG_CATEGORY_CRYPTO, "sc_set_peer_public_key: shared secret error");
        return SC_ERR_CRYPTO;
    }

    /* Derive session key from shared secret (simple copy for demo). */
    memcpy(ctx->session_key, shared_secret, SC_SESSION_KEY_SIZE);

    /* Store peer public key and mark the session usable. */
    memcpy(ctx->peer_public_key, peer_public_key, SC_PUBKEY_SIZE);
    ctx->peer_key_set = 1;
    ctx->session_ready = 1;

    return SC_OK;
}
||||
|
||||
/* Build a SC_NONCE_SIZE-byte nonce for message number `counter`.
 * The nonce is the truncated SHA-256 of a 24-byte buffer laid out as:
 *   bytes  0-7 : the 8-byte /dev/urandom seed (lazily initialized)
 *   bytes  8-15: counter, little-endian
 *   bytes 16-19: tv_sec  low 32 bits, little-endian
 *   bytes 20-23: tv_usec low 32 bits, little-endian
 * NOTE(review): if the seed read fails, sc_urandom_seed stays all-zero
 * and nonce uniqueness rests on counter+time alone — confirm that is
 * acceptable for the threat model. */
static void sc_build_nonce(uint64_t counter, uint8_t *nonce_out)
{
    struct tc_sha256_state_struct sha_ctx;
    uint8_t hash[32];
    struct timeval tv;
    uint8_t data[8 + 8 + 8]; /* seed | counter | timestamp */

    if (!sc_urandom_initialized) {
        sc_init_random_seed();
    }

    gettimeofday(&tv, NULL);

    memcpy(data, sc_urandom_seed, 8);
    /* counter, little-endian, bytes 8..15 */
    data[8] = (counter >> 0) & 0xFF;
    data[9] = (counter >> 8) & 0xFF;
    data[10] = (counter >> 16) & 0xFF;
    data[11] = (counter >> 24) & 0xFF;
    data[12] = (counter >> 32) & 0xFF;
    data[13] = (counter >> 40) & 0xFF;
    data[14] = (counter >> 48) & 0xFF;
    data[15] = (counter >> 56) & 0xFF;
    /* tv_sec low 32 bits, little-endian, bytes 16..19 */
    data[16] = (tv.tv_sec >> 0) & 0xFF;
    data[17] = (tv.tv_sec >> 8) & 0xFF;
    data[18] = (tv.tv_sec >> 16) & 0xFF;
    data[19] = (tv.tv_sec >> 24) & 0xFF;
    /* tv_usec low 32 bits, little-endian, bytes 20..23 */
    data[20] = (tv.tv_usec >> 0) & 0xFF;
    data[21] = (tv.tv_usec >> 8) & 0xFF;
    data[22] = (tv.tv_usec >> 16) & 0xFF;
    data[23] = (tv.tv_usec >> 24) & 0xFF;

    /* Hash the 24-byte buffer and truncate to the nonce size. */
    tc_sha256_init(&sha_ctx);
    tc_sha256_update(&sha_ctx, data, 24);
    tc_sha256_final(hash, &sha_ctx);

    memcpy(nonce_out, hash, SC_NONCE_SIZE);
}
||||
|
||||
/* Encrypt plaintext into [nonce | CCM(plaintext || CRC32) | tag].
 * Output layout: SC_NONCE_SIZE nonce bytes, then the AES-CCM ciphertext
 * of plaintext followed by its little-endian CRC32, then the SC_TAG_SIZE
 * authentication tag. ciphertext must therefore hold
 * plaintext_len + SC_CRC32_SIZE + SC_NONCE_SIZE + SC_TAG_SIZE bytes;
 * *ciphertext_len receives that total. Increments ctx->tx_counter.
 * Returns SC_OK, SC_ERR_INVALID_ARG, SC_ERR_NOT_INITIALIZED, or
 * SC_ERR_CRYPTO. */
sc_status_t sc_encrypt(sc_context_t *ctx, const uint8_t *plaintext, size_t plaintext_len, uint8_t *ciphertext, size_t *ciphertext_len) {
    /* Validate BEFORE sizing any VLA: the original declared VLAs from
     * plaintext_len before checking it, which is undefined behavior /
     * stack exhaustion for bad lengths. */
    if (!ctx || !plaintext || !ciphertext || !ciphertext_len) {
        return SC_ERR_INVALID_ARG;
    }
    if (!ctx->session_ready) {
        return SC_ERR_NOT_INITIALIZED;
    }
    if (plaintext_len == 0) {
        return SC_ERR_INVALID_ARG;
    }

    uint8_t nonce[SC_NONCE_SIZE];
    size_t total_plaintext_len = plaintext_len + SC_CRC32_SIZE;
    uint8_t plaintext_with_crc[total_plaintext_len];
    uint8_t combined_output[total_plaintext_len + SC_TAG_SIZE];
    struct tc_aes_key_sched_struct sched;
    struct tc_ccm_mode_struct ccm_state;

    /* Append CRC32 of the plaintext, little-endian. */
    memcpy(plaintext_with_crc, plaintext, plaintext_len);
    uint32_t crc = crc32_calc(plaintext, plaintext_len);
    plaintext_with_crc[plaintext_len] = (crc >> 0) & 0xFF;
    plaintext_with_crc[plaintext_len + 1] = (crc >> 8) & 0xFF;
    plaintext_with_crc[plaintext_len + 2] = (crc >> 16) & 0xFF;
    plaintext_with_crc[plaintext_len + 3] = (crc >> 24) & 0xFF;

    /* Generate a per-message nonce from the TX counter and current time. */
    sc_build_nonce(ctx->tx_counter, nonce);

    /* Initialize AES key schedule. */
    if (tc_aes128_set_encrypt_key(&sched, ctx->session_key) != TC_CRYPTO_SUCCESS) {
        return SC_ERR_CRYPTO;
    }

    /* Configure CCM mode. */
    if (tc_ccm_config(&ccm_state, &sched, nonce, SC_NONCE_SIZE, SC_TAG_SIZE) != TC_CRYPTO_SUCCESS) {
        return SC_ERR_CRYPTO;
    }

    /* Encrypt and generate the authentication tag. */
    if (tc_ccm_generation_encryption(combined_output, sizeof(combined_output),
                                     NULL, 0, /* no associated data */
                                     plaintext_with_crc, total_plaintext_len,
                                     &ccm_state) != TC_CRYPTO_SUCCESS) {
        return SC_ERR_CRYPTO;
    }

    /* Copy nonce + ciphertext + tag to the output buffer. */
    memcpy(ciphertext, nonce, SC_NONCE_SIZE);
    memcpy(ciphertext + SC_NONCE_SIZE, combined_output, total_plaintext_len + SC_TAG_SIZE);
    *ciphertext_len = SC_NONCE_SIZE + total_plaintext_len + SC_TAG_SIZE;

    ctx->tx_counter++;

    return SC_OK;
}
||||
|
||||
/* Decrypt a message produced by sc_encrypt().
 * Input layout: [nonce | CCM ciphertext | tag]. After authenticated
 * decryption, the trailing little-endian CRC32 is verified and stripped;
 * plaintext receives the payload and *plaintext_len its length.
 * Increments ctx->rx_counter on success.
 * Returns SC_OK, SC_ERR_INVALID_ARG, SC_ERR_NOT_INITIALIZED,
 * SC_ERR_AUTH_FAILED, or SC_ERR_CRC_FAILED. */
sc_status_t sc_decrypt(sc_context_t *ctx,
                       const uint8_t *ciphertext,
                       size_t ciphertext_len,
                       uint8_t *plaintext,
                       size_t *plaintext_len)
{
    uint8_t nonce[SC_NONCE_SIZE];
    struct tc_aes_key_sched_struct sched;
    struct tc_ccm_mode_struct ccm_state;

    /* Validate BEFORE computing total_plaintext_len: the original did
     * `ciphertext_len - SC_NONCE_SIZE - SC_TAG_SIZE` first, so a short
     * input underflowed size_t and sized a gigantic VLA — undefined
     * behavior on hostile input. */
    if (!ctx || !ciphertext || !plaintext || !plaintext_len) {
        return SC_ERR_INVALID_ARG;
    }
    if (!ctx->session_ready) {
        return SC_ERR_NOT_INITIALIZED;
    }
    if (ciphertext_len < SC_NONCE_SIZE + SC_TAG_SIZE + SC_CRC32_SIZE) {
        return SC_ERR_INVALID_ARG;
    }

    size_t total_plaintext_len = ciphertext_len - SC_NONCE_SIZE - SC_TAG_SIZE;
    uint8_t plaintext_with_crc[total_plaintext_len];

    /* Extract the nonce from the head of the message. */
    memcpy(nonce, ciphertext, SC_NONCE_SIZE);

    /* Encrypted payload (ciphertext + tag) starts after the nonce. */
    const uint8_t *encrypted_data = ciphertext + SC_NONCE_SIZE;
    size_t encrypted_len = ciphertext_len - SC_NONCE_SIZE;

    /* Initialize AES key schedule. */
    if (tc_aes128_set_encrypt_key(&sched, ctx->session_key) != TC_CRYPTO_SUCCESS) {
        return SC_ERR_CRYPTO;
    }

    /* Configure CCM mode with the extracted nonce. */
    if (tc_ccm_config(&ccm_state, &sched, nonce, SC_NONCE_SIZE, SC_TAG_SIZE) != TC_CRYPTO_SUCCESS) {
        return SC_ERR_CRYPTO;
    }

    /* Decrypt and verify the authentication tag. */
    if (tc_ccm_decryption_verification(plaintext_with_crc, total_plaintext_len,
                                       NULL, 0, /* no associated data */
                                       encrypted_data, encrypted_len,
                                       &ccm_state) != TC_CRYPTO_SUCCESS) {
        return SC_ERR_AUTH_FAILED;
    }

    /* Verify the trailing little-endian CRC32. */
    size_t data_len = total_plaintext_len - SC_CRC32_SIZE;
    uint32_t expected_crc = crc32_calc(plaintext_with_crc, data_len);
    uint32_t received_crc = (plaintext_with_crc[data_len] << 0) |
                            (plaintext_with_crc[data_len + 1] << 8) |
                            (plaintext_with_crc[data_len + 2] << 16) |
                            (plaintext_with_crc[data_len + 3] << 24);

    if (expected_crc != received_crc) {
        return SC_ERR_CRC_FAILED;
    }

    /* Copy out the payload without its CRC32 suffix. */
    memcpy(plaintext, plaintext_with_crc, data_len);
    *plaintext_len = data_len;

    ctx->rx_counter++;

    return SC_OK;
}
||||
|
||||
sc_status_t sc_compute_public_key_from_private(const uint8_t *private_key, uint8_t *public_key) { |
||||
if (!private_key || !public_key) { |
||||
return SC_ERR_INVALID_ARG; |
||||
} |
||||
|
||||
if (!curve) { |
||||
curve = uECC_secp256r1(); |
||||
} |
||||
|
||||
if (!uECC_compute_public_key(private_key, public_key, curve)) { |
||||
return SC_ERR_CRYPTO; |
||||
} |
||||
return SC_OK; |
||||
} |
||||
Loading…
Reference in new issue