Browse Source

refactor

nodeinfo-routing-update
jeka 1 month ago
parent
commit
9d0223d3fa
  1. 45
      lib/debug_config.c
  2. 8
      lib/debug_config.h
  3. 7
      lib/test_size.c
  4. 155
      lib/timeout_heap.c1
  5. 15
      lib/u_async.c
  6. 793
      lib/u_async.c1
  7. 3
      lib/u_async.h
  8. 8
      src/config_parser.c
  9. 13
      src/control_server.c
  10. 7
      src/dummynet.c
  11. 24
      src/etcp.c
  12. 1
      src/etcp.h
  13. 51
      src/etcp_connections.c
  14. 4
      src/etcp_loadbalancer.c
  15. 20
      src/routing.c
  16. 391
      src/secure_channel.c1
  17. 4
      src/tun_if.c
  18. 20
      src/tun_route.c
  19. 12
      src/utun_instance.c

45
lib/debug_config.c

@ -13,10 +13,9 @@
#include <time.h>
void log_dump(const char* prefix, const uint8_t* data, size_t len) {
// Build packet data as hex string for single-line output
char hex_buf[513]; // 256 bytes * 2 chars + 1 for null terminator
char hex_buf[513] = {'\0'};
size_t hex_len = 0;
size_t show_len = (len > 128) ? 128 : len; // Show max 128 bytes
size_t show_len = (len > 128) ? 128 : len;
for (size_t i = 0; i < show_len && hex_len < 512 - 3; i++) {
hex_len += snprintf(hex_buf + hex_len, sizeof(hex_buf) - hex_len, "%02x", data[i]);
@ -40,39 +39,21 @@ void log_dump(const char* prefix, const uint8_t* data, size_t len) {
}
size_t addr_to_string(const struct sockaddr_storage *addr, char *buf, size_t buflen) {
if (!buf || buflen == 0)
return 0;
buf[0] = '\0'; // всегда валидная строка
char ip[INET6_ADDRSTRLEN];
uint16_t port;
int n;
if (addr->ss_family == AF_INET) {
const struct sockaddr_in *a = (const struct sockaddr_in *)addr;
inet_ntop(AF_INET, &a->sin_addr, ip, sizeof(ip));
port = ntohs(a->sin_port);
n = snprintf(buf, buflen, "%s:%u", ip, port);
ip_str_t ip_to_str(const void *addr, int family) {
ip_str_t result;
result.str[0] = '\0';
} else if (addr->ss_family == AF_INET6) {
const struct sockaddr_in6 *a = (const struct sockaddr_in6 *)addr;
inet_ntop(AF_INET6, &a->sin6_addr, ip, sizeof(ip));
port = ntohs(a->sin6_port);
n = snprintf(buf, buflen, "[%s]:%u", ip, port);
} else {
return 0;
if (!addr) {
return result;
}
// snprintf гарантирует '\0', но на всякий случай:
buf[buflen - 1] = '\0';
if (family == AF_INET) {
inet_ntop(AF_INET, addr, result.str, 16);
} else if (family == AF_INET6) {
inet_ntop(AF_INET6, addr, result.str, 48);
}
// Возвращаем фактическую длину (как snprintf)
return (n < 0) ? 0 : (size_t)n;
return result;
}

8
lib/debug_config.h

@ -99,8 +99,12 @@ void debug_enable_file_output(const char* file_path, int truncate);
void debug_disable_file_output(void);
void debug_enable_console(int enable);
// вывод struct sockaddr_storage как текст
size_t addr_to_string(const struct sockaddr_storage *addr, char *buf, size_t buflen);
// IP address to string (static buffer, single-threaded)
typedef struct {
char str[48]; // INET6_ADDRSTRLEN(45) + \0
} ip_str_t;
ip_str_t ip_to_str(const void *addr, int family);
// hex dump в лог
void log_dump(const char* prefix, const uint8_t* data, size_t len);

7
lib/test_size.c

@ -1,7 +0,0 @@
#include "u_async.h"
#include <stdio.h>
/* Tiny diagnostic utility: report the in-memory footprint of one
 * socket_node registry entry (type comes from u_async.h). */
int main(void) {
    size_t node_bytes = sizeof(struct socket_node);
    printf("socket_node size: %zu bytes\n", node_bytes);
    return 0;
}

155
lib/timeout_heap.c1

@ -1,155 +0,0 @@
// timeout_heap.c
#include "timeout_heap.h"
#include "debug_config.h"
#include <stdlib.h>
#include <stdio.h> // For potential error printing, optional
// Helper macros for 1-based indices
#define PARENT(i) ((i) / 2)
#define LEFT_CHILD(i) (2 * (i))
#define RIGHT_CHILD(i) (2 * (i) + 1)
/* Allocate an empty min-heap with room for `initial_capacity` entries.
 * Returns NULL if either allocation fails; caller owns the result and
 * must release it with timeout_heap_destroy(). */
TimeoutHeap *timeout_heap_create(size_t initial_capacity) {
    TimeoutHeap *th = malloc(sizeof *th);
    if (th == NULL)
        return NULL;
    th->heap = malloc(initial_capacity * sizeof *th->heap);
    if (th->heap == NULL) {
        free(th);
        return NULL;
    }
    th->size = 0;
    th->capacity = initial_capacity;
    th->freed_count = 0;
    th->user_data = NULL;
    th->free_callback = NULL;
    return th;
}
/* Release the heap itself. Remaining entries are drained through
 * timeout_heap_pop() so no stale references survive; the entry payloads
 * are deliberately NOT freed here -- ownership stays with the caller. */
void timeout_heap_destroy(TimeoutHeap *h) {
    if (h == NULL)
        return;
    TimeoutEntry discarded;
    while (h->size > 0) {
        (void)timeout_heap_pop(h, &discarded);
    }
    free(h->heap);
    free(h);
}
/* Sift the entry at 1-based index `pos` toward the root while it expires
 * earlier than its parent (min-heap invariant on `expiration`). */
static void bubble_up(TimeoutHeap *h, size_t pos) {
    while (pos > 1) {
        TimeoutEntry *parent = &h->heap[PARENT(pos) - 1];
        TimeoutEntry *child = &h->heap[pos - 1];
        if (parent->expiration <= child->expiration)
            break;
        TimeoutEntry tmp = *parent;
        *parent = *child;
        *child = tmp;
        pos = PARENT(pos);
    }
}
/* Insert an (expiration, data) pair, growing the backing array by
 * doubling when full. Returns 0 on success, -1 if growth fails
 * (heap left untouched in that case). */
int timeout_heap_push(TimeoutHeap *h, TimeoutTime expiration, void *data) {
    if (h->size == h->capacity) {
        size_t grown = (h->capacity == 0) ? 1 : h->capacity * 2;
        TimeoutEntry *resized = realloc(h->heap, grown * sizeof *resized);
        if (resized == NULL)
            return -1;
        h->heap = resized;
        h->capacity = grown;
    }
    size_t slot = h->size;
    h->size++;
    h->heap[slot].expiration = expiration;
    h->heap[slot].data = data;
    h->heap[slot].deleted = 0;
    /* Re-establish heap order starting from the new leaf (1-based). */
    bubble_up(h, slot + 1);
    return 0;
}
/* Sift the entry at 1-based index `pos` downward, swapping with the
 * earlier-expiring child until the min-heap invariant holds again. */
static void heapify_down(TimeoutHeap *h, size_t pos) {
    for (;;) {
        size_t best = pos;
        size_t left = LEFT_CHILD(pos);
        size_t right = RIGHT_CHILD(pos);
        if (left <= h->size && h->heap[left - 1].expiration < h->heap[best - 1].expiration)
            best = left;
        if (right <= h->size && h->heap[right - 1].expiration < h->heap[best - 1].expiration)
            best = right;
        if (best == pos)
            return;
        TimeoutEntry tmp = h->heap[best - 1];
        h->heap[best - 1] = h->heap[pos - 1];
        h->heap[pos - 1] = tmp;
        pos = best;
    }
}
/* Drop the root entry: move the last leaf into its place and sift down.
 * No-op on an empty heap. */
static void remove_root(TimeoutHeap *h) {
    if (h->size == 0)
        return;
    h->size--;
    h->heap[0] = h->heap[h->size];
    if (h->size > 0)
        heapify_down(h, 1);
}
/* Copy the earliest live entry into *out without removing it.
 * Cancelled (deleted) entries at the root are lazily discarded first.
 * Returns 0 on success, -1 if the heap is (effectively) empty.
 * FIXES:
 *  - removed the dead loop variable `i` (declared, compared, never
 *    incremented -- the loop only terminated because remove_root()
 *    shrinks h->size);
 *  - discarded cancelled entries now release their payload through the
 *    registered free_callback instead of leaking it. */
int timeout_heap_peek(TimeoutHeap *h, TimeoutEntry *out) {
    if (h->size == 0) return -1;
    while (h->size > 0 && h->heap[0].deleted) {
        if (h->free_callback && h->heap[0].data) {
            h->free_callback(h->user_data, h->heap[0].data);
        }
        remove_root(h);
    }
    if (h->size == 0) return -1;
    *out = h->heap[0];
    return 0;
}
/* Remove and return (via *out) the earliest live entry.
 * Returns 0 on success, -1 if the heap is (effectively) empty.
 * FIX: cancelled (deleted) entries were removed but their payload was
 * never released, leaking every lazily-cancelled timer (and tripping the
 * alloc/free accounting in callers). They are now handed to the
 * registered free_callback before removal; live entries are still
 * returned to the caller, who keeps ownership of their data. */
int timeout_heap_pop(TimeoutHeap *h, TimeoutEntry *out) {
    if (h->size == 0) return -1;
    while (h->size > 0 && h->heap[0].deleted) {
        if (h->free_callback && h->heap[0].data) {
            h->free_callback(h->user_data, h->heap[0].data);
        }
        remove_root(h);
    }
    if (h->size == 0) return -1;
    *out = h->heap[0];
    remove_root(h);
    return 0;
}
/* Flag the entry matching (expiration, data) as deleted so the skip
 * logic in peek/pop reclaims it lazily. Linear scan, O(n).
 * Returns 0 if found, -1 otherwise. */
int timeout_heap_cancel(TimeoutHeap *h, TimeoutTime expiration, void *data) {
    size_t idx;
    for (idx = 0; idx < h->size; idx++) {
        TimeoutEntry *e = &h->heap[idx];
        if (e->data == data && e->expiration == expiration) {
            e->deleted = 1;
            return 0;
        }
    }
    return -1; /* no matching entry */
}
/* Register the hook used to release entry payloads; `user_data` is
 * passed back verbatim on every invocation. */
void timeout_heap_set_free_callback(TimeoutHeap *h, void* user_data, void (*callback)(void* user_data, void* data)) {
    if (h == NULL)
        return;
    h->free_callback = callback;
    h->user_data = user_data;
}

15
lib/u_async.c

@ -301,6 +301,21 @@ static void get_current_time(struct timeval* tv) {
#endif
}
#ifdef _WIN32
uint64_t get_time_tb(void) {
LARGE_INTEGER freq, count;
QueryPerformanceFrequency(&freq); // Получаем частоту таймера
QueryPerformanceCounter(&count); // Получаем текущее значение счётчика
return (uint64_t)(count.QuadPart * 10000) / (uint64_t)freq.QuadPart; // Преобразуем в требуемые единицы времени
}
#else
uint64_t get_time_tb(void) {
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
return (uint64_t)ts.tv_sec * 10000ULL + (uint64_t)ts.tv_nsec / 100000ULL; // Преобразуем в требуемые единицы времени
}
#endif
// Drain wakeup pipe - read all available bytes

793
lib/u_async.c1

@ -1,793 +0,0 @@
// uasync.c
#include "u_async.h"
#include "debug_config.h"
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <poll.h>
#include <limits.h>
#include <fcntl.h>
// Timeout node with safe cancellation
// One armed timer. Heap entries store a pointer to one of these; the loop
// frees the node after the callback fires (or lazily after cancellation).
struct timeout_node {
    void* arg;                   // user argument forwarded to callback
    timeout_callback_t callback; // fired on expiry (NULLed on cancel)
    uint64_t expiration_ms;      // absolute expiration time in milliseconds
    struct UASYNC* ua;           // Pointer back to uasync instance for counter updates
    int cancelled;               // Cancellation flag
};
// Socket node with array-based storage
// One registered descriptor plus its per-event callbacks.
struct socket_node {
    int fd;
    socket_callback_t read_cbk;   // invoked on POLLIN
    socket_callback_t write_cbk;  // invoked on POLLOUT
    socket_callback_t except_cbk; // invoked on POLLPRI / error conditions
    void* user_data;
    int active; // 1 if socket is active, 0 if freed (for reuse)
};
// Array-based socket management for O(1) operations
// Registry keyed both ways: fd -> slot and slot -> fd, -1 marks "unused".
struct socket_array {
    struct socket_node* sockets; // Dynamic array of socket nodes
    int* fd_to_index;            // FD to array index mapping
    int* index_to_fd;            // Array index to FD mapping
    int capacity;                // Total allocated capacity
    int count;                   // Number of active sockets
    int max_fd;                  // Maximum FD for bounds checking
};
// Internal registry primitives (definitions follow below).
static struct socket_array* socket_array_create(int initial_capacity);
static void socket_array_destroy(struct socket_array* sa);
static int socket_array_add(struct socket_array* sa, int fd, socket_callback_t read_cbk, socket_callback_t write_cbk, socket_callback_t except_cbk, void* user_data);
static int socket_array_remove(struct socket_array* sa, int fd);
static struct socket_node* socket_array_get(struct socket_array* sa, int fd);
// No global instance - each module must use its own struct UASYNC instance
// Array-based socket management implementation
/* Allocate a socket registry holding at least 4 slots. Every slot and
 * both fd<->index maps start out invalid (-1). Returns NULL on any
 * allocation failure (nothing leaked). */
static struct socket_array* socket_array_create(int initial_capacity) {
    int cap = (initial_capacity < 4) ? 4 : initial_capacity;
    struct socket_array* arr = malloc(sizeof(struct socket_array));
    if (arr == NULL) return NULL;
    arr->sockets = calloc(cap, sizeof(struct socket_node));
    arr->fd_to_index = calloc(cap, sizeof(int));
    arr->index_to_fd = calloc(cap, sizeof(int));
    if (arr->sockets == NULL || arr->fd_to_index == NULL || arr->index_to_fd == NULL) {
        free(arr->sockets);
        free(arr->fd_to_index);
        free(arr->index_to_fd);
        free(arr);
        return NULL;
    }
    for (int slot = 0; slot < cap; slot++) {
        arr->fd_to_index[slot] = -1;
        arr->index_to_fd[slot] = -1;
        arr->sockets[slot].fd = -1;
        arr->sockets[slot].active = 0;
    }
    arr->capacity = cap;
    arr->count = 0;
    arr->max_fd = -1;
    return arr;
}
/* Release the registry and all three backing arrays. NULL-safe. */
static void socket_array_destroy(struct socket_array* sa) {
    if (sa == NULL) return;
    free(sa->index_to_fd);
    free(sa->fd_to_index);
    free(sa->sockets);
    free(sa);
}
/* Register `fd` with its callbacks; grows all three arrays on demand.
 * Returns the slot index on success, -1 on invalid fd, duplicate fd, or
 * allocation failure.
 * BUGFIX: the old grow path free()d buffers that realloc had already
 * installed when a LATER realloc failed -- realloc success frees the old
 * block, so those free()s released live memory and left sa->sockets /
 * sa->fd_to_index / sa->index_to_fd dangling (use-after-free, double
 * free). Each successful realloc is now committed to `sa` immediately;
 * on partial failure the struct stays consistent (capacity unchanged,
 * already-grown arrays simply carry slack). */
static int socket_array_add(struct socket_array* sa, int fd, socket_callback_t read_cbk, socket_callback_t write_cbk, socket_callback_t except_cbk, void* user_data) {
    if (!sa || fd < 0 || fd >= FD_SETSIZE) return -1;
    if (fd >= sa->capacity) {
        // Need to resize - double the capacity (at least fd + 16)
        int new_capacity = sa->capacity * 2;
        if (fd >= new_capacity) new_capacity = fd + 16;
        struct socket_node* new_sockets = realloc(sa->sockets, new_capacity * sizeof(struct socket_node));
        if (!new_sockets) return -1;
        sa->sockets = new_sockets;
        int* new_fd_to_index = realloc(sa->fd_to_index, new_capacity * sizeof(int));
        if (!new_fd_to_index) return -1;
        sa->fd_to_index = new_fd_to_index;
        int* new_index_to_fd = realloc(sa->index_to_fd, new_capacity * sizeof(int));
        if (!new_index_to_fd) return -1;
        sa->index_to_fd = new_index_to_fd;
        // Initialize the newly exposed elements
        for (int i = sa->capacity; i < new_capacity; i++) {
            sa->fd_to_index[i] = -1;
            sa->index_to_fd[i] = -1;
            sa->sockets[i].fd = -1;
            sa->sockets[i].active = 0;
        }
        sa->capacity = new_capacity;
    }
    // Check if FD already exists
    if (sa->fd_to_index[fd] != -1) return -1;
    // Find first free slot
    int index = -1;
    for (int i = 0; i < sa->capacity; i++) {
        if (!sa->sockets[i].active) {
            index = i;
            break;
        }
    }
    if (index == -1) return -1; // No free slots (shouldn't happen)
    // Add the socket
    sa->sockets[index].fd = fd;
    sa->sockets[index].read_cbk = read_cbk;
    sa->sockets[index].write_cbk = write_cbk;
    sa->sockets[index].except_cbk = except_cbk;
    sa->sockets[index].user_data = user_data;
    sa->sockets[index].active = 1;
    sa->fd_to_index[fd] = index;
    sa->index_to_fd[index] = fd;
    sa->count++;
    if (fd > sa->max_fd) sa->max_fd = fd;
    return index;
}
static int socket_array_remove(struct socket_array* sa, int fd) {
if (!sa || fd < 0 || fd >= sa->capacity) return -1;
int index = sa->fd_to_index[fd];
if (index == -1 || !sa->sockets[index].active) return -1; // FD not found
// Mark as inactive
sa->sockets[index].active = 0;
sa->sockets[index].fd = -1;
sa->fd_to_index[fd] = -1;
sa->index_to_fd[index] = -1;
sa->count--;
return 0;
}
/* Look up the live registration for `fd`; NULL if absent or inactive. */
static struct socket_node* socket_array_get(struct socket_array* sa, int fd) {
    if (sa == NULL || fd < 0 || fd >= sa->capacity) return NULL;
    int slot = sa->fd_to_index[fd];
    if (slot == -1) return NULL;
    return sa->sockets[slot].active ? &sa->sockets[slot] : NULL;
}
/* Heap free-callback: release a timeout_node and bump the owning
 * instance's freed-timer counter so the alloc/free accounting balances. */
static void timeout_node_free_callback(void* user_data, void* data) {
    struct UASYNC* owner = (struct UASYNC*)user_data;
    owner->timer_free_count++;
    free(data);
}
// Helper to get current time
// Fills *tv with the current wall-clock time via gettimeofday().
// NOTE(review): wall-clock time can step (NTP/admin changes); timer
// expirations derived from it are not monotonic -- confirm whether
// CLOCK_MONOTONIC was intended here.
static void get_current_time(struct timeval* tv) {
    gettimeofday(tv, NULL);
}
/* Drain every pending byte from the (non-blocking) wakeup pipe so a
 * subsequent poll() does not report it readable again. */
static void drain_wakeup_pipe(struct UASYNC* ua) {
    if (ua == NULL || !ua->wakeup_initialized) return;
    char scratch[64];
    for (;;) {
        ssize_t got = read(ua->wakeup_pipe[0], scratch, sizeof(scratch));
        if (got <= 0)
            break; /* empty (EAGAIN), EOF, or error -- all mean stop */
    }
}
/* Advance *tv by dt timebase ticks (one tick = 0.1 ms = 100 us),
 * then normalize tv_usec back into [0, 1e6). */
static void timeval_add_tb(struct timeval* tv, int dt) {
    int whole_sec = dt / 10000;  /* full seconds contained in dt */
    int tick_rem = dt % 10000;   /* leftover ticks, converted to usec */
    tv->tv_usec += tick_rem * 100;
    tv->tv_sec += whole_sec + tv->tv_usec / 1000000;
    tv->tv_usec %= 1000000;
}
/* Collapse a timeval into whole milliseconds (truncating sub-ms). */
static uint64_t timeval_to_ms(const struct timeval* tv) {
    uint64_t whole = (uint64_t)tv->tv_sec * 1000ULL;
    uint64_t frac = (uint64_t)tv->tv_usec / 1000ULL;
    return whole + frac;
}
// Simplified timeout handling without reference counting
// Process expired timeouts with safe cancellation
// Pops every heap entry whose expiration is <= "now" and fires its
// callback (unless cancelled). Each popped node is freed here and the
// owner's freed-timer counter is bumped -- nodes are single-shot.
static void process_timeouts(struct UASYNC* ua) {
    if (!ua || !ua->timeout_heap) return;
    struct timeval now_tv;
    get_current_time(&now_tv);
    uint64_t now_ms = timeval_to_ms(&now_tv);
    while (1) {
        TimeoutEntry entry;
        if (timeout_heap_peek(ua->timeout_heap, &entry) != 0) break;
        // Earliest entry not yet due -> nothing further can be due either.
        if (entry.expiration > now_ms) break;
        // Pop the expired timeout
        timeout_heap_pop(ua->timeout_heap, &entry);
        struct timeout_node* node = (struct timeout_node*)entry.data;
        if (node && node->callback && !node->cancelled) {
            // Execute callback only if not cancelled
            node->callback(node->arg);
        }
        // Always free the node after processing
        if (node && node->ua) {
            node->ua->timer_free_count++;
        }
        free(node);
    }
}
// Compute time to next timeout
// Fills *tv with the delay until the earliest pending timer fires.
// Yields {0,0} when there is no heap, no pending entry, or the earliest
// entry is already due (caller should then fire timers immediately).
static void get_next_timeout(struct UASYNC* ua, struct timeval* tv) {
    if (!ua || !ua->timeout_heap) {
        tv->tv_sec = 0;
        tv->tv_usec = 0;
        return;
    }
    TimeoutEntry entry;
    if (timeout_heap_peek(ua->timeout_heap, &entry) != 0) {
        // Heap empty (or only cancelled entries remained).
        tv->tv_sec = 0;
        tv->tv_usec = 0;
        return;
    }
    struct timeval now_tv;
    get_current_time(&now_tv);
    uint64_t now_ms = timeval_to_ms(&now_tv);
    if (entry.expiration <= now_ms) {
        // Already expired: no waiting required.
        tv->tv_sec = 0;
        tv->tv_usec = 0;
        return;
    }
    uint64_t delta_ms = entry.expiration - now_ms;
    tv->tv_sec = delta_ms / 1000;
    tv->tv_usec = (delta_ms % 1000) * 1000;
}
// Instance version
// Arm a one-shot timer.
//   timeout_tb: delay in timebase units (0.1 ms); must be >= 0.
//   arg/callback: invoked as callback(arg) when the timer fires.
// Returns an opaque timer id accepted by uasync_cancel_timeout(), or
// NULL on bad arguments / allocation failure. The node is freed by the
// loop after firing (or during cancel/destroy) -- caller must not free it.
void* uasync_set_timeout(struct UASYNC* ua, int timeout_tb, void* arg, timeout_callback_t callback) {
    if (!ua || timeout_tb < 0 || !callback) return NULL;
    if (!ua->timeout_heap) return NULL;
    DEBUG_DEBUG(DEBUG_CATEGORY_TIMERS, "uasync_set_timeout: timeout=%d.%d ms, arg=%p, callback=%p", timeout_tb/10, timeout_tb%10, arg, callback);
    struct timeout_node* node = malloc(sizeof(struct timeout_node));
    if (!node) {
        DEBUG_ERROR(DEBUG_CATEGORY_TIMERS, "uasync_set_timeout: failed to allocate node");
        return NULL;
    }
    ua->timer_alloc_count++;
    node->arg = arg;
    node->callback = callback;
    node->ua = ua;
    node->cancelled = 0;
    // Calculate expiration time in milliseconds
    struct timeval now;
    get_current_time(&now);
    timeval_add_tb(&now, timeout_tb);
    node->expiration_ms = timeval_to_ms(&now);
    // Add to heap
    if (timeout_heap_push(ua->timeout_heap, node->expiration_ms, node) != 0) {
        DEBUG_ERROR(DEBUG_CATEGORY_TIMERS, "uasync_set_timeout: failed to push to heap");
        free(node);
        ua->timer_free_count++; // Balance the alloc counter
        return NULL;
    }
    return node;
}
// Instance version
// Cancel a timer previously returned by uasync_set_timeout().
// The heap entry is only flagged deleted; the node memory is reclaimed
// lazily by the heap's skip logic. Returns ERR_OK if the timer was found
// and cancelled, ERR_FAIL if it already fired or the id is invalid.
// NOTE(review): t_id is dereferenced (node->expiration_ms) even though
// the node may already have been freed after firing -- confirm callers
// never cancel an expired timer id.
err_t uasync_cancel_timeout(struct UASYNC* ua, void* t_id) {
    if (!ua || !t_id || !ua->timeout_heap) {
        DEBUG_ERROR(DEBUG_CATEGORY_TIMERS, "uasync_cancel_timeout: invalid parameters ua=%p, t_id=%p, heap=%p",
        ua, t_id, ua ? ua->timeout_heap : NULL);
        return ERR_FAIL;
    }
    struct timeout_node* node = (struct timeout_node*)t_id;
    // Try to cancel from heap first
    if (timeout_heap_cancel(ua->timeout_heap, node->expiration_ms, node) == 0) {
        // Successfully marked as deleted - free will happen lazily in heap
        node->cancelled = 1;
        node->callback = NULL;
        DEBUG_DEBUG(DEBUG_CATEGORY_TIMERS, "uasync_cancel_timeout: successfully cancelled timer %p from heap", node);
        return ERR_OK;
    }
    DEBUG_DEBUG(DEBUG_CATEGORY_TIMERS, "uasync_cancel_timeout: not found in heap: ua=%p, t_id=%p, node=%p, expires=%llu ms",
    ua, t_id, node, (unsigned long long)node->expiration_ms);
    // If not found in heap, it may have already expired or been invalid
    return ERR_FAIL;
}
// Instance version
// Register fd with optional read/write/exception callbacks; any of the
// three may be NULL to ignore that event class.
// Returns an opaque socket id (pointer into the internal array) or NULL
// on bad fd / duplicate / allocation failure.
// NOTE(review): the returned pointer aims into sockets->sockets[], which
// socket_array_add() may realloc when it grows -- ids handed out earlier
// can dangle after later additions. uasync_lookup_socket() exists to
// re-resolve an fd to its current pointer; confirm callers use it rather
// than caching ids across adds.
void* uasync_add_socket(struct UASYNC* ua, int fd, socket_callback_t read_cbk, socket_callback_t write_cbk, socket_callback_t except_cbk, void* user_data) {
    if (!ua || fd < 0 || fd >= FD_SETSIZE) return NULL; // Bounds check
    int index = socket_array_add(ua->sockets, fd, read_cbk, write_cbk, except_cbk, user_data);
    if (index < 0) return NULL;
    ua->socket_alloc_count++;
    // Return pointer to the socket node (same as before for API compatibility)
    return &ua->sockets->sockets[index];
}
// Instance version
err_t uasync_remove_socket(struct UASYNC* ua, void* s_id) {
if (!ua || !s_id) return ERR_FAIL;
struct socket_node* node = (struct socket_node*)s_id;
if (node->fd < 0) return ERR_FAIL; // Invalid node
int result = socket_array_remove(ua->sockets, node->fd);
if (result != 0) return ERR_FAIL;
ua->socket_free_count++;
return ERR_OK;
}
// Run the event loop forever. Never returns.
void uasync_mainloop(struct UASYNC* ua) {
    for (;;) {
        uasync_poll(ua, -1); /* block indefinitely between events */
    }
}
// Instance version
/*
 * One iteration of the event loop:
 *   1) fire timers that are already due,
 *   2) compute how long poll() may sleep (earliest timer vs. caller cap),
 *   3) poll() the wakeup pipe plus every registered socket,
 *   4) fire due timers again, then dispatch socket callbacks.
 * timeout_tb: additional upper bound on the wait, in timebase units
 * (0.1 ms); negative means "no extra cap" (wait for timers/events only).
 */
void uasync_poll(struct UASYNC* ua, int timeout_tb) {
    if (!ua) return;
    DEBUG_DEBUG(DEBUG_CATEGORY_ETCP, "async: mainloop: event waiting");
    /* Process expired timeouts */
    process_timeouts(ua);
    /* Compute timeout for poll in milliseconds */
    int timeout_ms = -1; // infinite by default
    // Get next timeout from heap
    struct timeval tv;
    get_next_timeout(ua, &tv);
    if (tv.tv_sec > 0 || tv.tv_usec > 0 || (ua->timeout_heap && ua->timeout_heap->size > 0)) {
        // Convert timeval to milliseconds, cap at INT_MAX
        uint64_t ms = (uint64_t)tv.tv_sec * 1000ULL + (uint64_t)tv.tv_usec / 1000ULL;
        if (ms > INT_MAX) ms = INT_MAX;
        timeout_ms = (int)ms;
    }
    /* If timeout_tb >= 0, compute timeout as min(timeout_tb, existing timer) */
    if (timeout_tb >= 0) {
        // Convert timebase (0.1 ms) to milliseconds
        int user_timeout_ms = timeout_tb / 10;
        if (timeout_tb % 10 != 0) user_timeout_ms++; // round up
        if (timeout_ms < 0 || user_timeout_ms < timeout_ms) {
            timeout_ms = user_timeout_ms;
        }
    }
    /* Build pollfd array from socket array - O(1) per socket */
    int socket_count = ua->sockets ? ua->sockets->count : 0;
    int wakeup_fd_present = ua->wakeup_initialized ? 1 : 0;
    int total_fds = socket_count + wakeup_fd_present;
    if (total_fds == 0) {
        /* No sockets and no wakeup fd, just wait for timeout */
        if (timeout_ms >= 0) {
            /* usleep would be better but we just call poll with empty set */
            poll(NULL, 0, timeout_ms);
        } else {
            /* Infinite timeout with no sockets - should not happen in practice */
            return;
        }
        /* Check timeouts again after sleep */
        process_timeouts(ua);
        return;
    }
    // fds: what poll() watches; nodes: parallel array mapping poll slots
    // (past the wakeup fd) back to their socket registrations.
    struct pollfd* fds = malloc(total_fds * sizeof(struct pollfd));
    struct socket_node** nodes = NULL;
    if (socket_count > 0) {
        nodes = malloc(socket_count * sizeof(struct socket_node*));
    }
    if (!fds || (socket_count > 0 && !nodes)) {
        free(fds);
        free(nodes);
        return; /* out of memory */
    }
    /* Fill arrays */
    int idx = 0;
    /* Add wakeup fd first if present */
    if (wakeup_fd_present) {
        fds[idx].fd = ua->wakeup_pipe[0];
        fds[idx].events = POLLIN;
        fds[idx].revents = 0;
        idx++;
    }
    /* Add socket fds using efficient array traversal */
    int node_idx = 0;
    for (int i = 0; i < ua->sockets->capacity && node_idx < socket_count; i++) {
        if (ua->sockets->sockets[i].active) {
            struct socket_node* cur = &ua->sockets->sockets[i];
            fds[idx].fd = cur->fd;
            fds[idx].events = 0;
            fds[idx].revents = 0;
            // Only request events the user registered callbacks for.
            if (cur->read_cbk) fds[idx].events |= POLLIN;
            if (cur->write_cbk) fds[idx].events |= POLLOUT;
            if (cur->except_cbk) fds[idx].events |= POLLPRI;
            if (nodes) {
                nodes[node_idx] = cur;
            }
            idx++;
            node_idx++;
        }
    }
    /* Call poll */
    int ret = poll(fds, total_fds, timeout_ms);
    if (ret < 0) {
        if (errno == EINTR) {
            // Interrupted by a signal: not an error; caller re-enters the loop.
            free(fds);
            free(nodes);
            return;
        }
        perror("poll");
        free(fds);
        free(nodes);
        return;
    }
    /* Process timeouts that may have expired during poll */
    process_timeouts(ua);
    /* Process socket events */
    if (ret > 0) {
        for (int i = 0; i < total_fds; i++) {
            if (fds[i].revents == 0) continue;
            /* Handle wakeup fd separately */
            if (wakeup_fd_present && i == 0) {
                if (fds[i].revents & POLLIN) {
                    drain_wakeup_pipe(ua);
                }
                continue;
            }
            /* Socket event */
            int socket_idx = i - wakeup_fd_present;
            struct socket_node* node = nodes[socket_idx];
            /* Check for error conditions first */
            if (fds[i].revents & (POLLERR | POLLHUP | POLLNVAL)) {
                /* Treat as exceptional condition */
                if (node->except_cbk) {
                    node->except_cbk(node->fd, node->user_data);
                }
            }
            /* Exceptional data (out-of-band) */
            if (fds[i].revents & POLLPRI) {
                if (node->except_cbk) {
                    node->except_cbk(node->fd, node->user_data);
                }
            }
            /* Read readiness */
            if (fds[i].revents & POLLIN) {
                if (node->read_cbk) {
                    node->read_cbk(node->fd, node->user_data);
                }
            }
            /* Write readiness */
            if (fds[i].revents & POLLOUT) {
                if (node->write_cbk) {
                    node->write_cbk(node->fd, node->user_data);
                }
            }
        }
    }
    free(fds);
    free(nodes);
}
// ========== Instance management functions ==========
/*
 * Allocate and fully initialize an event-loop instance: a self-pipe for
 * cross-thread wakeups (read end made non-blocking), the socket
 * registry, and the timer min-heap. A pipe failure is tolerated (the
 * instance just has no wakeup mechanism); allocation failures of the
 * registry or heap unwind everything and return NULL. Caller owns the
 * result and must release it with uasync_destroy().
 */
struct UASYNC* uasync_create(void) {
    struct UASYNC* ua = malloc(sizeof(struct UASYNC));
    if (!ua) return NULL;
    memset(ua, 0, sizeof(struct UASYNC));
    ua->wakeup_pipe[0] = -1;
    ua->wakeup_pipe[1] = -1;
    ua->wakeup_initialized = 0;
    // Create wakeup pipe
    if (pipe(ua->wakeup_pipe) < 0) {
        DEBUG_WARN(DEBUG_CATEGORY_UASYNC, "Failed to create wakeup pipe: %s", strerror(errno));
        // Continue without wakeup mechanism
        ua->wakeup_pipe[0] = -1;
        ua->wakeup_pipe[1] = -1;
    } else {
        ua->wakeup_initialized = 1;
        // Set non-blocking on read end to avoid blocking if pipe is full
        int flags = fcntl(ua->wakeup_pipe[0], F_GETFL, 0);
        if (flags >= 0) {
            fcntl(ua->wakeup_pipe[0], F_SETFL, flags | O_NONBLOCK);
        }
    }
    ua->sockets = socket_array_create(16);
    if (!ua->sockets) {
        if (ua->wakeup_initialized) {
            close(ua->wakeup_pipe[0]);
            close(ua->wakeup_pipe[1]);
        }
        free(ua);
        return NULL;
    }
    ua->timeout_heap = timeout_heap_create(16);
    if (!ua->timeout_heap) {
        socket_array_destroy(ua->sockets);
        if (ua->wakeup_initialized) {
            close(ua->wakeup_pipe[0]);
            close(ua->wakeup_pipe[1]);
        }
        free(ua);
        return NULL;
    }
    // Set callback to free timeout nodes and update counters
    timeout_heap_set_free_callback(ua->timeout_heap, ua, timeout_node_free_callback);
    return ua;
}
// Print all resources for debugging
// Dumps alloc/free counters plus every live timer and socket to stdout.
// Read-only: iterates the heap array in place without popping entries.
void uasync_print_resources(struct UASYNC* ua, const char* prefix) {
    if (!ua) {
        printf("%s: NULL uasync instance\n", prefix);
        return;
    }
    printf("\n🔍 %s: UASYNC Resource Report for %p\n", prefix, ua);
    printf(" Timer Statistics: allocated=%zu, freed=%zu, active=%zd\n",
    ua->timer_alloc_count, ua->timer_free_count,
    (ssize_t)(ua->timer_alloc_count - ua->timer_free_count));
    printf(" Socket Statistics: allocated=%zu, freed=%zu, active=%zd\n",
    ua->socket_alloc_count, ua->socket_free_count,
    (ssize_t)(ua->socket_alloc_count - ua->socket_free_count));
    // Show active timers
    if (ua->timeout_heap) {
        size_t active_timers = 0;
        // Safe read without extraction - just iterate over the backing array
        for (size_t i = 0; i < ua->timeout_heap->size; i++) {
            if (!ua->timeout_heap->heap[i].deleted) {
                active_timers++;
                struct timeout_node* node = (struct timeout_node*)ua->timeout_heap->heap[i].data;
                printf(" Timer: node=%p, expires=%llu ms, cancelled=%d\n",
                node, (unsigned long long)ua->timeout_heap->heap[i].expiration, node->cancelled);
            }
        }
        printf(" Active timers in heap: %zu\n", active_timers);
    }
    // Show active sockets
    if (ua->sockets) {
        int active_sockets = 0;
        printf(" Socket array capacity: %d, active: %d\n",
        ua->sockets->capacity, ua->sockets->count);
        for (int i = 0; i < ua->sockets->capacity; i++) {
            if (ua->sockets->sockets[i].active) {
                active_sockets++;
                printf(" Socket: fd=%d, active=%d\n",
                ua->sockets->sockets[i].fd,
                ua->sockets->sockets[i].active);
            }
        }
        printf(" Total active sockets: %d\n", active_sockets);
    }
    printf("🔚 %s: End of resource report\n\n", prefix);
}
/*
 * Tear down an instance created by uasync_create():
 * drains and frees all remaining timers, releases the socket registry
 * (optionally close()ing registered fds when close_fds != 0), and closes
 * the wakeup pipe. If the alloc/free counters still disagree afterwards
 * the process abort()s -- leaks here indicate a bookkeeping bug.
 */
void uasync_destroy(struct UASYNC* ua, int close_fds) {
    if (!ua) return;
    DEBUG_DEBUG(DEBUG_CATEGORY_TIMERS, "uasync_destroy: starting cleanup for ua=%p", ua);
    // Resource diagnostics before cleanup
    uasync_print_resources(ua, "BEFORE_DESTROY");
    // Check for potential memory leaks
    if (ua->timer_alloc_count != ua->timer_free_count || ua->socket_alloc_count != ua->socket_free_count) {
        DEBUG_ERROR(DEBUG_CATEGORY_MEMORY, "Memory leaks detected before cleanup: timers %zu/%zu, sockets %zu/%zu",
        ua->timer_alloc_count, ua->timer_free_count, ua->socket_alloc_count, ua->socket_free_count);
        DEBUG_ERROR(DEBUG_CATEGORY_TIMERS, "Timer leak: allocated=%zu, freed=%zu, diff=%zd",
        ua->timer_alloc_count, ua->timer_free_count,
        (ssize_t)(ua->timer_alloc_count - ua->timer_free_count));
        DEBUG_ERROR(DEBUG_CATEGORY_TIMERS, "Socket leak: allocated=%zu, freed=%zu, diff=%zd",
        ua->socket_alloc_count, ua->socket_free_count,
        (ssize_t)(ua->socket_alloc_count - ua->socket_free_count));
        // Continue cleanup, will abort after if leaks remain
    }
    // Free all remaining timeouts
    if (ua->timeout_heap) {
        size_t freed_count = 0;
        while (1) {
            TimeoutEntry entry;
            if (timeout_heap_pop(ua->timeout_heap, &entry) != 0) break;
            struct timeout_node* node = (struct timeout_node*)entry.data;
            // Free all timer nodes (avoid double-free bug)
            if (node) {
                ua->timer_free_count++;
                free(node);
            }
        }
        timeout_heap_destroy(ua->timeout_heap);
    }
    // Free all socket nodes using array approach
    if (ua->sockets) {
        // Count and free all active sockets
        int freed_count = 0;
        for (int i = 0; i < ua->sockets->capacity; i++) {
            if (ua->sockets->sockets[i].active) {
                if (close_fds && ua->sockets->sockets[i].fd >= 0) {
                    close(ua->sockets->sockets[i].fd);
                }
                ua->socket_free_count++;
                freed_count++;
            }
        }
        DEBUG_DEBUG(DEBUG_CATEGORY_MEMORY, "Freed %d socket nodes in destroy", freed_count);
        socket_array_destroy(ua->sockets);
    }
    // Close wakeup pipe
    if (ua->wakeup_initialized) {
        close(ua->wakeup_pipe[0]);
        close(ua->wakeup_pipe[1]);
    }
    // Final leak check
    if (ua->timer_alloc_count != ua->timer_free_count || ua->socket_alloc_count != ua->socket_free_count) {
        DEBUG_ERROR(DEBUG_CATEGORY_MEMORY, "Memory leaks detected after cleanup: timers %zu/%zu, sockets %zu/%zu",
        ua->timer_alloc_count, ua->timer_free_count, ua->socket_alloc_count, ua->socket_free_count);
        DEBUG_ERROR(DEBUG_CATEGORY_TIMERS, "FINAL Timer leak: allocated=%zu, freed=%zu, diff=%zd",
        ua->timer_alloc_count, ua->timer_free_count,
        (ssize_t)(ua->timer_alloc_count - ua->timer_free_count));
        DEBUG_ERROR(DEBUG_CATEGORY_TIMERS, "FINAL Socket leak: allocated=%zu, freed=%zu, diff=%zd",
        ua->socket_alloc_count, ua->socket_free_count,
        (ssize_t)(ua->socket_alloc_count - ua->socket_free_count));
        abort();
    }
    DEBUG_DEBUG(DEBUG_CATEGORY_TIMERS, "uasync_destroy: completed successfully for ua=%p", ua);
    free(ua);
}
void uasync_init_instance(struct UASYNC* ua) {
if (!ua) return;
// Initialize socket array if not present
if (!ua->sockets) {
ua->sockets = socket_array_create(16);
}
if (!ua->timeout_heap) {
ua->timeout_heap = timeout_heap_create(16);
if (ua->timeout_heap) {
timeout_heap_set_free_callback(ua->timeout_heap, ua, timeout_node_free_callback);
}
}
}
// Debug statistics: copy out the alloc/free counters. Each output
// pointer may be NULL to skip that counter.
void uasync_get_stats(struct UASYNC* ua, size_t* timer_alloc, size_t* timer_free, size_t* socket_alloc, size_t* socket_free) {
    if (ua == NULL) return;
    if (timer_alloc != NULL) *timer_alloc = ua->timer_alloc_count;
    if (timer_free != NULL) *timer_free = ua->timer_free_count;
    if (socket_alloc != NULL) *socket_alloc = ua->socket_alloc_count;
    if (socket_free != NULL) *socket_free = ua->socket_free_count;
}
// Wakeup mechanism
// Nudge a loop blocked in poll(): write a single byte into the self-pipe.
// Kept free of stdio so it stays usable from a signal handler.
// Returns 0 on success, -1 if no pipe exists or the write did not land.
int uasync_wakeup(struct UASYNC* ua) {
    if (ua == NULL || !ua->wakeup_initialized) return -1;
    char token = 0;
    ssize_t wrote = write(ua->wakeup_pipe[1], &token, 1);
    if (wrote != 1) {
        /* deliberately silent: may be running inside a signal handler */
        return -1;
    }
    return 0;
}
// Expose the wakeup pipe's WRITE end so external code can trigger a
// wakeup by writing a byte directly (same effect as uasync_wakeup()).
// Returns -1 when the wakeup pipe was not created.
// NOTE(review): the name suggests an fd to poll on, but this is
// wakeup_pipe[1] (the write end) -- confirm callers expect that.
int uasync_get_wakeup_fd(struct UASYNC* ua) {
    if (!ua || !ua->wakeup_initialized) return -1;
    return ua->wakeup_pipe[1];
}
/* Resolve a file descriptor to its current socket registration -- valid
 * even after the internal array has been reallocated by growth.
 * Stores the node pointer (or NULL) in *socket_id.
 * Returns 0 when the fd is registered, -1 otherwise. */
int uasync_lookup_socket(struct UASYNC* ua, int fd, void** socket_id) {
    if (ua == NULL || ua->sockets == NULL || socket_id == NULL) return -1;
    if (fd < 0 || fd >= FD_SETSIZE) return -1;
    struct socket_node* found = socket_array_get(ua->sockets, fd);
    *socket_id = found;
    return (found != NULL) ? 0 : -1;
}

3
lib/u_async.h

@ -69,6 +69,9 @@ struct UASYNC* uasync_create(void);
void uasync_destroy(struct UASYNC* ua, int close_fds);
void uasync_init_instance(struct UASYNC* ua);
// текущее время (timebase 0.1ms)
uint64_t get_time_tb(void);
// Timeouts, timebase = 0.1 mS
void* uasync_set_timeout(struct UASYNC* ua, int timeout_tb, void* user_arg, timeout_callback_t callback);
err_t uasync_cancel_timeout(struct UASYNC* ua, void* t_id);

8
src/config_parser.c

@ -636,11 +636,9 @@ void print_config(const struct utun_config *cfg) {
printf("\nServers:\n");
struct CFG_SERVER *s = cfg->servers;
while (s) {
char addr_buffer[64];
struct sockaddr_in *sin = (struct sockaddr_in *)&s->ip;
inet_ntop(AF_INET, &sin->sin_addr, addr_buffer, sizeof(addr_buffer));
printf(" %s: %s:%d (mark=%d, netif=%u, type=%u)\n",
s->name, addr_buffer, ntohs(sin->sin_port),
s->name, ip_to_str(&sin->sin_addr, AF_INET).str, ntohs(sin->sin_port),
s->so_mark, s->netif_index, s->type);
s = s->next;
}
@ -652,10 +650,8 @@ void print_config(const struct utun_config *cfg) {
struct CFG_CLIENT_LINK *link = c->links;
int link_num = 1;
while (link) {
char addr_buffer[64];
struct sockaddr_in *sin = (struct sockaddr_in *)&link->remote_addr;
inet_ntop(AF_INET, &sin->sin_addr, addr_buffer, sizeof(addr_buffer));
printf(" Link %d: %s:%d (via %s)\n", link_num++, addr_buffer, ntohs(sin->sin_port), link->local_srv->name);
printf(" Link %d: %s:%d (via %s)\n", link_num++, ip_to_str(&sin->sin_addr, AF_INET).str, ntohs(sin->sin_port), link->local_srv->name);
link = link->next;
}
printf(" Peer key: %s\n", c->peer_public_key_hex);

13
src/control_server.c

@ -209,17 +209,14 @@ int control_server_init(struct control_server* server,
return -1;
}
char addr_str[INET6_ADDRSTRLEN];
if (family == AF_INET) {
struct sockaddr_in* sin = (struct sockaddr_in*)bind_addr;
inet_ntop(AF_INET, &sin->sin_addr, addr_str, sizeof(addr_str));
DEBUG_INFO(DEBUG_CATEGORY_CONTROL, "Control server listening on %s:%d",
addr_str, ntohs(sin->sin_port));
ip_to_str(&sin->sin_addr, AF_INET).str, ntohs(sin->sin_port));
} else {
struct sockaddr_in6* sin6 = (struct sockaddr_in6*)bind_addr;
inet_ntop(AF_INET6, &sin6->sin6_addr, addr_str, sizeof(addr_str));
DEBUG_INFO(DEBUG_CATEGORY_CONTROL, "Control server listening on [%s]:%d",
addr_str, ntohs(sin6->sin6_port));
ip_to_str(&sin6->sin6_addr, AF_INET6).str, ntohs(sin6->sin6_port));
}
return 0;
@ -384,13 +381,13 @@ static void accept_callback(socket_t fd, void* arg) {
server->clients = client;
server->client_count++;
char addr_str[INET6_ADDRSTRLEN];
const char* addr_str;
if (client_addr.ss_family == AF_INET) {
struct sockaddr_in* sin = (struct sockaddr_in*)&client_addr;
inet_ntop(AF_INET, &sin->sin_addr, addr_str, sizeof(addr_str));
addr_str = ip_to_str(&sin->sin_addr, AF_INET).str;
} else {
struct sockaddr_in6* sin6 = (struct sockaddr_in6*)&client_addr;
inet_ntop(AF_INET6, &sin6->sin6_addr, addr_str, sizeof(addr_str));
addr_str = ip_to_str(&sin6->sin6_addr, AF_INET6).str;
}
if (server->log_file) {

7
src/dummynet.c

@ -102,13 +102,6 @@ struct shaper_cb_arg {
int dir_idx;
};
/* Current monotonic time in timebase units (1 unit = 0.1 ms). */
static inline uint64_t get_time_tb(void) {
    struct timespec now;
    clock_gettime(CLOCK_MONOTONIC, &now);
    uint64_t sec_tb  = (uint64_t)now.tv_sec * 10000ULL;   /* 10000 units per second */
    uint64_t nsec_tb = (uint64_t)now.tv_nsec / 100000ULL; /* 100000 ns per unit */
    return sec_tb + nsec_tb;
}
/* Generate a random number in the range 0..max */
static inline uint32_t random_range(uint32_t max) {
if (max == 0) return 0;

24
src/etcp.c

@ -66,22 +66,8 @@ static void drain_and_free_inflight_queue(struct ETCP_CONN* etcp, struct ll_queu
*q = NULL;
}
// Current wall-clock time expressed in 0.1 ms units.
uint64_t get_current_time_units() {
    DEBUG_TRACE(DEBUG_CATEGORY_ETCP, "");
    struct timeval now;
    gettimeofday(&now, NULL);
    const uint64_t sec_part  = (uint64_t)now.tv_sec * 10000ULL; // 10000 units per second
    const uint64_t usec_part = (uint64_t)now.tv_usec / 100;     // 100 us per unit
    return sec_part + usec_part;
}
uint16_t get_current_timestamp() {
DEBUG_TRACE(DEBUG_CATEGORY_ETCP, "");
uint16_t timestamp = (uint16_t)(get_current_time_units() & 0xFFFF);
// DEBUG_TRACE(DEBUG_CATEGORY_ETCP, "get_current_timestamp: result=%u", timestamp);
return timestamp;
return (uint16_t)get_time_tb();
}
// Timestamp diff (with wrap-around)
@ -116,7 +102,7 @@ struct ETCP_CONN* etcp_connection_create(struct UTUN_INSTANCE* instance) {
etcp->io_pool = memory_pool_init(sizeof(struct ETCP_FRAGMENT));
etcp->optimal_inflight=10000;
// Initialize log_name with local node_id (peer will be updated later when known)
snprintf(etcp->log_name, sizeof(etcp->log_name), "%04llu????",
snprintf(etcp->log_name, sizeof(etcp->log_name), "%04llu->????",
(unsigned long long)(instance->node_id % 10000));
if (!etcp->input_queue || !etcp->output_queue || !etcp->input_send_q || !etcp->recv_q || !etcp->ack_q ||
@ -249,7 +235,7 @@ void etcp_update_log_name(struct ETCP_CONN* etcp) {
if (!etcp || !etcp->instance) return;
uint64_t local_id = etcp->instance->node_id % 10000;
uint64_t peer_id = etcp->peer_node_id % 10000;
snprintf(etcp->log_name, sizeof(etcp->log_name), "%04llu%04llu",
snprintf(etcp->log_name, sizeof(etcp->log_name), "%04llu->%04llu",
(unsigned long long)local_id, (unsigned long long)peer_id);
}
@ -443,7 +429,7 @@ static void input_send_q_cb(struct ll_queue* q, void* arg) {// etcp->input_send_
static void ack_timeout_check(void* arg) {
DEBUG_TRACE(DEBUG_CATEGORY_ETCP, "");
struct ETCP_CONN* etcp = (struct ETCP_CONN*)arg;
uint64_t now = get_current_time_units();
uint64_t now = get_time_tb();
uint64_t timeout = 1000;//(uint64_t)(etcp->rtt_avg_10 * RETRANS_K1) + (uint64_t)(etcp->jitter * RETRANS_K2);
// DEBUG_DEBUG(DEBUG_CATEGORY_ETCP, "ack_timeout_check: starting check, now=%llu, timeout=%llu, rtt_avg_10=%u, jitter=%u",
@ -533,7 +519,7 @@ struct ETCP_DGRAM* etcp_request_pkt(struct ETCP_CONN* etcp) {
if (inf_pkt) {
// DEBUG_DEBUG(DEBUG_CATEGORY_ETCP, "[%s] prepare udp dgram for send packet %p (seq=%u, len=%u)", etcp->log_name, inf_pkt, inf_pkt->seq, inf_pkt->ll.len);
inf_pkt->last_timestamp=get_current_time_units();
inf_pkt->last_timestamp=get_time_tb();
inf_pkt->send_count++;
inf_pkt->state=INFLIGHT_STATE_WAIT_ACK;
queue_data_put(etcp->input_wait_ack, (struct ll_entry*)inf_pkt, inf_pkt->seq);// move dgram to wait_ack queue

1
src/etcp.h

@ -21,7 +21,6 @@ struct UTUN_INSTANCE;
struct UASYNC;
uint16_t get_current_timestamp(void);
uint64_t get_current_time_units(void);
// ETCP packet section types (from protocol spec)
#define ETCP_SECTION_PAYLOAD 0x00 // Data payload

51
src/etcp_connections.c

@ -88,9 +88,7 @@ static void etcp_link_send_init(struct ETCP_LINK* link, uint8_t reset) {
// Debug: print remote address before sending
if (link->remote_addr.ss_family == AF_INET) {
struct sockaddr_in* sin = (struct sockaddr_in*)&link->remote_addr;
char addr_str[INET_ADDRSTRLEN];
inet_ntop(AF_INET, &sin->sin_addr, addr_str, INET_ADDRSTRLEN);
DEBUG_INFO(DEBUG_CATEGORY_ETCP, "[ETCP] INIT sending to %s:%d, link=%p", addr_str, ntohs(sin->sin_port), link);
DEBUG_INFO(DEBUG_CATEGORY_ETCP, "[ETCP] INIT sending to %s:%d, link=%p", ip_to_str(&sin->sin_addr, AF_INET).str, ntohs(sin->sin_port), link);
}
etcp_encrypt_send(dgram);
@ -215,7 +213,7 @@ static void keepalive_timer_cb(void* arg) {
}
// Check keepalive timeout
uint64_t now = get_current_time_units();
uint64_t now = get_time_tb();
uint64_t timeout_units = (uint64_t)link->keepalive_timeout * 10; // ms -> 0.1ms units
uint64_t elapsed = now - link->last_recv_local_time;
@ -471,9 +469,7 @@ struct ETCP_SOCKET* etcp_socket_add(struct UTUN_INSTANCE* instance, struct socka
ip->ss_family, socket_strerror(socket_get_error()));
if (ip->ss_family == AF_INET) {
struct sockaddr_in* sin = (struct sockaddr_in*)ip;
char addr_str[INET_ADDRSTRLEN];
inet_ntop(AF_INET, &sin->sin_addr, addr_str, INET_ADDRSTRLEN);
DEBUG_ERROR(DEBUG_CATEGORY_ETCP, "[ETCP] Failed to bind to %s:%d", addr_str, ntohs(sin->sin_port));
DEBUG_ERROR(DEBUG_CATEGORY_ETCP, "[ETCP] Failed to bind to %s:%d", ip_to_str(&sin->sin_addr, AF_INET).str, ntohs(sin->sin_port));
}
socket_close_wrapper(e_sock->fd);
free(e_sock);
@ -481,10 +477,8 @@ struct ETCP_SOCKET* etcp_socket_add(struct UTUN_INSTANCE* instance, struct socka
}
struct sockaddr_in* sin = (struct sockaddr_in*)ip;
char addr_str[INET_ADDRSTRLEN];
inet_ntop(AF_INET, &sin->sin_addr, addr_str, INET_ADDRSTRLEN);
DEBUG_INFO(DEBUG_CATEGORY_ETCP, "[ETCP] Successfully bound socket to local address, family=%d %s:%d", ip->ss_family, addr_str, ntohs(sin->sin_port));
DEBUG_INFO(DEBUG_CATEGORY_ETCP, "[ETCP] Successfully bound socket to local address, family=%d %s:%d", ip->ss_family, ip_to_str(&sin->sin_addr, AF_INET).str, ntohs(sin->sin_port));
}
e_sock->instance = instance;
@ -585,7 +579,7 @@ struct ETCP_LINK* etcp_link_new(struct ETCP_CONN* etcp, struct ETCP_SOCKET* conn
// link->last_activity = time(NULL);
link->ip_port_hash = sockaddr_hash(remote_addr);
link->last_recv_local_time = get_current_time_units(); // Initialize to prevent immediate timeout
link->last_recv_local_time = get_time_tb(); // Initialize to prevent immediate timeout
insert_link(conn, link);
@ -723,9 +717,7 @@ int etcp_encrypt_send(struct ETCP_DGRAM* dgram) {
// Debug: print where we're sending the packet
if (addr->ss_family == AF_INET) {
struct sockaddr_in* sin = (struct sockaddr_in*)addr;
char addr_str[INET_ADDRSTRLEN];
inet_ntop(AF_INET, &sin->sin_addr, addr_str, INET_ADDRSTRLEN);
// DEBUG_INFO(DEBUG_CATEGORY_ETCP, "[ETCP] Sending packet to %s:%d, size=%zd", addr_str, ntohs(sin->sin_port), enc_buf_len + dgram->noencrypt_len);
// inet_ntop removed - use ip_to_str
}
ssize_t sent = socket_sendto(dgram->link->conn->fd, enc_buf, enc_buf_len + dgram->noencrypt_len,
@ -870,9 +862,8 @@ static void etcp_connections_read_callback_socket(socket_t sock, void* arg) {
memcpy(&conn->crypto_ctx, &sc, sizeof(sc));// добавляем ключ
conn->peer_node_id=peer_id;
etcp_update_log_name(conn); // Update log_name with peer_node_id
char buf[128];
addr_to_string(&addr, buf, sizeof(buf));
DEBUG_DEBUG(DEBUG_CATEGORY_CONNECTION, "New connection from %s peer_id=%ld etcp=%p", buf, peer_id, conn);
DEBUG_DEBUG(DEBUG_CATEGORY_CONNECTION, "New connection from %s peer_id=%ld etcp=%p",
ip_to_str(&addr, addr.ss_family).str, peer_id, conn);
// Add connection to instance list
conn->next = e_sock->instance->connections;
e_sock->instance->connections = conn;
@ -993,7 +984,7 @@ process_decrypted:
if ( link->remote_keepalive && !link->link_status && link->initialized) loadbalancer_link_ready(link);// up выставляем только если и remote=up и local=up
link->link_status=link->remote_keepalive;
link->last_recv_local_time=get_current_time_units();
link->last_recv_local_time=get_time_tb();
link->last_recv_timestamp=pkt->timestamp;
link->last_recv_updated=1;
@ -1036,33 +1027,23 @@ process_decrypted:
// First time receiving NAT info
link->nat_ip = new_nat_ip;
link->nat_port = new_nat_port;
char ip_str[INET_ADDRSTRLEN];
struct in_addr addr;
addr.s_addr = htonl(new_nat_ip);
inet_ntop(AF_INET, &addr, ip_str, sizeof(ip_str));
DEBUG_INFO(DEBUG_CATEGORY_CONNECTION, "[%s] NAT address initialized: %s:%u",
link->etcp->log_name, ip_str, new_nat_port);
link->etcp->log_name, ip_to_str(&addr, AF_INET).str, new_nat_port);
} else if (link->nat_ip != new_nat_ip || link->nat_port != new_nat_port) {
// NAT address changed
char old_ip_str[INET_ADDRSTRLEN], new_ip_str[INET_ADDRSTRLEN];
struct in_addr old_addr, new_addr;
old_addr.s_addr = htonl(link->nat_ip);
new_addr.s_addr = htonl(new_nat_ip);
inet_ntop(AF_INET, &old_addr, old_ip_str, sizeof(old_ip_str));
inet_ntop(AF_INET, &new_addr, new_ip_str, sizeof(new_ip_str));
link->nat_ip = new_nat_ip;
link->nat_port = new_nat_port;
link->nat_changes_count++;
DEBUG_INFO(DEBUG_CATEGORY_CONNECTION, "[%s] NAT address changed: %s:%u -> %s:%u (change #%u)",
link->etcp->log_name, old_ip_str, link->nat_port, new_ip_str, new_nat_port,
link->etcp->log_name, ip_to_str(&old_addr, AF_INET).str, link->nat_port, ip_to_str(&new_addr, AF_INET).str, new_nat_port,
link->nat_changes_count);
} else {
// NAT address unchanged
link->nat_hits_count++;
DEBUG_DEBUG(DEBUG_CATEGORY_CONNECTION, "[%s] NAT address unchanged (%u matches)",
link->etcp->log_name, link->nat_hits_count);
}
} else {
// Legacy format without NAT info
@ -1152,7 +1133,9 @@ int init_connections(struct UTUN_INSTANCE* instance) {
struct ETCP_SOCKET* e_sock = etcp_socket_add(instance, &server->ip, server->netif_index, server->so_mark, server->type);
if (e_sock && default_ip != 0) {
DEBUG_INFO(DEBUG_CATEGORY_ETCP, "Server %s type %d ip=%08x", server->name, server->type, default_ip);
struct in_addr addr;
addr.s_addr = default_ip;
DEBUG_INFO(DEBUG_CATEGORY_ETCP, "Server %s type %d ip=%s", server->name, server->type, ip_to_str(&addr, AF_INET).str);
e_sock->local_defaultroute_ip = default_ip;
}
if (!e_sock) {
@ -1165,12 +1148,10 @@ int init_connections(struct UTUN_INSTANCE* instance) {
char addr_str[INET6_ADDRSTRLEN + 6];
if (server->ip.ss_family == AF_INET) {
struct sockaddr_in* sin = (struct sockaddr_in*)&server->ip;
inet_ntop(AF_INET, &sin->sin_addr, addr_str, INET_ADDRSTRLEN);
sprintf(addr_str + strlen(addr_str), ":%d", ntohs(sin->sin_port));
snprintf(addr_str, sizeof(addr_str), "%s:%d", ip_to_str(&sin->sin_addr, AF_INET).str, ntohs(sin->sin_port));
} else {
struct sockaddr_in6* sin6 = (struct sockaddr_in6*)&server->ip;
inet_ntop(AF_INET6, &sin6->sin6_addr, addr_str, INET6_ADDRSTRLEN);
sprintf(addr_str + strlen(addr_str), ":%d", ntohs(sin6->sin6_port));
snprintf(addr_str, sizeof(addr_str), "%s:%d", ip_to_str(&sin6->sin6_addr, AF_INET6).str, ntohs(sin6->sin6_port));
}
DEBUG_INFO(DEBUG_CATEGORY_CONNECTION, "Initialized server %s on %s (links: %zu)",

4
src/etcp_loadbalancer.c

@ -33,7 +33,7 @@ struct ETCP_LINK* etcp_loadbalancer_select_link(struct ETCP_CONN* etcp) {
struct ETCP_LINK* best = NULL;
uint64_t min_load_tb = UINT64_MAX;
uint64_t now_tb=get_current_time_units();
uint64_t now_tb=get_time_tb();
struct ETCP_LINK* link = etcp->links;
int link_index = 0;
@ -129,7 +129,7 @@ void etcp_loadbalancer_send(struct ETCP_DGRAM* dgram) {
link->shaper_load_time_tb += carry;
// Check if exceeds burst -> schedule timer
uint64_t now_tb=get_current_time_units();
uint64_t now_tb=get_time_tb();
if (link->shaper_load_time_tb >= now_tb + SHAPER_BURST_DELAY_TB) {
uint64_t wait_tb = link->shaper_load_time_tb - now_tb;
link->shaper_timer = uasync_set_timeout(link->etcp->instance->ua, wait_tb, link, shaper_timer_cb);

20
src/routing.c

@ -100,9 +100,9 @@ static void routing_pkt_from_etcp_cb(struct ETCP_CONN* conn, struct ll_entry* en
// Log destination IP
uint32_t dst_ip = extract_dst_ip(ip_data, ip_len);
if (dst_ip != 0) {
char ip_str[INET_ADDRSTRLEN];
inet_ntop(AF_INET, &dst_ip, ip_str, sizeof(ip_str));
DEBUG_DEBUG(DEBUG_CATEGORY_ROUTING, "Packet from ETCP to %s", ip_str);
struct in_addr addr;
addr.s_addr = dst_ip;
DEBUG_DEBUG(DEBUG_CATEGORY_ROUTING, "Packet from ETCP to %s", ip_to_str(&addr, AF_INET).str);
}
// Allocate ETCP_FRAGMENT for TUN input_queue
@ -172,13 +172,13 @@ static void routing_pkt_from_tun_cb(struct ll_queue* q, void* arg) {
continue;
}
char ip_str[INET_ADDRSTRLEN];
inet_ntop(AF_INET, &dst_ip, ip_str, sizeof(ip_str));
struct in_addr addr;
addr.s_addr = dst_ip;
// Lookup route in routing table
struct ROUTE_ARRAY* routes = route_table_lookup(instance->rt, dst_ip);
if (!routes || routes->routes == 0) {
DEBUG_WARN(DEBUG_CATEGORY_ROUTING, "No route to %s, dropping packet", ip_str);
DEBUG_WARN(DEBUG_CATEGORY_ROUTING, "No route to %s, dropping packet", ip_to_str(&addr, AF_INET).str);
free(pkt->ll.dgram);
memory_pool_free(instance->tun->pool, pkt);
pkt = (struct ETCP_FRAGMENT*)queue_data_get(instance->tun->output_queue);
@ -188,7 +188,7 @@ static void routing_pkt_from_tun_cb(struct ll_queue* q, void* arg) {
// Use first (best) route
struct ETCP_CONN* conn = routes->entries[0]->next_hop;
if (!conn) {
DEBUG_WARN(DEBUG_CATEGORY_ROUTING, "Route to %s has no next_hop, dropping packet", ip_str);
DEBUG_WARN(DEBUG_CATEGORY_ROUTING, "Route to %s has no next_hop, dropping packet", ip_to_str(&addr, AF_INET).str);
free(pkt->ll.dgram);
memory_pool_free(instance->tun->pool, pkt);
pkt = (struct ETCP_FRAGMENT*)queue_data_get(instance->tun->output_queue);
@ -197,7 +197,7 @@ static void routing_pkt_from_tun_cb(struct ll_queue* q, void* arg) {
// Check connection has normalizer
if (!conn->normalizer) {
DEBUG_WARN(DEBUG_CATEGORY_ROUTING, "Connection for %s has no normalizer, dropping packet", ip_str);
DEBUG_WARN(DEBUG_CATEGORY_ROUTING, "Connection for %s has no normalizer, dropping packet", ip_to_str(&addr, AF_INET).str);
free(pkt->ll.dgram);
memory_pool_free(instance->tun->pool, pkt);
pkt = (struct ETCP_FRAGMENT*)queue_data_get(instance->tun->output_queue);
@ -223,12 +223,12 @@ static void routing_pkt_from_tun_cb(struct ll_queue* q, void* arg) {
// Send via etcp_send
if (etcp_send(conn, entry) != 0) {
DEBUG_WARN(DEBUG_CATEGORY_ROUTING, "etcp_send failed for %s", ip_str);
DEBUG_WARN(DEBUG_CATEGORY_ROUTING, "etcp_send failed for %s", ip_to_str(&addr, AF_INET).str);
queue_entry_free(entry);
queue_dgram_free(entry);
} else {
DEBUG_DEBUG(DEBUG_CATEGORY_ROUTING, "Forwarded packet to %s via ETCP conn [%s]",
ip_str, conn->log_name);
ip_to_str(&addr, AF_INET).str, conn->log_name);
}
// Free original TUN packet

391
src/secure_channel.c1

@ -1,391 +0,0 @@
/* sc_lib.c - Secure Channel library implementation using TinyCrypt */
#include "secure_channel.h"
#include "../tinycrypt/lib/include/tinycrypt/ecc.h"
#include "../tinycrypt/lib/include/tinycrypt/ecc_dh.h"
#include "../tinycrypt/lib/include/tinycrypt/aes.h"
#include "../tinycrypt/lib/include/tinycrypt/ccm_mode.h"
#include "../tinycrypt/lib/include/tinycrypt/constants.h"
#include "../tinycrypt/lib/include/tinycrypt/ecc_platform_specific.h"
#include "../tinycrypt/lib/include/tinycrypt/sha256.h"
#include <string.h>
#include <stddef.h>
#include <sys/types.h>
#include <unistd.h>
#include <sys/time.h>
#include <stdio.h>
// Simple debug macros
#define DEBUG_CATEGORY_CRYPTO 1
#define DEBUG_ERROR(category, fmt, ...) fprintf(stderr, "ERROR: " fmt "\n", ##__VA_ARGS__)
#define DEBUG_INFO(category, fmt, ...) fprintf(stdout, "INFO: " fmt "\n", ##__VA_ARGS__)
#include <stdio.h>
#include <fcntl.h>
#include "crc32.h"
static const struct uECC_Curve_t *curve = NULL;
static uint8_t sc_urandom_seed[8] = {0};
static int sc_urandom_initialized = 0;
/* Seed the nonce mixer from /dev/urandom (best effort; flag set only on a full read). */
static void sc_init_random_seed(void)
{
    int fd = open("/dev/urandom", O_RDONLY);
    if (fd < 0)
        return;
    ssize_t got = read(fd, sc_urandom_seed, sizeof sc_urandom_seed);
    close(fd);
    if (got == (ssize_t)sizeof sc_urandom_seed)
        sc_urandom_initialized = 1;
}
/*
 * RNG callback for uECC: fill dest with `size` random bytes.
 * Reads /dev/urandom — looping, because a single read() may legally return
 * fewer bytes than requested — then mixes in the PID and current microtime
 * as extra entropy. Returns 1 on success, 0 on failure (uECC convention).
 */
static int sc_rng(uint8_t *dest, unsigned size)
{
    int fd = open("/dev/urandom", O_RDONLY);
    if (fd < 0) {
        return 0;
    }
    /* Loop until the buffer is full; the old single-read version could
     * silently fail on a short read. */
    unsigned filled = 0;
    while (filled < size) {
        ssize_t ret = read(fd, dest + filled, size - filled);
        if (ret <= 0) {
            close(fd);
            return 0;
        }
        filled += (unsigned)ret;
    }
    close(fd);
    /* Mix in PID and microtime for additional entropy */
    pid_t pid = getpid();
    struct timeval tv;
    gettimeofday(&tv, NULL);
    for (unsigned i = 0; i < size; i++) {
        dest[i] ^= ((pid >> (i % (sizeof(pid) * 8))) & 0xFF);
        dest[i] ^= ((tv.tv_sec >> (i % (sizeof(tv.tv_sec) * 8))) & 0xFF);
        dest[i] ^= ((tv.tv_usec >> (i % (sizeof(tv.tv_usec) * 8))) & 0xFF);
    }
    return 1;
}
static int sc_validate_key(const uint8_t *public_key)
{
if (!curve) {
curve = uECC_secp256r1();
}
int result = uECC_valid_public_key(public_key, curve);
DEBUG_INFO(DEBUG_CATEGORY_CRYPTO, "sc_validate_key: uECC_valid_public_key returned %d", result);
return result;
}
sc_status_t sc_generate_keypair(struct SC_MYKEYS *pk)
{
if (!pk) {
return SC_ERR_INVALID_ARG;
}
if (!curve) {
curve = uECC_secp256r1();
}
/* Set custom RNG function */
uECC_set_rng(sc_rng);
if (!uECC_make_key(pk->public_key, pk->private_key, curve)) {
return SC_ERR_CRYPTO;
}
return SC_OK;
}
// Convert a hex string into binary. The string must contain exactly
// binary_len * 2 hex digits (upper or lower case). Unlike the previous
// sscanf("%2x") version, whitespace, signs and "0x" prefixes are rejected.
// Returns 0 on success, -1 on bad input.
static int hex_to_binary(const char *hex_str, uint8_t *binary, size_t binary_len) {
    if (!hex_str || !binary || strlen(hex_str) != binary_len * 2) return -1;
    for (size_t i = 0; i < binary_len; i++) {
        unsigned int byte = 0;
        for (size_t j = 0; j < 2; j++) {
            char c = hex_str[i * 2 + j];
            unsigned int nibble;
            if (c >= '0' && c <= '9') nibble = (unsigned int)(c - '0');
            else if (c >= 'a' && c <= 'f') nibble = (unsigned int)(c - 'a' + 10);
            else if (c >= 'A' && c <= 'F') nibble = (unsigned int)(c - 'A' + 10);
            else return -1; // strictly hex digits only
            byte = (byte << 4) | nibble;
        }
        binary[i] = (uint8_t)byte;
    }
    return 0;
}
/*
 * Load this node's keypair from hex strings into mykeys.
 * Both keys are hex-decoded; the public key is then validated on the curve.
 */
sc_status_t sc_init_local_keys(struct SC_MYKEYS *mykeys, const char *public_key, const char *private_key) {
    if (!mykeys || !public_key || !private_key) {
        DEBUG_ERROR(DEBUG_CATEGORY_CRYPTO, "sc_init_local_keys: invalid arguments");
        return SC_ERR_INVALID_ARG;
    }
    if (!curve) {
        curve = uECC_secp256r1();
    }
    DEBUG_INFO(DEBUG_CATEGORY_CRYPTO, "sc_init_local_keys: public_key len=%zu, private_key len=%zu",
               strlen(public_key), strlen(private_key));
    /* Hex -> binary for both halves of the keypair */
    int pub_bad = hex_to_binary(public_key, mykeys->public_key, SC_PUBKEY_SIZE);
    if (pub_bad) {
        DEBUG_ERROR(DEBUG_CATEGORY_CRYPTO, "sc_init_local_keys: failed to convert public key from hex");
        return SC_ERR_INVALID_ARG;
    }
    int priv_bad = hex_to_binary(private_key, mykeys->private_key, SC_PRIVKEY_SIZE);
    if (priv_bad) {
        DEBUG_ERROR(DEBUG_CATEGORY_CRYPTO, "sc_init_local_keys: failed to convert private key from hex");
        return SC_ERR_INVALID_ARG;
    }
    /* Reject keys that are not valid curve points */
    if (sc_validate_key(mykeys->public_key) != 0) {
        DEBUG_ERROR(DEBUG_CATEGORY_CRYPTO, "sc_init_local_keys: public key validation failed");
        return SC_ERR_INVALID_ARG;
    }
    DEBUG_INFO(DEBUG_CATEGORY_CRYPTO, "sc_init_local_keys: keys initialized successfully");
    return SC_OK;
}
/*
 * Bind a secure-channel context to a local keypair and reset its state.
 * Returns SC_ERR_INVALID_ARG on NULL arguments (the previous version
 * dereferenced them unchecked, unlike every sibling function here);
 * SC_OK otherwise.
 */
sc_status_t sc_init_ctx(sc_context_t *ctx, struct SC_MYKEYS *mykeys) {
    if (!ctx || !mykeys) {
        return SC_ERR_INVALID_ARG;
    }
    ctx->pk = mykeys;
    ctx->initialized = 1;
    ctx->peer_key_set = 0;
    ctx->session_ready = 0;
    ctx->tx_counter = 0;
    ctx->rx_counter = 0;
    return SC_OK;
}
/*
 * Install the peer's public key and derive the shared session key via ECDH.
 * mode != 0: peer_public_key_h is a hex string; mode == 0: raw binary bytes.
 * On success the context is marked session_ready.
 * Fix: arguments are now validated BEFORE peer_public_key_h is dereferenced
 * (the old code hex-decoded / memcpy'd it first and never NULL-checked it).
 */
sc_status_t sc_set_peer_public_key(sc_context_t *ctx, const char *peer_public_key_h, int mode) {
    uint8_t shared_secret[SC_SHARED_SECRET_SIZE];
    uint8_t peer_public_key[SC_PUBKEY_SIZE];
    if (!ctx || !peer_public_key_h) {
        DEBUG_ERROR(DEBUG_CATEGORY_CRYPTO, "sc_set_peer_public_key: invalid ctx");
        return SC_ERR_INVALID_ARG;
    }
    if (!ctx->initialized) {
        DEBUG_ERROR(DEBUG_CATEGORY_CRYPTO, "sc_set_peer_public_key: ctx not initialized");
        return SC_ERR_NOT_INITIALIZED;
    }
    if (!ctx->pk) {
        DEBUG_ERROR(DEBUG_CATEGORY_CRYPTO, "sc_set_peer_public_key: no private key");
        return SC_ERR_NOT_INITIALIZED;
    }
    if (mode) {
        /* Hex-encoded key */
        if (hex_to_binary(peer_public_key_h, peer_public_key, SC_PUBKEY_SIZE)) {
            DEBUG_ERROR(DEBUG_CATEGORY_CRYPTO, "sc_set_peer_public_key: invalid hex key format");
            return SC_ERR_INVALID_ARG;
        }
    } else {
        /* Raw binary key */
        memcpy(peer_public_key, peer_public_key_h, SC_PUBKEY_SIZE);
    }
    if (!curve) {
        curve = uECC_secp256r1();
    }
    /* Validate peer public key */
    if (sc_validate_key(peer_public_key) != 0) {
        DEBUG_ERROR(DEBUG_CATEGORY_CRYPTO, "sc_set_peer_public_key: invalid key");
        return SC_ERR_INVALID_ARG;
    }
    /* Compute shared secret using ECDH */
    if (!uECC_shared_secret(peer_public_key, ctx->pk->private_key,
                            shared_secret, curve)) {
        DEBUG_ERROR(DEBUG_CATEGORY_CRYPTO, "sc_set_peer_public_key: shared secret error");
        return SC_ERR_CRYPTO;
    }
    /* Derive session key from shared secret (simple copy for demo) */
    memcpy(ctx->session_key, shared_secret, SC_SESSION_KEY_SIZE);
    /* Store peer public key */
    memcpy(ctx->peer_public_key, peer_public_key, SC_PUBKEY_SIZE);
    ctx->peer_key_set = 1;
    ctx->session_ready = 1;
    return SC_OK;
}
/*
 * Derive an SC_NONCE_SIZE nonce as SHA-256(seed || counter || tv_sec || tv_usec),
 * truncated. Counter and timestamp fields are serialized little-endian.
 */
static void sc_build_nonce(uint64_t counter, uint8_t *nonce_out)
{
    struct tc_sha256_state_struct sha_ctx;
    uint8_t hash[32];
    struct timeval tv;
    uint8_t data[8 + 8 + 8];
    if (!sc_urandom_initialized) {
        sc_init_random_seed();
    }
    gettimeofday(&tv, NULL);
    /* bytes 0..7: process-lifetime random seed */
    memcpy(data, sc_urandom_seed, 8);
    /* bytes 8..15: counter, little-endian */
    for (int i = 0; i < 8; i++) {
        data[8 + i] = (uint8_t)((counter >> (8 * i)) & 0xFF);
    }
    /* bytes 16..19: low 32 bits of tv_sec, little-endian */
    for (int i = 0; i < 4; i++) {
        data[16 + i] = (uint8_t)((tv.tv_sec >> (8 * i)) & 0xFF);
    }
    /* bytes 20..23: low 32 bits of tv_usec, little-endian */
    for (int i = 0; i < 4; i++) {
        data[20 + i] = (uint8_t)((tv.tv_usec >> (8 * i)) & 0xFF);
    }
    tc_sha256_init(&sha_ctx);
    tc_sha256_update(&sha_ctx, data, 24);
    tc_sha256_final(hash, &sha_ctx);
    memcpy(nonce_out, hash, SC_NONCE_SIZE);
}
/*
 * Encrypt plaintext with AES-128-CCM under the session key.
 * Output layout: nonce || ciphertext(payload + CRC32) || tag;
 * *ciphertext_len receives the total output size.
 * Fix: the VLAs sized by caller-controlled plaintext_len are now declared
 * AFTER argument validation instead of before it.
 * NOTE(review): still VLA-based — callers must keep payloads small enough
 * for the stack; confirm the upper bound at call sites.
 */
sc_status_t sc_encrypt(sc_context_t *ctx, const uint8_t *plaintext, size_t plaintext_len, uint8_t *ciphertext, size_t *ciphertext_len) {
    if (!ctx || !plaintext || !ciphertext || !ciphertext_len) {
        return SC_ERR_INVALID_ARG;
    }
    if (!ctx->session_ready) {
        return SC_ERR_NOT_INITIALIZED;
    }
    if (plaintext_len == 0) {
        return SC_ERR_INVALID_ARG;
    }
    uint8_t nonce[SC_NONCE_SIZE];
    size_t total_plaintext_len = plaintext_len + SC_CRC32_SIZE;
    uint8_t plaintext_with_crc[plaintext_len + SC_CRC32_SIZE];
    uint8_t combined_output[total_plaintext_len + SC_TAG_SIZE];
    struct tc_aes_key_sched_struct sched;
    struct tc_ccm_mode_struct ccm_state;
    /* Append CRC32 of the payload (little-endian) */
    memcpy(plaintext_with_crc, plaintext, plaintext_len);
    uint32_t crc = crc32_calc(plaintext, plaintext_len);
    plaintext_with_crc[plaintext_len] = (crc >> 0) & 0xFF;
    plaintext_with_crc[plaintext_len + 1] = (crc >> 8) & 0xFF;
    plaintext_with_crc[plaintext_len + 2] = (crc >> 16) & 0xFF;
    plaintext_with_crc[plaintext_len + 3] = (crc >> 24) & 0xFF;
    /* Fresh nonce derived from the tx counter and current time */
    sc_build_nonce(ctx->tx_counter, nonce);
    /* Initialize AES key schedule */
    if (tc_aes128_set_encrypt_key(&sched, ctx->session_key) != TC_CRYPTO_SUCCESS) {
        return SC_ERR_CRYPTO;
    }
    /* Configure CCM mode */
    if (tc_ccm_config(&ccm_state, &sched, nonce, SC_NONCE_SIZE, SC_TAG_SIZE) != TC_CRYPTO_SUCCESS) {
        return SC_ERR_CRYPTO;
    }
    /* Encrypt and generate tag */
    if (tc_ccm_generation_encryption(combined_output, sizeof(combined_output),
                                     NULL, 0, /* no associated data */
                                     plaintext_with_crc, total_plaintext_len,
                                     &ccm_state) != TC_CRYPTO_SUCCESS) {
        return SC_ERR_CRYPTO;
    }
    /* Emit nonce + ciphertext + tag */
    memcpy(ciphertext, nonce, SC_NONCE_SIZE);
    memcpy(ciphertext + SC_NONCE_SIZE, combined_output, total_plaintext_len + SC_TAG_SIZE);
    *ciphertext_len = SC_NONCE_SIZE + total_plaintext_len + SC_TAG_SIZE;
    ctx->tx_counter++;
    return SC_OK;
}
/*
 * Decrypt an sc_encrypt() datagram: nonce || ciphertext || tag.
 * Verifies the CCM tag and the embedded CRC32 before copying the payload.
 * Fix 1: the minimum-length check now runs BEFORE the size subtraction —
 * previously a short input underflowed size_t and declared a near-SIZE_MAX
 * VLA (undefined behavior / stack smash on attacker-supplied data).
 * Fix 2: CRC bytes are cast to uint32_t before shifting; (int)byte << 24
 * was UB for bytes >= 0x80 due to signed overflow.
 */
sc_status_t sc_decrypt(sc_context_t *ctx,
                       const uint8_t *ciphertext,
                       size_t ciphertext_len,
                       uint8_t *plaintext,
                       size_t *plaintext_len)
{
    uint8_t nonce[SC_NONCE_SIZE];
    struct tc_aes_key_sched_struct sched;
    struct tc_ccm_mode_struct ccm_state;
    if (!ctx || !ciphertext || !plaintext || !plaintext_len) {
        return SC_ERR_INVALID_ARG;
    }
    if (!ctx->session_ready) {
        return SC_ERR_NOT_INITIALIZED;
    }
    if (ciphertext_len < SC_NONCE_SIZE + SC_TAG_SIZE + SC_CRC32_SIZE) {
        return SC_ERR_INVALID_ARG;
    }
    size_t total_plaintext_len = ciphertext_len - SC_NONCE_SIZE - SC_TAG_SIZE;
    uint8_t plaintext_with_crc[total_plaintext_len];
    /* The nonce travels in the clear at the front of the datagram */
    memcpy(nonce, ciphertext, SC_NONCE_SIZE);
    const uint8_t *encrypted_data = ciphertext + SC_NONCE_SIZE;
    size_t encrypted_len = ciphertext_len - SC_NONCE_SIZE;
    /* Initialize AES key schedule (CCM uses the encryption schedule) */
    if (tc_aes128_set_encrypt_key(&sched, ctx->session_key) != TC_CRYPTO_SUCCESS) {
        return SC_ERR_CRYPTO;
    }
    /* Configure CCM mode with the extracted nonce */
    if (tc_ccm_config(&ccm_state, &sched, nonce, SC_NONCE_SIZE, SC_TAG_SIZE) != TC_CRYPTO_SUCCESS) {
        return SC_ERR_CRYPTO;
    }
    /* Decrypt and verify tag */
    if (tc_ccm_decryption_verification(plaintext_with_crc, total_plaintext_len,
                                       NULL, 0, /* no associated data */
                                       encrypted_data, encrypted_len,
                                       &ccm_state) != TC_CRYPTO_SUCCESS) {
        return SC_ERR_AUTH_FAILED;
    }
    /* Verify the trailing CRC32 (little-endian) */
    size_t data_len = total_plaintext_len - SC_CRC32_SIZE;
    uint32_t expected_crc = crc32_calc(plaintext_with_crc, data_len);
    uint32_t received_crc = ((uint32_t)plaintext_with_crc[data_len] << 0) |
                            ((uint32_t)plaintext_with_crc[data_len + 1] << 8) |
                            ((uint32_t)plaintext_with_crc[data_len + 2] << 16) |
                            ((uint32_t)plaintext_with_crc[data_len + 3] << 24);
    if (expected_crc != received_crc) {
        return SC_ERR_CRC_FAILED;
    }
    /* Copy payload without the CRC32 trailer */
    memcpy(plaintext, plaintext_with_crc, data_len);
    *plaintext_len = data_len;
    ctx->rx_counter++;
    return SC_OK;
}
sc_status_t sc_compute_public_key_from_private(const uint8_t *private_key, uint8_t *public_key) {
if (!private_key || !public_key) {
return SC_ERR_INVALID_ARG;
}
if (!curve) {
curve = uECC_secp256r1();
}
if (!uECC_compute_public_key(private_key, public_key, curve)) {
return SC_ERR_CRYPTO;
}
return SC_OK;
}

4
src/tun_if.c

@ -124,9 +124,7 @@ struct tun_if* tun_init(struct UASYNC* ua, struct utun_config* config)
char ip_str[64];
if (config->global.tun_ip.family == AF_INET) {
char buf[INET_ADDRSTRLEN];
inet_ntop(AF_INET, &config->global.tun_ip.addr.v4, buf, sizeof(buf));
snprintf(ip_str, sizeof(ip_str), "%s/32", buf);
snprintf(ip_str, sizeof(ip_str), "%s/32", ip_to_str(&config->global.tun_ip.addr.v4, AF_INET).str);
} else {
free(tun);
return NULL;

20
src/tun_route.c

@ -127,16 +127,12 @@ static int netlink_route(int ifindex, uint32_t network, uint8_t prefix_len, int
// Fallback: use ip route command
static int ip_route_cmd(const char *ifname, uint32_t network, uint8_t prefix_len, const char *cmd) {
char network_str[INET_ADDRSTRLEN];
struct in_addr addr;
addr.s_addr = network;
if (!inet_ntop(AF_INET, &addr, network_str, sizeof(network_str))) {
return -1;
}
char cmdline[256];
snprintf(cmdline, sizeof(cmdline), "ip route %s %s/%d dev %s 2>/dev/null",
cmd, network_str, prefix_len, ifname);
cmd, ip_to_str(&addr, AF_INET).str, prefix_len, ifname);
int ret = system(cmdline);
return (ret == 0) ? 0 : -1;
@ -349,16 +345,12 @@ static int routing_socket_cmd(int ifindex, uint32_t network, uint8_t prefix_len,
// Fallback: use route command
static int route_cmd(const char *ifname, uint32_t network, uint8_t prefix_len, const char *cmd) {
char network_str[INET_ADDRSTRLEN];
struct in_addr addr;
addr.s_addr = htonl(network);
if (!inet_ntop(AF_INET, &addr, network_str, sizeof(network_str))) {
return -1;
}
char cmdline[256];
snprintf(cmdline, sizeof(cmdline), "route %s -net %s/%d -interface %s 2>/dev/null",
cmd, network_str, prefix_len, ifname);
cmd, ip_to_str(&addr, AF_INET).str, prefix_len, ifname);
int ret = system(cmdline);
return (ret == 0) ? 0 : -1;
@ -422,18 +414,14 @@ int tun_route_add_all(const char *ifname, struct CFG_ROUTE_ENTRY *routes) {
uint32_t network = ntohl(entry->ip.addr.v4.s_addr);
if (entry->ip.family == AF_INET) {
if (tun_route_add(ifname, network, entry->netmask) == 0) {
char ip_str[INET_ADDRSTRLEN];
struct in_addr addr;
addr.s_addr = entry->ip.addr.v4.s_addr;
inet_ntop(AF_INET, &addr, ip_str, sizeof(ip_str));
DEBUG_INFO(DEBUG_CATEGORY_TUN, "Added route %s/%d via %s", ip_str, entry->netmask, ifname);
DEBUG_INFO(DEBUG_CATEGORY_TUN, "Added route %s/%d via %s", ip_to_str(&addr, AF_INET).str, entry->netmask, ifname);
count++;
} else {
char ip_str[INET_ADDRSTRLEN];
struct in_addr addr;
addr.s_addr = entry->ip.addr.v4.s_addr;
inet_ntop(AF_INET, &addr, ip_str, sizeof(ip_str));
DEBUG_ERROR(DEBUG_CATEGORY_TUN, "Failed to add route %s/%d via %s", ip_str, entry->netmask, ifname);
DEBUG_ERROR(DEBUG_CATEGORY_TUN, "Failed to add route %s/%d via %s", ip_to_str(&addr, AF_INET).str, entry->netmask, ifname);
}
}
entry = entry->next;

12
src/utun_instance.c

@ -81,19 +81,15 @@ static int instance_init_common(struct UTUN_INSTANCE* instance, struct UASYNC* u
entry.destination_node_id = instance->node_id;
if (route_table_insert(instance->rt, &entry)) {
char ip_str[INET_ADDRSTRLEN];
struct in_addr addr;
addr.s_addr = htonl(entry.network); // Convert back to network byte order for printing
inet_ntop(AF_INET, &addr, ip_str, sizeof(ip_str));
addr.s_addr = htonl(entry.network);
DEBUG_INFO(DEBUG_CATEGORY_ROUTING, "Added local route: %s/%d",
ip_str, entry.prefix_length);
ip_to_str(&addr, AF_INET).str, entry.prefix_length);
} else {
char ip_str[INET_ADDRSTRLEN];
struct in_addr addr;
addr.s_addr = htonl(entry.network); // Convert back to network byte order for printing
inet_ntop(AF_INET, &addr, ip_str, sizeof(ip_str));
addr.s_addr = htonl(entry.network);
DEBUG_WARN(DEBUG_CATEGORY_ROUTING, "Failed to add local route: %s/%d (skipping)",
ip_str, entry.prefix_length);
ip_to_str(&addr, AF_INET).str, entry.prefix_length);
}
subnet = subnet->next;

Loading…
Cancel
Save