AES padding and queue fix

Mark Qvist 2019-02-08 21:49:16 +01:00
parent 77e022f03b
commit ae0a53d9eb
2 changed files with 116 additions and 102 deletions

View File

@@ -41,10 +41,11 @@
 #define CONFIG_CSMA_P_DEFAULT 255
 #define CONFIG_CSMA_SLOTTIME_DEFAULT 20
-#define AX25_MIN_FRAME_LEN 1
+#define AX25_MIN_FRAME_LEN 4
 #define AX25_MAX_FRAME_LEN 611
 // TODO: increase back to 576
 #define AX25_MAX_PAYLOAD 576
 #define AX25_MIN_PAYLOAD 2
+#define AX25_ENCRYPTED_MIN_LENGTH 51 // Padding byte + IV + 1 Block + HMAC + CRC
 
 // Packet settings
 #define CONFIG_PASSALL false
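
The new AX25_ENCRYPTED_MIN_LENGTH constant is the smallest frame that can carry a complete encrypted payload. A quick sketch of the arithmetic behind the value 51, assuming CRYPTO_KEY_SIZE (the AES block and IV size) and CRYPTO_HMAC_SIZE are both 16, which is what the sum in the comment implies:

    // Smallest possible encrypted frame, per the comment above:
    //   1 padding byte + 16-byte IV + one 16-byte cipher block
    //   + 16-byte HMAC + 2-byte CRC = 51 bytes
    #define CRYPTO_KEY_SIZE   16  // assumed AES block / IV size
    #define CRYPTO_HMAC_SIZE  16  // assumed (truncated) HMAC size
    #define AX25_ENCRYPTED_MIN_LENGTH \
        (1 + CRYPTO_KEY_SIZE + CRYPTO_KEY_SIZE + CRYPTO_HMAC_SIZE + 2)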

View File

@@ -36,10 +36,6 @@ bool ESCAPE;
 uint8_t command = CMD_UNKNOWN;
 //unsigned long custom_preamble = CONFIG_AFSK_PREAMBLE_LEN;
 //unsigned long custom_tail = CONFIG_AFSK_TRAILER_LEN;
 void kiss_init(AX25Ctx *ax25, Afsk *afsk, Serial *ser) {
     ax25ctx = ax25;
     serial = ser;
@@ -72,52 +68,57 @@ void kiss_messageCallback(AX25Ctx *ctx) {
     bool integrity_ok = false;
     if (crypto_enabled()) {
         size_t rxpos = 0;
-        // Get padding size
-        uint8_t padding = ctx->buf[rxpos++];
-        size_t data_length = ctx->frame_len - 2 - 1 - CRYPTO_HMAC_SIZE - CRYPTO_KEY_SIZE;
-        size_t hmac_offset = ctx->frame_len - 2 - CRYPTO_HMAC_SIZE;
-
-        // Get HMAC
-        uint8_t hmac[CRYPTO_HMAC_SIZE];
-        memset(hmac, 0x00, CRYPTO_HMAC_SIZE);
-        for (uint8_t i = 0; i < CRYPTO_HMAC_SIZE; i++) {
-            size_t pos = hmac_offset + i;
-            hmac[i] = ctx->buf[pos];
-        }
-
-        // Calculate HMAC
-        crypto_generate_hmac(ctx->buf, ctx->frame_len-2-CRYPTO_HMAC_SIZE);
-        bool HMAC_ok = true;
-        for (uint8_t i = 0; i < CRYPTO_HMAC_SIZE; i++) {
-            if (hmac[i] != crypto_work_block[i]) {
-                HMAC_ok = false;
-                break;
-            }
-        }
-
-        if (HMAC_ok) {
-            // Get IV
-            for (uint8_t i = 0; i < CRYPTO_KEY_SIZE; i++) {
-                crypto_work_block[i] = ctx->buf[rxpos++];
-            }
-            crypto_set_iv_from_workblock();
-            crypto_prepare();
-
-            uint8_t blocks = data_length / CRYPTO_KEY_SIZE;
-
-            size_t decrypted_pos = 0;
-            for (uint8_t block = 0; block < blocks; block++) {
-                for (uint8_t i = 0; i < CRYPTO_KEY_SIZE; i++) {
-                    crypto_work_block[i] = ctx->buf[rxpos++];
-                }
-                crypto_decrypt_block(crypto_work_block);
-                for (uint8_t i = 0; i < CRYPTO_KEY_SIZE; i++) {
-                    ctx->buf[decrypted_pos++] = crypto_work_block[i];
-                }
-            }
-            ctx->frame_len = data_length - padding + 2;
-            integrity_ok = true;
-        }
+        if (ctx->frame_len >= AX25_ENCRYPTED_MIN_LENGTH) {
+            // Get padding size
+            uint8_t padding = ctx->buf[rxpos++];
+            size_t data_length = ctx->frame_len - 2 - 1 - CRYPTO_HMAC_SIZE - CRYPTO_KEY_SIZE;
+            size_t hmac_offset = ctx->frame_len - 2 - CRYPTO_HMAC_SIZE;
+
+            // Get HMAC
+            uint8_t hmac[CRYPTO_HMAC_SIZE];
+            memset(hmac, 0x00, CRYPTO_HMAC_SIZE);
+            for (uint8_t i = 0; i < CRYPTO_HMAC_SIZE; i++) {
+                size_t pos = hmac_offset + i;
+                hmac[i] = ctx->buf[pos];
+            }
+
+            // Calculate HMAC
+            crypto_generate_hmac(ctx->buf, ctx->frame_len-2-CRYPTO_HMAC_SIZE);
+            bool HMAC_ok = true;
+            for (uint8_t i = 0; i < CRYPTO_HMAC_SIZE; i++) {
+                if (hmac[i] != crypto_work_block[i]) {
+                    HMAC_ok = false;
+                    break;
+                }
+            }
+
+            if (HMAC_ok) {
+                // Get IV
+                for (uint8_t i = 0; i < CRYPTO_KEY_SIZE; i++) {
+                    crypto_work_block[i] = ctx->buf[rxpos++];
+                }
+                crypto_set_iv_from_workblock();
+                crypto_prepare();
+
+                uint8_t blocks = data_length / CRYPTO_KEY_SIZE;
+
+                size_t decrypted_pos = 0;
+                for (uint8_t block = 0; block < blocks; block++) {
+                    for (uint8_t i = 0; i < CRYPTO_KEY_SIZE; i++) {
+                        crypto_work_block[i] = ctx->buf[rxpos++];
+                    }
+                    crypto_decrypt_block(crypto_work_block);
+                    for (uint8_t i = 0; i < CRYPTO_KEY_SIZE; i++) {
+                        ctx->buf[decrypted_pos++] = crypto_work_block[i];
+                    }
+                }
+                ctx->frame_len = data_length - padding;
+                integrity_ok = true;
+            }
+        }
     } else {
         integrity_ok = true;
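
The point of the new length guard is presumably to keep the offset arithmetic from wrapping: data_length and hmac_offset are size_t, so on a frame shorter than the fixed overhead the subtractions underflow instead of going negative. A minimal sketch of the failure mode the check rules out, with block and HMAC sizes assumed to be 16 as above:

    #include <stdint.h>
    #include <stdio.h>

    #define CRYPTO_KEY_SIZE  16  // assumed AES block / IV size
    #define CRYPTO_HMAC_SIZE 16  // assumed HMAC size

    int main(void) {
        size_t frame_len = 20; // shorter than AX25_ENCRYPTED_MIN_LENGTH (51)

        // Same expression as the RX path: 20 - 2 - 1 - 16 - 16 underflows,
        // wrapping to a huge unsigned value instead of a negative number.
        size_t data_length = frame_len - 2 - 1 - CRYPTO_HMAC_SIZE - CRYPTO_KEY_SIZE;

        // Even truncated into uint8_t the block count is garbage (255 here),
        // so the decrypt loop would read far past the frame in the buffer.
        uint8_t blocks = data_length / CRYPTO_KEY_SIZE;
        printf("data_length=%zu blocks=%u\n", data_length, blocks);
        return 0;
    }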
@@ -180,73 +181,76 @@ void kiss_flushQueue(void) {
         size_t start = fifo16_pop_locked(&packet_starts);
         size_t length = fifo16_pop_locked(&packet_lengths);
-        if (crypto_enabled()) {
-            uint8_t padding = CRYPTO_KEY_SIZE - (length % CRYPTO_KEY_SIZE);
-            if (padding == CRYPTO_KEY_SIZE) padding = 0;
-            uint8_t blocks = (length + padding) / CRYPTO_KEY_SIZE;
-
-            if (crypto_generate_iv()) {
-                crypto_prepare();
-
-                size_t tx_pos = 0;
-                tx_buffer[tx_pos++] = padding;
-
-                uint8_t *iv = crypto_get_iv();
-                for (uint8_t i = 0; i < CRYPTO_KEY_SIZE; i++) {
-                    tx_buffer[tx_pos++] = iv[i];
-                }
-
-                // Encrypt each block
-                for (uint8_t i = 0; i < blocks; i++) {
-                    if (i < blocks-1 || padding == 0) {
-                        for (uint8_t j = 0; j < CRYPTO_KEY_SIZE; j++) {
-                            size_t pos = (start+j)%CONFIG_QUEUE_SIZE;
-                            crypto_work_block[j] = packet_queue[pos];
-                        }
-                        start += CRYPTO_KEY_SIZE;
-                    } else {
-                        for (uint8_t j = 0; j < CRYPTO_KEY_SIZE - padding; j++) {
-                            size_t pos = (start+j)%CONFIG_QUEUE_SIZE;
-                            crypto_work_block[j] = packet_queue[pos];
-                        }
-                        for (uint8_t j = 0; j < padding; j++) {
-                            crypto_work_block[j] = 0xFF;
-                        }
-                    }
-
-                    crypto_encrypt_block(crypto_work_block);
-                    for (uint8_t j = 0; j < CRYPTO_KEY_SIZE; j++) {
-                        tx_buffer[tx_pos++] = crypto_work_block[j];
-                    }
-                }
-
-                // Generate MAC
-                crypto_generate_hmac(tx_buffer, tx_pos);
-                for (uint8_t i = 0; i < CRYPTO_HMAC_SIZE; i++) {
-                    tx_buffer[tx_pos++] = crypto_work_block[i];
-                }
-
-                // Check size and send
-                if (tx_pos <= AX25_MAX_FRAME_LEN) {
-                    ax25_sendRaw(ax25ctx, tx_buffer, tx_pos);
-                    processed++;
-                } else {
-                    processed++;
-                    LED_indicate_error_crypto();
-                }
-            } else {
-                processed++;
-                LED_indicate_error_crypto();
-            }
-        } else {
-            for (size_t i = 0; i < length; i++) {
-                size_t pos = (start+i)%CONFIG_QUEUE_SIZE;
-                tx_buffer[i] = packet_queue[pos];
-            }
-
-            ax25_sendRaw(ax25ctx, tx_buffer, length);
-            processed++;
-        }
+        if (length >= AX25_MIN_PAYLOAD) {
+            if (crypto_enabled()) {
+                uint8_t padding = CRYPTO_KEY_SIZE - (length % CRYPTO_KEY_SIZE);
+                if (padding == CRYPTO_KEY_SIZE) padding = 0;
+                uint8_t blocks = (length + padding) / CRYPTO_KEY_SIZE;
+
+                if (crypto_generate_iv()) {
+                    crypto_prepare();
+
+                    size_t tx_pos = 0;
+                    tx_buffer[tx_pos++] = padding;
+
+                    uint8_t *iv = crypto_get_iv();
+                    for (uint8_t i = 0; i < CRYPTO_KEY_SIZE; i++) {
+                        tx_buffer[tx_pos++] = iv[i];
+                    }
+
+                    // Encrypt each block
+                    for (uint8_t i = 0; i < blocks; i++) {
+                        if (i < blocks-1 || padding == 0) {
+                            for (uint8_t j = 0; j < CRYPTO_KEY_SIZE; j++) {
+                                size_t pos = (start+j)%CONFIG_QUEUE_SIZE;
+                                crypto_work_block[j] = packet_queue[pos];
+                            }
+                            start += CRYPTO_KEY_SIZE;
+                        } else {
+                            for (uint8_t j = 0; j < CRYPTO_KEY_SIZE - padding; j++) {
+                                size_t pos = (start+j)%CONFIG_QUEUE_SIZE;
+                                crypto_work_block[j] = packet_queue[pos];
+                            }
+                            for (uint8_t j = CRYPTO_KEY_SIZE - padding; j < CRYPTO_KEY_SIZE; j++) {
+                                crypto_work_block[j] = 0xFF;
+                            }
+                        }
+
+                        crypto_encrypt_block(crypto_work_block);
+                        for (uint8_t j = 0; j < CRYPTO_KEY_SIZE; j++) {
+                            tx_buffer[tx_pos++] = crypto_work_block[j];
+                        }
+                    }
+
+                    // Generate MAC
+                    crypto_generate_hmac(tx_buffer, tx_pos);
+                    for (uint8_t i = 0; i < CRYPTO_HMAC_SIZE; i++) {
+                        tx_buffer[tx_pos++] = crypto_work_block[i];
+                    }
+
+                    // Check size and send
+                    if (tx_pos <= AX25_MAX_FRAME_LEN) {
+                        ax25_sendRaw(ax25ctx, tx_buffer, tx_pos);
+                        processed++;
+                    } else {
+                        processed++;
+                        LED_indicate_error_crypto();
+                    }
+                } else {
+                    processed++;
+                    LED_indicate_error_crypto();
+                }
+            } else {
+                for (size_t i = 0; i < length; i++) {
+                    size_t pos = (start+i)%CONFIG_QUEUE_SIZE;
+                    tx_buffer[i] = packet_queue[pos];
+                }
+
+                ax25_sendRaw(ax25ctx, tx_buffer, length);
+                processed++;
+            }
+        } else {
+            processed++;
+        }
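
The actual padding fix is easy to miss among the re-indented lines: in the final block, the old filler loop ran for j = 0 .. padding-1 and so overwrote the payload bytes it had just copied in, while the fixed loop fills j = CRYPTO_KEY_SIZE-padding .. CRYPTO_KEY_SIZE-1, after the data. A hypothetical helper, not in the source, restating the corrected last-block assembly:

    #include <stdint.h>
    #include <string.h>

    #define CRYPTO_KEY_SIZE 16  // assumed AES block size

    // Fill the final plaintext block: payload bytes first, 0xFF filler after.
    // The block carries CRYPTO_KEY_SIZE - padding real data bytes.
    static void fill_last_block(uint8_t block[CRYPTO_KEY_SIZE],
                                const uint8_t *data, uint8_t padding) {
        memcpy(block, data, CRYPTO_KEY_SIZE - padding);              // payload
        memset(block + (CRYPTO_KEY_SIZE - padding), 0xFF, padding);  // filler
        // Old behaviour was effectively memset(block, 0xFF, padding): the
        // filler landed on top of the payload, corrupting up to padding
        // bytes of every packet whose length was not a block multiple.
    }

On the receive side, the leading padding-count byte is what lets kiss_messageCallback trim these 0xFF filler bytes back off via frame_len = data_length - padding.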
@@ -268,15 +272,24 @@ void kiss_serialCallback(uint8_t sbyte) {
             IN_FRAME = false;
             if (queue_height < CONFIG_QUEUE_MAX_LENGTH && queued_bytes < CONFIG_QUEUE_SIZE) {
-                queue_height++;
-
                 size_t s = current_packet_start;
                 size_t e = queue_cursor-1; if (e == -1) e = CONFIG_QUEUE_SIZE-1;
-                size_t l = (s < e) ? e - s + 1 : CONFIG_QUEUE_SIZE - s + e + 1;
-
-                fifo16_push_locked(&packet_starts, s);
-                fifo16_push_locked(&packet_lengths, l);
-
-                current_packet_start = queue_cursor;
+                size_t l;
+
+                if (s != e) {
+                    l = (s < e) ? e - s + 1 : CONFIG_QUEUE_SIZE - s + e + 1;
+                } else {
+                    l = 1;
+                }
+
+                if (l >= AX25_MIN_PAYLOAD) {
+                    queue_height++;
+
+                    fifo16_push_locked(&packet_starts, s);
+                    fifo16_push_locked(&packet_lengths, l);
+
+                    current_packet_start = queue_cursor;
+                }
             }
         } else if (sbyte == FEND) {
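
The queue fix addresses the single-byte case: with the old one-liner, s == e fell into the wrap-around arm and produced CONFIG_QUEUE_SIZE - s + e + 1 = CONFIG_QUEUE_SIZE + 1, a nonsense length that was then pushed into the FIFO. The new code also refuses to queue anything shorter than AX25_MIN_PAYLOAD. A sketch of the corrected length computation on the circular buffer, with a CONFIG_QUEUE_SIZE value that is an assumption, since it is not shown in this diff:

    #include <stddef.h>

    #define CONFIG_QUEUE_SIZE 5918  // assumed capacity, not shown in this diff

    // Inclusive length of a packet occupying ring positions s..e.
    static size_t packet_length(size_t s, size_t e) {
        if (s == e) return 1;                  // one-byte packet
        if (s < e)  return e - s + 1;          // contiguous run
        return CONFIG_QUEUE_SIZE - s + e + 1;  // run that wraps past the end
    }

    // Examples: packet_length(10, 13) == 4;
    // packet_length(5916, 1) == 4 (bytes at 5916, 5917, 0, 1);
    // packet_length(7, 7) == 1, where the old expression gave 5919.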