argon2: make blocks allocation indirect, keep the base address
parent d39202c16c
commit 9ef45f8456
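The change in one sentence: instead of handing the Argon2 code a bare `block *`, allocation now returns a small `block_region` wrapper that remembers the allocator's base address (and a size) next to the block array the rest of the code indexes. Below is a condensed, self-contained sketch of that indirection for orientation only; it mirrors the struct and the malloc-based allocate/free logic in this diff, but the helper names (`region_alloc`, `region_free`, the demo `main`) are illustrative, and unlike the patch it also releases the region on the error path.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define ARGON2_QWORDS_IN_BLOCK 128 /* 1 KiB blocks, as in Argon2 */

typedef struct block_ { uint64_t v[ARGON2_QWORDS_IN_BLOCK]; } block;

/* The new indirection: the region keeps the allocator's base address
 * alongside the block array the Argon2 code actually indexes. */
typedef struct block_region_ {
    void  *base;   /* address returned by the underlying allocator */
    block *memory; /* first usable block (same as base with plain malloc) */
    size_t size;   /* size of the allocation in bytes */
} block_region;

/* region_alloc/region_free are illustrative names, not the libsodium API;
 * the real functions are allocate_memory()/free_memory() in the diff below. */
static int region_alloc(block_region **region, uint32_t m_cost)
{
    size_t memory_size = sizeof(block) * (size_t) m_cost;

    if (m_cost == 0 || memory_size / m_cost != sizeof(block)) {
        return -1; /* multiplication overflow */
    }
    if ((*region = malloc(sizeof **region)) == NULL) {
        return -1;
    }
    if (((*region)->base = malloc(memory_size)) == NULL) {
        free(*region);
        *region = NULL;
        return -1;
    }
    (*region)->memory = (block *) (*region)->base;
    (*region)->size   = memory_size;

    return 0;
}

static void region_free(block_region *region)
{
    if (region != NULL) {
        free(region->base);
        free(region);
    }
}

int main(void)
{
    block_region *region;

    if (region_alloc(&region, 32) != 0) { /* 32 blocks = 32 KiB */
        return 1;
    }
    /* callers index blocks through region->memory, never through base */
    memset(&region->memory[0], 0, region->size);
    region_free(region);

    return 0;
}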
@@ -11,6 +11,10 @@
  * <http://creativecommons.org/publicdomain/zero/1.0/>.
  */
 
+#ifdef HAVE_SYS_MMAN_H
+# include <sys/mman.h>
+#endif
+#include <errno.h>
 #include <inttypes.h>
 #include <stdio.h>
 #include <stdlib.h>
@@ -24,6 +28,10 @@
 #include "argon2-impl.h"
 #include "blake2b-long.h"
 
+#if !defined(MAP_ANON) && defined(MAP_ANONYMOUS)
+# define MAP_ANON MAP_ANONYMOUS
+#endif
+
 static fill_segment_fn fill_segment = fill_segment_ref;
 
 /***************Instance and Position constructors**********/
@@ -57,21 +65,27 @@ static void store_block(void *output, const block *src) {
 }
 
 /***************Memory allocators*****************/
-int allocate_memory(block **memory, uint32_t m_cost) {
-    if (memory != NULL) {
+int allocate_memory(block_region **region, uint32_t m_cost) {
+    if (region != NULL) {
+        block *memory;
         size_t memory_size = sizeof(block) * m_cost;
 
         if (m_cost == 0 ||
            memory_size / m_cost !=
                sizeof(block)) { /*1. Check for multiplication overflow*/
            return ARGON2_MEMORY_ALLOCATION_ERROR;
        }
 
-        *memory = (block *)malloc(memory_size); /*2. Try to allocate*/
-
-        if (!*memory) {
+        *region = (block_region *)malloc(sizeof(block_region)); /*2. Try to allocate region*/
+        if (!*region) {
             return ARGON2_MEMORY_ALLOCATION_ERROR;
         }
+
+        memory = (block *)malloc(memory_size); /*3. Try to allocate block*/
+        if (!memory) {
+            return ARGON2_MEMORY_ALLOCATION_ERROR;
+        }
+        (*region)->memory = memory;
 
         return ARGON2_OK;
     } else {
         return ARGON2_MEMORY_ALLOCATION_ERROR;
@@ -81,26 +95,29 @@ int allocate_memory(block **memory, uint32_t m_cost) {
 /*********Memory functions*/
 
 void clear_memory(argon2_instance_t *instance, int clear) {
-    if (instance->memory != NULL && clear) {
-        sodium_memzero(instance->memory,
+    if (instance->region != NULL && clear) {
+        sodium_memzero(instance->region->memory,
                        sizeof(block) * instance->memory_blocks);
     }
 }
 
-void free_memory(block *memory) { free(memory); }
+void free_memory(block_region *region) {
+    free(region->memory);
+    free(region);
+}
 
 void finalize(const argon2_context *context, argon2_instance_t *instance) {
     if (context != NULL && instance != NULL) {
         block blockhash;
         uint32_t l;
 
-        copy_block(&blockhash, instance->memory + instance->lane_length - 1);
+        copy_block(&blockhash, instance->region->memory + instance->lane_length - 1);
 
         /* XOR the last blocks */
         for (l = 1; l < instance->lanes; ++l) {
             uint32_t last_block_in_lane =
                 l * instance->lane_length + (instance->lane_length - 1);
-            xor_block(&blockhash, instance->memory + last_block_in_lane);
+            xor_block(&blockhash, instance->region->memory + last_block_in_lane);
         }
 
         /* Hash the result */
@@ -120,10 +137,10 @@ void finalize(const argon2_context *context, argon2_instance_t *instance) {
 
         /* Deallocate the memory */
         if (NULL != context->free_cbk) {
-            context->free_cbk((uint8_t *)instance->memory,
+            context->free_cbk((uint8_t *)instance->region->memory,
                               instance->memory_blocks * sizeof(block));
         } else {
-            free_memory(instance->memory);
+            free_memory(instance->region);
         }
     }
 }
@@ -362,13 +379,13 @@ void fill_first_blocks(uint8_t *blockhash, const argon2_instance_t *instance) {
         store32(blockhash + ARGON2_PREHASH_DIGEST_LENGTH + 4, l);
         blake2b_long(blockhash_bytes, ARGON2_BLOCK_SIZE, blockhash,
                      ARGON2_PREHASH_SEED_LENGTH);
-        load_block(&instance->memory[l * instance->lane_length + 0],
+        load_block(&instance->region->memory[l * instance->lane_length + 0],
                    blockhash_bytes);
 
         store32(blockhash + ARGON2_PREHASH_DIGEST_LENGTH, 1);
         blake2b_long(blockhash_bytes, ARGON2_BLOCK_SIZE, blockhash,
                      ARGON2_PREHASH_SEED_LENGTH);
-        load_block(&instance->memory[l * instance->lane_length + 1],
+        load_block(&instance->region->memory[l * instance->lane_length + 1],
                    blockhash_bytes);
     }
     sodium_memzero(blockhash_bytes, ARGON2_BLOCK_SIZE);
@@ -465,9 +482,9 @@ int initialize(argon2_instance_t *instance, argon2_context *context) {
         if (ARGON2_OK != result) {
             return result;
         }
-        memcpy(&(instance->memory), p, sizeof(instance->memory));
+        memcpy(&(instance->region->memory), p, sizeof(instance->region->memory));
     } else {
-        result = allocate_memory(&(instance->memory), instance->memory_blocks);
+        result = allocate_memory(&(instance->region), instance->memory_blocks);
         if (ARGON2_OK != result) {
             return result;
         }
@@ -48,6 +48,12 @@ enum argon2_core_constants {
  */
 typedef struct block_ { uint64_t v[ARGON2_QWORDS_IN_BLOCK]; } block;
 
+typedef struct block_region_ {
+    void *base;
+    block *memory;
+    size_t size;
+} block_region;
+
 /*****************Functions that work with the block******************/
 
 /* Initialize each byte of the block with @in */
@@ -66,7 +72,7 @@ void xor_block(block *dst, const block *src);
  * thread
  */
 typedef struct Argon2_instance_t {
-    block *memory; /* Memory pointer */
+    block_region *region; /* Memory region pointer */
     uint32_t passes; /* Number of passes */
     uint32_t memory_blocks; /* Number of blocks in memory */
     uint32_t segment_length;
@@ -102,7 +108,7 @@ typedef struct Argon2_thread_data {
  * @param m_cost number of blocks to allocate in the memory
  * @return ARGON2_OK if @memory is a valid pointer and memory is allocated
  */
-int allocate_memory(block **memory, uint32_t m_cost);
+int allocate_memory(block_region **memory, uint32_t m_cost);
 
 /* Clears memory
  * @param instance pointer to the current instance
@@ -113,7 +119,7 @@ void clear_memory(argon2_instance_t *instance, int clear);
 /* Deallocates memory
  * @param memory pointer to the blocks
  */
-void free_memory(block *memory);
+void free_memory(block_region *memory);
 
 /*
  * Computes absolute position of reference block in the lane following a skewed
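The hunks that follow are mechanical: every place that used to index `instance->memory` now reaches the blocks through `instance->region->memory`. A minimal sketch of that access pattern, using a stripped-down stand-in for `argon2_instance_t` that keeps only the fields the loop needs (the real struct has many more); the block array layout itself is unchanged, only the path to it gains one level of indirection.

#include <stdint.h>
#include <stddef.h>

#define ARGON2_QWORDS_IN_BLOCK 128

typedef struct block_ { uint64_t v[ARGON2_QWORDS_IN_BLOCK]; } block;

typedef struct block_region_ {
    void  *base;
    block *memory;
    size_t size;
} block_region;

/* Stripped-down stand-in for argon2_instance_t: only the fields used below. */
typedef struct instance_ {
    block_region *region; /* was: block *memory */
    uint32_t      lanes;
    uint32_t      lane_length;
} instance_t;

/* XOR the last block of every lane into out, mirroring how the patched code
 * indexes blocks: one extra dereference through the region. */
void xor_last_blocks(block *out, const instance_t *instance)
{
    uint32_t l, i;

    for (l = 0; l < instance->lanes; ++l) {
        const block *last =
            instance->region->memory + /* was: instance->memory + ... */
            l * instance->lane_length + (instance->lane_length - 1);
        for (i = 0; i < ARGON2_QWORDS_IN_BLOCK; ++i) {
            out->v[i] ^= last->v[i];
        }
    }
}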
@@ -158,7 +158,7 @@ void fill_segment_ref(const argon2_instance_t *instance,
         if (data_independent_addressing) {
             pseudo_rand = pseudo_rands[i];
         } else {
-            pseudo_rand = instance->memory[prev_offset].v[0];
+            pseudo_rand = instance->region->memory[prev_offset].v[0];
         }
 
         /* 1.2.2 Computing the lane of the reference block */
@@ -178,9 +178,9 @@ void fill_segment_ref(const argon2_instance_t *instance,
 
         /* 2 Creating a new block */
         ref_block =
-            instance->memory + instance->lane_length * ref_lane + ref_index;
-        curr_block = instance->memory + curr_offset;
-        fill_block(instance->memory + prev_offset, ref_block, curr_block);
+            instance->region->memory + instance->lane_length * ref_lane + ref_index;
+        curr_block = instance->region->memory + curr_offset;
+        fill_block(instance->region->memory + prev_offset, ref_block, curr_block);
     }
 
     free(pseudo_rands);
@@ -138,7 +138,7 @@ void fill_segment_ssse3(const argon2_instance_t *instance,
         prev_offset = curr_offset - 1;
     }
 
-    memcpy(state, ((instance->memory + prev_offset)->v), ARGON2_BLOCK_SIZE);
+    memcpy(state, ((instance->region->memory + prev_offset)->v), ARGON2_BLOCK_SIZE);
 
     for (i = starting_index; i < instance->segment_length;
          ++i, ++curr_offset, ++prev_offset) {
@@ -152,7 +152,7 @@ void fill_segment_ssse3(const argon2_instance_t *instance,
         if (data_independent_addressing) {
             pseudo_rand = pseudo_rands[i];
         } else {
-            pseudo_rand = instance->memory[prev_offset].v[0];
+            pseudo_rand = instance->region->memory[prev_offset].v[0];
         }
 
         /* 1.2.2 Computing the lane of the reference block */
@@ -172,8 +172,8 @@ void fill_segment_ssse3(const argon2_instance_t *instance,
 
         /* 2 Creating a new block */
         ref_block =
-            instance->memory + instance->lane_length * ref_lane + ref_index;
-        curr_block = instance->memory + curr_offset;
+            instance->region->memory + instance->lane_length * ref_lane + ref_index;
+        curr_block = instance->region->memory + curr_offset;
         fill_block(state, (uint8_t *)ref_block->v, (uint8_t *)curr_block->v);
     }
 
@@ -50,7 +50,7 @@ int argon2_core(argon2_context *context, argon2_type type) {
     /* Ensure that all segments have equal length */
     memory_blocks = segment_length * (context->lanes * ARGON2_SYNC_POINTS);
 
-    instance.memory = NULL;
+    instance.region = NULL;
     instance.passes = context->t_cost;
     instance.memory_blocks = memory_blocks;
     instance.segment_length = segment_length;
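Nothing in this commit reads the new `base` and `size` fields yet, and the blocks still come from `malloc()`; together with the newly included `<sys/mman.h>`/`<errno.h>` and the `MAP_ANON` fallback, they look like groundwork for backing the block array with `mmap()`, where the pointer that must eventually be passed to `munmap()` has to be kept even if the usable block array is later aligned or offset within the mapping. A speculative sketch of that direction, assuming POSIX `mmap()` and the same `block_region` layout; the function names are hypothetical and this is not what the commit implements.

#include <stdint.h>
#include <stdlib.h>
#include <sys/mman.h>

#if !defined(MAP_ANON) && defined(MAP_ANONYMOUS)
# define MAP_ANON MAP_ANONYMOUS
#endif

#define ARGON2_QWORDS_IN_BLOCK 128

typedef struct block_ { uint64_t v[ARGON2_QWORDS_IN_BLOCK]; } block;

typedef struct block_region_ {
    void  *base;   /* what munmap() needs */
    block *memory; /* what the Argon2 loops index */
    size_t size;   /* length of the mapping */
} block_region;

/* Hypothetical mmap-backed allocator: keeps the mapping's base address and
 * length in the region so free can undo exactly what alloc did. */
int region_alloc_mmap(block_region **region, uint32_t m_cost)
{
    size_t memory_size = sizeof(block) * (size_t) m_cost;
    void  *base;

    if (m_cost == 0 || memory_size / m_cost != sizeof(block)) {
        return -1;
    }
    if ((*region = malloc(sizeof **region)) == NULL) {
        return -1;
    }
    base = mmap(NULL, memory_size, PROT_READ | PROT_WRITE,
                MAP_ANON | MAP_PRIVATE, -1, 0);
    if (base == MAP_FAILED) {
        free(*region);
        *region = NULL;
        return -1;
    }
    (*region)->base   = base;
    (*region)->memory = (block *) base;
    (*region)->size   = memory_size;

    return 0;
}

void region_free_mmap(block_region *region)
{
    if (region != NULL) {
        munmap(region->base, region->size);
        free(region);
    }
}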