diff --git a/HISTORY b/HISTORY
index 43f26dc..8d461df 100644
--- a/HISTORY
+++ b/HISTORY
@@ -1,5 +1,5 @@
-Version 1.23  2015-11-04
+Version 1.23  2015-11-05
 * sched_thread.c: task can execute in a new thread
 * sched_thread.c: support delay tasks
 * add function get_current_time_us and get_current_time_ms
diff --git a/src/fast_allocator.c b/src/fast_allocator.c
index 5dd0b82..ef68ffa 100644
--- a/src/fast_allocator.c
+++ b/src/fast_allocator.c
@@ -1,11 +1,79 @@
 //fast_allocator.c
 #include <errno.h>
+#include <time.h>
 #include <sys/resource.h>
 #include "logger.h"
 #include "shared_func.h"
 #include "fast_allocator.h"
+#define BYTES_ALIGN(x, pad_mask)  (((x) + pad_mask) & (~pad_mask))
+
+struct allocator_wrapper {
+	int alloc_bytes;
+	short allocator_index;
+	short magic_number;
+};
+
+static struct fast_allocator_info malloc_allocator;
+
+#define ADD_ALLOCATOR_TO_ARRAY(acontext, allocator, _pooled) \
+	do { \
+		(allocator)->index = acontext->allocator_array.count; \
+		(allocator)->magic_number = rand(); \
+		(allocator)->pooled = _pooled; \
+		acontext->allocator_array.allocators[ \
+			acontext->allocator_array.count++] = allocator; \
+		/* logInfo("count: %d, magic_number: %d", acontext->allocator_array.count, (allocator)->magic_number); */\
+	} while (0)
+
+
+static int allocator_array_check_capacity(struct fast_allocator_context *acontext,
+	const int allocator_count)
+{
+	int result;
+	int bytes;
+	struct fast_allocator_info **new_allocators;
+
+	if (acontext->allocator_array.alloc >= acontext->allocator_array.count +
+		allocator_count)
+	{
+		return 0;
+	}
+	if (acontext->allocator_array.alloc == 0)
+	{
+		acontext->allocator_array.alloc = 2 * allocator_count;
+	}
+	else
+	{
+		do
+		{
+			acontext->allocator_array.alloc *= 2;
+		} while (acontext->allocator_array.alloc < allocator_count);
+	}
+
+	bytes = sizeof(struct fast_allocator_info*) * acontext->allocator_array.alloc;
+	new_allocators = (struct fast_allocator_info **)malloc(bytes);
+	if (new_allocators == NULL)
+	{
+		result = errno != 0 ?
errno : ENOMEM; + logError("file: "__FILE__", line: %d, " + "malloc %d bytes fail, errno: %d, error info: %s", + __LINE__, bytes, result, STRERROR(result)); + return result; + } + + if (acontext->allocator_array.allocators != NULL) + { + memcpy(new_allocators, acontext->allocator_array.allocators, + sizeof(struct fast_allocator_info *) * + acontext->allocator_array.count); + free(acontext->allocator_array.allocators); + } + acontext->allocator_array.allocators = new_allocators; + return 0; +} + static int region_init(struct fast_allocator_context *acontext, struct fast_region_info *region) { @@ -13,11 +81,12 @@ static int region_init(struct fast_allocator_context *acontext, int bytes; int element_size; int allocator_count; - struct fast_mblock_man *mblock; + struct fast_allocator_info *allocator; + region->pad_mask = region->step - 1; allocator_count = (region->end - region->start) / region->step; - bytes = sizeof(struct fast_mblock_man) * allocator_count; - region->allocators = (struct fast_mblock_man *)malloc(bytes); + bytes = sizeof(struct fast_allocator_info) * allocator_count; + region->allocators = (struct fast_allocator_info *)malloc(bytes); if (region->allocators == NULL) { result = errno != 0 ? 
errno : ENOMEM; @@ -28,17 +97,25 @@ static int region_init(struct fast_allocator_context *acontext, } memset(region->allocators, 0, bytes); - result = 0; - mblock = region->allocators; - for (element_size=region->start+region->step; element_size<=region->end; - element_size+=region->step,mblock++) + + if ((result=allocator_array_check_capacity(acontext, allocator_count)) != 0) { - result = fast_mblock_init_ex(mblock, element_size, + return result; + } + + result = 0; + allocator = region->allocators; + for (element_size=region->start+region->step; element_size<=region->end; + element_size+=region->step,allocator++) + { + result = fast_mblock_init_ex(&allocator->mblock, element_size, region->alloc_elements_once, NULL, acontext->need_lock); if (result != 0) { break; } + + ADD_ALLOCATOR_TO_ARRAY(acontext, allocator, true); } return result; @@ -49,14 +126,14 @@ static void region_destroy(struct fast_allocator_context *acontext, { int element_size; int allocator_count; - struct fast_mblock_man *mblock; + struct fast_allocator_info *allocator; allocator_count = (region->end - region->start) / region->step; - mblock = region->allocators; + allocator = region->allocators; for (element_size=region->start+region->step; element_size<=region->end; - element_size+=region->step,mblock++) + element_size+=region->step,allocator++) { - fast_mblock_destroy(mblock); + fast_mblock_destroy(&allocator->mblock); } free(region->allocators); @@ -73,6 +150,7 @@ int fast_allocator_init_ex(struct fast_allocator_context *acontext, struct fast_region_info *pRegion; struct fast_region_info *region_end; + srand(time(NULL)); memset(acontext, 0, sizeof(*acontext)); if (region_count <= 0) { @@ -113,7 +191,7 @@ int fast_allocator_init_ex(struct fast_allocator_context *acontext, result = EINVAL; break; } - if (pRegion->step <= 0) + if (pRegion->step <= 0 || !is_power2(pRegion->step)) { logError("file: "__FILE__", line: %d, " "invalid step: %d", @@ -145,6 +223,16 @@ int fast_allocator_init_ex(struct 
fast_allocator_context *acontext,
 		}
 	}
 
+	if ((result=allocator_array_check_capacity(acontext, 1)) != 0)
+	{
+		return result;
+	}
+
+	ADD_ALLOCATOR_TO_ARRAY(acontext, &malloc_allocator, false);
+	/*
+	logInfo("sizeof(struct allocator_wrapper): %d, allocator_array count: %d",
+		(int)sizeof(struct allocator_wrapper), acontext->allocator_array.count);
+	*/
 	return result;
 }
 
@@ -159,7 +247,7 @@ int fast_allocator_init_ex(struct fast_allocator_context *acontext,
 int fast_allocator_init(struct fast_allocator_context *acontext,
 	const bool need_lock)
 {
-#define DEFAULT_REGION_COUNT 6
+#define DEFAULT_REGION_COUNT 5
 
 	struct fast_region_info regions[DEFAULT_REGION_COUNT];
 
@@ -178,23 +266,119 @@ void fast_allocator_destroy(struct fast_allocator_context *acontext)
 	struct fast_region_info *pRegion;
 	struct fast_region_info *region_end;
 
+	if (acontext->regions != NULL)
+	{
+		region_end = acontext->regions + acontext->region_count;
+		for (pRegion=acontext->regions; pRegion<region_end; pRegion++)
+		{
+			region_destroy(acontext, pRegion);
+		}
+		free(acontext->regions);
+	}
+
+	if (acontext->allocator_array.allocators != NULL)
+	{
+		free(acontext->allocator_array.allocators);
+	}
+	memset(acontext, 0, sizeof(*acontext));
+}
+
+static struct fast_allocator_info *get_allocator(struct fast_allocator_context *acontext,
+	int *alloc_bytes)
+{
+	struct fast_region_info *pRegion;
+	struct fast_region_info *region_end;
+
 	region_end = acontext->regions + acontext->region_count;
 	for (pRegion=acontext->regions; pRegion<region_end; pRegion++)
 	{
-		region_destroy(acontext, pRegion);
+		if (*alloc_bytes <= pRegion->end)
+		{
+			*alloc_bytes = BYTES_ALIGN(*alloc_bytes, pRegion->pad_mask);
+			return pRegion->allocators + ((*alloc_bytes -
+				pRegion->start) / pRegion->step) - 1;
+		}
 	}
 
-	free(acontext->regions);
-	acontext->regions = NULL;
+	return &malloc_allocator;
 }
 
-void* fast_allocator_alloc(struct fast_allocator_context *acontext,
+void *fast_allocator_alloc(struct fast_allocator_context *acontext,
 	const int bytes)
 {
-	return NULL;
+	int alloc_bytes;
+	struct fast_allocator_info *allocator_info;
+	void *ptr;
+
+	if (bytes < 0)
+	{
+		return NULL;
+	}
+
+	alloc_bytes = sizeof(struct
allocator_wrapper) + bytes; + allocator_info = get_allocator(acontext, &alloc_bytes); + if (allocator_info->pooled) + { + ptr = fast_mblock_alloc_object(&allocator_info->mblock); + } + else + { + ptr = malloc(alloc_bytes); + } + if (ptr == NULL) + { + return NULL; + } + + ((struct allocator_wrapper *)ptr)->allocator_index = allocator_info->index; + ((struct allocator_wrapper *)ptr)->magic_number = allocator_info->magic_number; + ((struct allocator_wrapper *)ptr)->alloc_bytes = alloc_bytes; + + __sync_add_and_fetch(&acontext->alloc_bytes, alloc_bytes); + return (char *)ptr + sizeof(struct allocator_wrapper); } void fast_allocator_free(struct fast_allocator_context *acontext, void *ptr) { + struct allocator_wrapper *pWrapper; + struct fast_allocator_info *allocator_info; + void *obj; + if (ptr == NULL) + { + return; + } + + obj = (char *)ptr - sizeof(struct allocator_wrapper); + pWrapper = (struct allocator_wrapper *)obj; + if (pWrapper->allocator_index < 0 || pWrapper->allocator_index >= + acontext->allocator_array.count) + { + logError("file: "__FILE__", line: %d, " + "invalid allocator index: %d", + __LINE__, pWrapper->allocator_index); + return; + } + + allocator_info = acontext->allocator_array.allocators[pWrapper->allocator_index]; + if (pWrapper->magic_number != allocator_info->magic_number) + { + logError("file: "__FILE__", line: %d, " + "invalid magic number: %d != %d", + __LINE__, pWrapper->magic_number, + allocator_info->magic_number); + return; + } + + __sync_sub_and_fetch(&acontext->alloc_bytes, pWrapper->alloc_bytes); + pWrapper->allocator_index = -1; + pWrapper->magic_number = 0; + if (allocator_info->pooled) + { + fast_mblock_free_object(&allocator_info->mblock, obj); + } + else + { + free(obj); + } } diff --git a/src/fast_allocator.h b/src/fast_allocator.h index 6268fee..7a63ba0 100644 --- a/src/fast_allocator.h +++ b/src/fast_allocator.h @@ -18,13 +18,29 @@ #include "common_define.h" #include "fast_mblock.h" +struct fast_allocator_info +{ + int 
index;
+	short magic_number;
+	bool pooled;
+	struct fast_mblock_man mblock;
+};
+
 struct fast_region_info
 {
 	int start;
 	int end;
 	int step;
 	int alloc_elements_once;
-	struct fast_mblock_man *allocators;
+	int pad_mask;  //for internal use
+	struct fast_allocator_info *allocators;
+};
+
+struct fast_allocator_array
+{
+	int count;
+	int alloc;
+	struct fast_allocator_info **allocators;
 };
 
 struct fast_allocator_context
@@ -32,7 +48,10 @@ struct fast_allocator_context
 	struct fast_region_info *regions;
 	int region_count;
 
-	int64_t alloc_bytes;
+	struct fast_allocator_array allocator_array;
+
+	volatile int64_t alloc_bytes;  //total alloc bytes
+	//volatile int64_t padding_bytes;  //bytes used by allocator
 	bool need_lock;  //if need mutex lock for acontext
 };
 
diff --git a/src/fast_mblock.c b/src/fast_mblock.c
index a10c778..3c720ab 100644
--- a/src/fast_mblock.c
+++ b/src/fast_mblock.c
@@ -176,7 +176,7 @@ int fast_mblock_manager_stat(struct fast_mblock_info *stats,
 	return result;
 }
 
-int fast_mblock_manager_stat_print()
+int fast_mblock_manager_stat_print(const bool hide_empty)
 {
 	int result;
 	int count;
@@ -217,9 +217,17 @@ int fast_mblock_manager_stat_print()
 	stat_end = stats + count;
 	for (pStat=stats; pStat<stat_end; pStat++)
 	{
-		block_size = GET_BLOCK_SIZE(*pStat);
-		alloc_mem += block_size * pStat->element_total_count;
-		used_mem += block_size * pStat->element_used_count;
+		if (pStat->element_total_count > 0)
+		{
+			block_size = GET_BLOCK_SIZE(*pStat);
+			alloc_mem += block_size * pStat->element_total_count;
+			used_mem += block_size * pStat->element_used_count;
+		}
+		else if (hide_empty)
+		{
+			continue;
+		}
+
 		logInfo("%32s %12d %16d %10d %10d %14d %12d %11.2f%%", pStat->name,
 			pStat->element_size, pStat->instance_count,
 			pStat->trunk_total_count, pStat->trunk_used_count,
diff --git a/src/fast_mblock.h b/src/fast_mblock.h
index b5e8138..1ebaaf5 100644
--- a/src/fast_mblock.h
+++ b/src/fast_mblock.h
@@ -240,9 +240,10 @@ int fast_mblock_manager_stat(struct fast_mblock_info *stats,
 /** print mblock manager stat
 parameters:
+	hide_empty: if hide empty
 return error no, 0 for
success, != 0 fail */ -int fast_mblock_manager_stat_print(); +int fast_mblock_manager_stat_print(const bool hide_empty); typedef void (*fast_mblock_free_trunks_func)(struct fast_mblock_man *mblock, struct fast_mblock_malloc *freelist); diff --git a/src/shared_func.c b/src/shared_func.c index 20fc70b..bd17bf5 100644 --- a/src/shared_func.c +++ b/src/shared_func.c @@ -2321,3 +2321,16 @@ int get_sys_cpu_count() #endif } +bool is_power2(const int64_t n) +{ + int64_t i; + + i = 2; + while (i < n) + { + i *= 2; + } + + return i == n; +} + diff --git a/src/shared_func.h b/src/shared_func.h index e216a44..8e61b70 100644 --- a/src/shared_func.h +++ b/src/shared_func.h @@ -542,6 +542,13 @@ int get_sys_total_mem_size(int64_t *mem_size); */ int get_sys_cpu_count(); +/** is the number power 2 + * parameters: + * n: the number to test + * return: true for power 2, otherwise false +*/ +bool is_power2(const int64_t n); + #ifdef __cplusplus } #endif