fast allocator ok
parent fb5abf5927
commit 389234ff76
HISTORY
@@ -1,5 +1,5 @@
-Version 1.23 2015-11-04
+Version 1.23 2015-11-05
 * sched_thread.c: task can execute in a new thread
 * sched_thread.c: support delay tasks
 * add function get_current_time_us and get_current_time_ms
 
fast_allocator.c
@@ -1,11 +1,79 @@
 //fast_allocator.c
 
 #include <errno.h>
+#include <stdlib.h>
 #include <pthread.h>
 #include "logger.h"
 #include "shared_func.h"
 #include "fast_allocator.h"
 
+#define BYTES_ALIGN(x, pad_mask)  (((x) + pad_mask) & (~pad_mask))
+
+struct allocator_wrapper {
+    int alloc_bytes;
+    short allocator_index;
+    short magic_number;
+};
+
+static struct fast_allocator_info malloc_allocator;
+
+#define ADD_ALLOCATOR_TO_ARRAY(acontext, allocator, _pooled) \
+    do { \
+        (allocator)->index = acontext->allocator_array.count; \
+        (allocator)->magic_number = rand(); \
+        (allocator)->pooled = _pooled; \
+        acontext->allocator_array.allocators[ \
+            acontext->allocator_array.count++] = allocator; \
+        /* logInfo("count: %d, magic_number: %d", acontext->allocator_array.count, (allocator)->magic_number); */ \
+    } while (0)
+
+static int allocator_array_check_capacity(struct fast_allocator_context *acontext,
+    const int allocator_count)
+{
+    int result;
+    int bytes;
+    struct fast_allocator_info **new_allocators;
+
+    if (acontext->allocator_array.alloc >= acontext->allocator_array.count +
+        allocator_count)
+    {
+        return 0;
+    }
+    if (acontext->allocator_array.alloc == 0)
+    {
+        acontext->allocator_array.alloc = 2 * allocator_count;
+    }
+    else
+    {
+        do
+        {
+            acontext->allocator_array.alloc *= 2;
+        } while (acontext->allocator_array.alloc < allocator_count);
+    }
+
+    bytes = sizeof(struct fast_allocator_info *) * acontext->allocator_array.alloc;
+    new_allocators = (struct fast_allocator_info **)malloc(bytes);
+    if (new_allocators == NULL)
+    {
+        result = errno != 0 ? errno : ENOMEM;
+        logError("file: "__FILE__", line: %d, "
+            "malloc %d bytes fail, errno: %d, error info: %s",
+            __LINE__, bytes, result, STRERROR(result));
+        return result;
+    }
+
+    if (acontext->allocator_array.allocators != NULL)
+    {
+        memcpy(new_allocators, acontext->allocator_array.allocators,
+            sizeof(struct fast_allocator_info *) *
+            acontext->allocator_array.count);
+        free(acontext->allocator_array.allocators);
+    }
+    acontext->allocator_array.allocators = new_allocators;
+    return 0;
+}
+
 static int region_init(struct fast_allocator_context *acontext,
     struct fast_region_info *region)
 {
@@ -13,11 +81,12 @@ static int region_init(struct fast_allocator_context *acontext,
     int bytes;
     int element_size;
     int allocator_count;
-    struct fast_mblock_man *mblock;
+    struct fast_allocator_info *allocator;
 
+    region->pad_mask = region->step - 1;
     allocator_count = (region->end - region->start) / region->step;
-    bytes = sizeof(struct fast_mblock_man) * allocator_count;
-    region->allocators = (struct fast_mblock_man *)malloc(bytes);
+    bytes = sizeof(struct fast_allocator_info) * allocator_count;
+    region->allocators = (struct fast_allocator_info *)malloc(bytes);
     if (region->allocators == NULL)
     {
         result = errno != 0 ? errno : ENOMEM;
@@ -28,17 +97,25 @@ static int region_init(struct fast_allocator_context *acontext,
     }
     memset(region->allocators, 0, bytes);
 
+    if ((result=allocator_array_check_capacity(acontext, allocator_count)) != 0)
+    {
+        return result;
+    }
+
     result = 0;
-    mblock = region->allocators;
+    allocator = region->allocators;
     for (element_size=region->start+region->step; element_size<=region->end;
-        element_size+=region->step,mblock++)
+        element_size+=region->step,allocator++)
     {
-        result = fast_mblock_init_ex(mblock, element_size,
+        result = fast_mblock_init_ex(&allocator->mblock, element_size,
             region->alloc_elements_once, NULL, acontext->need_lock);
         if (result != 0)
         {
             break;
         }
+
+        ADD_ALLOCATOR_TO_ARRAY(acontext, allocator, true);
     }
 
     return result;
@@ -49,14 +126,14 @@ static void region_destroy(struct fast_allocator_context *acontext,
 {
     int element_size;
     int allocator_count;
-    struct fast_mblock_man *mblock;
+    struct fast_allocator_info *allocator;
 
     allocator_count = (region->end - region->start) / region->step;
-    mblock = region->allocators;
+    allocator = region->allocators;
     for (element_size=region->start+region->step; element_size<=region->end;
-        element_size+=region->step,mblock++)
+        element_size+=region->step,allocator++)
     {
-        fast_mblock_destroy(mblock);
+        fast_mblock_destroy(&allocator->mblock);
     }
 
     free(region->allocators);
@@ -73,6 +150,7 @@ int fast_allocator_init_ex(struct fast_allocator_context *acontext,
     struct fast_region_info *pRegion;
     struct fast_region_info *region_end;
 
+    srand(time(NULL));
     memset(acontext, 0, sizeof(*acontext));
     if (region_count <= 0)
     {
@@ -113,7 +191,7 @@ int fast_allocator_init_ex(struct fast_allocator_context *acontext,
             result = EINVAL;
             break;
         }
-        if (pRegion->step <= 0)
+        if (pRegion->step <= 0 || !is_power2(pRegion->step))
         {
             logError("file: "__FILE__", line: %d, "
                 "invalid step: %d",
@@ -145,6 +223,16 @@ int fast_allocator_init_ex(struct fast_allocator_context *acontext,
         }
     }
 
+    if ((result=allocator_array_check_capacity(acontext, 1)) != 0)
+    {
+        return result;
+    }
+
+    ADD_ALLOCATOR_TO_ARRAY(acontext, &malloc_allocator, false);
+    /*
+    logInfo("sizeof(struct allocator_wrapper): %d, allocator_array count: %d",
+        (int)sizeof(struct allocator_wrapper), acontext->allocator_array.count);
+    */
     return result;
 }
 
@@ -159,7 +247,7 @@ int fast_allocator_init_ex(struct fast_allocator_context *acontext,
 int fast_allocator_init(struct fast_allocator_context *acontext,
     const bool need_lock)
 {
-#define DEFAULT_REGION_COUNT 6
+#define DEFAULT_REGION_COUNT 5
 
     struct fast_region_info regions[DEFAULT_REGION_COUNT];
 
@@ -178,23 +266,119 @@ void fast_allocator_destroy(struct fast_allocator_context *acontext)
     struct fast_region_info *pRegion;
     struct fast_region_info *region_end;
 
+    if (acontext->regions != NULL)
+    {
+        region_end = acontext->regions + acontext->region_count;
+        for (pRegion=acontext->regions; pRegion<region_end; pRegion++)
+        {
+            region_destroy(acontext, pRegion);
+        }
+        free(acontext->regions);
+    }
+
+    if (acontext->allocator_array.allocators != NULL)
+    {
+        free(acontext->allocator_array.allocators);
+    }
+    memset(acontext, 0, sizeof(*acontext));
+}
+
+static struct fast_allocator_info *get_allocator(struct fast_allocator_context *acontext,
+    int *alloc_bytes)
+{
+    struct fast_region_info *pRegion;
+    struct fast_region_info *region_end;
+
     region_end = acontext->regions + acontext->region_count;
     for (pRegion=acontext->regions; pRegion<region_end; pRegion++)
     {
-        region_destroy(acontext, pRegion);
+        if (*alloc_bytes <= pRegion->end)
+        {
+            *alloc_bytes = BYTES_ALIGN(*alloc_bytes, pRegion->pad_mask);
+            return pRegion->allocators + ((*alloc_bytes -
+                pRegion->start) / pRegion->step) - 1;
+        }
     }
 
-    free(acontext->regions);
-    acontext->regions = NULL;
+    return &malloc_allocator;
 }
 
-void* fast_allocator_alloc(struct fast_allocator_context *acontext,
+void *fast_allocator_alloc(struct fast_allocator_context *acontext,
     const int bytes)
 {
-    return NULL;
+    int alloc_bytes;
+    struct fast_allocator_info *allocator_info;
+    void *ptr;
+
+    if (bytes < 0)
+    {
+        return NULL;
+    }
+
+    alloc_bytes = sizeof(struct allocator_wrapper) + bytes;
+    allocator_info = get_allocator(acontext, &alloc_bytes);
+    if (allocator_info->pooled)
+    {
+        ptr = fast_mblock_alloc_object(&allocator_info->mblock);
+    }
+    else
+    {
+        ptr = malloc(alloc_bytes);
+    }
+    if (ptr == NULL)
+    {
+        return NULL;
+    }
+
+    ((struct allocator_wrapper *)ptr)->allocator_index = allocator_info->index;
+    ((struct allocator_wrapper *)ptr)->magic_number = allocator_info->magic_number;
+    ((struct allocator_wrapper *)ptr)->alloc_bytes = alloc_bytes;
+
+    __sync_add_and_fetch(&acontext->alloc_bytes, alloc_bytes);
+    return (char *)ptr + sizeof(struct allocator_wrapper);
 }
 
 void fast_allocator_free(struct fast_allocator_context *acontext, void *ptr)
 {
+    struct allocator_wrapper *pWrapper;
+    struct fast_allocator_info *allocator_info;
+    void *obj;
+    if (ptr == NULL)
+    {
+        return;
+    }
+
+    obj = (char *)ptr - sizeof(struct allocator_wrapper);
+    pWrapper = (struct allocator_wrapper *)obj;
+    if (pWrapper->allocator_index < 0 || pWrapper->allocator_index >=
+        acontext->allocator_array.count)
+    {
+        logError("file: "__FILE__", line: %d, "
+            "invalid allocator index: %d",
+            __LINE__, pWrapper->allocator_index);
+        return;
+    }
+
+    allocator_info = acontext->allocator_array.allocators[pWrapper->allocator_index];
+    if (pWrapper->magic_number != allocator_info->magic_number)
+    {
+        logError("file: "__FILE__", line: %d, "
+            "invalid magic number: %d != %d",
+            __LINE__, pWrapper->magic_number,
+            allocator_info->magic_number);
+        return;
+    }
+
+    __sync_sub_and_fetch(&acontext->alloc_bytes, pWrapper->alloc_bytes);
+    pWrapper->allocator_index = -1;
+    pWrapper->magic_number = 0;
+    if (allocator_info->pooled)
+    {
+        fast_mblock_free_object(&allocator_info->mblock, obj);
+    }
+    else
+    {
+        free(obj);
+    }
 }
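For quick reference, a minimal usage sketch of the allocator API touched by this commit (fast_allocator_init, fast_allocator_alloc, fast_allocator_free, fast_allocator_destroy). The 100-byte request and the error handling style are illustrative only, not taken from the commit:

#include <stdbool.h>
#include "fast_allocator.h"

int main()
{
    struct fast_allocator_context acontext;
    char *buff;

    /* init with the default regions, no mutex lock */
    if (fast_allocator_init(&acontext, false) != 0)
    {
        return 1;
    }

    /* request 100 bytes; the wrapper header and alignment are handled internally */
    buff = (char *)fast_allocator_alloc(&acontext, 100);
    if (buff != NULL)
    {
        fast_allocator_free(&acontext, buff);
    }

    fast_allocator_destroy(&acontext);
    return 0;
}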
fast_allocator.h
@@ -18,13 +18,29 @@
 #include "common_define.h"
 #include "fast_mblock.h"
 
+struct fast_allocator_info
+{
+    int index;
+    short magic_number;
+    bool pooled;
+    struct fast_mblock_man mblock;
+};
+
 struct fast_region_info
 {
     int start;
     int end;
     int step;
     int alloc_elements_once;
-    struct fast_mblock_man *allocators;
+    int pad_mask;  //for internal use
+    struct fast_allocator_info *allocators;
+};
+
+struct fast_allocator_array
+{
+    int count;
+    int alloc;
+    struct fast_allocator_info **allocators;
 };
 
 struct fast_allocator_context
@@ -32,7 +48,10 @@ struct fast_allocator_context
     struct fast_region_info *regions;
     int region_count;
 
-    int64_t alloc_bytes;
+    struct fast_allocator_array allocator_array;
+
+    volatile int64_t alloc_bytes;   //total alloc bytes
+    //volatile int64_t padding_bytes;  //bytes used by allocator
     bool need_lock;   //if need mutex lock for acontext
 };
 
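To make the lookup arithmetic behind the new pad_mask field concrete, here is a small standalone sketch of how an aligned size maps to a region slot. The region parameters (start 0, end 256, step 16) are hypothetical illustration values, not the library defaults:

#include <stdio.h>

/* same definition as in fast_allocator.c above */
#define BYTES_ALIGN(x, pad_mask)  (((x) + pad_mask) & (~pad_mask))

int main()
{
    /* hypothetical region: start = 0, end = 256, step = 16 => pad_mask = 15;
     * region_init() builds allocators with element sizes 16, 32, 48, ..., 256 */
    int start = 0, step = 16, pad_mask = step - 1;
    int alloc_bytes = 58;   /* requested bytes plus the allocator_wrapper header */

    alloc_bytes = BYTES_ALIGN(alloc_bytes, pad_mask);     /* (58 + 15) & ~15 = 64 */
    printf("aligned to %d, allocator slot %d\n",
        alloc_bytes, (alloc_bytes - start) / step - 1);   /* 64, slot 3 (element_size 64) */
    return 0;
}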
fast_mblock.c
@@ -176,7 +176,7 @@ int fast_mblock_manager_stat(struct fast_mblock_info *stats,
     return result;
 }
 
-int fast_mblock_manager_stat_print()
+int fast_mblock_manager_stat_print(const bool hide_empty)
 {
     int result;
     int count;
@@ -217,9 +217,17 @@
     stat_end = stats + count;
     for (pStat=stats; pStat<stat_end; pStat++)
     {
-        block_size = GET_BLOCK_SIZE(*pStat);
-        alloc_mem += block_size * pStat->element_total_count;
-        used_mem += block_size * pStat->element_used_count;
+        if (pStat->element_total_count > 0)
+        {
+            block_size = GET_BLOCK_SIZE(*pStat);
+            alloc_mem += block_size * pStat->element_total_count;
+            used_mem += block_size * pStat->element_used_count;
+        }
+        else if (hide_empty)
+        {
+            continue;
+        }
 
         logInfo("%32s %12d %16d %10d %10d %14d %12d %11.2f%%", pStat->name,
             pStat->element_size, pStat->instance_count,
             pStat->trunk_total_count, pStat->trunk_used_count,
fast_mblock.h
@@ -240,9 +240,10 @@ int fast_mblock_manager_stat(struct fast_mblock_info *stats,
 /**
 print mblock manager stat
 parameters:
+    hide_empty: if hide empty
 return error no, 0 for success, != 0 fail
 */
-int fast_mblock_manager_stat_print();
+int fast_mblock_manager_stat_print(const bool hide_empty);
 
 typedef void (*fast_mblock_free_trunks_func)(struct fast_mblock_man *mblock,
     struct fast_mblock_malloc *freelist);
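Note that the new hide_empty parameter changes the signature of fast_mblock_manager_stat_print(), so existing callers must be updated. A minimal call sketch (passing true is just an example choice):

/* print the mblock manager stat, skipping mblocks that currently hold no elements */
fast_mblock_manager_stat_print(true);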
shared_func.c
@@ -2321,3 +2321,16 @@ int get_sys_cpu_count()
 #endif
 }
 
+bool is_power2(const int64_t n)
+{
+    int64_t i;
+
+    i = 2;
+    while (i < n)
+    {
+        i *= 2;
+    }
+
+    return i == n;
+}
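A small standalone sketch of why the new is_power2() check on region->step matters: BYTES_ALIGN() masks with step - 1, which only rounds a size up to a multiple of the step when the step is a power of 2. The sample values (16, 24, 58) are illustrative:

#include <stdio.h>

#define BYTES_ALIGN(x, pad_mask)  (((x) + pad_mask) & (~pad_mask))

int main()
{
    /* power-of-2 step (16): masking with step - 1 rounds up to a multiple of the step */
    printf("%d\n", BYTES_ALIGN(58, 16 - 1));   /* 64, a multiple of 16 */

    /* non-power-of-2 step (24): the same mask does NOT yield a multiple of the step,
     * so the region slot arithmetic would pick an undersized mblock -- hence the check */
    printf("%d\n", BYTES_ALIGN(58, 24 - 1));   /* 64, not a multiple of 24 */
    return 0;
}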
shared_func.h
@@ -542,6 +542,13 @@ int get_sys_total_mem_size(int64_t *mem_size);
 */
 int get_sys_cpu_count();
 
+/** is the number power 2
+ *  parameters:
+ *      n: the number to test
+ *  return: true for power 2, otherwise false
+ */
+bool is_power2(const int64_t n);
+
 #ifdef __cplusplus
 }
 #endif