mempool
Memory pool
When a workload uses a large amount of memory, the loader itself
can starve of memory and fail. To address this issue, we allocate a large
region of memory called a "segment" and manage it with our own allocators.
There are three allocators:
- Page-frag allocator that allocates memory in 256-byte fragments.
- SLAB-like allocator
- Traditional heap for small allocations (4-256 bytes)
The SLAB and page-frag allocators use bitmaps to track allocated areas.
SLAB allocator
The standard library allocator may be used for the mempool instead.
It is faster than the mempool allocators, but not autonomous.
Functions
mp_cache_init_impl, mp_cache_destroy
public
LIBEXPORT void mp_cache_init_impl(mp_cache_t* cache, const char* name, size_t item_size)
LIBEXPORT void mp_cache_destroy(mp_cache_t* cache)
mp_cache_alloc, mp_cache_free
public
LIBEXPORT void* mp_cache_alloc(mp_cache_t* cache)
LIBEXPORT void mp_cache_free(mp_cache_t* cache, void* ptr)
mp_cache_alloc_array, mp_cache_free_array
public
LIBEXPORT void* mp_cache_alloc_array(mp_cache_t* cache, unsigned num)
LIBEXPORT void mp_cache_free_array(mp_cache_t* cache, void* array, unsigned num)
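A hypothetical usage sketch of the cache API above (the header name is an assumption, and mempool_init() is expected to have run):

    #include <mempool.h>    /* header name assumed */

    struct request {
        int  id;
        char payload[64];
    };

    static mp_cache_t request_cache;

    void cache_example(void) {
        /* One cache serves fixed-size objects of one type. */
        mp_cache_init_impl(&request_cache, "request_cache",
                           sizeof(struct request));

        struct request* rq = mp_cache_alloc(&request_cache);
        rq->id = 1;
        mp_cache_free(&request_cache, rq);

        /* Array variants allocate and free num contiguous items. */
        struct request* batch = mp_cache_alloc_array(&request_cache, 8);
        mp_cache_free_array(&request_cache, batch, 8);

        mp_cache_destroy(&request_cache);
    }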
mp_malloc
public
Allocate at least sz bytes from the mempool allocators and return a pointer to the allocation.
For large (> MPHEAPMAXALLOC) or fragment-aligned (((sz & MPFRAGMASK) == 0)) allocations, the page-frag allocator is used.
Otherwise the allocation is served from the mempool heap.
NOTES
Never returns NULL. If all memory in the mempool segment is exhausted, it aborts execution.
LIBEXPORT void* mp_malloc(size_t sz)
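A sketch of that dispatch rule, using the internal allocators documented later in this section; the constant values below are placeholders, not the library's actual definitions:

    #define MPHEAPMAXALLOC  768     /* placeholder value */
    #define MPFRAGMASK      0xFF    /* placeholder: 256-byte fragments */

    void* mp_malloc_sketch(size_t sz) {
        /* Big or fragment-aligned requests go to the page-frag allocator. */
        if (sz > MPHEAPMAXALLOC || (sz & MPFRAGMASK) == 0)
            return mp_frag_alloc(sz, FRAG_COMMON);
        /* Everything else is carved out of the mempool heap. */
        return mp_heap_alloc(sz);
    }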
mp_realloc
public
Reallocate the memory at old to sz bytes.
Does not shrink allocations.
LIBEXPORT void* mp_realloc(void* old, size_t sz)
mp_free
public
LIBEXPORT void mp_free(void* ptr)
mempool_fini, mempool_init
public
LIBEXPORT int mempool_init(void)
LIBEXPORT void mempool_fini(void)
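A minimal end-to-end sketch of the allocator lifecycle described in this section (the header name and the return convention of mempool_init are assumptions):

    #include <mempool.h>    /* header name assumed */

    int main(void) {
        mempool_init();                 /* allocates the segment; assuming
                                           a zero return means success */

        char* buf = mp_malloc(100);     /* small: served by the mempool heap */
        buf = mp_realloc(buf, 200);     /* may grow; never shrinks */

        void* big = mp_malloc(4096);    /* multiple of the 256-byte fragment
                                           size: routed to the frag allocator */
        mp_free(big);
        mp_free(buf);

        mempool_fini();
        return 0;
    }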
mp_bitmap_find_region
private
Find a sequence of num zero bits in u
RETURN VALUES
Index of the found region, or -1 on failure
static int mp_bitmap_find_region(long u, int num)
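A minimal reimplementation sketch of this search (not the library's actual code):

    /* Scan for `num` consecutive zero bits in `u`;
     * return the bit index of the run, or -1 if there is none. */
    static int find_zero_region_sketch(unsigned long u, int num) {
        const int bits = (int)(sizeof(unsigned long) * 8);
        for (int i = 0; i + num <= bits; ++i) {
            unsigned long mask = (num >= bits)
                ? ~0UL
                : (((1UL << num) - 1) << i);
            if ((u & mask) == 0)
                return i;
        }
        return -1;
    }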
mp_bitmap_cell_alloc
Allocate num items from cell
This function is MT-safe, but if another thread is already using the cell,
it fails and returns -1
ARGUMENTS
- cell - Cell where allocation proceeds
- num - Number of items to allocate
- max_items - Total number of items in the cell
RETURN VALUES
Index of the item inside the cell, or -1 if allocation was unsuccessful
int mp_bitmap_cell_alloc(atomic_t* cell, int num, int max_items)
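The one-shot behavior can be sketched with a C11 compare-and-swap standing in for the library's atomic_t operations: if the cell changes under us, the function gives up rather than retrying. This continues the find_zero_region_sketch() example above.

    #include <stdatomic.h>

    static int cell_alloc_sketch(_Atomic unsigned long* cell,
                                 int num, int max_items) {
        unsigned long old = atomic_load(cell);
        int idx = find_zero_region_sketch(old, num);
        if (idx < 0 || idx + num > max_items)
            return -1;

        unsigned long run = (num >= (int)(sizeof(unsigned long) * 8))
            ? ~0UL
            : (((1UL << num) - 1) << idx);

        /* If another thread touched the cell since we loaded it,
         * fail instead of looping - matching the behavior described above. */
        if (!atomic_compare_exchange_strong(cell, &old, old | run))
            return -1;
        return idx;
    }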
mp_bitmap_alloc
Allocate items from bitmap
If an allocation attempt fails, it retries up to MPMAXRETRIES times,
sleeping for MPWAITTIME between retries
ARGUMENTS
- bitmap - Array of bitmap cells
- num_items - Total number of items in bitmap
- alloc_items - Number of items to allocate
RETURN VALUES
Index of the allocated item, or -1 if all retries failed
int mp_bitmap_alloc(atomic_t* bitmap, int num_items, int alloc_items)
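A sketch of the retry loop, continuing the sketches above; the constants and usleep() are placeholders for MPMAXRETRIES, MPWAITTIME, and the library's wait primitive, and runs do not span cells in this simplified version:

    #include <unistd.h>

    #define MAX_RETRIES   4        /* placeholder for MPMAXRETRIES */
    #define WAIT_TIME_US  1000     /* placeholder for MPWAITTIME */

    static int bitmap_alloc_sketch(_Atomic unsigned long* bitmap,
                                   int num_items, int alloc_items) {
        const int bits = (int)(sizeof(unsigned long) * 8);
        const int ncells = (num_items + bits - 1) / bits;

        for (int attempt = 0; attempt < MAX_RETRIES; ++attempt) {
            /* Try each cell in turn; a cell may be partially filled
             * or contended, in which case cell_alloc_sketch() fails. */
            for (int c = 0; c < ncells; ++c) {
                int in_cell = num_items - c * bits;
                int idx = cell_alloc_sketch(&bitmap[c], alloc_items,
                                            in_cell < bits ? in_cell : bits);
                if (idx >= 0)
                    return c * bits + idx;
            }
            usleep(WAIT_TIME_US);   /* back off, then retry the whole pass */
        }
        return -1;
    }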
mp_bitmap_alloc_cells
Allocate multiple cells from bitmap
ARGUMENTS
- bitmap - Array of bitmap cells
- num_items - Total number of items in bitmap
- alloc_cells - Number of cells to allocate
RETURN VALUES
Index of the first allocated item, or -1 if all retries failed
int mp_bitmap_alloc_cells(atomic_t* bitmap, int num_items, int alloc_cells)
mp_frag_alloc
Allocate a page fragment, an entire page, or multiple pages
If an appropriate fragment cannot be found, execution is aborted
ARGUMENTS
- size - Number of bytes to allocate
- flags - Fragment flags: FRAG_COMMON, FRAG_SLAB or FRAG_HEAP
RETURN VALUES
Pointer to the allocated fragment
void* mp_frag_alloc(size_t size, short flags)
mp_heap_page_alloc
Allocate and initialize a new heap page. If another thread is already
allocating a page, wait until it finishes, then return last_heap_page
RETURN VALUES
allocated page
mp_heap_page_t* mp_heap_page_alloc()
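The "wait for the concurrent allocator, then reuse its page" behavior might be sketched with a trylock; the locking primitive, page layout, and malloc stand-in here are all assumptions:

    #include <pthread.h>
    #include <stdlib.h>

    typedef struct heap_page { char data[4096]; } heap_page_t;

    static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;
    static heap_page_t* last_heap_page;

    static heap_page_t* heap_page_alloc_sketch(void) {
        if (pthread_mutex_trylock(&page_lock) != 0) {
            /* Someone else is already allocating: wait for them
             * to finish, then return the page they produced. */
            pthread_mutex_lock(&page_lock);
            heap_page_t* page = last_heap_page;
            pthread_mutex_unlock(&page_lock);
            return page;
        }
        heap_page_t* page = malloc(sizeof(*page));  /* stand-in for
                                 mp_frag_alloc(..., FRAG_HEAP) + init */
        last_heap_page = page;
        pthread_mutex_unlock(&page_lock);
        return page;
    }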
mp_heap_page_free
Free heap page
void mp_heap_page_free(mp_heap_page_t* page)
mp_heap_btree_linearize
private
Recursively linearize the free BST rooted at root into the free array, starting at index
ARGUMENTS
- free - Free list
- root - Root entry of the free BST (may be a subtree root in recursive calls)
- index - Index at which linearization starts (for recursive calls)
static int mp_heap_btree_linearize(mp_heap_header_t** free, mp_heap_header_t* root, int index)
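An in-order traversal sketch; the node layout below is a simplified assumption standing in for mp_heap_header_t:

    /* Simplified free-BST node (the real mp_heap_header_t layout
     * is not shown in this document). */
    typedef struct hh {
        size_t     size;    /* fragment size */
        struct hh* left;
        struct hh* right;
    } hh_t;

    /* Flatten the BST into free_list[] in-order; returns the next
     * free index, mirroring the recursive index threading above. */
    static int linearize_sketch(hh_t** free_list, hh_t* root, int index) {
        if (root == NULL)
            return index;
        index = linearize_sketch(free_list, root->left, index);
        free_list[index++] = root;
        return linearize_sketch(free_list, root->right, index);
    }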
mp_heap_page_defrag
Defragment the free BST.
Linearizes the free BST, finds neighboring free fragments, and merges them.
Uses mp_heap_btree_insert to form the new free BST
void mp_heap_page_defrag(mp_heap_page_t* page)
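The merge pass can be sketched as one scan over the linearized entries, assuming they come out ordered by address (an assumption here), coalescing each fragment into its predecessor whenever they touch:

    /* Using hh_t from the sketch above: merge physically adjacent
     * free fragments; returns the number of surviving entries. */
    static int coalesce_sketch(hh_t** lin, int n) {
        int out = 0;
        for (int i = 0; i < n; ++i) {
            if (out > 0 &&
                (char*)lin[out - 1] + lin[out - 1]->size == (char*)lin[i]) {
                lin[out - 1]->size += lin[i]->size;  /* enlarge predecessor */
            } else {
                lin[out++] = lin[i];
            }
        }
        return out;  /* survivors are then re-inserted into the free BST */
    }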
mp_heap_btree_insert
Insert entry hh into the free BST
TODO: According to mpbench measurements this is a hotspot and should be optimized
ARGUMENTS
- root - Pointer to the root of the tree (may be altered only if NULL)
- hh - Entry to be inserted
void mp_heap_btree_insert(mp_heap_header_t** root, mp_heap_header_t* hh)
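A plain unbalanced-BST insert sketch using the node type from above; keying the tree by fragment size is an assumption:

    /* *root is written only when the tree is empty, as documented. */
    static void btree_insert_sketch(hh_t** root, hh_t* hh) {
        hh->left = hh->right = NULL;
        if (*root == NULL) {
            *root = hh;
            return;
        }
        hh_t* node = *root;
        for (;;) {
            hh_t** next = (hh->size < node->size) ? &node->left
                                                  : &node->right;
            if (*next == NULL) {
                *next = hh;
                return;
            }
            node = *next;
        }
    }

An unbalanced tree degrades toward a linked list on skewed input, which is one plausible reason the TODO above flags this function as a hotspot.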
mp_heap_btree_delete
Removes an element from the free BST
ARGUMENTS
- root - Pointer to the root of the tree (may be altered)
- parent - Parent of hh
- hh - Header to remove
- direction - Direction from parent to hh:
  - MP_HH_N - no direction (hh is the root and pparent == &root)
  - MP_HH_L - parent --(left)---> hh
  - MP_HH_R - parent --(right)--> hh
void mp_heap_btree_delete(mp_heap_header_t** root, mp_heap_header_t* parent, mp_heap_header_t* hh, int direction)
mp_heap_btree_find_delete
Find the best-matching free entry, remove it from the free tree, split it if necessary, and return it
ARGUMENTS
- root - Pointer to the root of the btree
- units - Minimum size of the allocated fragment, in units
mp_heap_header_t* mp_heap_btree_find_delete(mp_heap_header_t** root, size_t units)
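The best-match lookup can be sketched as a best-fit walk over the size-keyed tree from the insert sketch above (the deletion and splitting steps are omitted, and hh->size is assumed to be measured in the same units):

    static hh_t* find_best_sketch(hh_t* root, size_t units) {
        hh_t* best = NULL;
        while (root != NULL) {
            if (root->size >= units) {
                best = root;          /* fits; look for a tighter fit */
                root = root->left;
            } else {
                root = root->right;   /* too small; look at larger nodes */
            }
        }
        return best;
    }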
mp_heap_alloc
Allocate an area from the heap
ARGUMENTS
- size - Size of the allocation area
RETURN VALUES
Pointer to the allocated area
void* mp_heap_alloc(size_t size)
mp_heap_free
Free an area allocated from the heap
ARGUMENTS
- ptr - Allocated area
void mp_heap_free(void* ptr)
mp_heap_get_size
private
Get size of allocated area from heap
static size_t mp_heap_get_size(void* ptr)
Types
typedef struct mp_cache
typedef struct mp_cache {
    thread_mutex_t c_page_lock;
    thread_rwlock_t c_list_lock;
    list_head_t c_page_list;
    struct mp_cache_page* c_last_page;
    size_t c_item_size;
    unsigned c_items_per_page;
    ptrdiff_t c_first_item_off;
    char c_name[MPCACHENAMELEN];
} mp_cache_t;