Memory pools refactoring.

Igor Sysoev
2017-06-19 16:26:19 +03:00
parent 7574c64992
commit b1b9f621a4
17 changed files with 1133 additions and 937 deletions


@@ -32,7 +32,7 @@ NXT_LIB_DEPS=" \
     src/nxt_parse.h \
     src/nxt_mem_pool.h \
     src/nxt_mem_pool_cleanup.h \
-    src/nxt_mem_cache_pool.h \
+    src/nxt_mp.h \
     src/nxt_mem_zone.h \
     src/nxt_sprintf.h \
     src/nxt_file_name.h \
@@ -96,7 +96,7 @@ NXT_LIB_SRCS=" \
     src/nxt_rbtree.c \
     src/nxt_mem_pool.c \
     src/nxt_mem_pool_cleanup.c \
-    src/nxt_mem_cache_pool.c \
+    src/nxt_mp.c \
     src/nxt_mem_zone.c \
     src/nxt_string.c \
     src/nxt_utf8.c \
@@ -210,7 +210,7 @@ NXT_LIB_UNIT_TEST_SRCS=" \
     test/nxt_rbtree_unit_test.c \
     test/nxt_term_parse_unit_test.c \
     test/nxt_msec_diff_unit_test.c \
-    test/nxt_mem_cache_pool_unit_test.c \
+    test/nxt_mp_unit_test.c \
     test/nxt_mem_zone_unit_test.c \
     test/nxt_lvlhsh_unit_test.c \
     test/nxt_gmtime_unit_test.c \


@@ -207,6 +207,12 @@ nxt_bswap32(val) \
     | ((val) << 24))
 
+#define nxt_is_power_of_two(value) \
+    ((((value) - 1) & (value)) == 0)
+
+#define nxt_lg2(value) \
+    (31 - __builtin_clz(value))
+
 #define \
 nxt_align_size(d, a) \
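
The two added macros are central to the new allocator: a power of two has a single set bit, and nxt_lg2() derives the integer base-2 logarithm from GCC's count-leading-zeros builtin. A standalone sketch (not part of the commit) showing what they evaluate to:

#include <stdio.h>

#define nxt_is_power_of_two(value)  ((((value) - 1) & (value)) == 0)
#define nxt_lg2(value)              (31 - __builtin_clz(value))

int
main(void)
{
    /* 1024 has only bit 10 set: a power of two, and lg2 is 10. */
    printf("%d %d\n", nxt_is_power_of_two(1024), nxt_lg2(1024));

    /* 48 = 0b110000 is not a power of two; lg2 truncates to 5. */
    printf("%d %d\n", nxt_is_power_of_two(48), nxt_lg2(48));

    return 0;
}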


@@ -528,6 +528,8 @@ nxt_event_engine_start(nxt_event_engine_t *engine)
             break;
         }
 
+        thr->task = task;
+
         handler(task, obj, data);
     }


@@ -487,7 +487,7 @@ struct nxt_event_engine_s {
     uint32_t                max_connections;
 
     nxt_port_t              *port;
-    nxt_mem_cache_pool_t    *mem_pool;
+    nxt_mp_t                *mem_pool;
     nxt_queue_t             joints;
     nxt_queue_t             listen_connections;
     nxt_queue_t             idle_connections;


@@ -121,9 +121,13 @@ nxt_log_debug(_log, ...) \
     } \
 } while (0)
 
+#define nxt_debug_alloc(...)
+
 #else
 
 #define nxt_debug(...)
 
+#define nxt_debug_alloc(...)
+
 #define \
 nxt_log_debug(...) \


@@ -76,7 +76,7 @@ typedef struct {
 } nxt_mem_proto_t;
 
-#include <nxt_mem_cache_pool.h>
+#include <nxt_mp.h>
 #include <nxt_mem_zone.h>
 #include <nxt_mem_pool_cleanup.h>
 #include <nxt_thread_time.h>


src/nxt_mem_cache_pool.c (deleted file, 789 lines)

@@ -1,789 +0,0 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#include <nxt_main.h>
/*
* A memory cache pool allocates memory in clusters of specified size that
* are aligned to page_alignment. A cluster is divided into pages of
* specified size. Page size must be a power of 2. A page can be used
* entirely or can be divided into chunks of equal size. Chunk size must be
* a power of 2. A cluster can contain pages with different chunk sizes.
* Cluster size must be a multiple of page size and need not be a power
* of 2. Allocations larger than a page are allocated outside clusters.
* Start addresses and sizes of the clusters and large allocations are
* stored in rbtree blocks to find them on free operations. The rbtree
* nodes are sorted by start addresses.
*/
typedef struct {
/*
* Used to link pages with free chunks in the pool chunk slot list
* or to link free pages in clusters.
*/
nxt_queue_link_t link;
/*
* Size of chunks or page shifted by pool->chunk_size_shift.
* Zero means that the page is free.
*/
uint8_t size;
/*
* Page number in page cluster.
* There can be no more than 256 pages in a cluster.
*/
uint8_t number;
/* Number of free chunks of a chunked page. */
uint8_t chunks;
uint8_t _unused;
/* Chunk bitmap. There can be no more than 32 chunks in a page. */
uint8_t map[4];
} nxt_mem_cache_page_t;
typedef enum {
/* Block of a cluster. The block is allocated apart from the cluster. */
NXT_MEM_CACHE_CLUSTER_BLOCK = 0,
/*
* Block of a large allocation.
* The block is allocated apart from the allocation.
*/
NXT_MEM_CACHE_DISCRETE_BLOCK,
/*
* Block of a large allocation.
* The block is allocated just after the allocation.
*/
NXT_MEM_CACHE_EMBEDDED_BLOCK,
} nxt_mem_cache_block_type_t;
typedef struct {
NXT_RBTREE_NODE (node);
nxt_mem_cache_block_type_t type:8;
/* Block size must be less than 4G. */
uint32_t size;
u_char *start;
nxt_mem_cache_page_t pages[];
} nxt_mem_cache_block_t;
typedef struct {
nxt_queue_t pages;
/* Size of page chunks. */
#if (NXT_64BIT)
uint32_t size;
#else
uint16_t size;
#endif
/* Maximum number of free chunks in chunked page. */
uint8_t chunks;
} nxt_mem_cache_slot_t;
struct nxt_mem_cache_pool_s {
/* rbtree of nxt_mem_cache_block_t. */
nxt_rbtree_t blocks;
nxt_queue_t free_pages;
uint8_t chunk_size_shift;
uint8_t page_size_shift;
uint32_t page_size;
uint32_t page_alignment;
uint32_t cluster_size;
nxt_mem_cache_slot_t slots[];
};
#define nxt_mem_cache_chunk_is_free(map, chunk) \
((map[chunk / 8] & (0x80 >> (chunk & 7))) == 0)
#define nxt_mem_cache_chunk_set_free(map, chunk) \
map[chunk / 8] &= ~(0x80 >> (chunk & 7))
#define nxt_mem_cache_free_junk(p, size) \
memset((p), 0x5A, size)
#define nxt_is_power_of_two(value) \
((((value) - 1) & (value)) == 0)
static nxt_uint_t nxt_mem_cache_shift(nxt_uint_t n);
#if !(NXT_DEBUG_MEMORY)
static void *nxt_mem_cache_alloc_small(nxt_mem_cache_pool_t *pool, size_t size);
static nxt_uint_t nxt_mem_cache_alloc_chunk(u_char *map, nxt_uint_t size);
static nxt_mem_cache_page_t *
nxt_mem_cache_alloc_page(nxt_mem_cache_pool_t *pool);
static nxt_mem_cache_block_t *
nxt_mem_cache_alloc_cluster(nxt_mem_cache_pool_t *pool);
#endif
static void *nxt_mem_cache_alloc_large(nxt_mem_cache_pool_t *pool,
size_t alignment, size_t size);
static intptr_t nxt_mem_cache_rbtree_compare(nxt_rbtree_node_t *node1,
nxt_rbtree_node_t *node2);
static nxt_mem_cache_block_t *nxt_mem_cache_find_block(nxt_rbtree_t *tree,
u_char *p);
static const char *nxt_mem_cache_chunk_free(nxt_mem_cache_pool_t *pool,
nxt_mem_cache_block_t *cluster, u_char *p);
nxt_mem_cache_pool_t *
nxt_mem_cache_pool_create(size_t cluster_size, size_t page_alignment,
size_t page_size, size_t min_chunk_size)
{
/* Alignment and sizes must be a power of 2. */
if (nxt_slow_path(!nxt_is_power_of_two(page_alignment)
|| !nxt_is_power_of_two(page_size)
|| !nxt_is_power_of_two(min_chunk_size)))
{
return NULL;
}
page_alignment = nxt_max(page_alignment, NXT_MAX_ALIGNMENT);
if (nxt_slow_path(page_size < 64
|| page_size < page_alignment
|| page_size < min_chunk_size
|| min_chunk_size * 32 < page_size
|| cluster_size < page_size
|| cluster_size / page_size > 256
|| cluster_size % page_size != 0))
{
return NULL;
}
return nxt_mem_cache_pool_fast_create(cluster_size, page_alignment,
page_size, min_chunk_size);
}
nxt_mem_cache_pool_t *
nxt_mem_cache_pool_fast_create(size_t cluster_size, size_t page_alignment,
size_t page_size, size_t min_chunk_size)
{
nxt_uint_t slots, chunk_size;
nxt_mem_cache_slot_t *slot;
nxt_mem_cache_pool_t *pool;
slots = 0;
chunk_size = page_size;
do {
slots++;
chunk_size /= 2;
} while (chunk_size > min_chunk_size);
pool = nxt_zalloc(sizeof(nxt_mem_cache_pool_t)
+ slots * sizeof(nxt_mem_cache_slot_t));
if (nxt_fast_path(pool != NULL)) {
pool->page_size = page_size;
pool->page_alignment = nxt_max(page_alignment, NXT_MAX_ALIGNMENT);
pool->cluster_size = cluster_size;
slot = pool->slots;
do {
nxt_queue_init(&slot->pages);
slot->size = chunk_size;
/* slot->chunks is one less than the actual number of chunks. */
slot->chunks = (page_size / chunk_size) - 1;
slot++;
chunk_size *= 2;
} while (chunk_size < page_size);
pool->chunk_size_shift = nxt_mem_cache_shift(min_chunk_size);
pool->page_size_shift = nxt_mem_cache_shift(page_size);
nxt_rbtree_init(&pool->blocks, nxt_mem_cache_rbtree_compare);
nxt_queue_init(&pool->free_pages);
}
return pool;
}
static nxt_uint_t
nxt_mem_cache_shift(nxt_uint_t n)
{
nxt_uint_t shift;
shift = 0;
n /= 2;
do {
shift++;
n /= 2;
} while (n != 0);
return shift;
}
nxt_bool_t
nxt_mem_cache_pool_is_empty(nxt_mem_cache_pool_t *pool)
{
return (nxt_rbtree_is_empty(&pool->blocks)
&& nxt_queue_is_empty(&pool->free_pages));
}
void
nxt_mem_cache_pool_destroy(nxt_mem_cache_pool_t *pool)
{
void *p;
nxt_rbtree_node_t *node, *next;
nxt_mem_cache_block_t *block;
next = nxt_rbtree_root(&pool->blocks);
while (next != nxt_rbtree_sentinel(&pool->blocks)) {
node = nxt_rbtree_destroy_next(&pool->blocks, &next);
block = (nxt_mem_cache_block_t *) node;
p = block->start;
if (block->type != NXT_MEM_CACHE_EMBEDDED_BLOCK) {
nxt_free(block);
}
nxt_free(p);
}
nxt_free(pool);
}
void *
nxt_mem_cache_alloc(nxt_mem_cache_pool_t *pool, size_t size)
{
// nxt_debug(task, "mem cache alloc: %zd", size);
#if !(NXT_DEBUG_MEMORY)
if (size <= pool->page_size) {
return nxt_mem_cache_alloc_small(pool, size);
}
#endif
return nxt_mem_cache_alloc_large(pool, NXT_MAX_ALIGNMENT, size);
}
void *
nxt_mem_cache_zalloc(nxt_mem_cache_pool_t *pool, size_t size)
{
void *p;
p = nxt_mem_cache_alloc(pool, size);
if (nxt_fast_path(p != NULL)) {
memset(p, 0, size);
}
return p;
}
void *
nxt_mem_cache_align(nxt_mem_cache_pool_t *pool, size_t alignment, size_t size)
{
// nxt_debug(task, "mem cache align: @%zd:%zd", alignment, size);
/* Alignment must be a power of 2. */
if (nxt_fast_path(nxt_is_power_of_two(alignment))) {
#if !(NXT_DEBUG_MEMORY)
if (size <= pool->page_size && alignment <= pool->page_alignment) {
size = nxt_max(size, alignment);
if (size <= pool->page_size) {
return nxt_mem_cache_alloc_small(pool, size);
}
}
#endif
return nxt_mem_cache_alloc_large(pool, alignment, size);
}
return NULL;
}
void *
nxt_mem_cache_zalign(nxt_mem_cache_pool_t *pool, size_t alignment, size_t size)
{
void *p;
p = nxt_mem_cache_align(pool, alignment, size);
if (nxt_fast_path(p != NULL)) {
memset(p, 0, size);
}
return p;
}
#if !(NXT_DEBUG_MEMORY)
nxt_inline u_char *
nxt_mem_cache_page_addr(nxt_mem_cache_pool_t *pool, nxt_mem_cache_page_t *page)
{
nxt_mem_cache_block_t *block;
block = (nxt_mem_cache_block_t *)
((u_char *) page - page->number * sizeof(nxt_mem_cache_page_t)
- offsetof(nxt_mem_cache_block_t, pages));
return block->start + (page->number << pool->page_size_shift);
}
static void *
nxt_mem_cache_alloc_small(nxt_mem_cache_pool_t *pool, size_t size)
{
u_char *p;
nxt_queue_link_t *link;
nxt_mem_cache_page_t *page;
nxt_mem_cache_slot_t *slot;
p = NULL;
if (size <= pool->page_size / 2) {
/* Find a slot with appropriate chunk size. */
for (slot = pool->slots; slot->size < size; slot++) { /* void */ }
size = slot->size;
if (nxt_fast_path(!nxt_queue_is_empty(&slot->pages))) {
link = nxt_queue_first(&slot->pages);
page = nxt_queue_link_data(link, nxt_mem_cache_page_t, link);
p = nxt_mem_cache_page_addr(pool, page);
p += nxt_mem_cache_alloc_chunk(page->map, size);
page->chunks--;
if (page->chunks == 0) {
/*
* Remove full page from the pool chunk slot list
* of pages with free chunks.
*/
nxt_queue_remove(&page->link);
}
} else {
page = nxt_mem_cache_alloc_page(pool);
if (nxt_fast_path(page != NULL)) {
nxt_queue_insert_head(&slot->pages, &page->link);
/* Mark the first chunk as busy. */
page->map[0] = 0x80;
page->map[1] = 0;
page->map[2] = 0;
page->map[3] = 0;
/* slot->chunks are already one less. */
page->chunks = slot->chunks;
page->size = size >> pool->chunk_size_shift;
p = nxt_mem_cache_page_addr(pool, page);
}
}
} else {
page = nxt_mem_cache_alloc_page(pool);
if (nxt_fast_path(page != NULL)) {
page->size = pool->page_size >> pool->chunk_size_shift;
p = nxt_mem_cache_page_addr(pool, page);
}
#if (NXT_DEBUG)
size = pool->page_size;
#endif
}
// nxt_debug(task, "mem cache chunk:%uz alloc: %p", size, p);
return p;
}
static nxt_uint_t
nxt_mem_cache_alloc_chunk(uint8_t *map, nxt_uint_t size)
{
uint8_t mask;
nxt_uint_t n, offset;
offset = 0;
n = 0;
/* The page must have at least one free chunk. */
for ( ;; ) {
if (map[n] != 0xff) {
mask = 0x80;
do {
if ((map[n] & mask) == 0) {
/* A free chunk is found. */
map[n] |= mask;
return offset;
}
offset += size;
mask >>= 1;
} while (mask != 0);
} else {
/* Fast-forward: all 8 chunks are occupied. */
offset += size * 8;
}
n++;
}
}
static nxt_mem_cache_page_t *
nxt_mem_cache_alloc_page(nxt_mem_cache_pool_t *pool)
{
nxt_queue_link_t *link;
nxt_mem_cache_page_t *page;
nxt_mem_cache_block_t *cluster;
if (nxt_queue_is_empty(&pool->free_pages)) {
cluster = nxt_mem_cache_alloc_cluster(pool);
if (nxt_slow_path(cluster == NULL)) {
return NULL;
}
}
link = nxt_queue_first(&pool->free_pages);
nxt_queue_remove(link);
page = nxt_queue_link_data(link, nxt_mem_cache_page_t, link);
return page;
}
static nxt_mem_cache_block_t *
nxt_mem_cache_alloc_cluster(nxt_mem_cache_pool_t *pool)
{
nxt_uint_t n;
nxt_mem_cache_block_t *cluster;
n = pool->cluster_size >> pool->page_size_shift;
cluster = nxt_zalloc(sizeof(nxt_mem_cache_block_t)
+ n * sizeof(nxt_mem_cache_page_t));
if (nxt_slow_path(cluster == NULL)) {
return NULL;
}
/* NXT_MEM_CACHE_CLUSTER_BLOCK type is zero. */
cluster->size = pool->cluster_size;
cluster->start = nxt_memalign(pool->page_alignment, pool->cluster_size);
if (nxt_slow_path(cluster->start == NULL)) {
nxt_free(cluster);
return NULL;
}
n--;
cluster->pages[n].number = n;
nxt_queue_insert_head(&pool->free_pages, &cluster->pages[n].link);
while (n != 0) {
n--;
cluster->pages[n].number = n;
nxt_queue_insert_before(&cluster->pages[n + 1].link,
&cluster->pages[n].link);
}
nxt_rbtree_insert(&pool->blocks, &cluster->node);
return cluster;
}
#endif
static void *
nxt_mem_cache_alloc_large(nxt_mem_cache_pool_t *pool, size_t alignment,
size_t size)
{
u_char *p;
size_t aligned_size;
uint8_t type;
nxt_mem_cache_block_t *block;
/* Allocation must be less than 4G. */
if (nxt_slow_path(size >= 0xffffffff)) {
return NULL;
}
if (nxt_is_power_of_two(size)) {
block = nxt_malloc(sizeof(nxt_mem_cache_block_t));
if (nxt_slow_path(block == NULL)) {
return NULL;
}
p = nxt_memalign(alignment, size);
if (nxt_slow_path(p == NULL)) {
nxt_free(block);
return NULL;
}
type = NXT_MEM_CACHE_DISCRETE_BLOCK;
} else {
aligned_size = nxt_align_size(size, sizeof(uintptr_t));
p = nxt_memalign(alignment,
aligned_size + sizeof(nxt_mem_cache_block_t));
if (nxt_slow_path(p == NULL)) {
return NULL;
}
block = (nxt_mem_cache_block_t *) (p + aligned_size);
type = NXT_MEM_CACHE_EMBEDDED_BLOCK;
}
block->type = type;
block->size = size;
block->start = p;
nxt_rbtree_insert(&pool->blocks, &block->node);
return p;
}
static intptr_t
nxt_mem_cache_rbtree_compare(nxt_rbtree_node_t *node1, nxt_rbtree_node_t *node2)
{
nxt_mem_cache_block_t *block1, *block2;
block1 = (nxt_mem_cache_block_t *) node1;
block2 = (nxt_mem_cache_block_t *) node2;
return (uintptr_t) block1->start - (uintptr_t) block2->start;
}
void
nxt_mem_cache_free(nxt_mem_cache_pool_t *pool, void *p)
{
const char *err;
nxt_mem_cache_block_t *block;
// nxt_debug(task, "mem cache free %p", p);
block = nxt_mem_cache_find_block(&pool->blocks, p);
if (nxt_fast_path(block != NULL)) {
if (block->type == NXT_MEM_CACHE_CLUSTER_BLOCK) {
err = nxt_mem_cache_chunk_free(pool, block, p);
if (nxt_fast_path(err == NULL)) {
return;
}
} else if (nxt_fast_path(p == block->start)) {
nxt_rbtree_delete(&pool->blocks, &block->node);
if (block->type == NXT_MEM_CACHE_DISCRETE_BLOCK) {
nxt_free(block);
}
nxt_free(p);
return;
} else {
err = "freed pointer points to middle of block: %p";
}
} else {
err = "freed pointer is out of pool: %p";
}
// nxt_log(task, NXT_LOG_CRIT, err, p);
}
static nxt_mem_cache_block_t *
nxt_mem_cache_find_block(nxt_rbtree_t *tree, u_char *p)
{
nxt_rbtree_node_t *node, *sentinel;
nxt_mem_cache_block_t *block;
node = nxt_rbtree_root(tree);
sentinel = nxt_rbtree_sentinel(tree);
while (node != sentinel) {
block = (nxt_mem_cache_block_t *) node;
if (p < block->start) {
node = node->left;
} else if (p >= block->start + block->size) {
node = node->right;
} else {
return block;
}
}
return NULL;
}
static const char *
nxt_mem_cache_chunk_free(nxt_mem_cache_pool_t *pool,
nxt_mem_cache_block_t *cluster, u_char *p)
{
u_char *start;
uintptr_t offset;
nxt_uint_t n, size, chunk;
nxt_mem_cache_page_t *page;
nxt_mem_cache_slot_t *slot;
n = (p - cluster->start) >> pool->page_size_shift;
start = cluster->start + (n << pool->page_size_shift);
page = &cluster->pages[n];
if (page->size == 0) {
return "freed pointer points to already free page: %p";
}
size = page->size << pool->chunk_size_shift;
if (size != pool->page_size) {
offset = (uintptr_t) (p - start) & (pool->page_size - 1);
chunk = offset / size;
if (nxt_slow_path(offset != chunk * size)) {
return "freed pointer points to wrong chunk: %p";
}
if (nxt_slow_path(nxt_mem_cache_chunk_is_free(page->map, chunk))) {
return "freed pointer points to already free chunk: %p";
}
nxt_mem_cache_chunk_set_free(page->map, chunk);
/* Find a slot with appropriate chunk size. */
for (slot = pool->slots; slot->size < size; slot++) { /* void */ }
if (page->chunks != slot->chunks) {
page->chunks++;
if (page->chunks == 1) {
/*
* Add the page to the head of pool chunk slot list
* of pages with free chunks.
*/
nxt_queue_insert_head(&slot->pages, &page->link);
}
nxt_mem_cache_free_junk(p, size);
return NULL;
} else {
/*
* All chunks are free, remove the page from pool chunk slot
* list of pages with free chunks.
*/
nxt_queue_remove(&page->link);
}
} else if (nxt_slow_path(p != start)) {
return "invalid pointer to chunk: %p";
}
/* Add the free page to the pool's free pages list. */
page->size = 0;
nxt_queue_insert_head(&pool->free_pages, &page->link);
nxt_mem_cache_free_junk(p, size);
/* Test if all pages in the cluster are free. */
page = cluster->pages;
n = pool->cluster_size >> pool->page_size_shift;
do {
if (page->size != 0) {
return NULL;
}
page++;
n--;
} while (n != 0);
/* Free cluster. */
page = cluster->pages;
n = pool->cluster_size >> pool->page_size_shift;
do {
nxt_queue_remove(&page->link);
page++;
n--;
} while (n != 0);
nxt_rbtree_delete(&pool->blocks, &cluster->node);
p = cluster->start;
nxt_free(cluster);
nxt_free(p);
return NULL;
}


src/nxt_mem_cache_pool.h (deleted file, 40 lines)

@@ -1,40 +0,0 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#ifndef _NXT_MEM_CACHE_POOL_H_INCLUDED_
#define _NXT_MEM_CACHE_POOL_H_INCLUDED_
typedef struct nxt_mem_cache_pool_s nxt_mem_cache_pool_t;
NXT_EXPORT nxt_mem_cache_pool_t *nxt_mem_cache_pool_create(size_t cluster_size,
size_t page_alignment, size_t page_size, size_t min_chunk_size)
NXT_MALLOC_LIKE;
NXT_EXPORT nxt_mem_cache_pool_t *
nxt_mem_cache_pool_fast_create(size_t cluster_size,
size_t page_alignment, size_t page_size, size_t min_chunk_size)
NXT_MALLOC_LIKE;
NXT_EXPORT nxt_bool_t nxt_mem_cache_pool_is_empty(nxt_mem_cache_pool_t *pool);
NXT_EXPORT void nxt_mem_cache_pool_destroy(nxt_mem_cache_pool_t *pool);
NXT_EXPORT void *nxt_mem_cache_alloc(nxt_mem_cache_pool_t *pool, size_t size)
NXT_MALLOC_LIKE;
NXT_EXPORT void *nxt_mem_cache_zalloc(nxt_mem_cache_pool_t *pool, size_t size)
NXT_MALLOC_LIKE;
NXT_EXPORT void *nxt_mem_cache_align(nxt_mem_cache_pool_t *pool,
size_t alignment, size_t size)
NXT_MALLOC_LIKE;
NXT_EXPORT void *nxt_mem_cache_zalign(nxt_mem_cache_pool_t *pool,
size_t alignment, size_t size)
NXT_MALLOC_LIKE;
NXT_EXPORT void nxt_mem_cache_free(nxt_mem_cache_pool_t *pool, void *p);
extern const nxt_mem_proto_t nxt_mem_cache_proto;
#endif /* _NXT_MEM_CACHE_POOL_H_INCLUDED_ */

src/nxt_mp.c (new file, 890 lines)

@@ -0,0 +1,890 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#include <nxt_main.h>
/*
* A memory pool allocates memory in clusters of specified size that are
* aligned to page_alignment. A cluster is divided into pages of specified
* size. Page size must be a power of 2. A page can be used entirely or
* can be divided into chunks of equal size. Chunk size must be a power
* of 2. Non-freeable memory is also allocated from pages. A cluster can
* contain a mix of pages with different chunk sizes and non-freeable
* pages. Cluster size must be a multiple of page size and need not be a
* power of 2. Allocations larger than a page are allocated outside
* clusters. Start addresses and sizes of the clusters and large
* allocations are stored in rbtree blocks to find them on free operations.
* The rbtree nodes are sorted by start addresses. The rbtree is also used
* to destroy the memory pool.
*/
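/*
 * For example (an illustration, not part of the original sources):
 * nxt_mp_create(4096, 128, 1024, 64), the parameters the router passes
 * elsewhere in this commit, makes 4096-byte clusters aligned to 128
 * bytes, each holding four 1024-byte pages.  Requests up to 512 bytes
 * are served from chunked pages with 64-, 128-, 256- or 512-byte
 * chunks, requests of 513..1024 bytes take a whole page, and anything
 * larger becomes a separate large allocation tracked in the rbtree.
 */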
typedef struct {
/*
* Used to link
* *) pages with free chunks in pool chunk pages lists,
* *) pages with free space for non-freeable allocations,
* *) free pages in clusters.
*/
nxt_queue_link_t link;
union {
/* Chunk bitmap. There can be no more than 32 chunks in a page. */
uint32_t map;
/* Size of taken non-freeable space. */
uint32_t taken;
} u;
/*
* Size of chunks or page shifted by pool->chunk_size_shift. Zero means
* that the page is free; 0xFF means a page with non-freeable allocations.
*/
uint8_t size;
/* Number of free chunks of a chunked page. */
uint8_t chunks;
/*
* Number of failed allocation attempts due to insufficient free space
* in a non-freeable page.
*/
uint8_t fails;
/*
* Page number in page cluster.
* There can be no more than 256 pages in a cluster.
*/
uint8_t number;
} nxt_mp_page_t;
/*
* Some malloc implementations (e.g. jemalloc) allocate large enough
* blocks (e.g. greater than 4K) with 4K alignment.  So if a block
* descriptor were allocated together with the block, it would take an
* excessive 4K of memory.  It is therefore better to allocate the block
* descriptor separately.
*/
typedef enum {
/* Block of a cluster. The block is allocated apart from the cluster. */
NXT_MP_CLUSTER_BLOCK = 0,
/*
* Block of a large allocation.
* The block is allocated apart from the allocation.
*/
NXT_MP_DISCRETE_BLOCK,
/*
* Block of a large allocation.
* The block is allocated just after the allocation.
*/
NXT_MP_EMBEDDED_BLOCK,
} nxt_mp_block_type_t;
typedef struct {
NXT_RBTREE_NODE (node);
nxt_mp_block_type_t type:8;
/* Block size must be less than 4G. */
uint32_t size;
u_char *start;
nxt_mp_page_t pages[];
} nxt_mp_block_t;
struct nxt_mp_s {
/* rbtree of nxt_mp_block_t. */
nxt_rbtree_t blocks;
uint8_t chunk_size_shift;
uint8_t page_size_shift;
uint32_t page_size;
uint32_t page_alignment;
uint32_t cluster_size;
uint32_t retain;
/* Lists of nxt_mp_page_t. */
nxt_queue_t free_pages;
nxt_queue_t nget_pages;
nxt_queue_t get_pages;
nxt_queue_t chunk_pages[0];
};
#define nxt_mp_chunk_get_free(map) \
(__builtin_ffs(map) - 1)
#define nxt_mp_chunk_is_free(map, chunk) \
((map & (1 << chunk)) != 0)
#define nxt_mp_chunk_set_busy(map, chunk) \
map &= ~(1 << chunk)
#define nxt_mp_chunk_set_free(map, chunk) \
map |= (1 << chunk)
#define nxt_mp_free_junk(p, size) \
memset((p), 0x5A, size)
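/*
 * An illustration (not part of the original sources): the chunk map is
 * a 32-bit mask in which a set bit marks a free chunk.  A freshly
 * allocated chunked page has u.map == 0xFFFFFFFE, i.e. only chunk 0 is
 * busy; __builtin_ffs() returns 2, the 1-based position of the lowest
 * set bit, so nxt_mp_chunk_get_free() yields 1 as the next free chunk.
 */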
#if !(NXT_DEBUG_MEMORY)
static void *nxt_mp_get_small(nxt_mp_t *mp, nxt_queue_t *pages, size_t size);
static void *nxt_mp_alloc_small(nxt_mp_t *mp, size_t size);
static nxt_mp_page_t *nxt_mp_alloc_page(nxt_mp_t *mp);
static nxt_mp_block_t *nxt_mp_alloc_cluster(nxt_mp_t *mp);
#endif
static void *nxt_mp_alloc_large(nxt_mp_t *mp, size_t alignment, size_t size);
static intptr_t nxt_mp_rbtree_compare(nxt_rbtree_node_t *node1,
nxt_rbtree_node_t *node2);
static nxt_mp_block_t *nxt_mp_find_block(nxt_rbtree_t *tree, u_char *p);
static const char *nxt_mp_chunk_free(nxt_mp_t *mp, nxt_mp_block_t *cluster,
u_char *p);
nxt_mp_t *
nxt_mp_create(size_t cluster_size, size_t page_alignment, size_t page_size,
size_t min_chunk_size)
{
nxt_mp_t *mp;
nxt_uint_t pages, chunk_size;
nxt_queue_t *chunk_pages;
pages = 0;
chunk_size = page_size;
do {
pages++;
chunk_size /= 2;
} while (chunk_size > min_chunk_size);
mp = nxt_zalloc(sizeof(nxt_mp_t) + pages * sizeof(nxt_queue_t));
if (nxt_fast_path(mp != NULL)) {
mp->retain = 1;
mp->page_size = page_size;
mp->page_alignment = nxt_max(page_alignment, NXT_MAX_ALIGNMENT);
mp->cluster_size = cluster_size;
chunk_pages = mp->chunk_pages;
do {
nxt_queue_init(chunk_pages);
chunk_pages++;
chunk_size *= 2;
} while (chunk_size < page_size);
mp->chunk_size_shift = nxt_lg2(min_chunk_size);
mp->page_size_shift = nxt_lg2(page_size);
nxt_rbtree_init(&mp->blocks, nxt_mp_rbtree_compare);
nxt_queue_init(&mp->free_pages);
}
return mp;
}
void
nxt_mp_destroy(nxt_mp_t *mp)
{
void *p;
nxt_mp_block_t *block;
nxt_rbtree_node_t *node, *next;
nxt_debug_alloc("mp destroy");
next = nxt_rbtree_root(&mp->blocks);
while (next != nxt_rbtree_sentinel(&mp->blocks)) {
node = nxt_rbtree_destroy_next(&mp->blocks, &next);
block = (nxt_mp_block_t *) node;
p = block->start;
if (block->type != NXT_MP_EMBEDDED_BLOCK) {
nxt_free(block);
}
nxt_free(p);
}
nxt_free(mp);
}
nxt_bool_t
nxt_mp_test_sizes(size_t cluster_size, size_t page_alignment, size_t page_size,
size_t min_chunk_size)
{
nxt_bool_t valid;
/* Alignment and sizes must be a power of 2. */
valid = nxt_expect(1, (nxt_is_power_of_two(page_alignment)
&& nxt_is_power_of_two(page_size)
&& nxt_is_power_of_two(min_chunk_size)));
if (!valid) {
return 0;
}
page_alignment = nxt_max(page_alignment, NXT_MAX_ALIGNMENT);
valid = nxt_expect(1, (page_size >= 64
&& page_size >= page_alignment
&& page_size >= min_chunk_size
&& min_chunk_size * 32 >= page_size
&& cluster_size >= page_size
&& cluster_size / page_size <= 256
&& cluster_size % page_size == 0));
if (!valid) {
return 0;
}
return 1;
}
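/*
 * For example (an illustration, not part of the original sources): the
 * parameters used by the new unit test in this commit,
 * nxt_mp_test_sizes(1024, 128, 128, 16), satisfy every check above:
 * the alignment, page size and minimum chunk size are powers of 2,
 * 32 chunks of 16 bytes cover the 128-byte page, and the 1024-byte
 * cluster is exactly eight pages.
 */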
nxt_bool_t
nxt_mp_is_empty(nxt_mp_t *mp)
{
return (nxt_rbtree_is_empty(&mp->blocks)
&& nxt_queue_is_empty(&mp->free_pages));
}
void *
nxt_mp_alloc(nxt_mp_t *mp, size_t size)
{
nxt_debug_alloc("mp alloc: %uz", size);
#if !(NXT_DEBUG_MEMORY)
if (size <= mp->page_size) {
return nxt_mp_alloc_small(mp, size);
}
#endif
return nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size);
}
void *
nxt_mp_zalloc(nxt_mp_t *mp, size_t size)
{
void *p;
p = nxt_mp_alloc(mp, size);
if (nxt_fast_path(p != NULL)) {
memset(p, 0, size);
}
return p;
}
void *
nxt_mp_align(nxt_mp_t *mp, size_t alignment, size_t size)
{
nxt_debug_alloc("mp align: @%uz:%uz", alignment, size);
/* Alignment must be a power of 2. */
if (nxt_fast_path(nxt_is_power_of_two(alignment))) {
#if !(NXT_DEBUG_MEMORY)
if (size <= mp->page_size && alignment <= mp->page_alignment) {
size = nxt_max(size, alignment);
if (size <= mp->page_size) {
return nxt_mp_alloc_small(mp, size);
}
}
#endif
return nxt_mp_alloc_large(mp, alignment, size);
}
return NULL;
}
void *
nxt_mp_zalign(nxt_mp_t *mp, size_t alignment, size_t size)
{
void *p;
p = nxt_mp_align(mp, alignment, size);
if (nxt_fast_path(p != NULL)) {
memset(p, 0, size);
}
return p;
}
#if !(NXT_DEBUG_MEMORY)
nxt_inline u_char *
nxt_mp_page_addr(nxt_mp_t *mp, nxt_mp_page_t *page)
{
size_t page_offset;
nxt_mp_block_t *block;
page_offset = page->number * sizeof(nxt_mp_page_t)
+ offsetof(nxt_mp_block_t, pages);
block = (nxt_mp_block_t *) ((u_char *) page - page_offset);
return block->start + (page->number << mp->page_size_shift);
}
nxt_inline nxt_uint_t
nxt_mp_chunk_pages_index(nxt_mp_t *mp, size_t size)
{
nxt_int_t n, index;
index = 0;
if (size > 1) {
n = nxt_lg2(size - 1) + 1 - mp->chunk_size_shift;
if (n > 0) {
index = n;
}
}
return index;
}
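/*
 * A worked example (an illustration, not part of the original sources):
 * with min_chunk_size == 64, i.e. chunk_size_shift == 6, a request for
 * 200 bytes gives nxt_lg2(199) + 1 == 8, so the index is 8 - 6 == 2,
 * which selects the chunk_pages list of 64 << 2 == 256-byte chunks,
 * the smallest chunk size that fits the request.
 */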
static void *
nxt_mp_alloc_small(nxt_mp_t *mp, size_t size)
{
u_char *p;
nxt_uint_t n, index;
nxt_queue_t *chunk_pages;
nxt_mp_page_t *page;
nxt_queue_link_t *link;
p = NULL;
if (size <= mp->page_size / 2) {
index = nxt_mp_chunk_pages_index(mp, size);
chunk_pages = &mp->chunk_pages[index];
if (nxt_fast_path(!nxt_queue_is_empty(chunk_pages))) {
link = nxt_queue_first(chunk_pages);
page = nxt_queue_link_data(link, nxt_mp_page_t, link);
p = nxt_mp_page_addr(mp, page);
n = nxt_mp_chunk_get_free(page->u.map);
nxt_mp_chunk_set_busy(page->u.map, n);
p += ((n << index) << mp->chunk_size_shift);
page->chunks--;
if (page->chunks == 0) {
/*
* Remove full page from the pool chunk pages list
* of pages with free chunks.
*/
nxt_queue_remove(&page->link);
}
} else {
page = nxt_mp_alloc_page(mp);
if (nxt_fast_path(page != NULL)) {
page->size = (1 << index);
n = mp->page_size_shift - (index + mp->chunk_size_shift);
page->chunks = (1 << n) - 1;
nxt_queue_insert_head(chunk_pages, &page->link);
/* Mark the first chunk as busy. */
page->u.map = 0xFFFFFFFE;
p = nxt_mp_page_addr(mp, page);
}
}
} else {
page = nxt_mp_alloc_page(mp);
if (nxt_fast_path(page != NULL)) {
page->size = mp->page_size >> mp->chunk_size_shift;
p = nxt_mp_page_addr(mp, page);
}
}
nxt_debug_alloc("mp chunk:%uz alloc: %p",
page->size << mp->chunk_size_shift, p);
return p;
}
static nxt_mp_page_t *
nxt_mp_alloc_page(nxt_mp_t *mp)
{
nxt_mp_page_t *page;
nxt_mp_block_t *cluster;
nxt_queue_link_t *link;
if (nxt_queue_is_empty(&mp->free_pages)) {
cluster = nxt_mp_alloc_cluster(mp);
if (nxt_slow_path(cluster == NULL)) {
return NULL;
}
}
link = nxt_queue_first(&mp->free_pages);
nxt_queue_remove(link);
page = nxt_queue_link_data(link, nxt_mp_page_t, link);
return page;
}
static nxt_mp_block_t *
nxt_mp_alloc_cluster(nxt_mp_t *mp)
{
nxt_uint_t n;
nxt_mp_block_t *cluster;
n = mp->cluster_size >> mp->page_size_shift;
cluster = nxt_zalloc(sizeof(nxt_mp_block_t) + n * sizeof(nxt_mp_page_t));
if (nxt_slow_path(cluster == NULL)) {
return NULL;
}
/* NXT_MP_CLUSTER_BLOCK type is zero. */
cluster->size = mp->cluster_size;
cluster->start = nxt_memalign(mp->page_alignment, mp->cluster_size);
if (nxt_slow_path(cluster->start == NULL)) {
nxt_free(cluster);
return NULL;
}
n--;
cluster->pages[n].number = n;
nxt_queue_insert_head(&mp->free_pages, &cluster->pages[n].link);
while (n != 0) {
n--;
cluster->pages[n].number = n;
nxt_queue_insert_before(&cluster->pages[n + 1].link,
&cluster->pages[n].link);
}
nxt_rbtree_insert(&mp->blocks, &cluster->node);
return cluster;
}
#endif
static void *
nxt_mp_alloc_large(nxt_mp_t *mp, size_t alignment, size_t size)
{
u_char *p;
size_t aligned_size;
uint8_t type;
nxt_mp_block_t *block;
/* Allocation must be less than 4G. */
if (nxt_slow_path(size >= 0xFFFFFFFF)) {
return NULL;
}
if (nxt_is_power_of_two(size)) {
block = nxt_malloc(sizeof(nxt_mp_block_t));
if (nxt_slow_path(block == NULL)) {
return NULL;
}
p = nxt_memalign(alignment, size);
if (nxt_slow_path(p == NULL)) {
nxt_free(block);
return NULL;
}
type = NXT_MP_DISCRETE_BLOCK;
} else {
aligned_size = nxt_align_size(size, sizeof(uintptr_t));
p = nxt_memalign(alignment, aligned_size + sizeof(nxt_mp_block_t));
if (nxt_slow_path(p == NULL)) {
return NULL;
}
block = (nxt_mp_block_t *) (p + aligned_size);
type = NXT_MP_EMBEDDED_BLOCK;
}
block->type = type;
block->size = size;
block->start = p;
nxt_rbtree_insert(&mp->blocks, &block->node);
return p;
}
static intptr_t
nxt_mp_rbtree_compare(nxt_rbtree_node_t *node1, nxt_rbtree_node_t *node2)
{
nxt_mp_block_t *block1, *block2;
block1 = (nxt_mp_block_t *) node1;
block2 = (nxt_mp_block_t *) node2;
return (uintptr_t) block1->start - (uintptr_t) block2->start;
}
void
nxt_mp_free(nxt_mp_t *mp, void *p)
{
const char *err;
nxt_thread_t *thread;
nxt_mp_block_t *block;
nxt_debug_alloc("mp free %p", p);
block = nxt_mp_find_block(&mp->blocks, p);
if (nxt_fast_path(block != NULL)) {
if (block->type == NXT_MP_CLUSTER_BLOCK) {
err = nxt_mp_chunk_free(mp, block, p);
if (nxt_fast_path(err == NULL)) {
return;
}
} else if (nxt_fast_path(p == block->start)) {
nxt_rbtree_delete(&mp->blocks, &block->node);
if (block->type == NXT_MP_DISCRETE_BLOCK) {
nxt_free(block);
}
nxt_free(p);
return;
} else {
err = "freed pointer points to middle of block: %p";
}
} else {
err = "freed pointer is out of pool: %p";
}
thread = nxt_thread();
nxt_log(thread->task, NXT_LOG_CRIT, err, p);
}
static nxt_mp_block_t *
nxt_mp_find_block(nxt_rbtree_t *tree, u_char *p)
{
nxt_mp_block_t *block;
nxt_rbtree_node_t *node, *sentinel;
node = nxt_rbtree_root(tree);
sentinel = nxt_rbtree_sentinel(tree);
while (node != sentinel) {
block = (nxt_mp_block_t *) node;
if (p < block->start) {
node = node->left;
} else if (p >= block->start + block->size) {
node = node->right;
} else {
return block;
}
}
return NULL;
}
static const char *
nxt_mp_chunk_free(nxt_mp_t *mp, nxt_mp_block_t *cluster, u_char *p)
{
u_char *start;
uintptr_t offset;
nxt_uint_t n, size, chunk;
nxt_queue_t *chunk_pages;
nxt_mp_page_t *page;
n = (p - cluster->start) >> mp->page_size_shift;
start = cluster->start + (n << mp->page_size_shift);
page = &cluster->pages[n];
if (nxt_slow_path(page->size == 0)) {
return "freed pointer points to already free page: %p";
}
if (nxt_slow_path(page->size == 0xFF)) {
return "freed pointer points to non-freeble page: %p";
}
size = page->size << mp->chunk_size_shift;
if (size != mp->page_size) {
offset = (uintptr_t) (p - start) & (mp->page_size - 1);
chunk = offset / size;
if (nxt_slow_path(offset != chunk * size)) {
return "freed pointer points to wrong chunk: %p";
}
if (nxt_slow_path(nxt_mp_chunk_is_free(page->u.map, chunk))) {
return "freed pointer points to already free chunk: %p";
}
nxt_mp_chunk_set_free(page->u.map, chunk);
if (page->u.map != 0xFFFFFFFF) {
page->chunks++;
if (page->chunks == 1) {
/*
* Add the page to the head of pool chunk pages list
* of pages with free chunks.
*/
n = nxt_mp_chunk_pages_index(mp, size);
chunk_pages = &mp->chunk_pages[n];
nxt_queue_insert_head(chunk_pages, &page->link);
}
nxt_mp_free_junk(p, size);
return NULL;
} else {
/*
* All chunks are free, remove the page from pool
* chunk pages list of pages with free chunks.
*/
nxt_queue_remove(&page->link);
}
} else if (nxt_slow_path(p != start)) {
return "invalid pointer to chunk: %p";
}
/* Add the free page to the pool's free pages list. */
page->size = 0;
nxt_queue_insert_head(&mp->free_pages, &page->link);
nxt_mp_free_junk(p, size);
/* Test if all pages in the cluster are free. */
n = mp->cluster_size >> mp->page_size_shift;
page = cluster->pages;
do {
if (page->size != 0) {
return NULL;
}
page++;
n--;
} while (n != 0);
/* Free cluster. */
n = mp->cluster_size >> mp->page_size_shift;
page = cluster->pages;
do {
nxt_queue_remove(&page->link);
page++;
n--;
} while (n != 0);
nxt_rbtree_delete(&mp->blocks, &cluster->node);
p = cluster->start;
nxt_free(cluster);
nxt_free(p);
return NULL;
}
void *
nxt_mp_retain(nxt_mp_t *mp, size_t size)
{
void *p;
p = nxt_mp_alloc(mp, size);
if (nxt_fast_path(p != NULL)) {
mp->retain++;
nxt_debug_alloc("mp retain: %uD", mp->retain);
}
return p;
}
void
nxt_mp_release(nxt_mp_t *mp, void *p)
{
nxt_mp_free(mp, p);
mp->retain--;
nxt_debug_alloc("mp release: %uD", mp->retain);
if (mp->retain == 0) {
nxt_mp_destroy(mp);
}
}
void *
nxt_mp_nget(nxt_mp_t *mp, size_t size)
{
nxt_debug_alloc("mp nget: %uz", size);
#if !(NXT_DEBUG_MEMORY)
if (size <= mp->page_size) {
return nxt_mp_get_small(mp, &mp->nget_pages, size);
}
#endif
return nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size);
}
void *
nxt_mp_get(nxt_mp_t *mp, size_t size)
{
nxt_debug_alloc("mp get: %uz", size);
#if !(NXT_DEBUG_MEMORY)
if (size <= mp->page_size) {
size = nxt_max(size, NXT_MAX_ALIGNMENT);
return nxt_mp_get_small(mp, &mp->get_pages, size);
}
#endif
return nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size);
}
void *
nxt_mp_zget(nxt_mp_t *mp, size_t size)
{
void *p;
p = nxt_mp_get(mp, size);
if (nxt_fast_path(p != NULL)) {
memset(p, 0, size);
}
return p;
}
static void *
nxt_mp_get_small(nxt_mp_t *mp, nxt_queue_t *pages, size_t size)
{
u_char *p;
uint32_t available;
nxt_mp_page_t *page;
nxt_queue_link_t *link, *next;
for (link = nxt_queue_first(pages);
link != nxt_queue_tail(pages);
link = next)
{
next = nxt_queue_next(link);
page = nxt_queue_link_data(link, nxt_mp_page_t, link);
available = mp->page_size - page->u.taken;
if (size <= available) {
goto found;
}
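        /*
         * Drop pages that are full, or that have repeatedly failed to
         * satisfy an allocation, from the list so they are not rescanned
         * on every request.
         */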
if (available == 0 || page->fails++ > 100) {
nxt_queue_remove(link);
}
}
page = nxt_mp_alloc_page(mp);
if (nxt_slow_path(page == NULL)) {
return page;
}
nxt_queue_insert_head(pages, &page->link);
page->size = 0xFF;
found:
p = nxt_mp_page_addr(mp, page);
p += page->u.taken;
page->u.taken += size;
nxt_debug_alloc("mp get: %p", p);
return p;
}

src/nxt_mp.h (new file, 112 lines)

@@ -0,0 +1,112 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#ifndef _NXT_MP_H_INCLUDED_
#define _NXT_MP_H_INCLUDED_
/*
* A memory pool keeps track of all allocations so they can be freed at once
* on pool destruction.  A memory pool is not thread safe, so only one thread
* must work with the pool.  If an allocation should be passed to another
* thread, it should be allocated with nxt_mp_retain() and later freed with
* nxt_mp_release().  These functions update the pool's retention counter.
* Memory pools decrease the number of malloc() and free() calls and thus
* reduce thread contention on locks in the malloc library.  Memory pools
* allow both freeable and non-freeable allocations.  The freeable memory
* is allocated in fixed-size chunks to decrease memory fragmentation on
* reallocations.  The non-freeable memory is intended for structures and
* other items which should be available until memory pool destruction.
* Due to the allocation strategy described in nxt_mp.c, memory pools may
* also improve data cache locality.  A short usage sketch follows this
* header file.
*/
typedef struct nxt_mp_s nxt_mp_t;
/*
* nxt_mp_create() creates a memory pool and sets the pool's retention
* counter to 1.
*/
NXT_EXPORT nxt_mp_t *nxt_mp_create(size_t cluster_size, size_t page_alignment,
size_t page_size, size_t min_chunk_size)
NXT_MALLOC_LIKE;
/*
* nxt_mp_destroy() destroys the memory pool regardless of the pool's
* retention counter.
*/
NXT_EXPORT void nxt_mp_destroy(nxt_mp_t *mp);
/* nxt_mp_test_sizes() tests validity of memory pool parameters. */
NXT_EXPORT nxt_bool_t nxt_mp_test_sizes(size_t cluster_size,
size_t page_alignment, size_t page_size, size_t min_chunk_size);
/* nxt_mp_is_empty() tests whether the pool is empty. */
NXT_EXPORT nxt_bool_t nxt_mp_is_empty(nxt_mp_t *mp);
/*
* nxt_mp_alloc() returns aligned freeable memory.
* The alignment is suitable for allocating structures.
*/
NXT_EXPORT void *nxt_mp_alloc(nxt_mp_t *mp, size_t size)
NXT_MALLOC_LIKE;
/*
* nxt_mp_zalloc() returns zeroed aligned freeable memory.
* The alignment is suitable for allocating structures.
*/
NXT_EXPORT void *nxt_mp_zalloc(nxt_mp_t *mp, size_t size)
NXT_MALLOC_LIKE;
/* nxt_mp_align() returns aligned freeable memory. */
NXT_EXPORT void *nxt_mp_align(nxt_mp_t *mp, size_t alignment, size_t size)
NXT_MALLOC_LIKE;
/* nxt_mp_zalign() returns zeroed aligned freeable memory. */
NXT_EXPORT void *nxt_mp_zalign(nxt_mp_t *mp, size_t alignment, size_t size)
NXT_MALLOC_LIKE;
/* nxt_mp_free() frees freeable memory. */
NXT_EXPORT void nxt_mp_free(nxt_mp_t *mp, void *p);
/*
* nxt_mp_retain() returns aligned freeable memory and increases the
* memory pool's retention counter.
*/
NXT_EXPORT void *nxt_mp_retain(nxt_mp_t *mp, size_t size)
NXT_MALLOC_LIKE;
/*
* nxt_mp_release() frees freeable memory and decreases the memory pool's
* retention counter. If the counter becomes zero, the pool is destroyed.
*/
NXT_EXPORT void nxt_mp_release(nxt_mp_t *mp, void *p);
/* nxt_mp_nget() returns non-aligned non-freeable memory. */
NXT_EXPORT void *nxt_mp_nget(nxt_mp_t *mp, size_t size)
NXT_MALLOC_LIKE;
/*
* nxt_mp_get() returns aligned non-freeable memory.
* The alignment is suitable for allocating structures.
*/
NXT_EXPORT void *nxt_mp_get(nxt_mp_t *mp, size_t size)
NXT_MALLOC_LIKE;
/*
* nxt_mp_zget() returns zeroed aligned non-freeable memory.
* The alignment is suitable for allocating structures.
*/
NXT_EXPORT void *nxt_mp_zget(nxt_mp_t *mp, size_t size)
NXT_MALLOC_LIKE;
#endif /* _NXT_MP_H_INCLUDED_ */
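
A minimal usage sketch of the API above (an illustration, not part of the commit; mp_usage_example is a hypothetical function, and the pool parameters are the ones nxt_router.c passes elsewhere in this diff):

#include <nxt_main.h>

static void
mp_usage_example(void)
{
    void      *p, *q;
    nxt_mp_t  *mp;

    mp = nxt_mp_create(4096, 128, 1024, 64);  /* retention counter is 1 */
    if (mp == NULL) {
        return;
    }

    p = nxt_mp_alloc(mp, 200);   /* freeable, served from a 256-byte chunk */
    q = nxt_mp_zget(mp, 40);     /* non-freeable and zeroed */

    if (p != NULL) {
        nxt_mp_free(mp, p);      /* only freeable memory may be freed */
    }

    (void) q;                    /* q needs no (and allows no) free call */

    nxt_mp_destroy(mp);          /* releases q and all clusters at once */
}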


@@ -668,9 +668,10 @@ nxt_router_thread_start(void *data)
     engine->task.thread = thread;
     engine->task.log = thread->log;
     thread->engine = engine;
+    thread->task = &engine->task;
     thread->fiber = &engine->fibers->fiber;
 
-    engine->mem_pool = nxt_mem_cache_pool_create(4096, 1024, 1024, 64);
+    engine->mem_pool = nxt_mp_create(4096, 128, 1024, 64);
 
     nxt_event_engine_start(engine);
 }
@@ -870,7 +871,7 @@ nxt_router_thread_exit_handler(nxt_task_t *task, void *obj, void *data)
     nxt_queue_remove(&engine->link);
 
-    nxt_mem_cache_pool_destroy(engine->mem_pool);
+    nxt_mp_destroy(engine->mem_pool);
 
     nxt_event_engine_free(engine);


@@ -168,6 +168,8 @@ struct nxt_thread_s {
     nxt_log_t            *log;
     nxt_log_t            main_log;
 
+    nxt_task_t           *task;
+
     nxt_tid_t            tid;
     nxt_thread_handle_t  handle;
 #if (NXT_THREADS)


@@ -24,6 +24,7 @@ nxt_msec_less(nxt_msec_t first, nxt_msec_t second)
 int nxt_cdecl
 main(int argc, char **argv)
 {
+    nxt_task_t    task;
     nxt_thread_t  *thr;
 
     if (nxt_lib_start("lib_unit_test", argv, &environ) != NXT_OK) {
@@ -31,8 +32,10 @@ main(int argc, char **argv)
     }
 
     nxt_main_log.level = NXT_LOG_INFO;
+    task.log = &nxt_main_log;
 
     thr = nxt_thread();
+    thr->task = &task;
 
 #if (NXT_UNIT_TEST_RTDTSC)
@@ -96,15 +99,15 @@ main(int argc, char **argv)
         return 1;
     }
 
-    if (nxt_mem_cache_pool_unit_test(thr, 100, 40000, 128 - 1) != NXT_OK) {
+    if (nxt_mp_unit_test(thr, 100, 40000, 128 - 1) != NXT_OK) {
         return 1;
     }
 
-    if (nxt_mem_cache_pool_unit_test(thr, 100, 1000, 4096 - 1) != NXT_OK) {
+    if (nxt_mp_unit_test(thr, 100, 1000, 4096 - 1) != NXT_OK) {
         return 1;
     }
 
-    if (nxt_mem_cache_pool_unit_test(thr, 1000, 100, 64 * 1024 - 1) != NXT_OK) {
+    if (nxt_mp_unit_test(thr, 1000, 100, 64 * 1024 - 1) != NXT_OK) {
         return 1;
     }


@@ -54,7 +54,7 @@ void nxt_rbtree1_mb_delete(nxt_thread_t *thr);
 #endif
 
-nxt_int_t nxt_mem_cache_pool_unit_test(nxt_thread_t *thr, nxt_uint_t runs,
+nxt_int_t nxt_mp_unit_test(nxt_thread_t *thr, nxt_uint_t runs,
     nxt_uint_t nblocks, size_t max_size);
 nxt_int_t nxt_mem_zone_unit_test(nxt_thread_t *thr, nxt_uint_t runs,
     nxt_uint_t nblocks, size_t max_size);


@@ -21,14 +21,14 @@ nxt_lvlhsh_unit_test_key_test(nxt_lvlhsh_query_t *lhq, void *data)
 static void *
 nxt_lvlhsh_unit_test_pool_alloc(void *pool, size_t size, nxt_uint_t nalloc)
 {
-    return nxt_mem_cache_align(pool, size, size);
+    return nxt_mp_align(pool, size, size);
 }
 
 static void
 nxt_lvlhsh_unit_test_pool_free(void *pool, void *p, size_t size)
 {
-    nxt_mem_cache_free(pool, p);
+    nxt_mp_free(pool, p);
 }
@@ -132,11 +132,11 @@ nxt_int_t
 nxt_lvlhsh_unit_test(nxt_thread_t *thr, nxt_uint_t n, nxt_bool_t use_pool)
 {
     uintptr_t                 key;
+    nxt_mp_t                  *mp;
     nxt_nsec_t                start, end;
     nxt_uint_t                i;
     nxt_lvlhsh_t              lh;
     nxt_lvlhsh_each_t         lhe;
-    nxt_mem_cache_pool_t      *pool;
     const nxt_lvlhsh_proto_t  *proto;
 
     const size_t  min_chunk_size = 32;
@@ -148,9 +148,9 @@ nxt_lvlhsh_unit_test(nxt_thread_t *thr, nxt_uint_t n, nxt_bool_t use_pool)
     start = nxt_thread_monotonic_time(thr);
 
     if (use_pool) {
-        pool = nxt_mem_cache_pool_create(cluster_size, page_alignment,
-                                         page_size, min_chunk_size);
-        if (pool == NULL) {
+        mp = nxt_mp_create(cluster_size, page_alignment, page_size,
+                           min_chunk_size);
+        if (mp == NULL) {
             return NXT_ERROR;
         }
@@ -162,7 +162,7 @@ nxt_lvlhsh_unit_test(nxt_thread_t *thr, nxt_uint_t n, nxt_bool_t use_pool)
         nxt_log_error(NXT_LOG_NOTICE, thr->log,
                       "lvlhsh unit test started: %uD malloc", n);
         proto = &malloc_proto;
-        pool = NULL;
+        mp = NULL;
     }
 
     nxt_memzero(&lh, sizeof(nxt_lvlhsh_t));
@@ -171,7 +171,7 @@ nxt_lvlhsh_unit_test(nxt_thread_t *thr, nxt_uint_t n, nxt_bool_t use_pool)
     for (i = 0; i < n; i++) {
         key = nxt_murmur_hash2(&key, sizeof(uint32_t));
 
-        if (nxt_lvlhsh_unit_test_add(&lh, proto, pool, key) != NXT_OK) {
+        if (nxt_lvlhsh_unit_test_add(&lh, proto, mp, key) != NXT_OK) {
             nxt_log_error(NXT_LOG_NOTICE, thr->log,
                           "lvlhsh add unit test failed at %ui", i);
             return NXT_ERROR;
@@ -206,19 +206,18 @@ nxt_lvlhsh_unit_test(nxt_thread_t *thr, nxt_uint_t n, nxt_bool_t use_pool)
     for (i = 0; i < n; i++) {
         key = nxt_murmur_hash2(&key, sizeof(uint32_t));
 
-        if (nxt_lvlhsh_unit_test_delete(&lh, proto, pool, key) != NXT_OK) {
+        if (nxt_lvlhsh_unit_test_delete(&lh, proto, mp, key) != NXT_OK) {
             return NXT_ERROR;
         }
     }
 
-    if (pool != NULL) {
-        if (!nxt_mem_cache_pool_is_empty(pool)) {
-            nxt_log_error(NXT_LOG_NOTICE, thr->log,
-                          "mem cache pool is not empty");
+    if (mp != NULL) {
+        if (!nxt_mp_is_empty(mp)) {
+            nxt_log_error(NXT_LOG_NOTICE, thr->log, "mem pool is not empty");
             return NXT_ERROR;
         }
 
-        nxt_mem_cache_pool_destroy(pool);
+        nxt_mp_destroy(mp);
     }
 
     nxt_thread_time_update(thr);


test/nxt_mem_cache_pool_unit_test.c (deleted file, 83 lines)

@@ -1,83 +0,0 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#include <nxt_main.h>
nxt_int_t
nxt_mem_cache_pool_unit_test(nxt_thread_t *thr, nxt_uint_t runs,
nxt_uint_t nblocks, size_t max_size)
{
void **blocks;
size_t total;
uint32_t value, size;
nxt_uint_t i, n;
nxt_mem_cache_pool_t *pool;
const size_t min_chunk_size = 16;
const size_t page_size = 128;
const size_t page_alignment = 128;
const size_t cluster_size = page_size * 8;
nxt_thread_time_update(thr);
nxt_log_error(NXT_LOG_NOTICE, thr->log,
"mem cache pool unit test started, max:%uz", max_size);
blocks = nxt_malloc(nblocks * sizeof(void *));
if (blocks == NULL) {
return NXT_ERROR;
}
pool = nxt_mem_cache_pool_create(cluster_size, page_alignment,
page_size, min_chunk_size);
if (pool == NULL) {
return NXT_ERROR;
}
value = 0;
for (i = 0; i < runs; i++) {
total = 0;
for (n = 0; n < nblocks; n++) {
value = nxt_murmur_hash2(&value, sizeof(uint32_t));
size = value & max_size;
if (size == 0) {
size++;
}
total += size;
blocks[n] = nxt_mem_cache_alloc(pool, size);
if (blocks[n] == NULL) {
nxt_log_error(NXT_LOG_NOTICE, thr->log,
"mem cache pool unit test failed: %uz", total);
return NXT_ERROR;
}
}
for (n = 0; n < nblocks; n++) {
nxt_mem_cache_free(pool, blocks[n]);
}
}
if (!nxt_mem_cache_pool_is_empty(pool)) {
nxt_log_error(NXT_LOG_NOTICE, thr->log, "mem cache pool is not empty");
return NXT_ERROR;
}
nxt_mem_cache_pool_destroy(pool);
nxt_free(blocks);
nxt_thread_time_update(thr);
nxt_log_error(NXT_LOG_NOTICE, thr->log, "mem cache pool unit test passed");
return NXT_OK;
}

test/nxt_mp_unit_test.c (new file, 89 lines)

@@ -0,0 +1,89 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#include <nxt_main.h>
nxt_int_t
nxt_mp_unit_test(nxt_thread_t *thr, nxt_uint_t runs, nxt_uint_t nblocks,
size_t max_size)
{
void **blocks;
size_t total;
uint32_t value, size;
nxt_mp_t *mp;
nxt_bool_t valid;
nxt_uint_t i, n;
const size_t min_chunk_size = 16;
const size_t page_size = 128;
const size_t page_alignment = 128;
const size_t cluster_size = page_size * 8;
nxt_thread_time_update(thr);
nxt_log_error(NXT_LOG_NOTICE, thr->log,
"mem pool unit test started, max:%uz", max_size);
blocks = nxt_malloc(nblocks * sizeof(void *));
if (blocks == NULL) {
return NXT_ERROR;
}
valid = nxt_mp_test_sizes(cluster_size, page_alignment, page_size,
min_chunk_size);
if (!valid) {
return NXT_ERROR;
}
mp = nxt_mp_create(cluster_size, page_alignment, page_size, min_chunk_size);
if (mp == NULL) {
return NXT_ERROR;
}
value = 0;
for (i = 0; i < runs; i++) {
total = 0;
for (n = 0; n < nblocks; n++) {
value = nxt_murmur_hash2(&value, sizeof(uint32_t));
size = value & max_size;
if (size == 0) {
size++;
}
total += size;
blocks[n] = nxt_mp_alloc(mp, size);
if (blocks[n] == NULL) {
nxt_log_error(NXT_LOG_NOTICE, thr->log,
"mem pool unit test failed: %uz", total);
return NXT_ERROR;
}
}
for (n = 0; n < nblocks; n++) {
nxt_mp_free(mp, blocks[n]);
}
}
if (!nxt_mp_is_empty(mp)) {
nxt_log_error(NXT_LOG_NOTICE, thr->log, "mem pool is not empty");
return NXT_ERROR;
}
nxt_mp_destroy(mp);
nxt_free(blocks);
nxt_thread_time_update(thr);
nxt_log_error(NXT_LOG_NOTICE, thr->log, "mem pool unit test passed");
return NXT_OK;
}