nginx implements its own memory pool, and all of its memory allocation goes through it. The basic idea is to pre-allocate a chunk of memory: requests below a threshold (small allocations) are served directly out of the pool, while requests above it (large allocations) go straight to malloc. The relevant code lives in os/unix/ngx_alloc.{c,h} and core/ngx_palloc.{c,h}.
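Before digging into the implementation, here is a minimal usage sketch (not from the original post; it assumes the code is compiled inside the nginx source tree, that a valid ngx_log_t *log is available, and that the 64 KB request exceeds pool->max):

#include <ngx_config.h>
#include <ngx_core.h>

static ngx_int_t
pool_demo(ngx_log_t *log)
{
    ngx_pool_t  *pool;
    u_char      *small;
    void        *big;

    // create a pool whose first block is 1024 bytes
    pool = ngx_create_pool(1024, log);
    if (pool == NULL) {
        return NGX_ERROR;
    }

    small = ngx_palloc(pool, 128);         // carved out of the pool block
    big = ngx_palloc(pool, 64 * 1024);     // exceeds pool->max: malloc()ed and tracked on the large list

    if (small == NULL || big == NULL) {
        ngx_destroy_pool(pool);
        return NGX_ERROR;
    }

    /* ... use the buffers ... */

    ngx_destroy_pool(pool);                // releases the pool blocks and all large allocations
    return NGX_OK;
}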
os/unix/ngx_alloc.{c,h} wraps the underlying allocation calls: ngx_alloc() is malloc() plus error logging, ngx_calloc() additionally zeroes the returned memory, and ngx_memalign() maps to posix_memalign()/memalign() where available (falling back to ngx_alloc() otherwise).
core/ngx_palloc.{c,h} contains the bulk of the memory pool implementation. First, the relevant data structures:
// pool data block
typedef struct {
    u_char               *last;    // start of the unused space in this block
    u_char               *end;     // end of this block
    ngx_pool_t           *next;    // next block in the pool
    ngx_uint_t            failed;  // number of failed allocation attempts
} ngx_pool_data_t;

typedef struct ngx_pool_large_s  ngx_pool_large_t;

struct ngx_pool_large_s {
    ngx_pool_large_t     *next;
    void                 *alloc;   // large block, allocated with malloc
};

struct ngx_pool_s {
    ngx_pool_data_t       d;
    size_t                max;     // largest size served from the pool itself (at most ngx_pagesize - 1)
    ngx_pool_t           *current; // block to start searching from
    ngx_chain_t          *chain;
    ngx_pool_large_t     *large;   // list of large allocations
    ngx_pool_cleanup_t   *cleanup;
    ngx_log_t            *log;
};
A diagram of these data structures may make the layout clearer. Note that the current pointer does not necessarily point to the pool block that contains it; in fact, where current points changes over time.
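The original diagram is not reproduced here; a rough sketch of a pool that has grown to two blocks (the second one is created by ngx_palloc_block, shown later) looks roughly like this:

    pool (first block)                     second block
    +------------------------+             +------------------------+
    | ngx_pool_data_t  d     |-- d.next -->| ngx_pool_data_t  d     |-- d.next --> NULL
    | max, current, chain,   |             +------------------------+
    | large, cleanup, log    |             | allocations ...        |
    +------------------------+             | free space   <- d.last |
    | allocations ...        |             +------------------------+ <- d.end
    | free space   <- d.last |
    +------------------------+ <- d.end

Only the first block uses the fields after d (max, current, large, ...); in later blocks everything past the ngx_pool_data_t header is handed out as allocation space.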
Now let's walk through the code in ngx_palloc.c, starting with ngx_create_pool():
ngx_pool_t  *p;

// allocate NGX_POOL_ALIGNMENT-aligned memory
p = ngx_memalign(NGX_POOL_ALIGNMENT, size, log);

// the space actually available for allocation is size - sizeof(ngx_pool_t)
p->d.last = (u_char *) p + sizeof(ngx_pool_t);
p->d.end = (u_char *) p + size;

size = size - sizeof(ngx_pool_t);
p->max = (size < NGX_MAX_ALLOC_FROM_POOL) ? size : NGX_MAX_ALLOC_FROM_POOL;
...
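As a worked example (assuming a 64-bit build where sizeof(ngx_pool_t) is 80 bytes and ngx_pagesize is 4096, so NGX_MAX_ALLOC_FROM_POOL is 4095):

    pool = ngx_create_pool(1024, log);

    // pool->d.last == (u_char *) pool + 80     -- first usable byte
    // pool->d.end  == (u_char *) pool + 1024   -- one byte past the block
    // pool->max    == 944                      -- min(1024 - 80, 4095)

Any ngx_palloc() request of at most 944 bytes is then served from the pool blocks themselves; anything larger goes through ngx_palloc_large().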
The cleanup callback type and the ngx_pool_cleanup_t structure are defined in ngx_palloc.h:

typedef void (*ngx_pool_cleanup_pt)(void *data);

typedef struct ngx_pool_cleanup_s  ngx_pool_cleanup_t;

struct ngx_pool_cleanup_s {
    ngx_pool_cleanup_pt   handler;
    void                 *data;
    ngx_pool_cleanup_t   *next;
};
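The cleanup functions themselves are not analyzed in this post, but a brief sketch of how a handler is registered may help; my_cleanup and my_ctx_t are hypothetical names:

    static void
    my_cleanup(void *data)
    {
        my_ctx_t  *ctx = data;

        // release whatever ctx owns, e.g. close a descriptor
    }

    ...

    ngx_pool_cleanup_t  *cln;

    cln = ngx_pool_cleanup_add(pool, sizeof(my_ctx_t));
    if (cln == NULL) {
        return NGX_ERROR;
    }

    cln->handler = my_cleanup;   // called when the pool is destroyed
    // cln->data already points to sizeof(my_ctx_t) bytes taken from the pool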
Next comes ngx_palloc(), the main allocation entry point:

void *
ngx_palloc(ngx_pool_t *pool, size_t size)
{
    u_char      *m;
    ngx_pool_t  *p;

    // the requested size fits within the pool's max
    if (size <= pool->max) {

        p = pool->current;

        do {
            // align d.last to NGX_ALIGNMENT
            m = ngx_align_ptr(p->d.last, NGX_ALIGNMENT);

            // if this block has enough free space, return it directly
            if ((size_t) (p->d.end - m) >= size) {
                p->d.last = m + size;

                return m;
            }

            p = p->d.next;

        } while (p);

        // no block had room: allocate a new pool block
        return ngx_palloc_block(pool, size);
    }

    // large allocation
    return ngx_palloc_large(pool, size);
}
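The ngx_align_ptr macro used above comes from src/core/ngx_config.h and rounds a pointer up to the next multiple of the alignment; it looks roughly like this, with NGX_ALIGNMENT defaulting to sizeof(unsigned long):

    #define ngx_align_ptr(p, a)                                               \
        (u_char *) (((uintptr_t) (p) + ((uintptr_t) a - 1)) & ~((uintptr_t) a - 1))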
The matching loop in ngx_pnalloc() is almost identical; the only difference from ngx_palloc() is that the returned pointer is not aligned:

    do {
        // the only difference from ngx_palloc: no alignment
        m = p->d.last;

        if ((size_t) (p->d.end - m) >= size) {
            p->d.last = m + size;

            return m;
        }

        p = p->d.next;

    } while (p);
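ngx_pnalloc() is the natural choice for byte strings, where word alignment buys nothing; for example, ngx_pstrdup() in src/core/ngx_string.c copies a string into a pool roughly like this:

    u_char *
    ngx_pstrdup(ngx_pool_t *pool, ngx_str_t *src)
    {
        u_char  *dst;

        dst = ngx_pnalloc(pool, src->len);
        if (dst == NULL) {
            return NULL;
        }

        ngx_memcpy(dst, src->data, src->len);

        return dst;
    }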
When no existing block has room, ngx_palloc_block() allocates a new one:

static void *
ngx_palloc_block(ngx_pool_t *pool, size_t size)
{
    u_char      *m;
    size_t       psize;
    ngx_pool_t  *p, *new, *current;

    // size of the existing pool blocks
    psize = (size_t) (pool->d.end - (u_char *) pool);

    // allocate an aligned block of the same size
    m = ngx_memalign(NGX_POOL_ALIGNMENT, psize, pool->log);
    if (m == NULL) {
        return NULL;
    }

    // initialization, similar to ngx_create_pool
    new = (ngx_pool_t *) m;

    new->d.end = m + psize;
    new->d.next = NULL;
    new->d.failed = 0;

    // only the ngx_pool_data_t header is used in this block;
    // the requested size is carved out of it immediately
    m += sizeof(ngx_pool_data_t);
    m = ngx_align_ptr(m, NGX_ALIGNMENT);
    new->d.last = m + size;

    // append the new block to the end of the list and, if older blocks
    // have failed too often, advance the current pointer
    current = pool->current;

    for (p = current; p->d.next; p = p->d.next) {
        if (p->d.failed++ > 4) {
            current = p->d.next;
        }
    }

    p->d.next = new;

    pool->current = current ? current : new;

    return m;
}
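The effect of the failed counter is that once a block's counter exceeds 4, pool->current moves past it, so that block is no longer scanned for future small allocations. A small illustrative helper (not part of nginx; assumes the nginx headers are available) that walks the block list:

    // count how many blocks are chained to a pool
    static ngx_uint_t
    pool_block_count(ngx_pool_t *pool)
    {
        ngx_uint_t   n;
        ngx_pool_t  *p;

        n = 0;
        for (p = pool; p; p = p->d.next) {
            n++;
        }

        return n;
    }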
ngx_palloc_large() handles requests that exceed pool->max:

static void *
ngx_palloc_large(ngx_pool_t *pool, size_t size)
{
    void              *p;
    ngx_uint_t         n;
    ngx_pool_large_t  *large;

    // allocate the space with malloc (via ngx_alloc)
    p = ngx_alloc(size, pool->log);
    if (p == NULL) {
        return NULL;
    }

    n = 0;

    // try to reuse an empty slot in the large list;
    // only the first few entries are checked, to keep the search cheap
    for (large = pool->large; large; large = large->next) {
        if (large->alloc == NULL) {
            large->alloc = p;
            return p;
        }

        if (n++ > 3) {
            break;
        }
    }

    // otherwise allocate a ngx_pool_large_t node from the pool itself, point
    // alloc at the large block and push the node onto the head of the list
    large = ngx_palloc(pool, sizeof(ngx_pool_large_t));
    if (large == NULL) {
        ngx_free(p);
        return NULL;
    }

    large->alloc = p;
    large->next = pool->large;
    pool->large = large;

    return p;
}
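One detail worth noting: ngx_pfree() (shown below) frees the malloc()ed block but leaves the ngx_pool_large_t node in the list with alloc set to NULL, and it is exactly such empty nodes that the loop at the top of ngx_palloc_large() tries to reuse. An illustrative sequence (sizes chosen on the assumption that they exceed pool->max):

    p1 = ngx_palloc(pool, 8192);      // goes through ngx_palloc_large(); a node is linked into pool->large
    ngx_pfree(pool, p1);              // frees the 8192 bytes but keeps the node, with alloc == NULL
    p2 = ngx_palloc(pool, 16384);     // the empty node is found by the reuse loop and filled with p2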
ngx_pmemalign() allocates memory with a caller-specified alignment:

void *
ngx_pmemalign(ngx_pool_t *pool, size_t size, size_t alignment)
{
    void              *p;
    ngx_pool_large_t  *large;

    // allocate a block with the requested alignment
    p = ngx_memalign(alignment, size, pool->log);
    if (p == NULL) {
        return NULL;
    }

    // the block is always tracked on the large list, whatever its size
    large = ngx_palloc(pool, sizeof(ngx_pool_large_t));
    if (large == NULL) {
        ngx_free(p);
        return NULL;
    }

    large->alloc = p;
    large->next = pool->large;
    pool->large = large;

    return p;
}
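A brief usage sketch (the page-size alignment here is just an example): ngx_pmemalign() is the way to get memory with stricter alignment than NGX_ALIGNMENT from a pool, for instance a page-aligned I/O buffer:

    u_char  *buf;

    buf = ngx_pmemalign(pool, ngx_pagesize, ngx_pagesize);   // page-aligned buffer
    if (buf == NULL) {
        return NGX_ERROR;
    }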
ngx_pfree() releases a single large allocation before the pool is destroyed:

ngx_int_t
ngx_pfree(ngx_pool_t *pool, void *p)
{
    ngx_pool_large_t  *l;

    // walk the large list looking for the block
    for (l = pool->large; l; l = l->next) {
        if (p == l->alloc) {
            ngx_log_debug1(NGX_LOG_DEBUG_ALLOC, pool->log, 0,
                           "free: %p", l->alloc);
            ngx_free(l->alloc);
            l->alloc = NULL;

            return NGX_OK;
        }
    }

    return NGX_DECLINED;
}
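Note that ngx_pfree() only works for large allocations; small, in-pool allocations cannot be released individually and are reclaimed only when the whole pool is reset or destroyed. A short sketch (assuming 64 * 1024 exceeds pool->max):

    void  *small, *big;

    small = ngx_palloc(pool, 64);           // served from a pool block
    big = ngx_palloc(pool, 64 * 1024);      // served by ngx_palloc_large()

    ngx_pfree(pool, big);     // NGX_OK: the malloc()ed block is released immediately
    ngx_pfree(pool, small);   // NGX_DECLINED: not on the large list, nothing happens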
The ngx_pool_cleanup_add, ngx_pool_run_cleanup_file, ngx_pool_cleanup_file and ngx_pool_delete_file functions are not analyzed here for now. That covers the core of the nginx memory pool implementation; the code is from nginx 1.7.0, compiled on CentOS 6.0.
Original article: http://blog.csdn.net/cool_sti/article/details/45504149