I should have written this part a long time ago... I've been in a rather negative mood lately, so it kept getting pushed back...
The previous post introduced two very important functions in Python's memory allocator: PyObject_Malloc and PyObject_Free.
Before digging into these two functions in detail, we need to look at a few other things first.
/* usedpools turns each size class into the head of a doubly linked list of
 * pools, using just two pointer slots per class -- quite a trick.
 * PTA(x) takes the address of usedpools[2*x] and subtracts the size of two
 * pointers; given the pool_header layout below, adding two pointers back
 * lands exactly on nextpool, and adding three pointers lands on prevpool. */
#define PTA(x)  ((poolp )((uchar *)&(usedpools[2*(x)]) - 2*sizeof(block *)))
#define PT(x)   PTA(x), PTA(x)

static poolp usedpools[2 * ((NB_SMALL_SIZE_CLASSES + 7) / 8) * 8] = {
    PT(0), PT(1), PT(2), PT(3), PT(4), PT(5), PT(6), PT(7)
#if NB_SMALL_SIZE_CLASSES > 8
    , PT(8), PT(9), PT(10), PT(11), PT(12), PT(13), PT(14), PT(15)
#if NB_SMALL_SIZE_CLASSES > 16
    , PT(16), PT(17), PT(18), PT(19), PT(20), PT(21), PT(22), PT(23)
#if NB_SMALL_SIZE_CLASSES > 24
    , PT(24), PT(25), PT(26), PT(27), PT(28), PT(29), PT(30), PT(31)
#if NB_SMALL_SIZE_CLASSES > 32
    , PT(32), PT(33), PT(34), PT(35), PT(36), PT(37), PT(38), PT(39)
#if NB_SMALL_SIZE_CLASSES > 40
    , PT(40), PT(41), PT(42), PT(43), PT(44), PT(45), PT(46), PT(47)
#if NB_SMALL_SIZE_CLASSES > 48
    , PT(48), PT(49), PT(50), PT(51), PT(52), PT(53), PT(54), PT(55)
#if NB_SMALL_SIZE_CLASSES > 56
    , PT(56), PT(57), PT(58), PT(59), PT(60), PT(61), PT(62), PT(63)
#if NB_SMALL_SIZE_CLASSES > 64
#error "NB_SMALL_SIZE_CLASSES should be less than 64"
#endif /* NB_SMALL_SIZE_CLASSES > 64 */
#endif /* NB_SMALL_SIZE_CLASSES > 56 */
#endif /* NB_SMALL_SIZE_CLASSES > 48 */
#endif /* NB_SMALL_SIZE_CLASSES > 40 */
#endif /* NB_SMALL_SIZE_CLASSES > 32 */
#endif /* NB_SMALL_SIZE_CLASSES > 24 */
#endif /* NB_SMALL_SIZE_CLASSES > 16 */
#endif /* NB_SMALL_SIZE_CLASSES > 8 */
};
So, pools are allocated inside an arena, but the arena is not what manages them. What manages them, then? The usedpools array defined in the code above.
Moreover, the pools of each size class form a doubly linked list, and this array stores the head of each of those lists.
How can that possibly work with a flat array of pointers? The implementation really is tricky; it took me a long time to understand, and I had never seen anything like it before.
From the PTA and PT macros above, you can see that PT(0) actually defines two pointer slots, and the value stored in each is the address of the first of those two slots minus the size of two pointers. Now look at the definition of a pool:
struct pool_header {
    union { block *_padding;
            uint count; } ref;      /* number of allocated blocks in this pool            */
    block *freeblock;               /* pool's free list head: points to the next free     */
                                    /* block; the free blocks chain into a discrete,      */
                                    /* quite interesting, singly linked list              */
    struct pool_header *nextpool;   /* next pool of this size class; these two pointers   */
    struct pool_header *prevpool;   /* previous pool "" -- together they form the doubly  */
                                    /* linked list of pools                               */
    uint arenaindex;                /* index into arenas of base adr                      */
    uint szidx;                     /* block size class index: 8 bytes, 16 bytes, ...     */
    uint nextoffset;                /* byte offset of the next virgin (never used) block  */
    uint maxnextoffset;             /* largest valid nextoffset, i.e. the offset of the   */
                                    /* last block from the start of the pool              */
};

typedef struct pool_header *poolp;  /* pool header */
Check for yourself: add the size of two pointers to that address and you land exactly on nextpool; add three pointers and you land on prevpool. The comments above explain most of it, and this really is a trick you have to work through yourself to appreciate.
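If the pointer arithmetic still feels like magic, here is a minimal, self-contained sketch. The struct below is my own simplified pool_header with the same leading layout (four pointer-sized fields), not the real obmalloc.c code; it builds the same kind of array and then checks that each fake header's nextpool and prevpool point back at itself, i.e. every size class starts out as an empty circular list:

#include <stdio.h>

typedef unsigned char block;

/* Simplified header: the first two pointer-sized fields stand in for the
 * ref union and freeblock, so nextpool sits at offset 2*sizeof(block *). */
struct pool_header {
    block *ref;
    block *freeblock;
    struct pool_header *nextpool;
    struct pool_header *prevpool;
};
typedef struct pool_header *poolp;

#define PTA(x)  ((poolp)((unsigned char *)&(usedpools[2*(x)]) - 2*sizeof(block *)))
#define PT(x)   PTA(x), PTA(x)

/* 8 size classes for the sketch, so 16 pointer slots. */
static poolp usedpools[2 * 8] = {
    PT(0), PT(1), PT(2), PT(3), PT(4), PT(5), PT(6), PT(7)
};

int main(void)
{
    for (int i = 0; i < 8; i++) {
        /* usedpools[i + i] is PTA(i): a "header" whose nextpool field is
         * exactly usedpools[2*i] and whose prevpool field is usedpools[2*i+1]. */
        poolp fake = usedpools[i + i];
        printf("class %d: nextpool==prevpool==self? %s\n",
               i, (fake->nextpool == fake && fake->prevpool == fake) ? "yes" : "no");
    }
    return 0;
}

Every line should print "yes", which is exactly the state PyObject_Malloc tests with pool != pool->nextpool to decide whether the list for a size class is empty.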
Now let's look at the definition of PyObject_Malloc:
void *
PyObject_Malloc(size_t nbytes)
{
    block *bp;          /* will point at the memory we hand out */
    poolp pool;
    poolp next;
    uint size;

#ifdef WITH_VALGRIND
    if (UNLIKELY(running_on_valgrind == -1))
        running_on_valgrind = RUNNING_ON_VALGRIND;
    if (UNLIKELY(running_on_valgrind))
        goto redirect;
#endif

    /*
     * Limit ourselves to PY_SSIZE_T_MAX bytes to prevent security holes.
     * Most python internals blindly use a signed Py_ssize_t to track
     * things without checking for overflows or negatives.
     * As size_t is unsigned, checking for nbytes < 0 is not required.
     */
    if (nbytes > PY_SSIZE_T_MAX)    /* can this really ever happen..? */
        return NULL;

    /*
     * This implicitly redirects malloc(0).
     */
    if ((nbytes - 1) < SMALL_REQUEST_THRESHOLD) {   /* small enough for the small-block allocator */
        LOCK();
        /*
         * Most frequent paths first
         */
        /* Compute the size class index, counting from 0: for example
         * 8 -> 0, 9 -> 1, 16 -> 1, 17 -> 2.  It decides which pool list
         * serves this request, since each pool only hands out blocks of
         * one fixed size. */
        size = (uint)(nbytes - 1) >> ALIGNMENT_SHIFT;   /* ALIGNMENT_SHIFT == 3 */
        pool = usedpools[size + size];  /* head of the pool list for this size class */
        if (pool != pool->nextpool) {   /* the list is non-empty: a usable pool exists */
            /*
             * There is a used pool for this size class.
             * Pick up the head block of its free list.
             */
            ++pool->ref.count;          /* one more block allocated from this pool */
            bp = pool->freeblock;       /* bp is the block we will return */
            assert(bp != NULL);
            /* The free block itself stores the address of the next free
             * block, so advancing the free list is just reading that value. */
            if ((pool->freeblock = *(block **)bp) != NULL) {
                /* There are more free blocks after this one; done. */
                UNLOCK();
                return (void *)bp;
            }
            /* We reached the tail of the freeblock list; try to extend it. */
            if (pool->nextoffset <= pool->maxnextoffset) {
                /* There is room for another block: point freeblock at the
                 * next untouched address inside the pool. */
                pool->freeblock = (block*)pool + pool->nextoffset;
                pool->nextoffset += INDEX2SIZE(size);   /* advance past it     */
                *(block **)(pool->freeblock) = NULL;    /* NULL marks the tail */
                UNLOCK();
                return (void *)bp;
            }
            /* Pool is full, unlink from used pools. */
            next = pool->nextpool;
            pool = pool->prevpool;
            next->prevpool = pool;
            pool->nextpool = next;
            UNLOCK();
            return (void *)bp;
        }

        /* There isn't a pool of the right size class immediately
         * available: use a free pool.
         * No existing pool of this class can serve the request, so we
         * have to set up a new one. */
        if (usable_arenas == NULL) {    /* no usable arena either: get one */
            /* No arena has a free pool: allocate a new arena. */
#ifdef WITH_MEMORY_LIMITS
            if (narenas_currently_allocated >= MAX_ARENAS) {
                UNLOCK();
                goto redirect;
            }
#endif
            usable_arenas = new_arena();
            if (usable_arenas == NULL) {
                UNLOCK();
                goto redirect;
            }
            usable_arenas->nextarena =
                usable_arenas->prevarena = NULL;
        }
        assert(usable_arenas->address != 0);

        /* Try to get a cached free pool from the arena. */
        pool = usable_arenas->freepools;
        if (pool != NULL) {
            /* Unlink from cached pools: advance the arena's freepools head. */
            usable_arenas->freepools = pool->nextpool;

            /* This arena already had the smallest nfreepools
             * value, so decreasing nfreepools doesn't change
             * that, and we don't need to rearrange the
             * usable_arenas list.  However, if the arena has
             * become wholly allocated, we need to remove its
             * arena_object from usable_arenas.
             */
            --usable_arenas->nfreepools;    /* one fewer pool available */
            if (usable_arenas->nfreepools == 0) {
                /* Wholly allocated: this arena has handed out all of its
                 * memory, so remove it from usable_arenas. */
                assert(usable_arenas->freepools == NULL);
                assert(usable_arenas->nextarena == NULL ||
                       usable_arenas->nextarena->prevarena ==
                       usable_arenas);

                usable_arenas = usable_arenas->nextarena;
                if (usable_arenas != NULL) {
                    usable_arenas->prevarena = NULL;
                    assert(usable_arenas->address != 0);
                }
            }
            else {
                /* nfreepools > 0:  it must be that freepools
                 * isn't NULL, or that we haven't yet carved
                 * off all the arena's pools for the first
                 * time.
                 */
                assert(usable_arenas->freepools != NULL ||
                       usable_arenas->pool_address <=
                       (block*)usable_arenas->address +
                           ARENA_SIZE - POOL_SIZE);
            }
        init_pool:
            /* Frontlink to used pools: initialize this pool and insert it
             * at the head of the doubly linked list for its size class. */
            next = usedpools[size + size]; /* == prev */
            pool->nextpool = next;
            pool->prevpool = next;
            next->nextpool = pool;
            next->prevpool = pool;
            pool->ref.count = 1;        /* this pool now has one allocated block */
            if (pool->szidx == size) {
                /* Luckily, this pool last contained blocks
                 * of the same size class, so its header
                 * and free list are already initialized:
                 * just keep allocating from it. */
                bp = pool->freeblock;
                pool->freeblock = *(block **)bp;    /* address of the next free block */
                UNLOCK();
                return (void *)bp;
            }
            /*
             * Initialize the pool header, set up the free list to
             * contain just the second block, and return the first
             * block.  We only get here for a pool that has never been
             * used for this size class before.
             */
            pool->szidx = size;
            size = INDEX2SIZE(size);    /* convert the index to a block size in bytes */
            bp = (block *)pool + POOL_OVERHEAD;     /* skip the pool header, which lives   */
                                                    /* at the start of the pool's memory   */
            pool->nextoffset = POOL_OVERHEAD + (size << 1);
            pool->maxnextoffset = POOL_SIZE - size; /* offset of the last usable block     */
            pool->freeblock = bp + size;            /* address of the next free block      */
            *(block **)(pool->freeblock) = NULL;    /* NULL: nothing follows it yet in the */
                                                    /* discrete free list                  */
            UNLOCK();
            return (void *)bp;
        }

        /* Carve off a new pool.
         * The arena has no cached free pool: a freshly created arena starts
         * with an empty freepools list, and a pool only goes onto that list
         * once it has been carved off and later becomes completely free again. */
        assert(usable_arenas->nfreepools > 0);
        assert(usable_arenas->freepools == NULL);
        pool = (poolp)usable_arenas->pool_address;  /* the next never-used pool in the arena */
        assert((block*)pool <= (block*)usable_arenas->address +
                               ARENA_SIZE - POOL_SIZE);
        pool->arenaindex = usable_arenas - arenas;  /* which arena this pool belongs to */
        assert(&arenas[pool->arenaindex] == usable_arenas);
        pool->szidx = DUMMY_SIZE_IDX;   /* marks the pool as never initialized, so its   */
                                        /* header gets set up on the first allocation    */
        usable_arenas->pool_address += POOL_SIZE;   /* advance to the next carvable pool */
        --usable_arenas->nfreepools;                /* one fewer pool available          */

        if (usable_arenas->nfreepools == 0) {
            /* No more pools can be carved from this arena, so make
             * usable_arenas point at the next one. */
            assert(usable_arenas->nextarena == NULL ||
                   usable_arenas->nextarena->prevarena ==
                   usable_arenas);
            /* Unlink the arena:  it is completely allocated. */
            usable_arenas = usable_arenas->nextarena;
            if (usable_arenas != NULL) {
                usable_arenas->prevarena = NULL;
                assert(usable_arenas->address != 0);
            }
        }

        goto init_pool;     /* initialize the freshly carved pool */
    }

    /* The small block allocator ends here. */

redirect:
    /* Redirect the original request to the underlying (libc) allocator.
     * We jump here on bigger requests, on error in the code above (as a
     * last chance to serve the request) or when the max memory limit
     * has been reached.
     */
    if (nbytes == 0)
        nbytes = 1;
    return (void *)malloc(nbytes);
}
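To make the size-class arithmetic at the top of the function concrete, here is a small stand-alone sketch. It assumes the usual 8-byte alignment, so ALIGNMENT_SHIFT is 3 and INDEX2SIZE(i) is (i + 1) << 3; it just prints which class, which block size and which usedpools slot a few request sizes map to:

#include <stdio.h>

/* Mirrors the size-class arithmetic from obmalloc.c for an 8-byte ALIGNMENT build. */
#define ALIGNMENT_SHIFT 3
#define INDEX2SIZE(I)   (((unsigned)(I) + 1) << ALIGNMENT_SHIFT)

int main(void)
{
    unsigned requests[] = {1, 7, 8, 9, 16, 17, 100, 256};
    for (unsigned i = 0; i < sizeof requests / sizeof requests[0]; i++) {
        unsigned nbytes = requests[i];
        unsigned size = (nbytes - 1) >> ALIGNMENT_SHIFT;    /* size class index */
        printf("request %3u bytes -> class %2u -> block size %3u -> usedpools[%u]\n",
               nbytes, size, INDEX2SIZE(size), size + size);
    }
    return 0;
}

So a 9-byte request is rounded up to a 16-byte block in class 1, a 100-byte request to a 104-byte block in class 12, and so on: every pool serves exactly one of these classes.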
The comments in the code above should make most of it clear. One particularly interesting detail is that a pool uses its freeblock pointer to maintain a discrete singly linked list of free blocks, and a released block gets pushed onto the head of that list. How the list is actually built is easiest to see in PyObject_Free.
So next, let's look at its implementation:
void
PyObject_Free(void *p)
{
    poolp pool;         /* the pool that the block being freed belongs to */
    block *lastfree;
    poolp next, prev;
    uint size;
#ifndef Py_USING_MEMORY_DEBUGGER
    uint arenaindex_temp;
#endif

    if (p == NULL)      /* free(NULL) has no effect */
        return;

#ifdef WITH_VALGRIND
    if (UNLIKELY(running_on_valgrind > 0))
        goto redirect;
#endif

    /* POOL_ADDR finds the pool this block belongs to: the arena guarantees
     * that every pool starts at an address that is a multiple of 4 KB, so
     * rounding the block address down to the nearest 4 KB boundary gives
     * the pool header. */
    pool = POOL_ADDR(p);
    if (Py_ADDRESS_IN_RANGE(p, pool)) {
        /* We allocated this address. */
        LOCK();
        /* Link p to the start of the pool's freeblock list.  Since
         * the pool had at least the p block outstanding, the pool
         * wasn't empty (so it's already in a usedpools[] list, or
         * was full and is in no list -- it's not in the freeblocks
         * list in any case).
         */
        assert(pool->ref.count > 0);            /* else it was empty */
        /* This is the key step that builds the freeblock list: the block
         * being freed stores the current head of the list in its own
         * memory, and then becomes the new head itself.  In other words
         * the freed block is pushed onto the front of the list of
         * available blocks, forming the discrete linked list. */
        *(block **)p = lastfree = pool->freeblock;
        pool->freeblock = (block *)p;
        if (lastfree) {
            /* freeblock wasn't NULL, so the pool already had free space
             * before this call; what remains is mainly handling the case
             * where the pool has now become completely empty. */
            struct arena_object* ao;
            uint nf;  /* ao->nfreepools */

            /* freeblock wasn't NULL, so the pool wasn't full,
             * and the pool is in a usedpools[] list.
             */
            if (--pool->ref.count != 0) {
                /* pool isn't empty: it still has blocks handed out, so
                 * leave it in usedpools. */
                UNLOCK();
                return;
            }
            /* Pool is now empty:  unlink from usedpools, and
             * link to the front of freepools.  This ensures that
             * previously freed pools will be allocated later
             * (being not referenced, they are perhaps paged out).
             */
            next = pool->nextpool;
            prev = pool->prevpool;
            next->prevpool = prev;
            prev->nextpool = next;

            /* Link the pool to freepools.  This is a singly-linked
             * list, and pool->prevpool isn't used there.
             * The pool's memory goes back to its arena by pushing the pool
             * onto the front of the arena's list of free pools. */
            ao = &arenas[pool->arenaindex];
            pool->nextpool = ao->freepools;
            ao->freepools = pool;
            nf = ++ao->nfreepools;      /* one more pool available in this arena */

            /* All the rest is arena management.  We just freed
             * a pool, and there are 4 cases for arena mgmt:
             * 1. If all the pools are free, return the arena to
             *    the system free().
             * 2. If this is the only free pool in the arena,
             *    add the arena back to the `usable_arenas` list.
             * 3. If the "next" arena has a smaller count of free
             *    pools, we have to "slide this arena right" to
             *    restore that usable_arenas is sorted in order of
             *    nfreepools.
             * 4. Else there's nothing more to do.
             */
            if (nf == ao->ntotalpools) {
                /* Case 1.  Every pool in this arena is now free: unlink ao
                 * from usable_arenas and give its 256 KB back to the system. */
                assert(ao->prevarena == NULL ||
                       ao->prevarena->address != 0);
                assert(ao->nextarena == NULL ||
                       ao->nextarena->address != 0);

                /* Fix the pointer in the prevarena, or the
                 * usable_arenas pointer.
                 */
                if (ao->prevarena == NULL) {
                    usable_arenas = ao->nextarena;
                    assert(usable_arenas == NULL ||
                           usable_arenas->address != 0);
                }
                else {
                    assert(ao->prevarena->nextarena == ao);
                    ao->prevarena->nextarena =
                        ao->nextarena;
                }
                /* Fix the pointer in the nextarena. */
                if (ao->nextarena != NULL) {
                    assert(ao->nextarena->prevarena == ao);
                    ao->nextarena->prevarena =
                        ao->prevarena;
                }
                /* Record that this arena_object slot is
                 * available to be reused: push it onto unused_arena_objects. */
                ao->nextarena = unused_arena_objects;
                unused_arena_objects = ao;

                /* Free the entire arena: the 256 KB that was allocated for it. */
#ifdef ARENAS_USE_MMAP
                munmap((void *)ao->address, ARENA_SIZE);
#else
                free((void *)ao->address);
#endif
                ao->address = 0;        /* mark unassociated */
                --narenas_currently_allocated;

                UNLOCK();
                return;
            }
            if (nf == 1) {
                /* Case 2.  Put ao at the head of
                 * usable_arenas.  Note that because
                 * ao->nfreepools was 0 before, ao isn't
                 * currently on the usable_arenas list.
                 */
                ao->nextarena = usable_arenas;
                ao->prevarena = NULL;
                if (usable_arenas)
                    usable_arenas->prevarena = ao;
                usable_arenas = ao;
                assert(usable_arenas->address != 0);

                UNLOCK();
                return;
            }
            /* If this arena is now out of order, we need to keep
             * the list sorted.  The list is kept sorted so that
             * the "most full" arenas are used first, which allows
             * the nearly empty arenas to be completely freed.  In
             * a few un-scientific tests, it seems like this
             * approach allowed a lot more memory to be freed.
             */
            if (ao->nextarena == NULL ||
                         nf <= ao->nextarena->nfreepools) {
                /* Case 4.  There is no next arena, or the next arena has at
                 * least as many free pools, so ao stays where it is. */
                UNLOCK();
                return;
            }
            /* Case 3:  We have to move the arena towards the end
             * of the list, because it has more free pools than
             * the arena to its right.
             * First unlink ao from usable_arenas.
             */
            if (ao->prevarena != NULL) {
                /* ao isn't at the head of the list */
                assert(ao->prevarena->nextarena == ao);
                ao->prevarena->nextarena = ao->nextarena;
            }
            else {
                /* ao is at the head of the list */
                assert(usable_arenas == ao);
                usable_arenas = ao->nextarena;
            }
            ao->nextarena->prevarena = ao->prevarena;

            /* Locate the new insertion point by iterating over
             * the list, using our nextarena pointer.  The list stays sorted
             * by nfreepools in ascending order. */
            while (ao->nextarena != NULL &&
                            nf > ao->nextarena->nfreepools) {
                ao->prevarena = ao->nextarena;
                ao->nextarena = ao->nextarena->nextarena;
            }

            /* Insert ao at this point. */
            assert(ao->nextarena == NULL ||
                ao->prevarena == ao->nextarena->prevarena);
            assert(ao->prevarena->nextarena == ao->nextarena);

            ao->prevarena->nextarena = ao;
            if (ao->nextarena != NULL)
                ao->nextarena->prevarena = ao;

            /* Verify that the swaps worked. */
            assert(ao->nextarena == NULL ||
                      nf <= ao->nextarena->nfreepools);
            assert(ao->prevarena == NULL ||
                      nf > ao->prevarena->nfreepools);
            assert(ao->nextarena == NULL ||
                    ao->nextarena->prevarena == ao);
            assert((usable_arenas == ao &&
                    ao->prevarena == NULL) ||
                   ao->prevarena->nextarena == ao);

            UNLOCK();
            return;
        }

        /* Pool was full, so doesn't currently live in any list:
         * link it to the front of the appropriate usedpools[] list.
         * This mimics LRU pool usage for new allocations and
         * targets optimal filling when several pools contain
         * blocks of the same size class.
         * Freeing this block has just made room in a previously full pool,
         * so the pool goes back onto the usedpools list for its size class. */
        --pool->ref.count;              /* one fewer allocated block */
        assert(pool->ref.count > 0);    /* else the pool is empty */
        size = pool->szidx;
        next = usedpools[size + size];  /* head of the list for this size class */
        prev = next->prevpool;
        /* insert pool before next:   prev <-> pool <-> next */
        pool->nextpool = next;
        pool->prevpool = prev;
        next->prevpool = pool;
        prev->nextpool = pool;
        UNLOCK();
        return;
    }

#ifdef WITH_VALGRIND
redirect:
#endif
    /* We didn't allocate this address. */
    free(p);
}
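The first step of the free path, recovering the pool from a raw block pointer, relies only on the fact that pools start at 4 KB-aligned addresses inside the arena. Here is a quick illustrative sketch of that rounding; the addresses are made up, and pool_addr just mimics what the POOL_ADDR macro does:

#include <stdio.h>
#include <stdint.h>

#define POOL_SIZE       4096u
#define POOL_SIZE_MASK  (POOL_SIZE - 1)

/* Round a block address down to the start of its (4 KB-aligned) pool. */
static void *pool_addr(void *p)
{
    return (void *)((uintptr_t)p & ~(uintptr_t)POOL_SIZE_MASK);
}

int main(void)
{
    /* Hypothetical block addresses inside a pool that starts at 0x7f2000. */
    uintptr_t examples[] = {0x7f2030, 0x7f2fff, 0x7f2000};
    for (unsigned i = 0; i < 3; i++)
        printf("block %#lx -> pool %#lx\n",
               (unsigned long)examples[i],
               (unsigned long)(uintptr_t)pool_addr((void *)examples[i]));
    return 0;
}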
The code itself is fairly easy to follow; most of the extra bulk is bookkeeping: checking whether the pool has become empty, whether the whole arena has become empty, and re-ordering the usable_arenas list afterwards.
As for the freeblock singly linked list mentioned earlier, reading the free path carefully makes it clear: the freed block's own memory is used to store the old head of the list, and the block then becomes the new head. The toy allocator below isolates just that idea.
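This is a purely illustrative sketch of my own, not the obmalloc.c code: a fixed-size-block allocator where, exactly as in a pool, the "next free block" pointer lives in the first bytes of each free block.

#include <stdio.h>

typedef unsigned char block;

#define BLOCK_SIZE 16
#define NBLOCKS    4

/* Pretend this buffer is a pool; align it so a pointer can be stored
 * at the start of each block. */
static _Alignas(block *) block storage[BLOCK_SIZE * NBLOCKS];
static block *freeblock;                    /* head of the free list */

static void *pool_alloc(void)
{
    block *bp = freeblock;
    if (bp == NULL)
        return NULL;
    /* The first bytes of a free block hold the address of the next free
     * block (or NULL), so popping the head is a single pointer read. */
    freeblock = *(block **)bp;
    return bp;
}

static void pool_free(void *p)
{
    /* Push the freed block on the front: write the old head into the
     * block's own memory, then make this block the new head. */
    *(block **)p = freeblock;
    freeblock = (block *)p;
}

int main(void)
{
    /* Start with every block free, chained through their own bodies. */
    freeblock = NULL;
    for (int i = NBLOCKS - 1; i >= 0; i--)
        pool_free(storage + i * BLOCK_SIZE);

    void *a = pool_alloc();
    void *b = pool_alloc();
    printf("a=%p b=%p\n", a, b);

    pool_free(a);               /* a goes back to the head of the list */
    printf("next alloc reuses a? %s\n", pool_alloc() == a ? "yes" : "no");
    return 0;
}

The nice property is that the bookkeeping costs no extra memory: a block only needs to carry a pointer while it is free, and at that moment its bytes are unused anyway.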
With that, we now have a rough overall picture of how Python allocates memory at this level.
Original post: http://blog.csdn.net/fjslovejhl/article/details/45954923