PoolChunk is the part of Netty's memory pool that manages larger blocks of memory (each PoolChunk allocates 16MB by default). When an allocation request exceeds 8KB, it is served by PoolChunk.
Data Structure
PoolChunk organizes its 16MB of memory as a balanced complete binary tree of 12 levels (numbered from depth 0). A node at depth d manages chunkSize / 2^d bytes: the root covers the whole 16MB, and each of the 2048 leaves at depth 11 covers one 8KB page.
memoryMap and depthMap are both initialized to:

[0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 4, ..., 11, 11] (4096 entries)

That is, element 0 is 0 (it is unused; node ids start at 1), and from index 1 onward each element holds the tree depth of the node stored at that index. The difference between the two arrays is that depthMap never changes after initialization, while memoryMap is updated as memory is allocated. A node's memoryMap value tells how large a block it can still hand out. For example, a value of 10 means a 16KB block is available (the size of a depth-10 node); if one of that node's two 8KB children has been allocated, its value becomes 11 (only one child, i.e. 8KB, remains); if the other child is allocated as well, the value becomes 12 (greater than the maximum depth, meaning nothing is left under this node).
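For reference, the two arrays are filled roughly like this in PoolChunk's constructor (a simplified sketch showing only the initialization loop; the field names match those listed below):

int maxOrder = 11;
int maxSubpageAllocs = 1 << maxOrder;               // 2048 leaves
byte[] memoryMap = new byte[maxSubpageAllocs << 1]; // 4096 slots, index 0 unused
byte[] depthMap = new byte[memoryMap.length];
int memoryMapIndex = 1;
for (int d = 0; d <= maxOrder; d++) {               // one pass per tree depth
    int depth = 1 << d;                             // number of nodes at this depth
    for (int p = 0; p < depth; p++) {
        memoryMap[memoryMapIndex] = (byte) d;       // initial value = the node's depth
        depthMap[memoryMapIndex] = (byte) d;        // fixed forever after this
        memoryMapIndex++;
    }
}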
Class Fields
PoolChunk has quite a few fields; here is a quick overview:
final PoolArena<T> arena;
final T memory; // the underlying memory block
final boolean unpooled;
final int offset;
private final byte[] memoryMap; // the allocation binary tree
private final byte[] depthMap; // the depth of each node; never changes after init
private final PoolSubpage<T>[] subpages; // subpage array
// mask used to test whether a requested size is >= pageSize
private final int subpageOverflowMask;
// page size, 8KB by default
private final int pageSize;
// shift helper, 13 by default, since 1 << 13 = 8192
private final int pageShifts;
// maximum depth of the binary tree, 11 by default (depths start at 0)
private final int maxOrder;
private final int chunkSize; // chunk size, 16MB by default
private final int log2ChunkSize; // log2(16MB) = 24
private final int maxSubpageAllocs; // max number of subpage (leaf) nodes, 2048 by default
// value marking a node as unusable (fully allocated); maxOrder + 1 = 12 by default
private final byte unusable;
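Most of these are self-explanatory, but subpageOverflowMask deserves a note: it is computed as ~(pageSize - 1), which turns the test "is this request at least one page?" into a single bitwise AND. A small sketch of the idea:

int pageSize = 8192;                       // 0x2000
int subpageOverflowMask = ~(pageSize - 1); // 0xFFFFE000: every bit at or above the page bit
// any capacity >= pageSize has at least one bit inside the mask:
System.out.println((4096 & subpageOverflowMask) != 0);  // false -> subpage path
System.out.println((8192 & subpageOverflowMask) != 0);  // true  -> allocateRun path
System.out.println((32768 & subpageOverflowMask) != 0); // true  -> allocateRun path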
Now let's walk through PoolChunk's two main operations: allocating memory and freeing memory.
The allocation method allocate:
boolean allocate(PooledByteBuf<T> buf, int reqCapacity, int normCapacity, PoolThreadCache threadCache) {
    final long handle;
    // if the normalized size is >= pageSize, allocate a run of pages via allocateRun;
    // otherwise allocate from a subpage
    if ((normCapacity & subpageOverflowMask) != 0) { // >= pageSize
        handle = allocateRun(normCapacity);
    } else {
        // see the later PoolSubpage analysis
        handle = allocateSubpage(normCapacity);
    }
    // handle < 0 means this chunk could not satisfy the request; return false so the
    // caller falls back to allocating memory directly instead of from this chunk
    if (handle < 0) {
        return false;
    }
    ByteBuffer nioBuffer = cachedNioBuffers != null ? cachedNioBuffers.pollLast() : null;
    // wrap the allocated handle into the buf and return
    initBuf(buf, nioBuffer, handle, reqCapacity, threadCache);
    return true;
}
Next, the allocateRun method:
private long allocateRun(int normCapacity) {
    // compute the tree depth whose nodes are exactly normCapacity bytes;
    // pageShifts = 13, i.e. log2(8KB)
    int d = maxOrder - (log2(normCapacity) - pageShifts);
    int id = allocateNode(d);
    if (id < 0) {
        return id;
    }
    freeBytes -= runLength(id);
    return id;
}
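A worked example: for normCapacity = 32KB, log2(32768) = 15, so d = 11 - (15 - 13) = 9. That matches the node sizes: a node at depth 9 covers 16MB / 2^9 = 32KB.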
// find a free node at depth d and return its index in memoryMap
private int allocateNode(int d) {
    int id = 1;
    int initial = - (1 << d); // has last d bits = 0 and rest all = 1
    // read memoryMap[id] for the root
    byte val = value(id);
    // The root's value starts at 0 and grows as memory is allocated. If val > d, not
    // even the root has a free block of the required size: e.g. val = 10 means only
    // 16KB is left, so a request needing d = 9 (32KB) fails here.
    if (val > d) { // unusable
        return -1;
    }
    // While the current node is shallower than d (val < d means its subtree can satisfy
    // the request), descend into the left child; if the left child cannot satisfy it,
    // switch to its sibling. Repeat until a free node at depth d is found.
    while (val < d || (id & initial) == 0) { // id & initial == 1 << d for all ids at depth d, for < d it is 0
        // descend to the left child
        id <<= 1;
        val = value(id);
        if (val > d) {
            // left child can't satisfy the request; try its sibling
            id ^= 1;
            val = value(id);
        }
    }
    byte value = value(id);
    assert value == d && (id & initial) == 1 << d : String.format("val = %d, id & initial = %d, d = %d",
            value, id & initial, d);
    // set this node's memoryMap value to 12 (unusable) to mark it allocated
    setValue(id, unusable); // mark as unusable
    // walk up the tree, updating each parent to min(its children)
    updateParentsAlloc(id);
    return id;
}
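To see the descent and the parent updates working together, here is a small self-contained simulation on a toy tree with maxOrder = 3 (8 leaves). The class is hypothetical, but allocateNode and updateParentsAlloc mirror the logic above:

public class BuddyTreeDemo {
    static final int MAX_ORDER = 3;
    static final byte UNUSABLE = MAX_ORDER + 1;          // 4 here; 12 in PoolChunk
    static byte[] memoryMap = new byte[(1 << MAX_ORDER) << 1];

    public static void main(String[] args) {
        int id = 1;
        for (int d = 0; d <= MAX_ORDER; d++) {           // same init as memoryMap above
            for (int p = 0; p < (1 << d); p++) {
                memoryMap[id++] = (byte) d;
            }
        }
        // allocate three leaf-sized blocks; the returned ids move left to right
        System.out.println(allocateNode(3));             // 8  (leftmost leaf)
        System.out.println(allocateNode(3));             // 9  (its sibling)
        System.out.println(allocateNode(3));             // 10 (left leaf of the next pair)
        // the root now reports the largest block still available
        System.out.println(memoryMap[1]);                // 1 -> a whole depth-1 half is free
    }

    static int allocateNode(int d) {
        int id = 1;
        int initial = -(1 << d);
        if (memoryMap[id] > d) {
            return -1;                                   // even the root can't satisfy d
        }
        while (memoryMap[id] < d || (id & initial) == 0) {
            id <<= 1;                                    // descend to the left child
            if (memoryMap[id] > d) {
                id ^= 1;                                 // left child too used; take the sibling
            }
        }
        memoryMap[id] = UNUSABLE;                        // mark the chosen node as allocated
        updateParentsAlloc(id);
        return id;
    }

    static void updateParentsAlloc(int id) {
        while (id > 1) {                                 // bubble min(children) up to the root
            int parentId = id >>> 1;
            byte left = memoryMap[id];
            byte right = memoryMap[id ^ 1];
            memoryMap[parentId] = left < right ? left : right;
            id = parentId;
        }
    }
}

Running it prints 8, 9, 10 and then 1: after three leaves are gone, the root's value is 1, meaning the largest block still available is a depth-1 node (half the chunk).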
The memory release method free:
void free(long handle, ByteBuffer nioBuffer) {
    // decode the memoryMap index and the subpage bitmap index from the handle
    int memoryMapIdx = memoryMapIdx(handle);
    int bitmapIdx = bitmapIdx(handle);
    if (bitmapIdx != 0) { // free a subpage
        PoolSubpage<T> subpage = subpages[subpageIdx(memoryMapIdx)];
        assert subpage != null && subpage.doNotDestroy;
        // Obtain the head of the PoolSubPage pool that is owned by the PoolArena and synchronize on it.
        // This is needed as we may add it back and so alter the linked-list structure.
        PoolSubpage<T> head = arena.findSubpagePoolHead(subpage.elemSize);
        synchronized (head) {
            if (subpage.free(head, bitmapIdx & 0x3FFFFFFF)) {
                return;
            }
        }
    }
    // add the size of the freed run back to the chunk's free byte count
    freeBytes += runLength(memoryMapIdx);
    // reset the freed node's value back to its depth, marking it free again
    setValue(memoryMapIdx, depth(memoryMapIdx));
    // walk up the tree, updating parent values
    updateParentsFree(memoryMapIdx);
    if (nioBuffer != null && cachedNioBuffers != null &&
            cachedNioBuffers.size() < PooledByteBufAllocator.DEFAULT_MAX_CACHED_BYTEBUFFERS_PER_CHUNK) {
        cachedNioBuffers.offer(nioBuffer);
    }
}
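The handle itself packs two values into one long: the low 32 bits hold the memoryMap index and the high 32 bits hold the subpage bitmap index. The decoding helpers are essentially:

// how free() decodes the handle (matching PoolChunk's private helpers)
private static int memoryMapIdx(long handle) {
    return (int) handle;                    // low 32 bits: node index in memoryMap
}

private static int bitmapIdx(long handle) {
    return (int) (handle >>> Integer.SIZE); // high 32 bits: subpage bitmap index
}

A run allocated by allocateRun has a bitmap index of 0, while a subpage handle additionally has bit 62 set (see PoolSubpage.toHandle), so bitmapIdx is never 0 for a subpage; that flag bit is what free strips off with bitmapIdx & 0x3FFFFFFF.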
To be continued.