[Netty Study Notes 15] PoolArena Internals

PoolArena is the class through which the pool allocates memory globally. It is abstract, with two concrete implementations: one for heap memory and one for direct memory.

Class fields

private final int maxOrder; // depth of the chunk's buddy binary tree, 11 by default (root at depth 0)
    final int pageSize; // size of a single tree leaf (page), 8KB by default
    final int pageShifts; // log2(pageSize), 13 by default
    final int chunkSize; // chunk size: pageSize << maxOrder, 16MB by default
    final int subpageOverflowMask; // ~(pageSize - 1); used to test whether a request is tiny/small (< pageSize)
    final int numSmallSubpagePools; // number of doubly linked lists for small requests (pageShifts - 9)
    final int directMemoryCacheAlignment; // direct memory alignment, 0 by default (no alignment)
    final int directMemoryCacheAlignmentMask; // directMemoryCacheAlignment - 1
    private final PoolSubpage<T>[] tinySubpagePools; // doubly linked subpage lists for tiny requests
    private final PoolSubpage<T>[] smallSubpagePools; // doubly linked subpage lists for small requests

    // PoolChunkLists chained into a doubly linked list; each one manages
    // the PoolChunks that fall into its utilization range
    private final PoolChunkList<T> q050;
    private final PoolChunkList<T> q025;
    private final PoolChunkList<T> q000;
    private final PoolChunkList<T> qInit;
    private final PoolChunkList<T> q075;
    private final PoolChunkList<T> q100;
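
Several of these defaults are derived from one another: pageShifts is log2(pageSize) and chunkSize is pageSize << maxOrder. A standalone sanity check of that arithmetic (the class and variable names here are mine, not Netty's):

public class PoolArenaDefaults {
    public static void main(String[] args) {
        int pageSize = 8192;                                       // default page size, 8KB
        int maxOrder = 11;                                         // default tree depth
        int pageShifts = Integer.numberOfTrailingZeros(pageSize);  // log2(8192) = 13
        int chunkSize = pageSize << maxOrder;                      // 8KB << 11 = 16MB
        int subpageOverflowMask = ~(pageSize - 1);                 // masks off the low 13 bits
        System.out.printf("pageShifts=%d chunkSize=%d (%.0fMB)%n",
                pageShifts, chunkSize, chunkSize / (1024.0 * 1024.0));
        // A request is tiny/small when it fits below pageSize:
        System.out.println((4096 & subpageOverflowMask) == 0);  // true:  4KB  < 8KB
        System.out.println((16384 & subpageOverflowMask) == 0); // false: 16KB >= 8KB
    }
}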

The initialization code and the PoolChunkList constructors wire these PoolChunkList instances together into a doubly linked list.
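
As a rough illustration, here is a minimal, self-contained sketch of that chaining, assuming the utilization windows used in Netty 4.1: qInit [MIN, 25%), q000 [1%, 50%), q025 [25%, 75%), q050 [50%, 100%), q075 [75%, 100%), q100 [100%, MAX]. ChunkListNode is a hypothetical stand-in for PoolChunkList, not Netty's class:

// Hypothetical stand-in for PoolChunkList, just to show the prev/next wiring.
final class ChunkListNode {
    final String name;
    final int minUsage, maxUsage; // utilization bounds in percent
    ChunkListNode next;           // a chunk moves forward when its usage rises past maxUsage
    ChunkListNode prev;           // a chunk moves backward when its usage drops below minUsage

    ChunkListNode(String name, int minUsage, int maxUsage, ChunkListNode next) {
        this.name = name;
        this.minUsage = minUsage;
        this.maxUsage = maxUsage;
        this.next = next;
    }

    public static void main(String[] args) {
        // Built back-to-front, mirroring the construction order in PoolArena.
        ChunkListNode q100 = new ChunkListNode("q100", 100, Integer.MAX_VALUE, null);
        ChunkListNode q075 = new ChunkListNode("q075", 75, 100, q100);
        ChunkListNode q050 = new ChunkListNode("q050", 50, 100, q075);
        ChunkListNode q025 = new ChunkListNode("q025", 25, 75, q050);
        ChunkListNode q000 = new ChunkListNode("q000", 1, 50, q025);
        ChunkListNode qInit = new ChunkListNode("qInit", Integer.MIN_VALUE, 25, q000);

        // prev links are set afterwards, like PoolArena does with prevList(...):
        q100.prev = q075; q075.prev = q050; q050.prev = q025; q025.prev = q000;
        q000.prev = null;   // a chunk leaving q000 backwards gets destroyed
        qInit.prev = qInit; // chunks in qInit never move backward out of it

        for (ChunkListNode n = qInit; n != null; n = n.next) {
            System.out.printf("%s [%d%%, %d%%)%n", n.name, n.minUsage, n.maxUsage);
        }
    }
}

Note the two asymmetries at the ends of the chain: q000 has no prev list, so an almost-empty chunk falls out of the chain and can be destroyed, while qInit points back to itself, so freshly created chunks are never released through this path.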

The memory allocation method: allocate

private void allocate(PoolThreadCache cache, PooledByteBuf<T> buf, final int reqCapacity) {
        final int normCapacity = normalizeCapacity(reqCapacity);
        // Requests below pageSize (8KB) are treated as tiny or small
        if (isTinyOrSmall(normCapacity)) { // capacity < pageSize
            int tableIdx;
            PoolSubpage<T>[] table;
            boolean tiny = isTiny(normCapacity);
            // Anything below 512B is tiny
            if (tiny) { // < 512
                // Try the thread-local cache first
                if (cache.allocateTiny(this, buf, reqCapacity, normCapacity)) {
                    // was able to allocate out of the cache so move on
                    return;
                }
                tableIdx = tinyIdx(normCapacity);
                table = tinySubpagePools;
            } else {
                // Try the thread-local cache first
                if (cache.allocateSmall(this, buf, reqCapacity, normCapacity)) {
                    // was able to allocate out of the cache so move on
                    return;
                }
                tableIdx = smallIdx(normCapacity);
                table = smallSubpagePools;
            }
            // Look up the matching PoolSubpage list
            final PoolSubpage<T> head = table[tableIdx];

            /**
             * Synchronize on the head. This is needed as {@link PoolChunk#allocateSubpage(int)} and
             * {@link PoolChunk#free(long)} may modify the doubly linked list as well.
             */
            synchronized (head) {
                final PoolSubpage<T> s = head.next;
                // If the list has a spare node, allocate from it directly
                // (the head node itself never serves allocations)
                if (s != head) {
                    assert s.doNotDestroy && s.elemSize == normCapacity;
                    long handle = s.allocate();
                    assert handle >= 0;
                    s.chunk.initBufWithSubpage(buf, null, handle, reqCapacity, cache);
                    incTinySmallAllocation(tiny);
                    return;
                }
            }
            synchronized (this) {
                // Try to allocate from the PoolChunkLists; if none can serve the
                // request, create a new chunk and add it to the qInit list
                allocateNormal(buf, reqCapacity, normCapacity, cache);
            }
            // Update the counters and return
            incTinySmallAllocation(tiny);
            return;
        }
        if (normCapacity <= chunkSize) {
            // Try the thread-local cache first
            if (cache.allocateNormal(this, buf, reqCapacity, normCapacity)) {
                // was able to allocate out of the cache so move on
                return;
            }
            synchronized (this) {
                // Try to allocate from the PoolChunkLists; if none can serve the
                // request, create a new chunk and add it to the qInit list
                allocateNormal(buf, reqCapacity, normCapacity, cache);
                ++allocationsNormal;
            }
        } else {
            // Anything above chunkSize (16MB) is allocated directly
            // Huge allocations are never served via the cache so just call allocateHuge
            allocateHuge(buf, reqCapacity);
        }
    }
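
A note on normalizeCapacity, which the method calls first: tiny requests are rounded up to a multiple of 16, while requests of 512B and above are rounded up to the next power of two. A simplified standalone sketch of that rounding (the real method also handles the huge and alignment branches, which are omitted here):

public class NormalizeDemo {
    // Simplified model of PoolArena.normalizeCapacity for the pooled range.
    static int normalize(int reqCapacity) {
        if (reqCapacity >= 512) {
            // Round up to the next power of two, e.g. 1000 -> 1024.
            int n = reqCapacity - 1;
            n |= n >>> 1; n |= n >>> 2; n |= n >>> 4; n |= n >>> 8; n |= n >>> 16;
            return n + 1;
        }
        // Tiny: round up to a multiple of 16, e.g. 50 -> 64.
        return (reqCapacity & 15) == 0 ? reqCapacity : (reqCapacity & ~15) + 16;
    }

    public static void main(String[] args) {
        System.out.println(normalize(50));   // 64
        System.out.println(normalize(512));  // 512
        System.out.println(normalize(1000)); // 1024
    }
}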

To summarize:
1. Except for huge requests, which are allocated directly as unpooled memory, every request first tries the thread-local cache.
2. On a cache miss, a tiny/small request uses its normalized size to locate the matching subpage doubly linked list; if that list has no usable subpage yet, a normal-style allocation carves one page out of a chunk for it, and a newly created chunk is added to the PoolChunkList.
3. A normal request that misses the cache is attempted against the PoolChunkLists; if none can serve it, a new chunk is created and allocated from using the buddy algorithm, then added to the PoolChunkList (qInit).
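
The size classes can be illustrated with a small standalone sketch, assuming the default 8KB page and 16MB chunk (the classify helper is mine, and it skips normalizeCapacity's rounding):

public class SizeClassDemo {
    static final int PAGE_SIZE = 8192;
    static final int CHUNK_SIZE = PAGE_SIZE << 11; // 16MB

    static String classify(int normCapacity) {
        if (normCapacity < 512) return "tiny";           // served from tinySubpagePools
        if (normCapacity < PAGE_SIZE) return "small";    // served from smallSubpagePools
        if (normCapacity <= CHUNK_SIZE) return "normal"; // served from a chunk's buddy tree
        return "huge";                                   // allocated unpooled
    }

    public static void main(String[] args) {
        int[] sizes = { 16, 496, 512, 4096, 8192, 1 << 20, CHUNK_SIZE + 1 };
        for (int s : sizes) {
            System.out.printf("%10d -> %s%n", s, classify(s));
        }
    }
}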

The memory release methods: free and freeChunk

void free(PoolChunk<T> chunk, ByteBuffer nioBuffer, long handle, int normCapacity, PoolThreadCache cache) {
        if (chunk.unpooled) { // huge: release the unpooled chunk immediately
            int size = chunk.chunkSize();
            destroyChunk(chunk);
            activeBytesHuge.add(-size);
            deallocationsHuge.increment();
        } else {
            SizeClass sizeClass = sizeClass(normCapacity);
            // If it can be cached, park it in the PoolThreadCache instead of freeing
            if (cache != null && cache.add(this, chunk, nioBuffer, handle, normCapacity, sizeClass)) {
                // cached so do not free it
                return;
            }

            freeChunk(chunk, handle, sizeClass, nioBuffer, false);
        }
    }
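
From the caller's side this path is reached through ByteBuf.release(). A runnable sketch against the public allocator API (assuming netty-buffer is on the classpath; whether the second allocation really hits the thread cache depends on the cache configuration):

import io.netty.buffer.ByteBuf;
import io.netty.buffer.PooledByteBufAllocator;

public class ReleaseDemo {
    public static void main(String[] args) {
        PooledByteBufAllocator alloc = PooledByteBufAllocator.DEFAULT;

        ByteBuf buf = alloc.directBuffer(256); // tiny-class request (< 512B)
        buf.writeInt(42);
        buf.release(); // hands the memory back via PoolArena.free; if possible it is
                       // parked in the PoolThreadCache instead of being freed

        ByteBuf again = alloc.directBuffer(256); // same size class: may come from the cache
        again.release();
    }
}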

void freeChunk(PoolChunk<T> chunk, long handle, SizeClass sizeClass, ByteBuffer nioBuffer, boolean finalizer) {
        final boolean destroyChunk;
        synchronized (this) {
            if (!finalizer) {
                switch (sizeClass) {
                    case Normal:
                        ++deallocationsNormal;
                        break;
                    case Small:
                        ++deallocationsSmall;
                        break;
                    case Tiny:
                        ++deallocationsTiny;
                        break;
                    default:
                        throw new Error();
                }
            }
            // parent is the PoolChunkList that owns this chunk; it frees the handle
            // and may move the chunk between lists, see PoolChunkList.free
            destroyChunk = !chunk.parent.free(chunk, handle, nioBuffer);
        }
        if (destroyChunk) {
            // destroyChunk does not need to be called while holding the synchronized lock.
            destroyChunk(chunk);
        }
    }
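
The interesting part is chunk.parent.free(...): the owning PoolChunkList frees the handle and, if the chunk's utilization has dropped below the list's minUsage, moves the chunk backward along the prev chain; once it falls out past q000 (whose prev is null), free returns false and the arena destroys the chunk. A hedged standalone sketch of that movement rule (MyList is hypothetical and the usage bookkeeping is simplified):

// Hypothetical sketch of PoolChunkList.free's move-backward rule.
final class MyList {
    final String name;
    final int minUsage; // lower utilization bound in percent
    MyList prev;

    MyList(String name, int minUsage) { this.name = name; this.minUsage = minUsage; }

    // Returns false when the chunk has dropped out of the whole chain and
    // should be destroyed by the arena (mirrors the boolean in freeChunk).
    boolean free(int usagePercentAfterFree) {
        MyList list = this;
        while (usagePercentAfterFree < list.minUsage) {
            if (list.prev == null) {
                return false; // fell out past q000: destroy the chunk
            }
            list = list.prev; // move the chunk to the previous (lower-usage) list
        }
        System.out.println("chunk stays pooled in " + list.name);
        return true;
    }

    public static void main(String[] args) {
        MyList q000 = new MyList("q000", 1);
        MyList q025 = new MyList("q025", 25);
        MyList q050 = new MyList("q050", 50);
        q050.prev = q025; q025.prev = q000; q000.prev = null;

        System.out.println(q050.free(30)); // true: migrates down to q025
        System.out.println(q050.free(0));  // false: usage hit 0%, chunk destroyed
    }
}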

That concludes this installment of the analysis.
