[Memory Management] The CMA Memory Allocator (Contiguous Memory Allocator) [Repost]


Reposted from: https://www.cnblogs.com/yibuyibu/p/14806878.html

What is CMA
These posts cover the topic well:
http://www.wowotech.net/memory_management/cma.html
https://www.cnblogs.com/LoyenWang/p/12182594.html
https://biscuitos.github.io/blog/CMA/


CMA creation and initialization
* Creation of the default CMA (dma_contiguous_default_area) can happen in two ways:
Pass the "cma=" parameter on the kernel cmdline; during early kernel init the parameter is parsed and the region is created via start_kernel()->setup_arch()->arm64_memblock_init()->dma_contiguous_reserve() (on Android this cmdline route is generally not used; a sample cmdline entry is shown after the parsing code below):
static phys_addr_t size_cmdline = -1;
static phys_addr_t base_cmdline;
static phys_addr_t limit_cmdline;
//parse the cma parameter passed on the cmdline
static int __init early_cma(char *p)
{
    pr_debug("%s(%s)\n", __func__, p);
    size_cmdline = memparse(p, &p);
    if (*p != '@')
        return 0;
    base_cmdline = memparse(p + 1, &p);
    if (*p != '-') {
        limit_cmdline = base_cmdline + size_cmdline;
        return 0;
    }
    limit_cmdline = memparse(p + 1, &p);

    return 0;
}
early_param("cma", early_cma);
Configure a cma node in the dts whose properties include "shared-dma-pool" and "linux,cma-default". During kernel init, start_kernel()->setup_arch()->arm64_memblock_init()->early_init_fdt_scan_reserved_mem()->fdt_init_reserved_mem()->__reserved_mem_init_node() creates and initializes the default CMA (a sample dts node is sketched after the code and notes below):
static int __init __reserved_mem_init_node(struct reserved_mem *rmem)
{
    extern const struct of_device_id __reservedmem_of_table[];
    const struct of_device_id *i;
    //__reservedmem_of_table is a section built at link time; every entry defined with RESERVEDMEM_OF_DECLARE is placed in this section
    //see: https://blog.csdn.net/rikeyone/article/details/79975138
    for (i = __reservedmem_of_table; i < &__rmem_of_table_sentinel; i++) {
        reservedmem_of_init_fn initfn = i->data;
        const char *compat = i->compatible;

        if (!of_flat_dt_is_compatible(rmem->fdt_node, compat))
            continue;

        if (initfn(rmem) == 0) {
            pr_info("initialized node %s, compatible id %s\n",
                rmem->name, compat);
            return 0;
        }
    }
    return -ENOENT;
}
//dma-contiguous.c defines the creation callback for the default CMA.
//If no such node is configured in the dts, this callback never runs.
//see: https://blog.csdn.net/rikeyone/article/details/79975138
RESERVEDMEM_OF_DECLARE(cma, "shared-dma-pool", rmem_cma_setup);
On quite a few Android platforms the default CMA does not appear to be created at all.
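For reference, a minimal sketch of a default CMA node under /reserved-memory; the node name, size and alignment values are illustrative assumptions rather than values from any particular platform:

reserved-memory {
    #address-cells = <2>;
    #size-cells = <2>;
    ranges;

    /* illustrative default CMA region */
    linux,cma {
        compatible = "shared-dma-pool";
        reusable;                   /* pages may be lent to movable allocations and reclaimed; required for CMA */
        size = <0x0 0x10000000>;    /* 256MB, example value */
        alignment = <0x0 0x400000>; /* 4MB, matching the CMA alignment requirement */
        linux,cma-default;          /* marks this region as dma_contiguous_default_area */
    };
};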
* Creation of other CMA regions
Other CMA regions are created much like the default one: RESERVEDMEM_OF_DECLARE places an entry in the __reservedmem_of_table section so that the corresponding initfn is called at boot, and a matching node with the right properties must be configured in the dts.
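A minimal sketch of such a vendor-specific region follows; the compatible string "vendor,my-cma-pool", the node name and the setup function are hypothetical, and the real rmem_cma_setup additionally performs alignment and dts-property checks that are omitted here:

#include <linux/kernel.h>
#include <linux/cma.h>
#include <linux/of_reserved_mem.h>

static struct cma *my_cma; /* handle used later with cma_alloc()/cma_release() */

static int __init my_cma_setup(struct reserved_mem *rmem)
{
    /* order_per_bit = 0: one bitmap bit tracks exactly one page */
    int err = cma_init_reserved_mem(rmem->base, rmem->size, 0, rmem->name, &my_cma);

    if (err) {
        pr_err("my-cma: cma_init_reserved_mem failed: %d\n", err);
        return err;
    }
    return 0;
}
/* the matching dts node must use compatible = "vendor,my-cma-pool" and carry the "reusable" property */
RESERVEDMEM_OF_DECLARE(my_cma, "vendor,my-cma-pool", my_cma_setup);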

All CMA creation paths eventually call cma_init_reserved_mem():

It takes a cma descriptor from the global cma_areas array and initializes it with the parameters passed in.
The parameters include the CMA name, the starting page frame number base_pfn, the total page count count, and order_per_bit (one bitmap bit covers 2^order_per_bit pages).
It also updates the global totalcma_pages, which records the total number of CMA pages; this is the CmaTotal value shown in meminfo.
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
                 unsigned int order_per_bit,
                 const char *name,
                 struct cma **res_cma)
{
    struct cma *cma;
    phys_addr_t alignment;

    /* Sanity checks */
    //check whether all CMA slots are used up; the cma_areas array fixes the total number of CMA regions, controlled by a kernel config macro
    if (cma_area_count == ARRAY_SIZE(cma_areas)) {
        pr_err("Not enough slots for CMA reserved regions!\n");
        return -ENOSPC;
    }
    //sanity check: the range must already be marked reserved in memblock (that was done earlier by the cmdline / reserved-memory path)
    if (!size || !memblock_is_region_reserved(base, size))
        return -EINVAL;

    /* ensure minimal alignment required by mm core */
    //alignment granularity is max(MAX_ORDER - 1, pageblock_order) pages, typically 1024 pages (4MB) with 4KB pages
    alignment = PAGE_SIZE <<
            max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

    /* alignment should be aligned with order_per_bit */
    //check that the alignment itself is a multiple of the memory covered by a single bitmap bit
    if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
        return -EINVAL;
    //check that base and size are unchanged after aligning them to alignment, i.e. that both are already aligned
    if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
        return -EINVAL;

    /*
     * Each reserved area must be initialised later, when more kernel
     * subsystems (like slab allocator) are available.
     */
     //1. memblock is the system's earliest memory manager; it tracks memory-type and reserved-type regions, and CMA starts out as reserved type
     //2. reaching this point means memblock is already up and buddy is not yet; CMA is set up before buddy
     //3. after CMA is set up, memblock's memory-type regions are handed over to buddy, while reserved-type regions are not
     //4. CMA is a special reserved region: later, system init calls cma_init_reserved_areas() to give its pages back to buddy

    //take one cma descriptor from the cma_areas array
    cma = &cma_areas[cma_area_count];
    if (name) {
        cma->name = name;
    } else {
        cma->name = kasprintf(GFP_KERNEL, "cma%d\n", cma_area_count);
        if (!cma->name)
            return -ENOMEM;
    }
    
    cma->base_pfn = PFN_DOWN(base); //starting page frame number
    cma->count = size >> PAGE_SHIFT; //total number of pages
    cma->order_per_bit = order_per_bit; //order covered by one bitmap bit
    *res_cma = cma;
    cma_area_count++;
    totalcma_pages += (size / PAGE_SIZE); //totalcma_pages tracks the total number of CMA pages; meminfo reports it as CmaTotal

    return 0;
}
At this point the CMA memory has only been reserved and initialized; the CMA regions still have to be released to the buddy allocator.

Releasing CMA regions to the buddy allocator
The release also happens during kernel init, a little later than CMA creation: cma_init_reserved_areas() finishes initializing every CMA region and returns the memory to buddy.

core_initcall(cma_init_reserved_areas) places the call in the kernel's init section; start_kernel()->rest_init() spawns the kernel_init thread, which runs kernel_init_freeable()->do_basic_setup()->do_initcalls() to execute each initcall level in turn. core_initcall belongs to level 1.
cma_init_reserved_areas() walks the cma entries already allocated in the global array and calls cma_activate_area() on each one to activate it and release its memory to buddy:

static int __init cma_init_reserved_areas(void)
{
    int i;

    for (i = 0; i < cma_area_count; i++) {
        int ret = cma_activate_area(&cma_areas[i]);

        if (ret)
            return ret;
    }

    return 0;
}
core_initcall(cma_init_reserved_areas);
cma_activate_area():
Working one pageblock at a time, it sets the migrate type to MIGRATE_CMA, releases every page of the pageblock to the buddy allocator, and updates the system's total amount of available memory.
static int __init cma_activate_area(struct cma *cma)
{
    int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
    unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
    //i is the number of pageblocks in the region; a pageblock is 2^pageblock_order pages, commonly 1024 pages (4MB)
    unsigned i = cma->count >> pageblock_order;
    struct zone *zone;

    //CMA is managed through a bitmap as well; how much memory one bit covers is decided by order_per_bit.
    //The default CMA uses order_per_bit = 0, so one bit covers 2^0 = 1 page.
    //allocate the bitmap
    cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

    if (!cma->bitmap)
        return -ENOMEM;

    WARN_ON_ONCE(!pfn_valid(pfn));
    zone = page_zone(pfn_to_page(pfn));

    //iterate pageblock by pageblock
    do {
        unsigned j;

        //remember the first pfn of the current pageblock
        base_pfn = pfn;
        //check that every page in this pageblock qualifies: valid pfn and all pages in the same zone
        for (j = pageblock_nr_pages; j; --j, pfn++) {
            WARN_ON_ONCE(!pfn_valid(pfn));
            /*
             * alloc_contig_range requires the pfn range
             * specified to be in the same zone. Make this
             * simple by forcing the entire CMA resv range
             * to be in the same zone.
             */
            if (page_zone(pfn_to_page(pfn)) != zone)
                goto not_in_zone;
        }
        //initialize this pageblock and release it to the buddy allocator
        init_cma_reserved_pageblock(pfn_to_page(base_pfn));
    } while (--i);

    mutex_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
    INIT_HLIST_HEAD(&cma->mem_head);
    spin_lock_init(&cma->mem_head_lock);
#endif

    return 0;

not_in_zone:
    pr_err("CMA area %s could not be activated\n", cma->name);
    kfree(cma->bitmap);
    cma->count = 0;
    return -EINVAL;
}
cma_activate_area()->init_cma_reserved_pageblock() sets the pageblock's migrate type and releases the memory to the buddy allocator:
void __init init_cma_reserved_pageblock(struct page *page)
{
    unsigned i = pageblock_nr_pages;
    struct page *p = page;

    do {
        //clear the PG_reserved flag in the page flags
        __ClearPageReserved(p);
        //set page->_refcount to 0
        set_page_count(p, 0);
    } while (++p, --i);
    //mark the whole pageblock's migrate type as MIGRATE_CMA
    set_pageblock_migratetype(page, MIGRATE_CMA);

    if (pageblock_order >= MAX_ORDER) {
        i = pageblock_nr_pages;
        p = page;
        do {
            set_page_refcounted(p);
            __free_pages(p, MAX_ORDER - 1);
            p += MAX_ORDER_NR_PAGES;
        } while (i -= MAX_ORDER_NR_PAGES);
    } else {
        //set page->_refcount to 1
        set_page_refcounted(page);
        //free the pages to buddy one whole pageblock at a time (order = pageblock_order)
        __free_pages(page, pageblock_order);
    }
    //bump the zone's managed_pages count by one pageblock
    //and the global totalram_pages by one pageblock
    adjust_managed_page_count(page, pageblock_nr_pages);
}
CMA allocation
CMA allocations all go through cma_alloc(): it first searches the bitmap for a run of free bits large enough for the request, then calls alloc_contig_range() to do the actual allocation; on success the pages are pulled out of the buddy allocator:
struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
               gfp_t gfp_mask)
{
    unsigned long mask, offset;
    unsigned long pfn = -1;
    unsigned long start = 0;
    unsigned long bitmap_maxno, bitmap_no, bitmap_count;
    struct page *page = NULL;
    int ret = -ENOMEM;

    if (!cma || !cma->count)
        return NULL;

    pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
         count, align);

    if (!count)
        return NULL;

    mask = cma_bitmap_aligned_mask(cma, align);
    offset = cma_bitmap_aligned_offset(cma, align);
    bitmap_maxno = cma_bitmap_maxno(cma);
    bitmap_count = cma_bitmap_pages_to_bits(cma, count);

    if (bitmap_count > bitmap_maxno)
        return NULL;

    for (;;) {
        mutex_lock(&cma->lock);
        //1. search cma->bitmap for bitmap_count consecutive zero bits
        bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
                bitmap_maxno, start, bitmap_count, mask,
                offset);
        if (bitmap_no >= bitmap_maxno) {
            mutex_unlock(&cma->lock);
            break;
        }
        //2. set the bits that were found to 1, marking that range as allocated
        bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
        /*
         * It's safe to drop the lock here. We've marked this region for
         * our exclusive use. If the migration fails we will take the
         * lock again and unmark it.
         */
        mutex_unlock(&cma->lock);
        //3. compute the pfn of the first page of the allocation
        pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
        mutex_lock(&cma_mutex);
        //4. allocate count contiguous pages starting at pfn, with migrate type MIGRATE_CMA
        ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
                     gfp_mask);
        mutex_unlock(&cma_mutex);
        //5. on success, return the first page
        if (ret == 0) {
            page = pfn_to_page(pfn);
            break;
        }

        cma_clear_bitmap(cma, pfn, count);
        if (ret != -EBUSY)
            break;

        pr_debug("%s(): memory range at %p is busy, retrying\n",
             __func__, pfn_to_page(pfn));
        /* try again with a bit different memory target */
        start = bitmap_no + mask + 1;
    }

    trace_cma_alloc(pfn, page, count, align);

    if (ret && !(gfp_mask & __GFP_NOWARN)) {
        pr_info("%s: alloc failed, req-size: %zu pages, ret: %d\n",
            __func__, count, ret);
        cma_debug_show_areas(cma);
    }

    pr_debug("%s(): returned %p\n", __func__, page);
    return page;
}
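For orientation, a minimal driver-side sketch of calling cma_alloc() on the hypothetical my_cma region from the earlier setup sketch; the buffer size and alignment are illustrative, and the gfp_mask argument matches the kernel version quoted above:

#include <linux/cma.h>
#include <linux/gfp.h>
#include <linux/sizes.h>

static struct page *my_cma_pages;  /* hypothetical driver state */
static size_t my_cma_nr_pages;

static int my_driver_grab_buffer(void)
{
    my_cma_nr_pages = SZ_1M >> PAGE_SHIFT;  /* 1MB worth of pages */
    /* align is an order: get_order(SZ_1M) asks for 1MB alignment */
    my_cma_pages = cma_alloc(my_cma, my_cma_nr_pages, get_order(SZ_1M), GFP_KERNEL);
    if (!my_cma_pages)
        return -ENOMEM;
    return 0;
}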
CMA release
The release path is just as clear: cma_release() returns the pages to the buddy system and clears the corresponding bits in the CMA bitmap:
bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
    unsigned long pfn;

    if (!cma || !pages)
        return false;

    pr_debug("%s(page %p)\n", __func__, (void *)pages);

    pfn = page_to_pfn(pages);

    if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
        return false;

    VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
    //give the pages back to buddy
    free_contig_range(pfn, count);
    //clear the bitmap bits so that stretch of CMA memory is available again
    cma_clear_bitmap(cma, pfn, count);
    trace_cma_release(pfn, pages, count);

    return true;
}
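And the matching teardown for the allocation sketch above (again with the hypothetical names):

/* hypothetical counterpart to my_driver_grab_buffer() */
static void my_driver_put_buffer(void)
{
    if (my_cma_pages && cma_release(my_cma, my_cma_pages, my_cma_nr_pages))
        my_cma_pages = NULL;
}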
CMA and buddy
To be added later.

 


