
[libc 2.35 Source Code Study] Freeing Memory to the tcachebin

__libc_free

void __libc_free(void *mem)
{
    mstate ar_ptr;
    mchunkptr p; /* chunk corresponding to mem */
    // free(NULL) is a no-op; return immediately
    if (mem == 0) /* free(0) has no effect */
        return;

    /* Quickly check that the freed pointer matches the tag for the memory.
       This gives a useful double-free detection.  */
    if (__glibc_unlikely(mtag_enabled))
        *(volatile char *)mem;

    int err = errno;
    // convert the user pointer back to the chunk pointer
    p = mem2chunk(mem);
    // mmapped chunks are released back to the OS directly
    if (chunk_is_mmapped(p)) /* release mmapped memory. */
    {
        /* See if the dynamic brk/mmap threshold needs adjusting.
       Dumped fake mmapped chunks do not affect the threshold.  */
        if (!mp_.no_dyn_threshold && chunksize_nomask(p) > mp_.mmap_threshold && chunksize_nomask(p) <= DEFAULT_MMAP_THRESHOLD_MAX)
        {
            mp_.mmap_threshold = chunksize(p);
            mp_.trim_threshold = 2 * mp_.mmap_threshold;
            LIBC_PROBE(memory_mallopt_free_dyn_thresholds, 2,
                       mp_.mmap_threshold, mp_.trim_threshold);
        }
        munmap_chunk(p);
    }
    else
    {
        // initialize the tcache if it has not been set up yet
        MAYBE_INIT_TCACHE();

        // re-tag the memory region
        /* Mark the chunk as belonging to the library again.  */
        (void)tag_region(chunk2mem(p), memsize(p));
        // look up the arena that owns this chunk
        ar_ptr = arena_for_chunk(p);
        // hand the chunk to _int_free
        _int_free(ar_ptr, p, 0);
    }

    __set_errno(err);
}
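
__libc_free only receives the user pointer mem, so the logic above hinges on the fixed offset between the user pointer and the chunk header, plus the flag bits stored in the size field. The following is a minimal sketch, not glibc's real definitions: demo_chunk and demo_mem2chunk are stand-ins that assume the usual 64-bit layout (two size_t header words, flags in the low three bits of size) to show what mem2chunk() and chunk_is_mmapped() operate on.

/* Simplified sketch (not glibc's real definitions) of the header that
   __libc_free reasons about: the user pointer sits two size_t words
   past the start of the chunk, and the low bits of size carry flags. */
#include <stdio.h>
#include <stddef.h>

typedef struct
{
    size_t prev_size; /* size of the previous chunk, if it is free */
    size_t size;      /* chunk size | PREV_INUSE | IS_MMAPPED | NON_MAIN_ARENA */
} demo_chunk;

#define PREV_INUSE 0x1
#define IS_MMAPPED 0x2
#define NON_MAIN_ARENA 0x4
#define SIZE_BITS (PREV_INUSE | IS_MMAPPED | NON_MAIN_ARENA)

/* demo_mem2chunk: step back over the two header words */
#define demo_mem2chunk(mem) ((demo_chunk *)((char *)(mem) - 2 * sizeof(size_t)))

int main(void)
{
    demo_chunk c = {0, 0x91}; /* 0x90-byte chunk with PREV_INUSE set */
    void *mem = (char *)&c + 2 * sizeof(size_t);

    demo_chunk *p = demo_mem2chunk(mem);
    printf("chunksize = %#zx, mmapped = %d\n",
           p->size & ~(size_t)SIZE_BITS, !!(p->size & IS_MMAPPED));
    return 0;
}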

_int_free

#if USE_TCACHE
    {
        // compute the tcache bin index from the chunk size
        size_t tc_idx = csize2tidx(size);
        // if the tcache exists and the index is within tcache_bins, try to put the chunk there
        if (tcache != NULL && tc_idx < mp_.tcache_bins)
        {
            // reinterpret the chunk's user data as a tcache_entry
            /* Check to see if it's already in the tcache.  */
            tcache_entry *e = (tcache_entry *)chunk2mem(p);

            /* This test succeeds on double free.  However, we don't 100%
               trust it (it also matches random payload data at a 1 in
               2^<size_t> chance), so verify it's not an unlikely
               coincidence before aborting.  */
            // check the key field: a match suggests a double free
            if (__glibc_unlikely(e->key == tcache_key))
            {
                tcache_entry *tmp;
                size_t cnt = 0;
                LIBC_PROBE(memory_tcache_double_free, 2, e, tc_idx);
                for (tmp = tcache->entries[tc_idx];
                     tmp;
                     tmp = REVEAL_PTR(tmp->next), ++cnt)    // REVEAL_PTR decrypts the mangled next pointer; because next is encrypted this way, leaking a single value is enough to recover the key and a heap address
                {
                    // if the walk exceeds tcache_count entries, report corruption
                    if (cnt >= mp_.tcache_count)
                        malloc_printerr("free(): too many chunks detected in tcache");
                    // if a chunk in the tcache is misaligned, report an error
                    if (__glibc_unlikely(!aligned_OK(tmp)))
                        malloc_printerr("free(): unaligned chunk detected in tcache 2");
                    // if an entry equals the current chunk, this really is a double free
                    if (tmp == e)
                        malloc_printerr("free(): double free detected in tcache 2");
                    /* If we get here, it was a coincidence.  We've wasted a
                       few cycles, but don't abort.  */
                }
            }

            // if this bin still has room
            if (tcache->counts[tc_idx] < mp_.tcache_count)
            {
                // put the chunk into the tcache and return
                tcache_put(p, tc_idx);
                return;
            }
        }
    }
#endif
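
The e->key check above is what catches an immediate double free: the first free() stamps the chunk with tcache_key in tcache_put, and the second free() finds the same chunk in entries[tc_idx] and aborts. A minimal reproducer, assuming a glibc 2.35 system:

/* Minimal double-free reproducer: on glibc 2.35 the second free() sees
   e->key == tcache_key, finds the chunk in entries[tc_idx], and aborts
   with "free(): double free detected in tcache 2". */
#include <stdlib.h>

int main(void)
{
    void *p = malloc(0x28); /* tcache-sized request */
    free(p);                /* tcache_put(): e->key = tcache_key */
    free(p);                /* key matches and p is found in the bin -> abort */
    return 0;
}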

tcache_put

Inserts the node into the tcachebin linked list:

/* Caller must ensure that we know tc_idx is valid and there's room
   for more chunks.  */
static __always_inline void
tcache_put(mchunkptr chunk, size_t tc_idx)
{
    tcache_entry *e = (tcache_entry *)chunk2mem(chunk);

    /* Mark this chunk as "in the tcache" so the test in _int_free will
       detect a double free.  */
    e->key = tcache_key;
    // mangle the next pointer (Safe-Linking)
    e->next = PROTECT_PTR(&e->next, tcache->entries[tc_idx]);   // XOR the old head with this chunk's heap address shifted right by 12 bits
    tcache->entries[tc_idx] = e;
    ++(tcache->counts[tc_idx]);
}
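
Because tcache_put always links the new entry in at the head of entries[tc_idx], each tcache bin behaves like a LIFO stack: the most recently freed chunk of a size class is the first one handed back by the next malloc. A small illustration of the expected order (the printed addresses are whatever the heap happens to return):

/* Expected LIFO behaviour of a tcache bin: free a then b, and the next
   two mallocs of the same size should return b first, then a. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    void *a = malloc(0x28);
    void *b = malloc(0x28);
    free(a);                /* bin: a */
    free(b);                /* bin: b -> a (b becomes the new head) */
    void *c = malloc(0x28); /* expected: c == b */
    void *d = malloc(0x28); /* expected: d == a */
    printf("a=%p b=%p\nc=%p d=%p\n", a, b, c, d);
    return 0;
}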

An XOR-based mangling of the next pointer happens here: the key is the chunk's own heap address shifted right by 12 bits, so leaking a single stored next value (for example from a bin whose only entry has next == NULL) reveals both the key and a heap address; a small demo follows the macro definitions below.

/* Safe-Linking:
   Use randomness from ASLR (mmap_base) to protect single-linked lists
   of Fast-Bins and TCache.  That is, mask the "next" pointers of the
   lists' chunks, and also perform allocation alignment checks on them.
   This mechanism reduces the risk of pointer hijacking, as was done with
   Safe-Unlinking in the double-linked lists of Small-Bins.
   It assumes a minimum page size of 4096 bytes (12 bits).  Systems with
   larger pages provide less entropy, although the pointer mangling
   still works.  */
#define PROTECT_PTR(pos, ptr) \
    ((__typeof(ptr))((((size_t)pos) >> 12) ^ ((size_t)ptr)))
#define REVEAL_PTR(ptr) PROTECT_PTR(&ptr, ptr)
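
To see why a single leak is enough: the first chunk freed into an empty bin stores PROTECT_PTR(&e->next, NULL), which is simply (&e->next) >> 12, i.e. the key itself. The sketch below copies the PROTECT_PTR macro and uses made-up addresses plus a hypothetical demangle_with_key() helper (not part of glibc) to show the recovery:

/* Stand-alone sketch of Safe-Linking mangling and recovery.  The two
   addresses are made up; demangle_with_key() is a hypothetical
   attacker-side helper, not part of glibc. */
#include <stdio.h>
#include <stddef.h>

#define PROTECT_PTR(pos, ptr) \
    ((__typeof(ptr))((((size_t)pos) >> 12) ^ ((size_t)ptr)))

static void *demangle_with_key(size_t mangled, size_t key)
{
    return (void *)(mangled ^ key);
}

int main(void)
{
    /* Pretend these are &e->next of two chunks on the same heap page. */
    size_t e1 = 0x55555555a2a0;
    size_t e2 = 0x55555555a2e0;

    /* First free into an empty bin: next == NULL, so the stored value
       is exactly the key (the chunk's address >> 12). */
    size_t key = (size_t)PROTECT_PTR(e1, (void *)0);
    printf("leaked key   = %#zx\n", key);

    /* Second free: stored value is (e2 >> 12) ^ e1; with the leaked key
       the real next pointer (e1) comes right back. */
    size_t stored = (size_t)PROTECT_PTR(e2, (void *)e1);
    printf("decoded next = %p (should be %#zx)\n",
           demangle_with_key(stored, key), e1);
    return 0;
}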
