__libc_free
void __libc_free(void *mem)
{
  mstate ar_ptr;
  mchunkptr p; /* chunk corresponding to mem */

  // free(0) is a no-op: return immediately
  if (mem == 0) /* free(0) has no effect */
    return;

  /* Quickly check that the freed pointer matches the tag for the memory.
     This gives a useful double-free detection. */
  if (__glibc_unlikely(mtag_enabled))
    *(volatile char *)mem;

  int err = errno;

  // convert the user pointer back to its chunk pointer
  p = mem2chunk(mem);

  // handle mmapped memory
  if (chunk_is_mmapped(p)) /* release mmapped memory. */
    {
      /* See if the dynamic brk/mmap threshold needs adjusting.
         Dumped fake mmapped chunks do not affect the threshold. */
      if (!mp_.no_dyn_threshold && chunksize_nomask(p) > mp_.mmap_threshold && chunksize_nomask(p) <= DEFAULT_MMAP_THRESHOLD_MAX)
        {
          mp_.mmap_threshold = chunksize(p);
          mp_.trim_threshold = 2 * mp_.mmap_threshold;
          LIBC_PROBE(memory_mallopt_free_dyn_thresholds, 2,
                     mp_.mmap_threshold, mp_.trim_threshold);
        }
      munmap_chunk(p);
    }
  else
    {
      // initialize this thread's tcache if it is not set up yet
      MAYBE_INIT_TCACHE();

      // re-tag the region (memory tagging)
      /* Mark the chunk as belonging to the library again. */
      (void)tag_region(chunk2mem(p), memsize(p));

      // look up the arena that owns this chunk
      ar_ptr = arena_for_chunk(p);

      // do the actual free
      _int_free(ar_ptr, p, 0);
    }

  __set_errno(err);
}
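For reference, mem2chunk/chunk2mem just step over the chunk header: the pointer malloc() returns sits CHUNK_HDR_SZ (two size_t words, 16 bytes on x86-64 without memory tagging) past the start of the chunk. The sketch below is a simplified 64-bit model of that layout; the struct and helper names are illustrative stand-ins, not glibc's actual declarations.

```c
#include <stdio.h>
#include <stddef.h>

/* Simplified model of a malloc chunk header on 64-bit (no memory tagging).
   Field names follow glibc's struct malloc_chunk, but this is a teaching
   sketch, not the real declaration. */
struct chunk_hdr {
  size_t prev_size;  /* only meaningful when the previous chunk is free    */
  size_t size;       /* chunk size, with A/M/P flag bits in the low 3 bits */
};

#define HDR_SZ (2 * sizeof(size_t))  /* plays the role of CHUNK_HDR_SZ (16 bytes) */

/* what mem2chunk / chunk2mem do: step back/forward over the header */
static void *my_mem2chunk(void *mem)   { return (char *)mem - HDR_SZ; }
static void *my_chunk2mem(void *chunk) { return (char *)chunk + HDR_SZ; }

int main(void)
{
  unsigned char region[64] = {0};
  void *chunk = region;             /* pretend this is a chunk header       */
  void *mem = my_chunk2mem(chunk);  /* pointer malloc() would hand out      */

  printf("chunk=%p mem=%p back=%p\n", chunk, mem, my_mem2chunk(mem));
  return 0;
}
```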
_int_free
  /*
    If eligible, place chunk on a fastbin so it can be found
    and used quickly in malloc.
  */
  // if the size is in fastbin range, put the chunk on a fastbin for fast reuse
  if ((unsigned long)(size) <= (unsigned long)(get_max_fast())
#if TRIM_FASTBINS
      /*
        If TRIM_FASTBINS set, don't place chunks
        bordering top into fastbins
      */
      // the chunk immediately after p must not be the top chunk
      && (chunk_at_offset(p, size) != av->top)
#endif
     )
    {
      // sanity check on the next chunk's size: error out if it is not larger
      // than the chunk header size, or if it exceeds the arena's system memory
      if (__builtin_expect(chunksize_nomask(chunk_at_offset(p, size)) <= CHUNK_HDR_SZ, 0) || __builtin_expect(chunksize(chunk_at_offset(p, size)) >= av->system_mem, 0))
        {
          bool fail = true;
          /* We might not have a lock at this point and concurrent modifications
             of system_mem might result in a false positive. Redo the test after
             getting the lock. */
          if (!have_lock)
            {
              __libc_lock_lock(av->mutex);
              fail = (chunksize_nomask(chunk_at_offset(p, size)) <= CHUNK_HDR_SZ || chunksize(chunk_at_offset(p, size)) >= av->system_mem);
              __libc_lock_unlock(av->mutex);
            }
          if (fail)
            malloc_printerr("free(): invalid next size (fast)");
        }

      free_perturb(chunk2mem(p), size - CHUNK_HDR_SZ);

      // record that the arena now has fastbin chunks
      atomic_store_relaxed(&av->have_fastchunks, true);

      // compute the fastbin index and fetch the matching fastbin head
      unsigned int idx = fastbin_index(size);
      fb = &fastbin(av, idx);

      /* Atomically link P to its fastbin: P->FD = *FB; *FB = P; */
      // link p at the head of the fastbin
      mchunkptr old = *fb, old2;

      // single-threaded case
      if (SINGLE_THREAD_P)
        {
          /* Check that the top of the bin is not the record we are going to
             add (i.e., double free). */
          // double-free check: error out if the chunk being freed is already
          // at the head of this fastbin (only the first entry is compared)
          if (__builtin_expect(old == p, 0))
            malloc_printerr("double free or corruption (fasttop)");
          // pointer mangling (safe-linking)
          p->fd = PROTECT_PTR(&p->fd, old);
          *fb = p;
        }
      else // multi-threaded case
        do
          {
            /* Check that the top of the bin is not the record we are going to
               add (i.e., double free). */
            // same double-free check as above
            if (__builtin_expect(old == p, 0))
              malloc_printerr("double free or corruption (fasttop)");
            old2 = old;
            p->fd = PROTECT_PTR(&p->fd, old);
          } while ((old = catomic_compare_and_exchange_val_rel(fb, p, old2)) != old2);

      /* Check that size of fastbin chunk at the top is the same as
         size of the chunk that we are adding. We can dereference OLD
         only if we have the lock, otherwise it might have already been
         allocated again. */
      if (have_lock && old != NULL && __builtin_expect(fastbin_index(chunksize(old)) != idx, 0))
        malloc_printerr("invalid fastbin entry (free)");
    }
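The PROTECT_PTR mangling used above is glibc's safe-linking scheme (introduced in 2.32): the stored fd is XORed with the address it is stored at, shifted right by 12 bits, and applying the same operation again reverses it. The macro bodies below match the upstream definitions; the demo program around them is only an illustration (the 0xdeadbeef000 head value is made up).

```c
#include <stdio.h>
#include <stddef.h>

/* Same formula as glibc's safe-linking macros (malloc/malloc.c, >= 2.32):
   the stored pointer is XORed with (storage address >> 12). */
#define PROTECT_PTR(pos, ptr) \
  ((__typeof(ptr))((((size_t)pos) >> 12) ^ ((size_t)ptr)))
#define REVEAL_PTR(ptr) PROTECT_PTR(&ptr, ptr)

int main(void)
{
  /* pretend these are two slots living on the heap */
  void *fd_slot = NULL;                    /* p->fd of the chunk being freed  */
  void *old_head = (void *)0xdeadbeef000;  /* hypothetical old fastbin head   */

  /* free(): link the chunk in with a mangled pointer */
  fd_slot = PROTECT_PTR(&fd_slot, old_head);

  /* malloc(): recover the next chunk by demangling */
  void *next = REVEAL_PTR(fd_slot);

  printf("stored (mangled): %p\n", fd_slot);
  printf("revealed        : %p (should equal %p)\n", next, old_head);
  return 0;
}
```

One consequence worth noting: when the bin is empty, old is NULL, so the stored value is simply the storage address shifted right by 12 bits, which is why leaking a single mangled fd of an empty fastbin or tcache bin reveals the heap's page-aligned position.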
What this path does:

- If the tcache bin is full, check whether the chunk qualifies for a fastbin: its size must be at most the fastbin maximum, and, if TRIM_FASTBINS is enabled, the chunk immediately following it must not be the top chunk. If it qualifies, continue.
- Check the size of the chunk immediately following: it must be larger than the chunk header size (CHUNK_HDR_SZ) and smaller than the arena's system_mem, otherwise abort with an error.
- Compute the fastbin index and fetch the corresponding fastbin head pointer.
- Double-free check: if the chunk at the head of the fastbin is the same chunk being freed, abort with an error.
- Mangle the pointer: XOR the address of the current chunk's fd field, shifted right by 12 bits, with the value currently stored in the fastbin.

The next-chunk size check and the fasttop double-free check are the two security checks to care about here: forge a fake chunk that passes both and it can be freed into a fastbin, as the sketch after this list illustrates.
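To make that concrete, here is a user-space sketch in the spirit of House of Spirit: it lays out a fake chunk whose own size and whose "next" chunk's size satisfy the two checks above. The 0x40 sizes, the region buffer, and the FAKE_SYSTEM_MEM/GLOBAL_MAX_FAST constants are made-up example values; the program only verifies the preconditions and does not call free(), since on current glibc the pointer would also have to be correctly aligned and get past the tcache path first.

```c
#include <stdio.h>
#include <stddef.h>
#include <stdbool.h>

#define SIZE_SZ          sizeof(size_t)
#define CHUNK_HDR_SZ     (2 * SIZE_SZ)
#define FAKE_SYSTEM_MEM  0x21000UL  /* stand-in for av->system_mem      */
#define GLOBAL_MAX_FAST  0x80UL     /* typical get_max_fast() on 64-bit */

int main(void)
{
  /* a writable region we control; the fake chunk lives inside it */
  static size_t region[0x40] = {0};

  size_t fake_size = 0x40;                 /* fake chunk size, <= max fast */
  region[1] = fake_size;                   /* fake chunk's size field      */
  region[fake_size / SIZE_SZ + 1] = 0x40;  /* "next" chunk's size field    */

  size_t next_size = region[fake_size / SIZE_SZ + 1];

  /* the two checks _int_free performs on the fastbin path */
  bool size_ok = fake_size <= GLOBAL_MAX_FAST;
  bool next_ok = next_size > CHUNK_HDR_SZ && next_size < FAKE_SYSTEM_MEM;

  /* same formula as glibc's fastbin_index() on 64-bit: (size >> 4) - 2 */
  unsigned idx = ((unsigned)fake_size >> 4) - 2;

  printf("fastbin size ok: %d, next size ok: %d, would land in fastbin[%u]\n",
         size_ok, next_ok, idx);
  /* If both checks hold, free(&region[2]) would reach the fastbin link step
     (ignoring the tcache path and the alignment check in this sketch). */
  return 0;
}
```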