Analysis: Tcache under glibc 2.29 and bypassing the double-free check
By
February 14, 2024
Tcache Struct
To begin with, in glibc 2.29 the tcache bins are managed by two
structures: tcache_entry and tcache_perthread_struct.
tcache_entry
glibc 2.29 added a key field to tcache_entry, which helps the
allocator detect double frees.
/* glibc >= 2.29 tcache node, laid out at the start of a free chunk's
   user-data area. */
typedef struct tcache_entry
{
/* Next free chunk of the same size (singly linked list). */
struct tcache_entry *next;
/* This field exists to detect double frees. */
struct tcache_perthread_struct *key;
} tcache_entry;
The tcache_entry structure links free chunks of the same size
together: its next pointer points to the next free chunk in the bin.
Note that next points to the chunk's user data, while a fastbin's fd
points to the beginning of the chunk header.
tcache_perthread_struct
/* One per thread: manages all 64 tcache bins for that thread. */
typedef struct tcache_perthread_struct
{
/* Number of cached chunks in each bin (at most tcache_count = 7). */
char counts[TCACHE_MAX_BINS]; /*TCACHE_MAX_BINS = 64*/
/* Head of each bin's singly linked list of tcache_entry nodes. */
tcache_entry *entries[TCACHE_MAX_BINS];
} tcache_perthread_struct;
As its name suggests, each thread allocates one chunk for this
structure; its role is to manage the whole set of tcache bin lists.
counts[] records the number of free chunks on each tcache_entry
chain — each bin can hold at most 7 chunks — and tcache_entry links
the free chunks into a singly linked list.
How It Works
It starts at malloc:
/* Excerpt from glibc __libc_malloc: the tcache fast path runs before
   the normal allocator path.  NOTE(review): the return type line and
   the rest of the function body are elided in this excerpt. */
__libc_malloc (size_t bytes)
{
// codes............
#if USE_TCACHE
/* int_free also calls request2size, be careful to not pad twice. */
/* tbytes = padded chunk size for this request; tc_idx = its tcache bin. */
size_t tbytes;
checked_request2size (bytes, tbytes);
size_t tc_idx = csize2tidx (tbytes);
/* Lazily sets up the per-thread tcache on first use (see below). */
MAYBE_INIT_TCACHE ();
/*Call to MAYBE_INIT_TCACHE()*/
When we call malloc for the first time, we enter MAYBE_INIT_TCACHE()
to initialize the tcache:
/* Lazily create the per-thread tcache on the first malloc call.
   NOTE: a multi-line #define needs a backslash at the very end of each
   continued line; the original excerpt dropped them, leaving the macro
   body empty and the `if` as a stray file-scope statement. */
# define MAYBE_INIT_TCACHE() \
  if (__glibc_unlikely (tcache == NULL)) \
    tcache_init();
tcache is first checked here, and if it does not exist yet, the
tcache_init() function is executed:
/* Excerpt from glibc: allocate and zero the per-thread
   tcache_perthread_struct itself via _int_malloc.
   NOTE(review): glibc declares this as `static void tcache_init (void)`;
   the return-type line was dropped in this excerpt. */
tcache_init(void)
{
mstate ar_ptr;
void *victim = 0;
const size_t bytes = sizeof (tcache_perthread_struct);
if (tcache_shutting_down)
return;
/* Pick an arena and try to allocate from it; on failure, retry once
   with another arena. */
arena_get (ar_ptr, bytes);
victim = _int_malloc (ar_ptr, bytes);
if (!victim && ar_ptr != NULL)
{
ar_ptr = arena_get_retry (ar_ptr, bytes);
victim = _int_malloc (ar_ptr, bytes);
}
if (ar_ptr != NULL)
__libc_lock_unlock (ar_ptr->mutex);
/* In a low memory situation, we may not be able to allocate memory
- in which case, we just keep trying later. However, we
typically do this very early, so either there is sufficient
memory, or there isn't enough memory to do non-trivial
allocations anyway. */
if (victim)
{
/* Publish the new tcache and zero all counts and entry heads. */
tcache = (tcache_perthread_struct *) victim;
memset (tcache, 0, sizeof (tcache_perthread_struct));
}
}
Here the tcache_perthread_struct is allocated. Continuing after
MAYBE_INIT_TCACHE():
/* Excerpt from __libc_malloc, continuing after MAYBE_INIT_TCACHE():
   if the matching tcache bin is non-empty, serve the request from it
   immediately via tcache_get(). */
DIAG_PUSH_NEEDS_COMMENT;
if (tc_idx < mp_.tcache_bins
/*&& tc_idx < TCACHE_MAX_BINS*/ /* to appease gcc */
&& tcache
&& tcache->entries[tc_idx] != NULL)
{
return tcache_get (tc_idx);
}
DIAG_POP_NEEDS_COMMENT;
First, malloc checks whether the tcache set up in tcache_init() is
non-NULL and whether tcache->entries[tc_idx] is non-empty. Since
tc_idx appears here, let us show how it is calculated:
tc_idx calculation
malloc computes tc_idx as:
/* tc_idx: tcache bin index derived from the padded request size. */
size_t tbytes;
checked_request2size (bytes, tbytes);
size_t tc_idx = csize2tidx (tbytes);
The definition of csize2tidx() and the related macros:
#if USE_TCACHE
/* When "x" is from chunksize(). */
/* Maps a chunk size to its tcache bin index.  On 64-bit this is
   (x - 0x20 + 0xf) / 0x10 — note +0xf (MALLOC_ALIGNMENT - 1), not +0x9
   as the original annotation claimed. */
# define csize2tidx(x) (((x) - MINSIZE + MALLOC_ALIGNMENT - 1) / MALLOC_ALIGNMENT)
/* Supporting definitions: */
#define SIZE_SZ (sizeof (size_t))
/* On 64-bit, 2 * SIZE_SZ == 16 == __alignof__ (long double), so the
   comparison is false and this evaluates to 0x10.  (32-bit untested.)
   NOTE(review): the original excerpt placed comments after the `\`
   continuation, which breaks line splicing (the backslash must be the
   last character on the line); the comments were moved up here. */
#define MALLOC_ALIGNMENT (2 * SIZE_SZ < __alignof__ (long double) \
                          ? __alignof__ (long double) : 2 * SIZE_SZ)
#define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)
/* The smallest possible chunk */
#define MIN_CHUNK_SIZE (offsetof(struct malloc_chunk, fd_nextsize)) /*0x20*/
/* The smallest size we can malloc is an aligned minimal chunk */
#define MINSIZE \
  (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)) /* (0x20 + 0xf) & ~0xf = 0x20 */
/* Pad a malloc(req) request up to a usable, aligned chunk size. */
#define request2size(req) \
  (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE) ? \
   MINSIZE : \
   ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
tc_idx = (chunk size - 0x20 + 0xf) / 0x10
We can also use how2heap
to calculate it:
➜ calc ./calc_tca
This file doesn't demonstrate an attack, but calculates the tcache idx for a given chunk size.
The basic formula is as follows:
IDX = (CHUNKSIZE - MINSIZE + MALLOC_ALIGNMENT - 1) / MALLOC_ALIGNMENT
On a 64 bit system the current values are:
MINSIZE: 0x20
MALLOC_ALIGNMENT: 0x10
So we get the following equation:
IDX = (CHUNKSIZE - 0x11) / 0x10
BUT be AWARE that CHUNKSIZE is not the x in malloc(x)
It is calculated as follows:
IF x + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE(0x20) CHUNKSIZE = MINSIZE (0x20)
ELSE: CHUNKSIZE = (x + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
=> CHUNKSIZE = (x + 0x8 + 0xf) & ~0xf
[CTRL-C to exit] Please enter a size x (malloc(x)) in hex (e.g. 0x10): 0x10
TCache Idx: 0
[CTRL-C to exit] Please enter a size x (malloc(x)) in hex (e.g. 0x10): 0x25
TCache Idx: 1
[CTRL-C to exit] Please enter a size x (malloc(x)) in hex (e.g. 0x10): 0x23
TCache Idx: 1
[CTRL-C to exit] Please enter a size x (malloc(x)) in hex (e.g. 0x10): 0x40
TCache Idx: 3
[CTRL-C to exit] Please enter a size x (malloc(x)) in hex (e.g. 0x10):
tcache_put and tcache_get
/* glibc 2.29: push a freed chunk onto the front of its tcache bin.
   `e` points at the chunk's USER DATA (chunk2mem), unlike a fastbin fd,
   which points at the chunk header.
   NOTE(review): a stray lone `\` line (an extraction artifact) was
   removed from the middle of this excerpt. */
static __always_inline void
tcache_put (mchunkptr chunk, size_t tc_idx)
{
  tcache_entry *e = (tcache_entry *) chunk2mem (chunk);
  assert (tc_idx < TCACHE_MAX_BINS);
  /* Mark this chunk as "in the tcache" so the test in _int_free will
     detect a double free. */
  e->key = tcache;
  e->next = tcache->entries[tc_idx];
  tcache->entries[tc_idx] = e;
  ++(tcache->counts[tc_idx]);
}
/* Caller must ensure that we know tc_idx is valid and there's
available chunks to remove. */
/* glibc 2.29: pop the head chunk of tcache bin tc_idx and return its
   user-data pointer. */
static __always_inline void *
tcache_get (size_t tc_idx)
{
tcache_entry *e = tcache->entries[tc_idx];
assert (tc_idx < TCACHE_MAX_BINS);
assert (tcache->entries[tc_idx] > 0);
tcache->entries[tc_idx] = e->next;
--(tcache->counts[tc_idx]);
/* Chunk is leaving the tcache: clear the key so a future free() of it
   is not flagged as a double free. */
e->key = NULL;
return (void *) e;
}
The two sibling helpers tcache_put() and tcache_get() perform the
simple operations of adding a chunk to a tcache bin list and taking
one out, respectively.
About the key checks
Under glibc 2.29, new checks targeting double frees were added:
/* Excerpt from glibc 2.29 _int_free: the key-based double-free check.
   `p` is the chunk being freed and `size` its chunk size (both defined
   earlier in _int_free, outside this excerpt). */
// #if USE_TCACHE
{
size_t tc_idx = csize2tidx (size);
if (tcache != NULL && tc_idx < mp_.tcache_bins)
{
/* Check to see if it's already in the tcache. */
tcache_entry *e = (tcache_entry *) chunk2mem (p);
/* This test succeeds on double free. However, we don't 100%
trust it (it also matches random payload data at a 1 in
2^<size_t> chance), so verify it's not an unlikely
coincidence before aborting. */
if (__glibc_unlikely (e->key == tcache))
{
tcache_entry *tmp;
LIBC_PROBE (memory_tcache_double_free, 2, e, tc_idx);
/* Walk the whole bin; abort only if this exact chunk is found. */
for (tmp = tcache->entries[tc_idx];
tmp;
tmp = tmp->next)
if (tmp == e)
malloc_printerr ("free(): double free detected in tcache 2");
/* If we get here, it was a coincidence. We've wasted a
few cycles, but don't abort. */
}
/* Room left in this bin (fewer than tcache_count = 7): cache it. */
if (tcache->counts[tc_idx] < mp_.tcache_count)
{
tcache_put (p, tc_idx);
return;
}
}
}
// #endif
As you can see, when e->key == tcache, glibc traverses the entire
tcache bin list, which means that on free() it walks the list to see
whether the identical chunk is already there.
How To Bypass
If you are a big fan of double free but saddened by glibc 2.29, there
is in fact an attack technique called House of Poortho. I came across
it in a previous picoCTF challenge called zero2hero. That program has
an off-by-null bug, and the main idea is roughly this: use the
off-by-null to overflow a single byte and modify a chunk's size, so
that the computed tc_idx differs even though the chunk address is the
same. The chunk then gets stored on a different tcache->entries[tc_idx]
chain, so the double-free traversal never encounters the identical
chunk.