diff --git a/libbpf-tools/.gitignore b/libbpf-tools/.gitignore
index 5f28c587db05..564db2344f0f 100644
--- a/libbpf-tools/.gitignore
+++ b/libbpf-tools/.gitignore
@@ -13,6 +13,7 @@
 /cachestat
 /cpudist
 /cpufreq
+/doublefree
 /drsnoop
 /execsnoop
 /exitsnoop
diff --git a/libbpf-tools/Makefile b/libbpf-tools/Makefile
index cd72d1734718..f5d5f10e8074 100644
--- a/libbpf-tools/Makefile
+++ b/libbpf-tools/Makefile
@@ -46,6 +46,7 @@ APPS = \
 	cachestat \
 	cpudist \
 	cpufreq \
+	doublefree \
 	drsnoop \
 	execsnoop \
 	exitsnoop \
diff --git a/libbpf-tools/doublefree.bpf.c b/libbpf-tools/doublefree.bpf.c
new file mode 100644
index 000000000000..5d2aa505915a
--- /dev/null
+++ b/libbpf-tools/doublefree.bpf.c
@@ -0,0 +1,219 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/* Copyright (c) 2022 LG Electronics */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include <bpf/bpf_tracing.h>
+#include "doublefree.h"
+
+const volatile bool kernel_threads_only = false;
+
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__uint(max_entries, MAX_ENTRIES);
+	__type(key, u64);
+	__type(value, struct doublefree_info_t);
+} allocs SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__uint(max_entries, MAX_ENTRIES);
+	__type(key, u64);
+	__type(value, u64);
+} memptrs SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
+	__uint(max_entries, MAX_ENTRIES);
+	__uint(key_size, sizeof(u32));
+} stack_traces SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__uint(max_entries, MAX_ENTRIES);
+	__type(key, u64);
+	__type(value, u32);
+} first_deallocs SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__uint(max_entries, 10);
+	__type(key, u64);
+	__type(value, u32);
+} second_deallocs SEC(".maps");
+
+int gen_alloc_exit2(struct pt_regs *ctx, u64 address)
+{
+	struct doublefree_info_t info = {0, };
+
+	if (address != 0) {
+		info.stack_id = bpf_get_stackid(ctx, &stack_traces,
+				kernel_threads_only ?
0 : BPF_F_USER_STACK); + info.is_available = 1; + bpf_map_update_elem(&allocs, &address, &info, BPF_ANY); + } + return 0; +} + +int gen_alloc_exit(struct pt_regs *ctx) +{ + return gen_alloc_exit2(ctx, PT_REGS_RC(ctx)); +} + +int gen_free_enter(struct pt_regs *ctx, void *address) +{ + int stack_id = 0; + u64 addr = (u64)address; + struct doublefree_info_t *info = bpf_map_lookup_elem(&allocs, &addr); + + if (!info) + return 0; + + info->is_available -= 1; + stack_id = bpf_get_stackid(ctx, &stack_traces, + kernel_threads_only ? 0 : BPF_F_USER_STACK); + if (info->is_available == 0) { + bpf_map_update_elem(&first_deallocs, &addr, &stack_id, BPF_ANY); + } else if (info->is_available < 0) { + bpf_map_update_elem(&second_deallocs, &addr, &stack_id, BPF_ANY); + } else { + bpf_printk("This code should not be printed at any rate\n"); + bpf_printk("Please check if something goes wrong\n"); + } + return 0; +} + +SEC("kretprobe/dummy_malloc") +int BPF_KRETPROBE(malloc_return) +{ + return gen_alloc_exit(ctx); +} + +SEC("kprobe/dummy_free") +int BPF_KPROBE(free_entry, void *address) +{ + return gen_free_enter(ctx, address); +} + +SEC("kretprobe/dummy_calloc") +int BPF_KRETPROBE(calloc_return) +{ + return gen_alloc_exit(ctx); +} + +SEC("kretprobe/dummy_realloc") +int BPF_KRETPROBE(realloc_return) +{ + return gen_alloc_exit(ctx); +} + +SEC("kprobe/dummy_posix_memalign") +int BPF_KPROBE(posix_memalign_entry, void **memptr, size_t alignment, size_t size) +{ + u64 memptr64 = (u64)(size_t)memptr; + u64 pid = bpf_get_current_pid_tgid(); + + bpf_map_update_elem(&memptrs, &pid, &memptr64, BPF_ANY); + return 0; +} + +SEC("kretprobe/dummy_posix_memalign") +int BPF_KRETPROBE(posix_memalign_return) +{ + void *addr = NULL; + u64 pid = bpf_get_current_pid_tgid(); + u64 *memptr64 = bpf_map_lookup_elem(&memptrs, &pid); + + if (memptr64 == 0) + return 0; + + bpf_map_delete_elem(&memptrs, &pid); + if (bpf_probe_read_user(&addr, sizeof(void *), (void *)(size_t)*memptr64)) + return 0; + + u64 addr64 = 
(u64)(size_t)addr; + return gen_alloc_exit2(ctx, addr64); +} + +SEC("kretprobe/dummy_aligned_alloc") +int BPF_KRETPROBE(aligned_alloc_return) +{ + return gen_alloc_exit(ctx); +} + +SEC("kretprobe/dummy_valloc") +int BPF_KRETPROBE(valloc_return) +{ + return gen_alloc_exit(ctx); +} + +SEC("kretprobe/dummy_memalign") +int BPF_KRETPROBE(memalign_return) +{ + return gen_alloc_exit(ctx); +} + +SEC("kretprobe/dummy_pvalloc") +int BPF_KRETPROBE(pvalloc_return) +{ + return gen_alloc_exit(ctx); +} + +SEC("kretprobe/dummy_reallocarray") +int BPF_KRETPROBE(reallocarray_return) +{ + return gen_alloc_exit(ctx); +} + +/* TODO: Can be enabled only on arm32 and arm64 */ +#if 0 +SEC("tracepoint/kmem/kmalloc") +int tracepoint__kmem__kmalloc(struct trace_event_raw_kmem_alloc *args) +{ + return gen_alloc_exit2((struct pt_regs *)args, (__u64)args->ptr); +} + +SEC("tracepoint/kmem/kmalloc_node") +int tracepoint__kmem__kmalloc_node(struct trace_event_raw_kmem_alloc_node *args) +{ + return gen_alloc_exit2((struct pt_regs *)args, (__u64)args->ptr); +} + +SEC("tracepoint/kmem/kfree") +int tracepoint__kmem__kfree(struct trace_event_raw_kmem_free *args) +{ + return gen_free_enter((struct pt_regs *)args, (void *)args->ptr); +} + +SEC("tracepoint/kmem/kmem_cache_alloc") +int tracepoint__kmem__kmem_cache_alloc(struct trace_event_raw_kmem_alloc *args) +{ + return gen_alloc_exit2((struct pt_regs *)args, (__u64)args->ptr); +} + +SEC("tracepoint/kmem/kmem_cache_alloc_node") +int tracepoint__kmem__kmem_cache_alloc_node(struct trace_event_raw_kmem_alloc_node *args) +{ + return gen_alloc_exit2((struct pt_regs *)args, (__u64)args->ptr); +} + +SEC("tracepoint/kmem/kmem_cache_free") +int tracepoint__kmem__kmem_cache_free(struct trace_event_raw_kmem_free *args) +{ + return gen_free_enter((struct pt_regs *)args, (void *)args->ptr); +} + +SEC("tracepoint/kmem/mm_page_alloc") +int tracepoint__kmem__mm_page_alloc(struct trace_event_raw_mm_page_alloc *args) +{ + return gen_alloc_exit2((struct pt_regs *)args, 
args->pfn); +} + +SEC("tracepoint/kmem/mm_page_free") +int tracepoint__kmem__mm_page_free(struct trace_event_raw_mm_page_free *args) +{ + return gen_free_enter((struct pt_regs *)args, (void *)args->pfn); +} +#endif + +char _license[] SEC("license") = "GPL"; diff --git a/libbpf-tools/doublefree.c b/libbpf-tools/doublefree.c new file mode 100644 index 000000000000..def37cdde51e --- /dev/null +++ b/libbpf-tools/doublefree.c @@ -0,0 +1,690 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ +/* Copyright (c) 2022 LG Electronics */ + +// 19-Oct-2022 Bojun Seo Created this. +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "doublefree.h" +#include "doublefree.skel.h" +#include "trace_helpers.h" +#include "uprobe_helpers.h" + +enum log_level { + DEBUG, + INFO, + WARN, + ERROR, +}; + +void __p(enum log_level level, char *level_str, char *fmt, ...); +void set_log_level(enum log_level level); + +#define p_debug(fmt, ...) __p(DEBUG, "Debug", fmt, ##__VA_ARGS__) +#define p_info(fmt, ...) __p(INFO, "Info", fmt, ##__VA_ARGS__) +#define p_warn(fmt, ...) __p(WARN, "Warn", fmt, ##__VA_ARGS__) +#define p_err(fmt, ...) __p(ERROR, "Error", fmt, ##__VA_ARGS__) + +pid_t execute_process(char *); + +#define ON_MEM_FAILURE(buf) \ +if (NULL == buf) { \ + p_err("FATAL: Failed to allocate memory on %s", __func__); \ + exit(-1); \ +} + +/* default */ +static enum log_level log_level = ERROR; + +void sig_handler(int signo) +{ + if (signo == SIGINT || signo == SIGTERM) + kill(0, SIGKILL); +} + +void __p(enum log_level level, char *level_str, char *fmt, ...) 
+{ + va_list ap; + + if (level < log_level) + return; + va_start(ap, fmt); + fprintf(stderr, "%s: ", level_str); + vfprintf(stderr, fmt, ap); + fprintf(stderr, "\n"); + va_end(ap); + fflush(stderr); +} + +void set_log_level(enum log_level level) +{ + log_level = level; +} + +pid_t execute_process(char *cmd) +{ + const char *delim = " "; + char **argv, *ptr, *filepath; + pid_t pid = 0; + int i; + + if (cmd == NULL) { + p_warn("Invalid argument: command not found"); + exit(-1); + } else + p_info("Execute child process: %s", cmd); + + argv = calloc(sizeof(char *), strlen(cmd)); + if (argv == NULL) { + p_err("failed to alloc memory"); + goto cleanup; + } + + ptr = strtok(cmd, delim); + if (ptr != NULL) { + filepath = ptr; + ptr = strtok(NULL, delim); + } else { + p_err("failed to exec %s", cmd); + goto cleanup; + } + + i = 0; + argv[i++] = filepath; + while (ptr != NULL) { + argv[i++] = ptr; + ptr = strtok(NULL, delim); + } + + pid = fork(); + if (pid == 0) { + execve(filepath, argv, NULL); + } else if (pid > 0) { + signal(SIGINT, sig_handler); + free(argv); + return pid; + } else { + p_err("failed to exec %s", cmd); + exit(-1); + } + +cleanup: + free(argv); + return -1; +} + +#define STACK_MAX 127 +#define BUF_MAX (STACK_MAX * LINE_MAX * 2) + +static struct env { + /* main process's tid */ + pid_t pid; + /* thread group id(pid) */ + pid_t tgid; + bool kernel_threads_only; + int stack_storage_size; + int perf_max_stack_depth; + /* unit: second */ + int interval; + int top; + bool verbose; + char* command; +} env = { + .pid = -1, + .tgid = -1, + .kernel_threads_only = false, + .stack_storage_size = MAX_ENTRIES, + .perf_max_stack_depth = STACK_MAX, + .interval = 10, + .top = -1, + .verbose = false, + .command = NULL, +}; + +const char *argp_program_version = "doublefree 0.1"; +const char *argp_program_bug_address = + "https://github.com/iovisor/bcc/tree/master/libbpf-tools"; +const char argp_program_doc[] = "Detect and report double free error.\n" +"\n" +"-c or -p or -k 
is a mandatory option\n" +"EXAMPLES:\n" +" doublefree -p 1234 # Detect doublefree on process id 1234\n" +" doublefree -c a.out # Detect doublefree on a.out\n" +" doublefree -c 'a.out arg' # Detect doublefree on a.out with argument\n" +" doublefree -c \"a.out arg\" # Detect doublefree on a.out with argument\n"; +static const struct argp_option opts[] = { + { "verbose", 'v', NULL, 0, "Verbose debug output" }, + { "help", 'h', NULL, OPTION_HIDDEN, "Show the full help" }, + { "kernel", 'k', NULL, 0, "Kernel threads only (no user threads)" }, + { "pid", 'p', "PID", 0, "Set pid" }, + { "command", 'c', "COMMAND", 0, "Execute and trace the specified command" }, + { "interval", 'i', "INTERVAL", 0, "Set interval in second to detect leak" }, + { "top", 'T', "TOP", 0, "Report only specified amount of backtraces"}, + {}, +}; + +static struct doublefree_bpf *obj = NULL; + +static int libbpf_print_fn(enum libbpf_print_level level, + const char *format, va_list args) +{ + if (level == LIBBPF_DEBUG && !env.verbose) + return 0; + return vfprintf(stderr, format, args); +} + +static error_t parse_arg(int key, char *arg, struct argp_state *state) +{ + switch (key) { + case 'h': + argp_state_help(state, stderr, ARGP_HELP_STD_HELP); + break; + case 'v': + env.verbose = true; + break; + case 'p': + errno = 0; + env.pid = strtol(arg, NULL, 10); + if (errno || env.pid <= 0) { + p_err("Invalid PID: %s", arg); + argp_usage(state); + } + break; + case 'k': + env.kernel_threads_only = true; + break; + case 'c': + env.command = strdup(arg); + ON_MEM_FAILURE(env.command); + break; + case 'i': + errno = 0; + env.interval = strtol(arg, NULL, 10); + if (errno || env.interval <= 0) { + p_err("Invalid interval: %s", arg); + argp_usage(state); + } + break; + case 'T': + errno = 0; + env.top = strtol(arg, NULL, 10); + if (errno || env.top <= 0) { + p_err("Invalid interval: %s", arg); + argp_usage(state); + } + break; + default: + return ARGP_ERR_UNKNOWN; + } + return 0; +} + +static int 
attach_uprobes(struct doublefree_bpf *obj) +{ + int err = 0; + char libc_path[PATH_MAX] = {0, }; + off_t func_off = 0; + + err = get_pid_lib_path(1, "c", libc_path, PATH_MAX); + if (err) { + p_err("Failed to find libc.so, %d", err); + return err; + } + + /* malloc */ + func_off = get_elf_func_offset(libc_path, "malloc"); + if (func_off < 0) { + p_err("Failed to get func offset"); + return func_off; + } + obj->links.malloc_return = + bpf_program__attach_uprobe(obj->progs.malloc_return, + true, env.pid ? : -1, libc_path, func_off); + err = libbpf_get_error(obj->links.malloc_return); + if (err) { + p_err("Failed to attach malloc_return"); + return err; + } + + /* free */ + func_off = get_elf_func_offset(libc_path, "free"); + if (func_off < 0) { + p_err("Failed to get func offset"); + return func_off; + } + obj->links.free_entry = + bpf_program__attach_uprobe(obj->progs.free_entry, + false, env.pid ? : -1, libc_path, func_off); + err = libbpf_get_error(obj->links.free_entry); + if (err) { + p_err("Failed to attach free_entry"); + return err; + } + + /* calloc */ + func_off = get_elf_func_offset(libc_path, "calloc"); + if (func_off < 0) { + p_err("Failed to get func offset"); + return func_off; + } + obj->links.calloc_return = + bpf_program__attach_uprobe(obj->progs.calloc_return, + true, env.pid ? : -1, libc_path, func_off); + err = libbpf_get_error(obj->links.calloc_return); + if (err) { + p_err("Failed to attach calloc_return"); + return err; + } + + /* realloc */ + func_off = get_elf_func_offset(libc_path, "realloc"); + if (func_off < 0) { + p_err("Failed to get func offset"); + return func_off; + } + obj->links.realloc_return = + bpf_program__attach_uprobe(obj->progs.realloc_return, + true, env.pid ? 
: -1, libc_path, func_off); + err = libbpf_get_error(obj->links.realloc_return); + if (err) { + p_err("Failed to attach realloc_return"); + return err; + } + + /* posix_memalign */ + func_off = get_elf_func_offset(libc_path, "posix_memalign"); + if (func_off < 0) { + p_err("Failed to get func offset"); + return func_off; + } + obj->links.posix_memalign_entry = + bpf_program__attach_uprobe(obj->progs.posix_memalign_entry, + false, env.pid ? : -1, libc_path, func_off); + err = libbpf_get_error(obj->links.posix_memalign_entry); + if (err) { + p_err("Failed to attach posix_memalign_entry"); + return err; + } + obj->links.posix_memalign_return = + bpf_program__attach_uprobe(obj->progs.posix_memalign_return, + true, env.pid ? : -1, libc_path, func_off); + err = libbpf_get_error(obj->links.posix_memalign_return); + if (err) { + p_err("Failed to attach posix_memalign_return"); + return err; + } + + /* aligned_alloc */ + func_off = get_elf_func_offset(libc_path, "aligned_alloc"); + if (func_off < 0) { + p_err("Failed to get func offset"); + return func_off; + } + obj->links.aligned_alloc_return = + bpf_program__attach_uprobe(obj->progs.aligned_alloc_return, + true, env.pid ? : -1, libc_path, func_off); + err = libbpf_get_error(obj->links.aligned_alloc_return); + if (err) { + p_err("Failed to attach aligned_alloc_return"); + return err; + } + + /* valloc */ + func_off = get_elf_func_offset(libc_path, "valloc"); + if (func_off < 0) { + p_err("Failed to get func offset"); + return func_off; + } + obj->links.valloc_return = + bpf_program__attach_uprobe(obj->progs.valloc_return, + true, env.pid ? 
: -1, libc_path, func_off); + err = libbpf_get_error(obj->links.valloc_return); + if (err) { + p_err("Failed to attach valloc_return"); + return err; + } + + /* memalign */ + func_off = get_elf_func_offset(libc_path, "memalign"); + if (func_off < 0) { + p_err("Failed to get func offset"); + return func_off; + } + obj->links.memalign_return = + bpf_program__attach_uprobe(obj->progs.memalign_return, + true, env.pid ? : -1, libc_path, func_off); + err = libbpf_get_error(obj->links.memalign_return); + if (err) { + p_err("Failed to attach memalign_return"); + return err; + } + + /* pvalloc */ + func_off = get_elf_func_offset(libc_path, "pvalloc"); + if (func_off < 0) { + p_err("Failed to get func offset"); + return func_off; + } + obj->links.pvalloc_return = + bpf_program__attach_uprobe(obj->progs.pvalloc_return, + true, env.pid ? : -1, libc_path, func_off); + err = libbpf_get_error(obj->links.pvalloc_return); + if (err) { + p_err("Failed to attach pvalloc_return"); + return err; + } + + /* reallocarray */ + func_off = get_elf_func_offset(libc_path, "reallocarray"); + if (func_off < 0) { + p_err("Failed to get func offset"); + return func_off; + } + obj->links.reallocarray_return = + bpf_program__attach_uprobe(obj->progs.reallocarray_return, + true, env.pid ? 
: -1, libc_path, func_off); + err = libbpf_get_error(obj->links.reallocarray_return); + if (err) { + p_err("Failed to attach reallocarray_return"); + return err; + } + + return 0; +} + +static int attach_kprobes(struct doublefree_bpf* obj) +{ +#if defined(__arm__) || defined(__aarch64__) + long err = 0; + + /* kmalloc */ + obj->links.tracepoint__kmem__kmalloc = + bpf_program__attach(obj->progs.tracepoint__kmem__kmalloc); + err = libbpf_get_error(obj->links.tracepoint__kmem__kmalloc); + if (err) { + p_warn("failed to attach tracepoint: %s", strerror(-err)); + return -1; + } + + /* kmalloc_node */ + obj->links.tracepoint__kmem__kmalloc_node = + bpf_program__attach(obj->progs.tracepoint__kmem__kmalloc_node); + err = libbpf_get_error(obj->links.tracepoint__kmem__kmalloc_node); + if (err) { + p_warn("failed to attach tracepoint: %s", strerror(-err)); + return -1; + } + + /* kfree */ + obj->links.tracepoint__kmem__kfree = + bpf_program__attach(obj->progs.tracepoint__kmem__kfree); + err = libbpf_get_error(obj->links.tracepoint__kmem__kfree); + if (err) { + p_warn("failed to attach tracepoint: %s", strerror(-err)); + return -1; + } + + /* kmem_cache_alloc */ + obj->links.tracepoint__kmem__kmem_cache_alloc = + bpf_program__attach(obj->progs.tracepoint__kmem__kmem_cache_alloc); + err = libbpf_get_error(obj->links.tracepoint__kmem__kmem_cache_alloc); + if (err) { + p_warn("failed to attach tracepoint: %s", strerror(-err)); + return -1; + } + + /* kmem_cache_alloc_node */ + obj->links.tracepoint__kmem__kmem_cache_alloc_node = + bpf_program__attach(obj->progs.tracepoint__kmem__kmem_cache_alloc_node); + err = libbpf_get_error(obj->links.tracepoint__kmem__kmem_cache_alloc_node); + if (err) { + p_warn("failed to attach tracepoint: %s", strerror(-err)); + return -1; + } + + /* kmem_cache_free */ + obj->links.tracepoint__kmem__kmem_cache_free = + bpf_program__attach(obj->progs.tracepoint__kmem__kmem_cache_free); + err = libbpf_get_error(obj->links.tracepoint__kmem__kmem_cache_free); 
+ if (err) { + p_warn("failed to attach tracepoint: %s", strerror(-err)); + return -1; + } + + /* mm_page_alloc */ + obj->links.tracepoint__kmem__mm_page_alloc = + bpf_program__attach(obj->progs.tracepoint__kmem__mm_page_alloc); + err = libbpf_get_error(obj->links.tracepoint__kmem__mm_page_alloc); + if (err) { + p_warn("failed to attach tracepoint: %s", strerror(-err)); + return -1; + } + + /* mm_page_free */ + obj->links.tracepoint__kmem__mm_page_free = + bpf_program__attach(obj->progs.tracepoint__kmem__mm_page_free); + err = libbpf_get_error(obj->links.tracepoint__kmem__mm_page_free); + if (err) { + p_warn("failed to attach tracepoint: %s", strerror(-err)); + return -1; + } + +#endif + return 0; +} + +static int attach_probes(struct doublefree_bpf* obj) +{ + return env.kernel_threads_only ? attach_kprobes(obj) : attach_uprobes(obj); +} + +static pid_t get_tgid(const char* path) +{ + __u64 tmp = 0; + char buf[BUF_MAX] = {0, }; + FILE* f = fopen(path, "r"); + + if (!f) { + p_warn("Failed to open %s", path); + return 0; + } + while(fscanf(f, "%s %lld\n", buf, &tmp) != EOF) { + if (!strcmp(buf, "Tgid:")) { + fclose(f); + return tmp; + } + } + p_warn("Failed to get tgid: %s\n", path); + fclose(f); + return 0; +} + +static const char* demangle(const char* name) +{ + /* TODO + * return demangled function name + * return name if it's not mangled + */ + return name; +} + +static void printer(const struct syms* syms, struct ksyms *ksyms, int fd, unsigned long key) { + struct doublefree_info_t val; + const struct sym *sym = NULL; + const struct ksym *ksym = NULL; + char* dso_name = NULL; + uint64_t dso_offset = 0; + size_t i = 0; + int err = 0; + unsigned long *ip = NULL; + int sfd = bpf_map__fd(obj->maps.stack_traces); + + ip = calloc(env.perf_max_stack_depth, sizeof(*ip)); + if (!ip) { + p_err("Failed to alloc ip"); + return; + } + err = bpf_map_lookup_elem(fd, &key, &val); + if (err < 0) { + p_err("Failed to lookup elem on fd"); + free(ip); + return; + } + err = 
bpf_map_lookup_elem(sfd, &(val.stack_id), ip); + if (err < 0) { + p_err("Failed to lookup elem on sfd"); + free(ip); + return; + } + printf("stack_id: %d\n", val.stack_id); + for (i = 0; i < env.perf_max_stack_depth && ip[i]; ++i) { + printf("\t#%ld %#016lx", i+1, ip[i]); + if (!env.kernel_threads_only) { + sym = syms__map_addr_dso(syms, ip[i], &dso_name, &dso_offset); + if (sym) { + printf(" %s+%#lx", demangle(sym->name), sym->offset); + } + if (dso_name) { + printf(" (%s+%#lx)", dso_name, dso_offset); + } + } else { + ksym = ksyms__map_addr(ksyms, ip[i]); + if (ksym) { + printf(" %s+%lx", ksym->name, ip[i] - ksym->addr); + } + } + printf("\n"); + } + printf("\n"); + free(ip); +} + +static int check_doublefree(struct syms_cache *syms_cache, struct ksyms *ksyms) +{ + unsigned long lookup_key = 0; + unsigned long next_key = 0; + const struct syms *syms = NULL; + int idx = 1; + int afd = bpf_map__fd(obj->maps.allocs); + int firstfd = bpf_map__fd(obj->maps.first_deallocs); + int secondfd = bpf_map__fd(obj->maps.second_deallocs); + + if (!env.kernel_threads_only) { + syms = syms_cache__get_syms(syms_cache, env.pid); + } + lookup_key = -1; + while (!bpf_map_get_next_key(secondfd, &lookup_key, &next_key)) { + printf("#%d Found double free...\n", idx++); + if (syms != NULL || env.kernel_threads_only) { + printf("Allocation happended on "); + printer(syms, ksyms, afd, next_key); + printf("\nFirst deallocation happended on "); + printer(syms, ksyms, firstfd, next_key); + printf("\nSecond deallocation happended on "); + printer(syms, ksyms, secondfd, next_key); + } else { + p_warn("syms is null"); + } + lookup_key = next_key; + } + return 0; +} + +int main(int argc, char **argv) +{ + struct syms_cache *syms_cache = NULL; + struct ksyms *ksyms = NULL; + int err = 0; + char path[PATH_MAX] = {0, }; + + set_log_level(INFO); + static const struct argp argp = { + .options = opts, + .parser = parse_arg, + .doc = argp_program_doc, + }; + err = argp_parse(&argp, argc, argv, 0, NULL, 
NULL); + if (err) + return err; + + if (!env.kernel_threads_only) { +#ifdef __arm__ + p_err("doublefree user mode is not supported on arm32 systems"); + return 0; +#endif + } else { +#if !(defined(__arm__) || defined(__aarch64__)) + p_err("doublefree kernel mode supports arm32 and arm64 systems"); + return 0; +#endif + } + if (env.verbose) { + set_log_level(DEBUG); + } + if (env.command != NULL) { + env.pid = execute_process(env.command); + if (env.pid > 0) { + p_info("execute command: %s(pid %d)", env.command, env.pid); + } + } + if (!env.kernel_threads_only && env.pid == -1) { + p_err("-c or -p or -k is a mandatory option"); + return -1; + } + libbpf_set_print(libbpf_print_fn); + obj = doublefree_bpf__open(); + if (!obj) { + p_err("Failed to open BPF object"); + return -1; + } + obj->rodata->kernel_threads_only = env.kernel_threads_only; + bpf_map__set_value_size(obj->maps.stack_traces, + env.perf_max_stack_depth * sizeof(unsigned long long)); + bpf_map__set_max_entries(obj->maps.stack_traces, + env.stack_storage_size); + err = doublefree_bpf__load(obj); + if (err) { + p_err("Failed to load BPF object: %d", err); + return -1; + } + err = attach_probes(obj); + if (err) { + p_err("Failed to attach BPF programs"); + p_err("Is this process alive? pid: %d", env.pid); + return -1; + } + syms_cache = syms_cache__new(0); + if (!syms_cache) { + p_err("Failed to load syms"); + return -1; + } + if (!env.kernel_threads_only) { + snprintf(path, sizeof(path), "/proc/%d/status", env.pid); + env.tgid = get_tgid(path); + } else { + ksyms = ksyms__load(); + if (!ksyms) { + p_err("failed to load ksyms"); + return -1; + } + } + do { + sleep(env.interval); + if (!env.kernel_threads_only && getpgid(env.pid) < 0) { + p_warn("Is this process alive? 
pid: %d", env.pid); + } + } while (check_doublefree(syms_cache, ksyms) == 0); + + /* cleanup */ + syms_cache__free(syms_cache); + ksyms__free(ksyms); + doublefree_bpf__destroy(obj); + free(env.command); + return 0; +} diff --git a/libbpf-tools/doublefree.h b/libbpf-tools/doublefree.h new file mode 100644 index 000000000000..5b6411a53ff2 --- /dev/null +++ b/libbpf-tools/doublefree.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ +/* Copyright (c) 2022 LG Electronics */ +#ifndef __DOUBLEFREE_H +#define __DOUBLEFREE_H + +#define MAX_ENTRIES 65536 + +struct doublefree_info_t { + int stack_id; + int is_available; +}; + +#endif /* __DOUBLEFREE_H */