From d2b87419ac0930c69ee015c1c300468b99e8ad6e Mon Sep 17 00:00:00 2001
From: Tim Schumacher
Date: Sun, 5 Jun 2022 00:57:54 +0200
Subject: [PATCH] LibELF: Only collect region sizes before reserving memory

This keeps us from needlessly allocating `Vector` storage via `malloc`
that early, since those heap blocks might conflict with the address
range we are about to reserve for the main executable.
---
 Userland/Libraries/LibELF/DynamicLoader.cpp | 102 ++++++++++++--------
 1 file changed, 62 insertions(+), 40 deletions(-)

diff --git a/Userland/Libraries/LibELF/DynamicLoader.cpp b/Userland/Libraries/LibELF/DynamicLoader.cpp
index eb21cfc352a443..e6affe62e5f9da 100644
--- a/Userland/Libraries/LibELF/DynamicLoader.cpp
+++ b/Userland/Libraries/LibELF/DynamicLoader.cpp
@@ -263,49 +263,26 @@ void DynamicLoader::do_lazy_relocations()
 
 void DynamicLoader::load_program_headers()
 {
-    Vector<ProgramHeaderRegion> load_regions;
-    Vector<ProgramHeaderRegion> map_regions;
-    Vector<ProgramHeaderRegion> copy_regions;
-    Optional<ProgramHeaderRegion> tls_region;
-    Optional<ProgramHeaderRegion> relro_region;
-
-    VirtualAddress dynamic_region_desired_vaddr;
+    FlatPtr ph_load_start = SIZE_MAX;
+    FlatPtr ph_load_end = 0;
 
+    // We walk the program header list once to find the requested address ranges of the program.
+    // We don't fill in the list of regions yet to keep malloc memory blocks from interfering with our reservation.
     image().for_each_program_header([&](Image::ProgramHeader const& program_header) {
-        ProgramHeaderRegion region {};
-        region.set_program_header(program_header.raw_header());
-        if (region.is_tls_template()) {
-            VERIFY(!tls_region.has_value());
-            tls_region = region;
-        } else if (region.is_load()) {
-            if (region.size_in_memory() == 0)
-                return;
-            load_regions.append(region);
-            if (region.is_writable()) {
-                copy_regions.append(region);
-            } else {
-                map_regions.append(region);
-            }
-        } else if (region.is_dynamic()) {
-            dynamic_region_desired_vaddr = region.desired_load_address();
-        } else if (region.is_relro()) {
-            VERIFY(!relro_region.has_value());
-            relro_region = region;
-        }
-    });
+        if (program_header.type() != PT_LOAD)
+            return;
 
-    VERIFY(!map_regions.is_empty() || !copy_regions.is_empty());
+        FlatPtr section_start = program_header.vaddr().get();
+        FlatPtr section_end = section_start + program_header.size_in_memory();
 
-    auto compare_load_address = [](ProgramHeaderRegion& a, ProgramHeaderRegion& b) {
-        return a.desired_load_address().as_ptr() < b.desired_load_address().as_ptr();
-    };
+        if (ph_load_start > section_start)
+            ph_load_start = section_start;
 
-    quick_sort(load_regions, compare_load_address);
-    quick_sort(map_regions, compare_load_address);
-    quick_sort(copy_regions, compare_load_address);
+        if (ph_load_end < section_end)
+            ph_load_end = section_end;
+    });
 
-    // Process regions in order: .text, .data, .tls
-    void* requested_load_address = image().is_dynamic() ? nullptr : load_regions.first().desired_load_address().as_ptr();
+    void* requested_load_address = image().is_dynamic() ? nullptr : reinterpret_cast<void*>(ph_load_start);
 
     int reservation_mmap_flags = MAP_ANON | MAP_PRIVATE | MAP_NORESERVE;
     if (image().is_dynamic())
@@ -318,14 +295,13 @@ void DynamicLoader::load_program_headers()
     // First, we make a dummy reservation mapping, in order to allocate enough VM
     // to hold all regions contiguously in the address space.
-    FlatPtr ph_load_base = load_regions.first().desired_load_address().page_base().get();
-    FlatPtr ph_load_end = round_up_to_power_of_two(load_regions.last().desired_load_address().offset(load_regions.last().size_in_memory()).get(), PAGE_SIZE);
+    FlatPtr ph_load_base = ph_load_start & ~(FlatPtr)0xfffu;
+    ph_load_end = round_up_to_power_of_two(ph_load_end, PAGE_SIZE);
 
     size_t total_mapping_size = ph_load_end - ph_load_base;
 
     // Before we make our reservation, unmap our existing mapped ELF image that we used for reading header information.
     // This leaves our pointers dangling momentarily, but it reduces the chance that we will conflict with ourselves.
-    // The regions that we collected above are still safe to use because they are independent copies.
     if (munmap(m_file_data, m_file_size) < 0) {
         perror("munmap old mapping");
         VERIFY_NOT_REACHED();
     }
@@ -360,6 +336,52 @@ void DynamicLoader::load_program_headers()
         VERIFY_NOT_REACHED();
     }
 
+    // Most binaries have four loadable regions, three of which are mapped
+    // (symbol tables/relocation information, executable instructions, read-only data)
+    // and one of which is copied (modifiable data).
+    // These are allocated in-line to cut down on the malloc calls.
+    Vector<ProgramHeaderRegion, 4> load_regions;
+    Vector<ProgramHeaderRegion, 3> map_regions;
+    Vector<ProgramHeaderRegion, 1> copy_regions;
+    Optional<ProgramHeaderRegion> tls_region;
+    Optional<ProgramHeaderRegion> relro_region;
+
+    VirtualAddress dynamic_region_desired_vaddr;
+
+    image().for_each_program_header([&](Image::ProgramHeader const& program_header) {
+        ProgramHeaderRegion region {};
+        region.set_program_header(program_header.raw_header());
+        if (region.is_tls_template()) {
+            VERIFY(!tls_region.has_value());
+            tls_region = region;
+        } else if (region.is_load()) {
+            if (region.size_in_memory() == 0)
+                return;
+            load_regions.append(region);
+            if (region.is_writable()) {
+                copy_regions.append(region);
+            } else {
+                map_regions.append(region);
+            }
+        } else if (region.is_dynamic()) {
+            dynamic_region_desired_vaddr = region.desired_load_address();
+        } else if (region.is_relro()) {
+            VERIFY(!relro_region.has_value());
+            relro_region = region;
+        }
+    });
+
+    VERIFY(!map_regions.is_empty() || !copy_regions.is_empty());
+
+    auto compare_load_address = [](ProgramHeaderRegion& a, ProgramHeaderRegion& b) {
+        return a.desired_load_address().as_ptr() < b.desired_load_address().as_ptr();
+    };
+
+    quick_sort(load_regions, compare_load_address);
+    quick_sort(map_regions, compare_load_address);
+    quick_sort(copy_regions, compare_load_address);
+
+    // Process regions in order: .text, .data, .tls
     for (auto& region : map_regions) {
         FlatPtr ph_desired_base = region.desired_load_address().get();
         FlatPtr ph_base = region.desired_load_address().page_base().get();
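
As a self-contained illustration of the two-pass pattern this patch adopts,
here is a minimal sketch written against plain elf.h types rather than
LibELF's Image wrapper; the helper names (compute_load_extent,
reserve_load_range) and flag choices are illustrative assumptions, not code
from the SerenityOS tree.

#include <elf.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

struct LoadExtent {
    uintptr_t start { UINTPTR_MAX }; // lowest p_vaddr of any PT_LOAD header
    uintptr_t end { 0 };             // highest p_vaddr + p_memsz of any PT_LOAD header
};

// Pass one: find the address range requested by the PT_LOAD headers.
// This uses only stack storage, so it cannot place a heap block inside
// the range we are about to reserve.
static LoadExtent compute_load_extent(Elf64_Phdr const* phdrs, size_t count)
{
    LoadExtent extent;
    for (size_t i = 0; i < count; ++i) {
        if (phdrs[i].p_type != PT_LOAD)
            continue;
        uintptr_t section_start = phdrs[i].p_vaddr;
        uintptr_t section_end = section_start + phdrs[i].p_memsz;
        if (section_start < extent.start)
            extent.start = section_start;
        if (section_end > extent.end)
            extent.end = section_end;
    }
    return extent;
}

// Reserve one contiguous, page-aligned span covering every PT_LOAD region.
// Only after this succeeds would a loader build its per-region bookkeeping,
// since malloc is then free to place blocks anywhere outside the span.
static void* reserve_load_range(LoadExtent extent)
{
    uintptr_t page_size = (uintptr_t)sysconf(_SC_PAGESIZE);
    uintptr_t base = extent.start & ~(page_size - 1);                // round down
    uintptr_t end = (extent.end + page_size - 1) & ~(page_size - 1); // round up
    // The base address is a hint (no MAP_FIXED); a dynamic object would pass
    // nullptr instead and let the kernel choose the placement.
    void* reservation = mmap(reinterpret_cast<void*>(base), end - base, PROT_NONE,
        MAP_ANON | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
    if (reservation == MAP_FAILED)
        perror("reserve_load_range");
    return reservation;
}

If the first pass had already populated the region Vectors, the malloc
blocks backing them could land inside the [base, end) span, and the
reservation would either fail or come back at a different address than a
non-relocatable main executable requires; keeping the first pass
allocation-free avoids exactly the conflict the commit message describes.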