Skip to content

Commit

Permalink
Kernel/SMP: Always take PageDirectory lock before the MemoryManager lock
Browse files Browse the repository at this point in the history
This prevents deadlocks caused by lock-order inversion: previously some paths acquired s_mm_lock before the PageDirectory lock while others acquired them in the opposite order. All paths now take the PageDirectory lock first, then s_mm_lock.
  • Loading branch information
awesomekling committed Aug 9, 2021
1 parent d21b8f9 commit 00bbbde
Show file tree
Hide file tree
Showing 2 changed files with 9 additions and 10 deletions.
15 changes: 7 additions & 8 deletions Kernel/Memory/MemoryManager.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -105,8 +105,8 @@ UNMAP_AFTER_INIT void MemoryManager::protect_kernel_image()

UNMAP_AFTER_INIT void MemoryManager::protect_readonly_after_init_memory()
{
ScopedSpinLock mm_lock(s_mm_lock);
ScopedSpinLock page_lock(kernel_page_directory().get_lock());
ScopedSpinLock mm_lock(s_mm_lock);
// Disable writing to the .ro_after_init section
for (auto i = (FlatPtr)&start_of_ro_after_init; i < (FlatPtr)&end_of_ro_after_init; i += PAGE_SIZE) {
auto& pte = *ensure_pte(kernel_page_directory(), VirtualAddress(i));
Expand All @@ -117,8 +117,8 @@ UNMAP_AFTER_INIT void MemoryManager::protect_readonly_after_init_memory()

void MemoryManager::unmap_text_after_init()
{
ScopedSpinLock mm_lock(s_mm_lock);
ScopedSpinLock page_lock(kernel_page_directory().get_lock());
ScopedSpinLock mm_lock(s_mm_lock);

auto start = page_round_down((FlatPtr)&start_of_unmap_after_init);
auto end = page_round_up((FlatPtr)&end_of_unmap_after_init);
Expand Down Expand Up @@ -703,7 +703,7 @@ PageFaultResponse MemoryManager::handle_page_fault(PageFault const& fault)
OwnPtr<Region> MemoryManager::allocate_contiguous_kernel_region(size_t size, StringView name, Region::Access access, Region::Cacheable cacheable)
{
VERIFY(!(size % PAGE_SIZE));
ScopedSpinLock lock(s_mm_lock);
ScopedSpinLock lock(kernel_page_directory().get_lock());
auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
if (!range.has_value())
return {};
Expand All @@ -721,7 +721,7 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, StringView nam
auto vm_object = AnonymousVMObject::try_create_with_size(size, strategy);
if (!vm_object)
return {};
ScopedSpinLock lock(s_mm_lock);
ScopedSpinLock lock(kernel_page_directory().get_lock());
auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
if (!range.has_value())
return {};
Expand All @@ -734,7 +734,7 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size
if (!vm_object)
return {};
VERIFY(!(size % PAGE_SIZE));
ScopedSpinLock lock(s_mm_lock);
ScopedSpinLock lock(kernel_page_directory().get_lock());
auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
if (!range.has_value())
return {};
Expand All @@ -743,7 +743,6 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size

OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VirtualRange const& range, VMObject& vmobject, StringView name, Region::Access access, Region::Cacheable cacheable)
{
ScopedSpinLock lock(s_mm_lock);
auto region = Region::try_create_kernel_only(range, vmobject, 0, KString::try_create(name), access, cacheable);
if (region)
region->map(kernel_page_directory());
Expand All @@ -753,7 +752,7 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VirtualRange
OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmobject, size_t size, StringView name, Region::Access access, Region::Cacheable cacheable)
{
VERIFY(!(size % PAGE_SIZE));
ScopedSpinLock lock(s_mm_lock);
ScopedSpinLock lock(kernel_page_directory().get_lock());
auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
if (!range.has_value())
return {};
Expand Down Expand Up @@ -1109,8 +1108,8 @@ void MemoryManager::dump_kernel_regions()

void MemoryManager::set_page_writable_direct(VirtualAddress vaddr, bool writable)
{
ScopedSpinLock lock(s_mm_lock);
ScopedSpinLock page_lock(kernel_page_directory().get_lock());
ScopedSpinLock lock(s_mm_lock);
auto* pte = ensure_pte(kernel_page_directory(), vaddr);
VERIFY(pte);
if (pte->is_writable() == writable)
Expand Down
4 changes: 2 additions & 2 deletions Kernel/Memory/Region.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -236,10 +236,10 @@ bool Region::remap_vmobject_page(size_t page_index, bool with_flush)

void Region::unmap(ShouldDeallocateVirtualRange deallocate_range)
{
ScopedSpinLock lock(s_mm_lock);
if (!m_page_directory)
return;
ScopedSpinLock page_lock(m_page_directory->get_lock());
ScopedSpinLock lock(s_mm_lock);
size_t count = page_count();
for (size_t i = 0; i < count; ++i) {
auto vaddr = vaddr_from_page_index(i);
Expand All @@ -261,8 +261,8 @@ void Region::set_page_directory(PageDirectory& page_directory)

bool Region::map(PageDirectory& page_directory, ShouldFlushTLB should_flush_tlb)
{
ScopedSpinLock lock(s_mm_lock);
ScopedSpinLock page_lock(page_directory.get_lock());
ScopedSpinLock lock(s_mm_lock);

// FIXME: Find a better place for this sanity check(?)
if (is_user() && !is_shared()) {
Expand Down

0 comments on commit 00bbbde

Please sign in to comment.