Kernel: Protect the PageDirectory from concurrent access
tomuta authored and awesomekling committed Nov 11, 2020
1 parent 2b25a89 · commit 5b38132
Showing 4 changed files with 14 additions and 3 deletions.
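
The pattern is uniform across the diff: the public entry points (MemoryManager::protect_kernel_image(), Region::map(), Region::remap_page(), Region::unmap()) take the new per-directory lock through a ScopedSpinLock guard, acquired after s_mm_lock where both are held, while the low-level helpers (pte(), ensure_pte(), release_pte(), map_individual_page_impl()) only assert that the caller already holds it. Below is a self-contained user-space sketch of that idiom; the RecursiveSpinLock and ScopedSpinLock here are simplified stand-ins for the kernel primitives (the real ones track the owning processor rather than a std::thread::id and cooperate with interrupt handling), so read it as an illustration of the locking discipline, not as the kernel implementation.

#include <atomic>
#include <cassert>
#include <thread>

// Simplified stand-in for the kernel's RecursiveSpinLock: the same owner may
// re-acquire the lock, tracked by a recursion depth.
class RecursiveSpinLock {
public:
    void lock()
    {
        auto me = std::this_thread::get_id();
        for (;;) {
            std::thread::id expected {}; // default-constructed id means "no owner"
            if (m_owner.compare_exchange_weak(expected, me, std::memory_order_acquire))
                break; // we just became the owner
            if (expected == me)
                break; // recursive acquisition by the current owner
        }
        ++m_recursion;
    }

    void unlock()
    {
        assert(own_lock());
        if (--m_recursion == 0)
            m_owner.store(std::thread::id {}, std::memory_order_release);
    }

    bool own_lock() const
    {
        return m_owner.load(std::memory_order_relaxed) == std::this_thread::get_id();
    }

private:
    std::atomic<std::thread::id> m_owner {};
    unsigned m_recursion { 0 }; // only modified while owning the lock
};

// RAII guard analogous to the kernel's ScopedSpinLock.
class ScopedSpinLock {
public:
    explicit ScopedSpinLock(RecursiveSpinLock& lock)
        : m_lock(lock)
    {
        m_lock.lock();
    }
    ~ScopedSpinLock() { m_lock.unlock(); }

private:
    RecursiveSpinLock& m_lock;
};

// Minimal stand-in for PageDirectory, exposing its lock as the diff does.
struct PageDirectory {
    RecursiveSpinLock& get_lock() { return m_lock; }
    RecursiveSpinLock m_lock;
};

// Low-level helper in the style of MemoryManager::pte()/ensure_pte(): it does
// not take the lock itself, it asserts that the caller already holds it.
static void walk_page_tables(PageDirectory& directory)
{
    assert(directory.get_lock().own_lock());
    // ... read or modify page table entries here ...
}

int main()
{
    PageDirectory directory;

    // Entry point in the style of Region::map(): take the lock, call helpers.
    ScopedSpinLock page_lock(directory.get_lock());
    walk_page_tables(directory);

    // A nested acquisition by the same owner does not deadlock.
    ScopedSpinLock nested(directory.get_lock());
    walk_page_tables(directory);
    return 0;
}

Making the lock recursive is what lets a path that already holds a directory's lock call into another helper that takes it again; with a plain spinlock, any such nesting would deadlock.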
Kernel/VM/MemoryManager.cpp (5 additions, 1 deletion)

@@ -87,6 +87,7 @@ MemoryManager::~MemoryManager()
 
 void MemoryManager::protect_kernel_image()
 {
+    ScopedSpinLock page_lock(kernel_page_directory().get_lock());
     // Disable writing to the kernel text and rodata segments.
     for (size_t i = (FlatPtr)&start_of_kernel_text; i < (FlatPtr)&start_of_kernel_data; i += PAGE_SIZE) {
         auto& pte = *ensure_pte(kernel_page_directory(), VirtualAddress(i));
@@ -193,10 +194,11 @@ void MemoryManager::parse_memory_map()
     ASSERT(m_user_physical_pages > 0);
 }
 
-PageTableEntry* MemoryManager::pte(const PageDirectory& page_directory, VirtualAddress vaddr)
+PageTableEntry* MemoryManager::pte(PageDirectory& page_directory, VirtualAddress vaddr)
 {
     ASSERT_INTERRUPTS_DISABLED();
     ASSERT(s_mm_lock.own_lock());
+    ASSERT(page_directory.get_lock().own_lock());
     u32 page_directory_table_index = (vaddr.get() >> 30) & 0x3;
     u32 page_directory_index = (vaddr.get() >> 21) & 0x1ff;
     u32 page_table_index = (vaddr.get() >> 12) & 0x1ff;
@@ -213,6 +215,7 @@ PageTableEntry* MemoryManager::ensure_pte(PageDirectory& page_directory, Virtual
 {
     ASSERT_INTERRUPTS_DISABLED();
     ASSERT(s_mm_lock.own_lock());
+    ASSERT(page_directory.get_lock().own_lock());
     u32 page_directory_table_index = (vaddr.get() >> 30) & 0x3;
     u32 page_directory_index = (vaddr.get() >> 21) & 0x1ff;
     u32 page_table_index = (vaddr.get() >> 12) & 0x1ff;
@@ -259,6 +262,7 @@ void MemoryManager::release_pte(PageDirectory& page_directory, VirtualAddress va
 {
     ASSERT_INTERRUPTS_DISABLED();
     ASSERT(s_mm_lock.own_lock());
+    ASSERT(page_directory.get_lock().own_lock());
     u32 page_directory_table_index = (vaddr.get() >> 30) & 0x3;
     u32 page_directory_index = (vaddr.get() >> 21) & 0x1ff;
     u32 page_table_index = (vaddr.get() >> 12) & 0x1ff;
Kernel/VM/MemoryManager.h (1 addition, 1 deletion)

@@ -191,7 +191,7 @@ class MemoryManager {
     PageDirectoryEntry* quickmap_pd(PageDirectory&, size_t pdpt_index);
     PageTableEntry* quickmap_pt(PhysicalAddress);
 
-    PageTableEntry* pte(const PageDirectory&, VirtualAddress);
+    PageTableEntry* pte(PageDirectory&, VirtualAddress);
     PageTableEntry* ensure_pte(PageDirectory&, VirtualAddress);
     void release_pte(PageDirectory&, VirtualAddress, bool);
 
Kernel/VM/PageDirectory.h (3 additions, 0 deletions)

@@ -57,6 +57,8 @@ class PageDirectory : public RefCounted<PageDirectory> {
     Process* process() { return m_process; }
     const Process* process() const { return m_process; }
 
+    RecursiveSpinLock& get_lock() { return m_lock; }
+
 private:
     PageDirectory(Process&, const RangeAllocator* parent_range_allocator);
     PageDirectory();
@@ -67,6 +69,7 @@ class PageDirectory : public RefCounted<PageDirectory> {
     RefPtr<PhysicalPage> m_directory_table;
     RefPtr<PhysicalPage> m_directory_pages[4];
     HashMap<u32, RefPtr<PhysicalPage>> m_page_tables;
+    RecursiveSpinLock m_lock;
 };
 
 }
Kernel/VM/Region.cpp (5 additions, 1 deletion)

@@ -229,6 +229,7 @@ Bitmap& Region::ensure_cow_map() const
 
 bool Region::map_individual_page_impl(size_t page_index)
 {
+    ASSERT(m_page_directory->get_lock().own_lock());
     auto page_vaddr = vaddr_from_page_index(page_index);
     auto* pte = MM.ensure_pte(*m_page_directory, page_vaddr);
     if (!pte) {
@@ -260,8 +261,9 @@ bool Region::map_individual_page_impl(size_t page_index)
 
 bool Region::remap_page(size_t page_index, bool with_flush)
 {
-    ASSERT(m_page_directory);
     ScopedSpinLock lock(s_mm_lock);
+    ASSERT(m_page_directory);
+    ScopedSpinLock page_lock(m_page_directory->get_lock());
     ASSERT(physical_page(page_index));
     bool success = map_individual_page_impl(page_index);
     if (with_flush)
@@ -273,6 +275,7 @@ void Region::unmap(ShouldDeallocateVirtualMemoryRange deallocate_range)
 {
     ScopedSpinLock lock(s_mm_lock);
     ASSERT(m_page_directory);
+    ScopedSpinLock page_lock(m_page_directory->get_lock());
     size_t count = page_count();
     for (size_t i = 0; i < count; ++i) {
         auto vaddr = vaddr_from_page_index(i);
@@ -302,6 +305,7 @@ void Region::set_page_directory(PageDirectory& page_directory)
 bool Region::map(PageDirectory& page_directory)
 {
     ScopedSpinLock lock(s_mm_lock);
+    ScopedSpinLock page_lock(page_directory.get_lock());
     set_page_directory(page_directory);
 #ifdef MM_DEBUG
     dbg() << "MM: Region::map() will map VMO pages " << first_page_index() << " - " << last_page_index() << " (VMO page count: " << vmobject().page_count() << ")";
