From d9da513959a13663b0a743103c7fc35a4322de12 Mon Sep 17 00:00:00 2001
From: Andreas Kling <kling@serenityos.org>
Date: Sun, 29 Aug 2021 12:43:39 +0200
Subject: [PATCH] Kernel: Move "in-scheduler" flag from SchedulerData to
 Processor

This avoids a race between getting the processor-specific SchedulerData
and accessing it. (Switching to a different CPU in that window means
that we're operating on the wrong SchedulerData.)

Co-authored-by: Tom <tomut@yahoo.com>
---
 Kernel/Arch/x86/Processor.h | 12 +++++++++++-
 Kernel/Scheduler.cpp        | 34 +++++++++++++---------------------
 2 files changed, 24 insertions(+), 22 deletions(-)

diff --git a/Kernel/Arch/x86/Processor.h b/Kernel/Arch/x86/Processor.h
index 57abb54514cbd4..52d1e1c4786958 100644
--- a/Kernel/Arch/x86/Processor.h
+++ b/Kernel/Arch/x86/Processor.h
@@ -25,7 +25,6 @@ struct ProcessorMessageEntry;
 
 enum class ProcessorSpecificDataID {
     MemoryManager,
-    Scheduler,
     __Count,
 };
 
@@ -138,6 +137,7 @@ class Processor {
 
     bool m_invoke_scheduler_async;
     bool m_scheduler_initialized;
+    bool m_in_scheduler { true };
     Atomic<bool> m_halt_requested;
 
     DeferredCallEntry* m_pending_deferred_calls; // in reverse order
 
@@ -344,6 +344,16 @@ class Processor {
         write_gs_ptr(__builtin_offsetof(Processor, m_in_critical), in_critical() + 1);
     }
 
+    ALWAYS_INLINE static bool current_in_scheduler()
+    {
+        return read_gs_value(__builtin_offsetof(Processor, m_in_scheduler));
+    }
+
+    ALWAYS_INLINE static void set_current_in_scheduler(bool value)
+    {
+        write_gs_value(__builtin_offsetof(Processor, m_in_scheduler), value);
+    }
+
 private:
     ALWAYS_INLINE void do_leave_critical()
     {
diff --git a/Kernel/Scheduler.cpp b/Kernel/Scheduler.cpp
index d52aca035cc4f1..e70def131b486a 100644
--- a/Kernel/Scheduler.cpp
+++ b/Kernel/Scheduler.cpp
@@ -22,12 +22,6 @@
 
 namespace Kernel {
 
-struct SchedulerData {
-    static ProcessorSpecificDataID processor_specific_data_id() { return ProcessorSpecificDataID::Scheduler; }
-
-    bool in_scheduler { true };
-};
-
 RecursiveSpinlock g_scheduler_lock;
 
 static u32 time_slice_for(const Thread& thread)
@@ -195,7 +189,6 @@ UNMAP_AFTER_INIT void Scheduler::start()
     g_scheduler_lock.lock();
 
     auto& processor = Processor::current();
-    ProcessorSpecific<SchedulerData>::initialize();
     VERIFY(processor.is_initialized());
     auto& idle_thread = *Processor::idle_thread();
     VERIFY(processor.current_thread() == &idle_thread);
@@ -217,14 +210,13 @@ bool Scheduler::pick_next()
     // prevents a recursive call into Scheduler::invoke_async upon
     // leaving the scheduler lock.
     ScopedCritical critical;
-    ProcessorSpecific<SchedulerData>::get().in_scheduler = true;
+    Processor::set_current_in_scheduler(true);
     ScopeGuard guard(
         []() {
             // We may be on a different processor after we got switched
             // back to this thread!
-            auto& scheduler_data = ProcessorSpecific<SchedulerData>::get();
-            VERIFY(scheduler_data.in_scheduler);
-            scheduler_data.in_scheduler = false;
+            VERIFY(Processor::current_in_scheduler());
+            Processor::set_current_in_scheduler(false);
         });
 
     SpinlockLocker lock(g_scheduler_lock);
@@ -361,9 +353,9 @@ void Scheduler::leave_on_first_switch(u32 flags)
     // Scheduler::context_switch is not in the call stack we need to
     // clean up and release locks manually here
     g_scheduler_lock.unlock(flags);
-    auto& scheduler_data = ProcessorSpecific<SchedulerData>::get();
-    VERIFY(scheduler_data.in_scheduler);
-    scheduler_data.in_scheduler = false;
+
+    VERIFY(Processor::current_in_scheduler());
+    Processor::set_current_in_scheduler(false);
 }
 
 void Scheduler::prepare_after_exec()
@@ -371,9 +363,9 @@ void Scheduler::prepare_after_exec()
     // This is called after exec() when doing a context "switch" into
     // the new process. This is called from Processor::assume_context
     VERIFY(g_scheduler_lock.own_lock());
-    auto& scheduler_data = ProcessorSpecific<SchedulerData>::get();
-    VERIFY(!scheduler_data.in_scheduler);
-    scheduler_data.in_scheduler = true;
+
+    VERIFY(!Processor::current_in_scheduler());
+    Processor::set_current_in_scheduler(true);
 }
 
 void Scheduler::prepare_for_idle_loop()
@@ -382,9 +374,9 @@ void Scheduler::prepare_for_idle_loop()
     // and is about to run it. We need to acquire he scheduler lock
     VERIFY(!g_scheduler_lock.own_lock());
     g_scheduler_lock.lock();
-    auto& scheduler_data = ProcessorSpecific<SchedulerData>::get();
-    VERIFY(!scheduler_data.in_scheduler);
-    scheduler_data.in_scheduler = true;
+
+    VERIFY(!Processor::current_in_scheduler());
+    Processor::set_current_in_scheduler(true);
 }
 
 Process* Scheduler::colonel()
@@ -517,7 +509,7 @@ void Scheduler::invoke_async()
 
     // Since this function is called when leaving critical sections (such
     // as a Spinlock), we need to check if we're not already doing this
     // to prevent recursion
-    if (!ProcessorSpecific<SchedulerData>::get().in_scheduler)
+    if (!Processor::current_in_scheduler())
         pick_next();
 }
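
For readers unfamiliar with per-CPU data, here is a minimal user-space sketch of the race this patch closes. All names in it (PerCpuData, current_cpu_id, and so on) are hypothetical stand-ins, not SerenityOS APIs: the old code resolved "this CPU's data" first and dereferenced the result later, while the new code folds lookup and access into a single operation against whichever CPU is executing.

    #include <cassert>

    // Hypothetical model of per-CPU storage; not SerenityOS code.
    struct PerCpuData {
        bool in_scheduler { true };
    };

    static PerCpuData g_per_cpu_data[2]; // one slot per CPU

    // Imagine this returns the id of the CPU currently running the thread.
    // (Stubbed to 0 so the sketch compiles and runs.)
    static int current_cpu_id() { return 0; }

    // Old shape: two separate steps. If the thread is preempted and
    // migrated to another CPU between them, step 2 mutates the slot of
    // the CPU it used to be on.
    static void old_two_step_access()
    {
        auto& data = g_per_cpu_data[current_cpu_id()]; // step 1: resolve slot
        // <-- a migration to another CPU here leaves `data` pointing at
        //     the old CPU's slot
        data.in_scheduler = false;                     // step 2: wrong slot!
    }

    // New shape: resolve and access in one expression. The kernel version
    // is stronger still: read_gs_value()/write_gs_value() access the flag
    // at a fixed offset from the gs segment base of the executing CPU, so
    // the lookup cannot go stale in between.
    static void single_access()
    {
        g_per_cpu_data[current_cpu_id()].in_scheduler = false;
    }

    int main()
    {
        old_two_step_access();
        single_access();
        assert(!g_per_cpu_data[0].in_scheduler);
        return 0;
    }

This is also why the ScopeGuard in pick_next() can simply assert and clear the flag: even if the thread resumes on a different processor than the one it left on, Processor::current_in_scheduler() reads the flag of the CPU it is running on now, which is exactly the CPU whose scheduler it is leaving.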