diff --git a/Kernel/Arch/i386/CPU.cpp b/Kernel/Arch/i386/CPU.cpp
index b30ddbba8d8ab2..a08a1be0b030cd 100644
--- a/Kernel/Arch/i386/CPU.cpp
+++ b/Kernel/Arch/i386/CPU.cpp
@@ -943,6 +943,7 @@ void Processor::early_initialize(u32 cpu)
     m_message_queue = nullptr;
     m_idle_thread = nullptr;
     m_current_thread = nullptr;
+    m_scheduler_data = nullptr;
     m_mm_data = nullptr;
     m_info = nullptr;
 
@@ -1188,9 +1189,9 @@ extern "C" void context_first_init(Thread* from_thread, Thread* to_thread, TrapF
     // Since we got here and don't have Scheduler::context_switch in the
     // call stack (because this is the first time we switched into this
-    // context), we need to unlock the scheduler lock manually. We're
-    // using the flags initially set up by init_context
-    g_scheduler_lock.unlock(trap->regs->eflags);
+    // context), we need to notify the scheduler so that it can release
+    // the scheduler lock.
+    Scheduler::leave_on_first_switch(trap->regs->eflags);
 }
 
 extern "C" void thread_context_first_enter(void);
 
@@ -1335,6 +1336,7 @@ void Processor::assume_context(Thread& thread, u32 flags)
     dbg() << "Assume context for thread " << VirtualAddress(&thread) << " " << thread;
 #endif
     ASSERT_INTERRUPTS_DISABLED();
+    Scheduler::prepare_after_exec();
     // in_critical() should be 2 here. The critical section in Process::exec
     // and then the scheduler lock
     ASSERT(Processor::current().in_critical() == 2);
@@ -1346,21 +1348,20 @@ extern "C" void pre_init_finished(void)
 {
     ASSERT(g_scheduler_lock.own_lock());
 
-    // The target flags will get restored upon leaving the trap
-    u32 prev_flags = cpu_flags();
-    g_scheduler_lock.unlock(prev_flags);
-
-    // We because init_finished() will wait on the other APs, we need
+    // Because init_finished() will wait on the other APs, we need
     // to release the scheduler lock so that the other APs can also get
     // to this point
+
+    // The target flags will get restored upon leaving the trap
+    u32 prev_flags = cpu_flags();
+    Scheduler::leave_on_first_switch(prev_flags);
 }
 
 extern "C" void post_init_finished(void)
 {
     // We need to re-acquire the scheduler lock before a context switch
     // transfers control into the idle loop, which needs the lock held
-    ASSERT(!g_scheduler_lock.own_lock());
-    g_scheduler_lock.lock();
+    Scheduler::prepare_for_idle_loop();
 }
 
 void Processor::initialize_context_switching(Thread& initial_thread)
diff --git a/Kernel/Arch/i386/CPU.h b/Kernel/Arch/i386/CPU.h
index de90cd6dc966bb..bb933eea469886 100644
--- a/Kernel/Arch/i386/CPU.h
+++ b/Kernel/Arch/i386/CPU.h
@@ -623,6 +623,7 @@ static_assert(GDT_SELECTOR_CODE0 + 16 == GDT_SELECTOR_CODE3); // CS3 = CS0 + 16
 static_assert(GDT_SELECTOR_CODE0 + 24 == GDT_SELECTOR_DATA3); // SS3 = CS0 + 32
 
 class ProcessorInfo;
+class SchedulerPerProcessorData;
 struct MemoryManagerData;
 struct ProcessorMessageEntry;
 
@@ -683,6 +684,7 @@ class Processor {
 
     ProcessorInfo* m_info;
     MemoryManagerData* m_mm_data;
+    SchedulerPerProcessorData* m_scheduler_data;
     Thread* m_current_thread;
     Thread* m_idle_thread;
 
@@ -770,6 +772,16 @@ class Processor {
         return get_fs() == GDT_SELECTOR_PROC && read_fs_u32(0) != 0;
     }
 
+    ALWAYS_INLINE void set_scheduler_data(SchedulerPerProcessorData& scheduler_data)
+    {
+        m_scheduler_data = &scheduler_data;
+    }
+
+    ALWAYS_INLINE SchedulerPerProcessorData& get_scheduler_data() const
+    {
+        return *m_scheduler_data;
+    }
+
     ALWAYS_INLINE void set_mm_data(MemoryManagerData& mm_data)
     {
         m_mm_data = &mm_data;
@@ -920,16 +932,13 @@ class ScopedCritical {
 public:
     ScopedCritical()
     {
-        m_valid = true;
-        Processor::current().enter_critical(m_prev_flags);
+        enter();
     }
 
     ~ScopedCritical()
     {
-        if (m_valid) {
-            m_valid = false;
-            Processor::current().leave_critical(m_prev_flags);
-        }
+        if (m_valid)
+            leave();
     }
 
     ScopedCritical(ScopedCritical&& from)
@@ -955,6 +964,20 @@ class ScopedCritical {
         m_prev_flags &= ~0x200;
     }
 
+    void leave()
+    {
+        ASSERT(m_valid);
+        m_valid = false;
+        Processor::current().leave_critical(m_prev_flags);
+    }
+
+    void enter()
+    {
+        ASSERT(!m_valid);
+        m_valid = true;
+        Processor::current().enter_critical(m_prev_flags);
+    }
+
 private:
     u32 m_prev_flags { 0 };
     bool m_valid { false };
diff --git a/Kernel/Forward.h b/Kernel/Forward.h
index ce3f233fd0580c..700feab854f692 100644
--- a/Kernel/Forward.h
+++ b/Kernel/Forward.h
@@ -57,6 +57,7 @@ class Range;
 class RangeAllocator;
 class Region;
 class Scheduler;
+class SchedulerPerProcessorData;
 class SharedBuffer;
 class Socket;
 template<typename BaseType>
diff --git a/Kernel/Scheduler.cpp b/Kernel/Scheduler.cpp
index c01caa1798b4ea..307e8ba79a6fc4 100644
--- a/Kernel/Scheduler.cpp
+++ b/Kernel/Scheduler.cpp
@@ -25,6 +25,7 @@
  */
 
 #include
+#include <AK/ScopeGuard.h>
 #include
 #include
 #include
@@ -41,6 +42,16 @@
 
 namespace Kernel {
 
+class SchedulerPerProcessorData {
+    AK_MAKE_NONCOPYABLE(SchedulerPerProcessorData);
+    AK_MAKE_NONMOVABLE(SchedulerPerProcessorData);
+
+public:
+    SchedulerPerProcessorData() = default;
+
+    bool m_in_scheduler { true };
+};
+
 SchedulerData* g_scheduler_data;
 timeval g_timeofday;
 RecursiveSpinLock g_scheduler_lock;
@@ -325,6 +336,7 @@ void Scheduler::start()
     g_scheduler_lock.lock();
 
     auto& processor = Processor::current();
+    processor.set_scheduler_data(*new SchedulerPerProcessorData());
     ASSERT(processor.is_initialized());
     auto& idle_thread = *processor.idle_thread();
     ASSERT(processor.current_thread() == &idle_thread);
@@ -349,6 +361,20 @@ bool Scheduler::pick_next()
     auto now_sec = now.tv_sec;
     auto now_usec = now.tv_usec;
 
+    // Set the m_in_scheduler flag before acquiring the spinlock. This
+    // prevents a recursive call into Scheduler::invoke_async upon
+    // leaving the scheduler lock.
+    ScopedCritical critical;
+    Processor::current().get_scheduler_data().m_in_scheduler = true;
+    ScopeGuard guard(
+        []() {
+            // We may be on a different processor after we got switched
+            // back to this thread!
+            auto& scheduler_data = Processor::current().get_scheduler_data();
+            ASSERT(scheduler_data.m_in_scheduler);
+            scheduler_data.m_in_scheduler = false;
+        });
+
     ScopedSpinLock lock(g_scheduler_lock);
 
     // Check and unblock threads whose wait conditions have been met.
@@ -455,6 +481,10 @@ bool Scheduler::pick_next()
     dbg() << "Scheduler[" << Processor::current().id() << "]: Switch to " << *thread_to_schedule << " @ " << String::format("%04x:%08x", thread_to_schedule->tss().cs, thread_to_schedule->tss().eip);
 #endif
 
+    // We need to leave our first critical section before switching context,
+    // but since we're still holding the scheduler lock, we're still in a critical section
+    critical.leave();
+
     return context_switch(thread_to_schedule);
 }
 
@@ -462,6 +492,7 @@ bool Scheduler::yield()
 {
     InterruptDisabler disabler;
     auto& proc = Processor::current();
+    auto current_thread = Thread::current();
 #ifdef SCHEDULER_DEBUG
     dbg() << "Scheduler[" << proc.id() << "]: yielding thread " << *current_thread << " in_irq: " << proc.in_irq();
 #endif
@@ -473,18 +504,35 @@ bool Scheduler::yield()
         // delay until exiting the trap or critical section
         proc.invoke_scheduler_async();
         return false;
-    } else if (!Scheduler::pick_next())
+    }
+
+    if (!Scheduler::pick_next())
         return false;
#ifdef SCHEDULER_DEBUG
-    dbg() << "Scheduler[" << proc.id() << "]: yield returns to thread " << *current_thread << " in_irq: " << proc.in_irq();
+    dbg() << "Scheduler[" << Processor::current().id() << "]: yield returns to thread " << *current_thread << " in_irq: " << Processor::current().in_irq();
 #endif
     return true;
 }
 
 bool Scheduler::donate_to(Thread* beneficiary, const char* reason)
 {
-    ScopedSpinLock lock(g_scheduler_lock);
+    // Set the m_in_scheduler flag before acquiring the spinlock. This
+    // prevents a recursive call into Scheduler::invoke_async upon
+    // leaving the scheduler lock.
+    ScopedCritical critical;
     auto& proc = Processor::current();
+    proc.get_scheduler_data().m_in_scheduler = true;
+    ScopeGuard guard(
+        []() {
+            // We may be on a different processor after we got switched
+            // back to this thread!
+            auto& scheduler_data = Processor::current().get_scheduler_data();
+            ASSERT(scheduler_data.m_in_scheduler);
+            scheduler_data.m_in_scheduler = false;
+        });
+
+    ScopedSpinLock lock(g_scheduler_lock);
+
     ASSERT(!proc.in_irq());
     if (!Thread::is_thread(beneficiary))
         return false;
@@ -564,6 +612,39 @@ void Scheduler::enter_current(Thread& prev_thread)
     }
 }
 
+void Scheduler::leave_on_first_switch(u32 flags)
+{
+    // This is called when a thread is switched into for the first time.
+    // At this point, enter_current has already been called, but because
+    // Scheduler::context_switch is not in the call stack we need to
+    // clean up and release locks manually here
+    g_scheduler_lock.unlock(flags);
+    auto& scheduler_data = Processor::current().get_scheduler_data();
+    ASSERT(scheduler_data.m_in_scheduler);
+    scheduler_data.m_in_scheduler = false;
+}
+
+void Scheduler::prepare_after_exec()
+{
+    // This is called after exec() when doing a context "switch" into
+    // the new process. This is called from Processor::assume_context
+    ASSERT(g_scheduler_lock.own_lock());
+    auto& scheduler_data = Processor::current().get_scheduler_data();
+    ASSERT(!scheduler_data.m_in_scheduler);
+    scheduler_data.m_in_scheduler = true;
+}
+
+void Scheduler::prepare_for_idle_loop()
+{
+    // This is called when the CPU finished setting up the idle loop
+    // and is about to run it. We need to acquire the scheduler lock
+    ASSERT(!g_scheduler_lock.own_lock());
+    g_scheduler_lock.lock();
+    auto& scheduler_data = Processor::current().get_scheduler_data();
+    ASSERT(!scheduler_data.m_in_scheduler);
+    scheduler_data.m_in_scheduler = true;
+}
+
 Process* Scheduler::colonel()
 {
     ASSERT(s_colonel_process);
@@ -646,8 +727,14 @@ void Scheduler::timer_tick(const RegisterState& regs)
 void Scheduler::invoke_async()
 {
     ASSERT_INTERRUPTS_DISABLED();
-    ASSERT(!Processor::current().in_irq());
-    pick_next();
+    auto& proc = Processor::current();
+    ASSERT(!proc.in_irq());
+
+    // Since this function is called when leaving critical sections (such
+    // as a SpinLock), we need to check that we're not already in the
+    // scheduler to prevent recursion
+    if (!proc.get_scheduler_data().m_in_scheduler)
+        pick_next();
 }
 
 void Scheduler::notify_finalizer()
diff --git a/Kernel/Scheduler.h b/Kernel/Scheduler.h
index 4b5588fc93d912..5474ef8db19a0d 100644
--- a/Kernel/Scheduler.h
+++ b/Kernel/Scheduler.h
@@ -62,6 +62,9 @@ class Scheduler {
     static bool donate_to(Thread*, const char* reason);
     static bool context_switch(Thread*);
    static void enter_current(Thread& prev_thread);
+    static void leave_on_first_switch(u32 flags);
+    static void prepare_after_exec();
+    static void prepare_for_idle_loop();
     static Process* colonel();
     static void beep();
     static void idle_loop();
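
As an illustration of the mechanism this patch introduces, here is a minimal, self-contained C++ sketch of the re-entrancy guard. All names in it (CpuLocal, enter_critical, leave_critical, and the free-function pick_next/invoke_async) are hypothetical and only exist for the example; what mirrors the patch is the control flow: a per-CPU in_scheduler flag is set before the scheduler lock is taken, cleared only after the lock has been dropped (the ScopeGuard in the patch), and the hook that runs when the last critical section is left re-enters the scheduler only if the flag is clear.

    // Simplified, single-threaded model of the m_in_scheduler guard.
    // All names here are made up; only the control flow mirrors the patch.
    #include <cassert>
    #include <cstdio>

    struct CpuLocal {
        bool in_scheduler { false }; // stands in for SchedulerPerProcessorData::m_in_scheduler
        int critical_depth { 0 };
    };

    static CpuLocal g_cpu;

    void invoke_async(); // forward declaration

    void enter_critical()
    {
        ++g_cpu.critical_depth;
    }

    // Leaving the last critical section runs the deferred scheduler pass,
    // which is exactly where recursion could start without the flag.
    void leave_critical()
    {
        assert(g_cpu.critical_depth > 0);
        if (--g_cpu.critical_depth == 0)
            invoke_async();
    }

    bool pick_next()
    {
        // Set the flag first, then take the "scheduler lock" (modeled here
        // as a critical section). The flag is cleared only after the lock
        // is dropped, as the ScopeGuard in the patch does.
        g_cpu.in_scheduler = true;

        enter_critical(); // stands in for ScopedSpinLock(g_scheduler_lock)
        std::puts("pick_next: choosing a thread");
        leave_critical(); // dropping the lock calls invoke_async()...

        g_cpu.in_scheduler = false;
        return true;
    }

    void invoke_async()
    {
        // ...but the flag stops us from re-entering pick_next() recursively.
        if (!g_cpu.in_scheduler)
            pick_next();
        else
            std::puts("invoke_async: already in scheduler, skipping");
    }

    int main()
    {
        // Simulate an exit path that requested a deferred scheduler pass.
        enter_critical();
        leave_critical(); // runs pick_next() once; the nested call is suppressed
    }

Without the flag, the leave_critical() inside pick_next() would call straight back into pick_next(); that is the recursion the m_in_scheduler flag prevents when Scheduler::invoke_async fires on release of the scheduler lock.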