Kernel: Move "in-scheduler" flag from SchedulerData to Processor
This avoids a race between getting the processor-specific SchedulerData
and accessing it. (Switching to a different CPU in that window means
that we're operating on the wrong SchedulerData.)

Co-authored-by: Tom <[email protected]>
awesomekling and tomuta committed Aug 29, 2021
1 parent 249d6a4 commit d9da513
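
To make the race concrete, here is a minimal sketch of the window the commit message describes, using the names from the diff below (the interleaving is illustrative, not code from the commit):

    // Illustrative only: ProcessorSpecific<SchedulerData>::get() resolves the
    // data block of whichever CPU we are on *at that moment*, but the returned
    // reference stays pinned to that CPU.
    auto& scheduler_data = ProcessorSpecific<SchedulerData>::get(); // resolved on CPU 0
    // ... the thread is preempted here and resumes on CPU 1 ...
    scheduler_data.in_scheduler = false; // still writes CPU 0's flag!

    // After this commit, the flag lives directly in the Processor struct and is
    // accessed with a single %gs-relative instruction, which always targets the
    // CPU that executes it:
    Processor::set_current_in_scheduler(false);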
Showing 2 changed files with 24 additions and 22 deletions.
12 changes: 11 additions & 1 deletion Kernel/Arch/x86/Processor.h
@@ -25,7 +25,6 @@ struct ProcessorMessageEntry;
 
 enum class ProcessorSpecificDataID {
     MemoryManager,
-    Scheduler,
     __Count,
 };

@@ -138,6 +137,7 @@ class Processor {
 
     bool m_invoke_scheduler_async;
     bool m_scheduler_initialized;
+    bool m_in_scheduler { true };
     Atomic<bool> m_halt_requested;
 
     DeferredCallEntry* m_pending_deferred_calls; // in reverse order
@@ -344,6 +344,16 @@ class Processor {
         write_gs_ptr(__builtin_offsetof(Processor, m_in_critical), in_critical() + 1);
     }
 
+    ALWAYS_INLINE static bool current_in_scheduler()
+    {
+        return read_gs_value<decltype(m_in_scheduler)>(__builtin_offsetof(Processor, m_in_scheduler));
+    }
+
+    ALWAYS_INLINE static void set_current_in_scheduler(bool value)
+    {
+        write_gs_value<decltype(m_in_scheduler)>(__builtin_offsetof(Processor, m_in_scheduler), value);
+    }
+
 private:
     ALWAYS_INLINE void do_leave_critical()
     {
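
These accessors work because on x86 SerenityOS keeps the %gs segment base pointing at the current CPU's Processor struct, so a gs-relative load or store is a single instruction with no separate "look up my per-CPU data" step. A simplified sketch of what such a helper can compile down to (an assumption for illustration; the kernel's real read_gs_value/write_gs_value are generic templates defined elsewhere in the arch headers, and the helper name here is hypothetical):

    // Sketch only, not the kernel's actual definition.
    ALWAYS_INLINE static bool read_in_scheduler_flag()
    {
        bool value;
        // One instruction: load a byte from gs_base + offsetof(Processor, m_in_scheduler).
        // There is no window between "find the current CPU" and "read its flag"
        // in which the thread could migrate to another CPU.
        asm volatile("movb %%gs:%a[off], %[val]"
                     : [val] "=q"(value)
                     : [off] "i"(__builtin_offsetof(Processor, m_in_scheduler)));
        return value;
    }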
34 changes: 13 additions & 21 deletions Kernel/Scheduler.cpp
@@ -22,12 +22,6 @@
 
 namespace Kernel {
 
-struct SchedulerData {
-    static ProcessorSpecificDataID processor_specific_data_id() { return ProcessorSpecificDataID::Scheduler; }
-
-    bool in_scheduler { true };
-};
-
 RecursiveSpinlock g_scheduler_lock;
 
 static u32 time_slice_for(const Thread& thread)
@@ -195,7 +189,6 @@ UNMAP_AFTER_INIT void Scheduler::start()
     g_scheduler_lock.lock();
 
     auto& processor = Processor::current();
-    ProcessorSpecific<SchedulerData>::initialize();
     VERIFY(processor.is_initialized());
     auto& idle_thread = *Processor::idle_thread();
     VERIFY(processor.current_thread() == &idle_thread);
@@ -217,14 +210,13 @@ bool Scheduler::pick_next()
     // prevents a recursive call into Scheduler::invoke_async upon
     // leaving the scheduler lock.
     ScopedCritical critical;
-    ProcessorSpecific<SchedulerData>::get().in_scheduler = true;
+    Processor::set_current_in_scheduler(true);
     ScopeGuard guard(
         []() {
             // We may be on a different processor after we got switched
             // back to this thread!
-            auto& scheduler_data = ProcessorSpecific<SchedulerData>::get();
-            VERIFY(scheduler_data.in_scheduler);
-            scheduler_data.in_scheduler = false;
+            VERIFY(Processor::current_in_scheduler());
+            Processor::set_current_in_scheduler(false);
         });
 
     SpinlockLocker lock(g_scheduler_lock);
@@ -361,19 +353,19 @@ void Scheduler::leave_on_first_switch(u32 flags)
     // Scheduler::context_switch is not in the call stack we need to
     // clean up and release locks manually here
    g_scheduler_lock.unlock(flags);
-    auto& scheduler_data = ProcessorSpecific<SchedulerData>::get();
-    VERIFY(scheduler_data.in_scheduler);
-    scheduler_data.in_scheduler = false;
+
+    VERIFY(Processor::current_in_scheduler());
+    Processor::set_current_in_scheduler(false);
 }
 
 void Scheduler::prepare_after_exec()
 {
     // This is called after exec() when doing a context "switch" into
     // the new process. This is called from Processor::assume_context
     VERIFY(g_scheduler_lock.own_lock());
-    auto& scheduler_data = ProcessorSpecific<SchedulerData>::get();
-    VERIFY(!scheduler_data.in_scheduler);
-    scheduler_data.in_scheduler = true;
+
+    VERIFY(!Processor::current_in_scheduler());
+    Processor::set_current_in_scheduler(true);
 }
 
 void Scheduler::prepare_for_idle_loop()
@@ -382,9 +374,9 @@ void Scheduler::prepare_for_idle_loop()
     // and is about to run it. We need to acquire the scheduler lock
     VERIFY(!g_scheduler_lock.own_lock());
     g_scheduler_lock.lock();
-    auto& scheduler_data = ProcessorSpecific<SchedulerData>::get();
-    VERIFY(!scheduler_data.in_scheduler);
-    scheduler_data.in_scheduler = true;
+
+    VERIFY(!Processor::current_in_scheduler());
+    Processor::set_current_in_scheduler(true);
 }
 
 Process* Scheduler::colonel()
@@ -517,7 +509,7 @@ void Scheduler::invoke_async()
     // Since this function is called when leaving critical sections (such
     // as a Spinlock), we need to check if we're not already doing this
     // to prevent recursion
-    if (!ProcessorSpecific<SchedulerData>::get().in_scheduler)
+    if (!Processor::current_in_scheduler())
         pick_next();
 }

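For context on the recursion that comment mentions: invoke_async() is reached from the path that leaves a critical section, roughly like the sketch below (an assumed, simplified shape; see Processor::do_leave_critical in Processor.h for the real logic):

    // Simplified sketch of the caller side (not the actual kernel code).
    void Processor::check_invoke_scheduler_sketch()
    {
        if (m_invoke_scheduler_async && m_scheduler_initialized) {
            m_invoke_scheduler_async = false;
            // Without the current_in_scheduler() check, invoke_async() ->
            // pick_next() -> (scheduler leaves a critical section) ->
            // invoke_async() could recurse.
            Scheduler::invoke_async();
        }
    }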