Kernel: Fix signal delivery
When delivering urgent signals to the current thread,
we need to check whether we should be unblocked, and if not,
we need to yield to another process.

We also need to make sure that we suppress context switches
during Process::exec() so that a context switch doesn't clobber
the registers it sets up (mainly eip). To be able to do that,
we add the concept of a critical section, which is similar to
Processor::m_in_irq but different in that it can be entered at
any time. While in a critical section, calls to Scheduler::yield
and Scheduler::donate_to return immediately without triggering
a context switch; the processor then asynchronously triggers a
context switch once the critical section is left.
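
For illustration only (not part of this commit's diff), a minimal sketch of
how the critical-section mechanism is intended to be used. ScopedCritical,
Scheduler::yield(), leave_critical() and check_invoke_scheduler() are names
from this commit; setup_new_register_state() is a hypothetical stand-in for
the register setup performed in Process::exec():

void exec_like_path()
{
    ScopedCritical critical; // enter_critical(): ++m_in_critical, cli()

    // Hypothetical helper: writes the new eip/esp that a context
    // switch must not clobber.
    setup_new_register_state();

    // Inside the critical section this returns immediately; the
    // scheduler invocation is only recorded as pending.
    Scheduler::yield();
} // ~ScopedCritical() -> leave_critical(); once m_in_critical drops to 0,
  // check_invoke_scheduler() performs the deferred context switch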
tomuta authored and awesomekling committed Jul 3, 2020
1 parent a308b17 commit e373e5f
Showing 12 changed files with 242 additions and 95 deletions.
61 changes: 49 additions & 12 deletions Kernel/Arch/i386/CPU.cpp
@@ -828,6 +828,10 @@ void Processor::early_initialize(u32 cpu)

m_cpu = cpu;
m_in_irq = 0;
m_in_critical = 0;

m_invoke_scheduler_async = false;
m_scheduler_initialized = false;

m_idle_thread = nullptr;
m_current_thread = nullptr;
@@ -961,9 +965,10 @@ extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
void Processor::switch_context(Thread* from_thread, Thread* to_thread)
{
ASSERT(!in_irq());
ASSERT(!m_in_critical);
ASSERT(is_kernel_mode());
#ifdef CONTEXT_SWITCH_DEBUG
dbg() << "switch_context --> switching out of: " << *from_thread;
dbg() << "switch_context --> switching out of: " << VirtualAddress(from_thread) << " " << *from_thread;
#endif

// Switch to new thread context, passing from_thread and to_thread
Expand Down Expand Up @@ -1006,7 +1011,7 @@ void Processor::switch_context(Thread* from_thread, Thread* to_thread)
[to_thread] "a" (to_thread)
);
#ifdef CONTEXT_SWITCH_DEBUG
dbg() << "switch_context <-- from " << *from_thread << " to " << *to_thread;
dbg() << "switch_context <-- from " << VirtualAddress(from_thread) << " " << *from_thread << " to " << VirtualAddress(to_thread) << " " << *to_thread;
#endif
}

@@ -1017,9 +1022,14 @@ extern "C" void context_first_init(Thread* from_thread, Thread* to_thread, TrapF
(void)from_thread;
(void)to_thread;
(void)trap;

ASSERT(to_thread == Thread::current());
#ifdef CONTEXT_SWITCH_DEBUG
dbg() << "switch_context <-- from " << *from_thread << " to " << *to_thread << " (context_first_init)";
dbg() << "switch_context <-- from " << VirtualAddress(from_thread) << " " << *from_thread << " to " << VirtualAddress(to_thread) << " " << *to_thread << " (context_first_init)";
#endif
if (to_thread->process().wait_for_tracer_at_next_execve()) {
to_thread->send_urgent_signal_to_self(SIGSTOP);
}
}

extern "C" void thread_context_first_enter(void);
@@ -1038,9 +1048,15 @@ asm(
" jmp common_trap_exit \n"
);

u32 Processor::init_context(Thread& thread)
u32 Processor::init_context(Thread& thread, bool leave_crit)
{
ASSERT(is_kernel_mode());
if (leave_crit) {
ASSERT(in_critical());
m_in_critical--; // leave it without triggering anything
ASSERT(!in_critical());
}

const u32 kernel_stack_top = thread.kernel_stack_top();
u32 stack_top = kernel_stack_top;

@@ -1098,7 +1114,10 @@ u32 Processor::init_context(Thread& thread)
*reinterpret_cast<u32*>(stack_top) = stack_top + 4;

#ifdef CONTEXT_SWITCH_DEBUG
dbg() << "init_context " << thread << " set up to execute at eip: " << VirtualAddress(tss.eip) << " esp: " << VirtualAddress(tss.esp) << " stack top: " << VirtualAddress(stack_top);
if (return_to_user)
dbg() << "init_context " << thread << " (" << VirtualAddress(&thread) << ") set up to execute at eip: " << String::format("%02x:%08x", iretframe.cs, (u32)tss.eip) << " esp: " << VirtualAddress(tss.esp) << " stack top: " << VirtualAddress(stack_top) << " user esp: " << String::format("%02x:%08x", iretframe.userspace_ss, (u32)iretframe.userspace_esp);
else
dbg() << "init_context " << thread << " (" << VirtualAddress(&thread) << ") set up to execute at eip: " << String::format("%02x:%08x", iretframe.cs, (u32)tss.eip) << " esp: " << VirtualAddress(tss.esp) << " stack top: " << VirtualAddress(stack_top);
#endif

// make switch_context() always first return to thread_context_first_enter()
@@ -1118,34 +1137,43 @@ u32 Processor::init_context(Thread& thread)
}


extern "C" u32 do_init_context(Thread* thread)
extern "C" u32 do_init_context(Thread* thread, u32 flags)
{
return Processor::init_context(*thread);
ASSERT_INTERRUPTS_DISABLED();
ASSERT(Processor::current().in_critical());
thread->tss().eflags = flags;
return Processor::current().init_context(*thread, true);
}

extern "C" void do_assume_context(Thread* thread);
extern "C" void do_assume_context(Thread* thread, u32 flags);

asm(
".global do_assume_context \n"
"do_assume_context: \n"
" movl 4(%esp), %ebx \n"
" movl 8(%esp), %esi \n"
// We're going to call Processor::init_context, so just make sure
// we have enough stack space so we don't stomp over it
" subl $(" __STRINGIFY(4 + REGISTER_STATE_SIZE + TRAP_FRAME_SIZE + 4) "), %esp \n"
" pushl %esi \n"
" pushl %ebx \n"
" cld \n"
" call do_init_context \n"
" addl $4, %esp \n"
" addl $8, %esp \n"
" movl %eax, %esp \n" // move stack pointer to what Processor::init_context set up for us
" pushl %ebx \n" // push to_thread
" pushl %ebx \n" // push from_thread
" pushl $thread_context_first_enter \n" // should be same as tss.eip
" jmp enter_thread_context \n"
);

void Processor::assume_context(Thread& thread)
void Processor::assume_context(Thread& thread, u32 flags)
{
do_assume_context(&thread);
#ifdef CONTEXT_SWITCH_DEBUG
dbg() << "Assume context for thread " << VirtualAddress(&thread) << " " << thread;
#endif
ASSERT_INTERRUPTS_DISABLED();
do_assume_context(&thread, flags);
ASSERT_NOT_REACHED();
}

@@ -1161,6 +1189,7 @@ void Processor::initialize_context_switching(Thread& initial_thread)
m_tss.cs = m_tss.ds = m_tss.es = m_tss.gs = m_tss.ss = GDT_SELECTOR_CODE0 | 3;
m_tss.fs = GDT_SELECTOR_PROC | 3;

m_scheduler_initialized = true;

asm volatile(
"movl %[new_esp], %%esp \n" // switch to new stack
@@ -1197,7 +1226,15 @@ void Processor::exit_trap(TrapFrame& trap)
ASSERT(m_in_irq >= trap.prev_irq_level);
m_in_irq = trap.prev_irq_level;

if (m_invoke_scheduler_async && !m_in_irq) {
if (!m_in_irq && !m_in_critical)
check_invoke_scheduler();
}

void Processor::check_invoke_scheduler()
{
ASSERT(!m_in_irq);
ASSERT(!m_in_critical);
if (m_invoke_scheduler_async && m_scheduler_initialized) {
m_invoke_scheduler_async = false;
Scheduler::invoke_async();
}
70 changes: 68 additions & 2 deletions Kernel/Arch/i386/CPU.h
@@ -622,6 +622,7 @@ class Processor {

u32 m_cpu;
u32 m_in_irq;
u32 m_in_critical;

TSS32 m_tss;
static FPUState s_clean_fpu_state;
@@ -632,6 +633,7 @@ class Processor {
Thread* m_idle_thread;

bool m_invoke_scheduler_async;
bool m_scheduler_initialized;

void gdt_init();
void write_raw_gdt_entry(u16 selector, u32 low, u32 high);
@@ -713,24 +715,88 @@ class Processor {
return m_in_irq;
}

ALWAYS_INLINE void enter_critical(u32& prev_flags)
{
m_in_critical++;
prev_flags = cpu_flags();
cli();
}

ALWAYS_INLINE void leave_critical(u32 prev_flags)
{
ASSERT(m_in_critical > 0);
if (--m_in_critical == 0) {
if (!m_in_irq)
check_invoke_scheduler();
}
if (prev_flags & 0x200) // 0x200 is the EFLAGS IF bit: interrupts were enabled on entry
sti();
}

ALWAYS_INLINE u32& in_critical() { return m_in_critical; }

ALWAYS_INLINE const FPUState& clean_fpu_state() const
{
return s_clean_fpu_state;
}

void check_invoke_scheduler();
void invoke_scheduler_async() { m_invoke_scheduler_async = true; }

void enter_trap(TrapFrame& trap, bool raise_irq);
void exit_trap(TrapFrame& trap);

[[noreturn]] void initialize_context_switching(Thread& initial_thread);
void switch_context(Thread* from_thread, Thread* to_thread);
[[noreturn]] static void assume_context(Thread& thread);
static u32 init_context(Thread& thread);
[[noreturn]] static void assume_context(Thread& thread, u32 flags);
u32 init_context(Thread& thread, bool leave_crit);

void set_thread_specific(u8* data, size_t len);
};

class ScopedCritical
{
u32 m_prev_flags;
bool m_valid;

public:
ScopedCritical(const ScopedCritical&) = delete;
ScopedCritical& operator=(const ScopedCritical&) = delete;

ScopedCritical()
{
m_valid = true;
Processor::current().enter_critical(m_prev_flags);
}

~ScopedCritical()
{
if (m_valid) {
m_valid = false;
Processor::current().leave_critical(m_prev_flags);
}
}

ScopedCritical(ScopedCritical&& from):
m_prev_flags(from.m_prev_flags),
m_valid(from.m_valid)
{
from.m_prev_flags = 0;
from.m_valid = false;
}

ScopedCritical& operator=(ScopedCritical&& from)
{
if (&from != this) {
m_prev_flags = from.m_prev_flags;
m_valid = from.m_valid;
from.m_prev_flags = 0;
from.m_valid = false;
}
return *this;
}
};

struct TrapFrame {
u32 prev_irq_level;
RegisterState* regs; // must be last
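Aside from the diff: because ScopedCritical above is movable but not
copyable, a critical section can be entered in one function and handed off
to its caller. A minimal usage sketch, assuming the class exactly as
declared in this commit; begin_guarded_work() and caller() are hypothetical:

ScopedCritical begin_guarded_work()
{
    ScopedCritical critical; // enters the critical section
    // ... set up state that must not be interrupted by a context switch ...
    return critical; // moved out; the moved-from object is marked invalid
}

void caller()
{
    auto critical = begin_guarded_work();
    // still inside the critical section here
} // ~ScopedCritical() runs leave_critical(), which may now invoke the scheduler
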
15 changes: 13 additions & 2 deletions Kernel/Lock.cpp
@@ -24,6 +24,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#include <AK/TemporaryChange.h>
#include <Kernel/KSyms.h>
#include <Kernel/Lock.h>
#include <Kernel/Thread.h>
@@ -67,6 +68,13 @@ void Lock::lock(Mode mode)
}
timeval* timeout = nullptr;
current_thread->wait_on(m_queue, timeout, &m_lock, m_holder, m_name);
} else if (Processor::current().in_critical()) {
// If we're in a critical section and trying to lock, no context
// switch will happen, so yield.
// The assumption is that if we call this from a critical section
// that we DO want to temporarily leave it
TemporaryChange change(Processor::current().in_critical(), 0u);
Scheduler::yield();
}
}
}
@@ -95,14 +103,17 @@ void Lock::unlock()
return;
}
// I don't know *who* is using "m_lock", so just yield.
// The assumption is that if we call this from a critical section
// that we DO want to temporarily leave it
TemporaryChange change(Processor::current().in_critical(), 0u);
Scheduler::yield();
}
}

bool Lock::force_unlock_if_locked()
{
ASSERT(m_mode != Mode::Shared);
InterruptDisabler disabler;
ScopedCritical critical;
if (m_holder != Thread::current())
return false;
ASSERT(m_times_locked == 1);
@@ -116,7 +127,7 @@ bool Lock::force_unlock_if_locked()
void Lock::clear_waiters()
{
ASSERT(m_mode != Mode::Shared);
InterruptDisabler disabler;
ScopedCritical critical;
m_queue.clear();
}

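For context (not part of this commit), AK::TemporaryChange as used in
Lock::lock() and Lock::unlock() above follows the usual RAII
swap-and-restore pattern; a minimal sketch of the idea:

template<typename T>
class TemporaryChange {
public:
    TemporaryChange(T& variable, T value)
        : m_variable(variable)
        , m_old_value(variable)
    {
        m_variable = value; // e.g. force m_in_critical to 0 so yield can actually switch
    }
    ~TemporaryChange() { m_variable = m_old_value; } // restore on scope exit

private:
    T& m_variable;
    T m_old_value;
};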