
Kernel: Push ARCH-specific ifdefs down into RegisterState functions
The non-CPU-specific code of the kernel shouldn't need to deal with
architecture-specific registers, and should instead deal with an
abstract view of the machine. This allows us to remove a variety of
architecture-specific ifdefs and helps keep the code slightly more
portable.

We do this by exposing the abstract representation of the instruction
pointer, stack pointer, base pointer, return register, etc. on the
RegisterState struct.
bgianfo authored and gunnarbeutner committed Jul 19, 2021
1 parent ff8429a commit 1cffecb
Showing 9 changed files with 119 additions and 97 deletions.
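
To illustrate the pattern this commit applies throughout, here is a minimal standalone sketch, not the kernel's actual code: FlatPtr is aliased to uintptr_t, the compiler's __i386__ macro stands in for the kernel's ARCH(I386) macro, and report_fault() is a hypothetical caller used only for illustration. The idea is that the per-architecture #ifdef lives exactly once, inside an accessor such as ip(), so generic callers stay architecture-neutral.

#include <cstdint>

using FlatPtr = uintptr_t; // stand-in for the kernel's FlatPtr type

struct RegisterState {
#if defined(__i386__)
    FlatPtr eip;
#else
    FlatPtr rip;
#endif

    // The architecture #ifdef lives here, once.
    FlatPtr ip() const
    {
#if defined(__i386__)
        return eip;
#else
        return rip;
#endif
    }
};

// Generic code no longer needs a per-architecture branch at every call site.
void report_fault(const RegisterState& regs)
{
    FlatPtr faulting_ip = regs.ip();
    (void)faulting_ip;
}

Because the accessors are const and defined inline in the header, they should compile down to the same single load the open-coded #ifdef produced, so the abstraction carries no runtime cost.
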
69 changes: 69 additions & 0 deletions Kernel/Arch/x86/RegisterState.h
@@ -67,6 +67,75 @@ struct [[gnu::packed]] RegisterState {
FlatPtr userspace_rsp;
FlatPtr userspace_ss;
#endif

FlatPtr userspace_sp() const
{
#if ARCH(I386)
return userspace_esp;
#else
return userspace_rsp;
#endif
}

FlatPtr ip() const
{
#if ARCH(I386)
return eip;
#else
return rip;
#endif
}

FlatPtr bp() const
{
#if ARCH(I386)
return ebp;
#else
return rbp;
#endif
}

FlatPtr flags() const
{
#if ARCH(I386)
return eflags;
#else
return rflags;
#endif
}

void capture_syscall_params(FlatPtr& function, FlatPtr& arg1, FlatPtr& arg2, FlatPtr& arg3) const
{
#if ARCH(I386)
function = eax;
arg1 = edx;
arg2 = ecx;
arg3 = ebx;
#else
function = rax;
arg1 = rdx;
arg2 = rcx;
arg3 = rbx;
#endif
}

void set_ip_reg(FlatPtr value)
{
#if ARCH(I386)
eip = value;
#else
rip = value;
#endif
}

void set_return_reg(FlatPtr value)
{
#if ARCH(I386)
eax = value;
#else
rax = value;
#endif
}
};

#if ARCH(I386)
15 changes: 2 additions & 13 deletions Kernel/Arch/x86/common/Interrupts.cpp
@@ -228,13 +228,7 @@ void handle_crash(RegisterState& regs, const char* description, int signal, bool
PANIC("Crash in ring 0");
}

FlatPtr ip;
#if ARCH(I386)
ip = regs.eip;
#else
ip = regs.rip;
#endif
process->crash(signal, ip, out_of_memory);
process->crash(signal, regs.ip(), out_of_memory);
}

EH_ENTRY_NO_CODE(6, illegal_instruction);
@@ -316,12 +310,7 @@ void page_fault_handler(TrapFrame* trap)
current_thread->set_handling_page_fault(false);
};

VirtualAddress userspace_sp;
#if ARCH(I386)
userspace_sp = VirtualAddress { regs.userspace_esp };
#else
userspace_sp = VirtualAddress { regs.userspace_rsp };
#endif
VirtualAddress userspace_sp = VirtualAddress { regs.userspace_sp() };
if (!faulted_in_kernel && !MM.validate_user_stack(current_thread->process(), userspace_sp)) {
dbgln("Invalid stack pointer: {}", userspace_sp);
handle_crash(regs, "Bad stack on page fault", SIGSTKFLT);
24 changes: 5 additions & 19 deletions Kernel/Arch/x86/common/Processor.cpp
@@ -546,14 +546,7 @@ Vector<FlatPtr> Processor::capture_stack_trace(Thread& thread, size_t max_frames
// to be ebp.
ProcessPagingScope paging_scope(thread.process());
auto& regs = thread.regs();
FlatPtr* stack_top;
FlatPtr sp;
#if ARCH(I386)
sp = regs.esp;
#else
sp = regs.rsp;
#endif
stack_top = reinterpret_cast<FlatPtr*>(sp);
FlatPtr* stack_top = reinterpret_cast<FlatPtr*>(regs.sp());
if (is_user_range(VirtualAddress(stack_top), sizeof(FlatPtr))) {
if (!copy_from_user(&frame_ptr, &((FlatPtr*)stack_top)[0]))
frame_ptr = 0;
@@ -562,11 +555,9 @@ Vector<FlatPtr> Processor::capture_stack_trace(Thread& thread, size_t max_frames
if (!safe_memcpy(&frame_ptr, &((FlatPtr*)stack_top)[0], sizeof(FlatPtr), fault_at))
frame_ptr = 0;
}
#if ARCH(I386)
ip = regs.eip;
#else
ip = regs.rip;
#endif

ip = regs.ip();

// TODO: We need to leave the scheduler lock here, but we also
// need to prevent the target thread from being run while
// we walk the stack
@@ -1222,12 +1213,7 @@ extern "C" void context_first_init([[maybe_unused]] Thread* from_thread, [[maybe
// the scheduler lock. We don't want to enable interrupts at this point
// as we're still in the middle of a context switch. Doing so could
// trigger a context switch within a context switch, leading to a crash.
FlatPtr flags;
#if ARCH(I386)
flags = trap->regs->eflags;
#else
flags = trap->regs->rflags;
#endif
FlatPtr flags = trap->regs->flags();
Scheduler::leave_on_first_switch(flags & ~0x200);
}

8 changes: 2 additions & 6 deletions Kernel/Arch/x86/common/SafeMem.cpp
@@ -261,12 +261,8 @@ NEVER_INLINE Optional<bool> safe_atomic_compare_exchange_relaxed(volatile u32* v

bool handle_safe_access_fault(RegisterState& regs, FlatPtr fault_address)
{
FlatPtr ip;
#if ARCH(I386)
ip = regs.eip;
#else
ip = regs.rip;
#endif
FlatPtr ip = regs.ip();
if (ip >= (FlatPtr)&start_of_safemem_text && ip < (FlatPtr)&end_of_safemem_text) {
// If we detect that the fault happened in safe_memcpy(), safe_strnlen(),
// or safe_memset() then resume at the appropriate _faulted label
31 changes: 11 additions & 20 deletions Kernel/Scheduler.cpp
@@ -235,7 +235,7 @@ bool Scheduler::pick_next()
dbgln("Scheduler[{}]: Switch to {} @ {:04x}:{:08x}",
Processor::id(),
thread_to_schedule,
thread_to_schedule.regs().cs, thread_to_schedule.regs().eip);
thread_to_schedule.regs().cs, thread_to_schedule.regs().ip());
#else
dbgln("Scheduler[{}]: Switch to {} @ {:04x}:{:016x}",
Processor::id(),
@@ -295,13 +295,16 @@ bool Scheduler::context_switch(Thread* thread)
from_thread->set_state(Thread::Runnable);

#ifdef LOG_EVERY_CONTEXT_SWITCH
const auto msg =
# if ARCH(I386)
dbgln("Scheduler[{}]: {} -> {} [prio={}] {:04x}:{:08x}", Processor::id(), from_thread->tid().value(),
thread->tid().value(), thread->priority(), thread->regs().cs, thread->regs().eip);
"Scheduler[{}]: {} -> {} [prio={}] {:04x}:{:08x}";
# else
dbgln("Scheduler[{}]: {} -> {} [prio={}] {:04x}:{:16x}", Processor::id(), from_thread->tid().value(),
thread->tid().value(), thread->priority(), thread->regs().cs, thread->regs().rip);
"Scheduler[{}]: {} -> {} [prio={}] {:04x}:{:16x}";
# endif

dbgln(msg,
Processor::id(), from_thread->tid().value(),
thread->tid().value(), thread->priority(), thread->regs().cs, thread->regs().ip());
#endif
}

@@ -322,14 +325,8 @@ bool Scheduler::context_switch(Thread* thread)
VERIFY(thread == Thread::current());

if (thread->process().is_user_process()) {
FlatPtr flags;
auto& regs = Thread::current()->get_register_dump_from_stack();
#if ARCH(I386)
flags = regs.eflags;
#else
flags = regs.rflags;
#endif
auto iopl = get_iopl_from_eflags(flags);
auto iopl = get_iopl_from_eflags(regs.flags());
if (iopl != 0) {
PANIC("Switched to thread {} with non-zero IOPL={}", Thread::current()->tid().value(), iopl);
}
@@ -604,15 +601,9 @@ void dump_thread_list(bool with_stack_traces)
};

auto get_eip = [](Thread& thread) -> u32 {
#if ARCH(I386)
if (!thread.current_trap())
return thread.regs().eip;
return thread.get_register_dump_from_stack().eip;
#else
if (!thread.current_trap())
return thread.regs().rip;
return thread.get_register_dump_from_stack().rip;
#endif
return thread.regs().ip();
return thread.get_register_dump_from_stack().ip();
};

Thread::for_each([&](Thread& thread) {
34 changes: 8 additions & 26 deletions Kernel/Syscall.cpp
@@ -107,11 +107,7 @@ KResultOr<FlatPtr> handle(RegisterState& regs, FlatPtr function, FlatPtr arg1, F
// These syscalls need special handling since they never return to the caller.

if (auto* tracer = process.tracer(); tracer && tracer->is_tracing_syscalls()) {
#if ARCH(I386)
regs.eax = 0;
#else
regs.rax = 0;
#endif
regs.set_return_reg(0);
tracer->set_trace_syscalls(false);
process.tracer_trap(*current_thread, regs); // this triggers SIGTRAP and stops the thread!
}
@@ -232,31 +228,17 @@ NEVER_INLINE void syscall_handler(TrapFrame* trap)
handle_crash(regs, "Syscall from non-syscall region", SIGSEGV);
}

#if ARCH(I386)
auto function = regs.eax;
auto arg1 = regs.edx;
auto arg2 = regs.ecx;
auto arg3 = regs.ebx;
#else
auto function = regs.rax;
auto arg1 = regs.rdx;
auto arg2 = regs.rcx;
auto arg3 = regs.rbx;
#endif
FlatPtr function;
FlatPtr arg1;
FlatPtr arg2;
FlatPtr arg3;
regs.capture_syscall_params(function, arg1, arg2, arg3);

auto result = Syscall::handle(regs, function, arg1, arg2, arg3);
if (result.is_error()) {
#if ARCH(I386)
regs.eax = result.error();
#else
regs.rax = result.error();
#endif
regs.set_return_reg(result.error());
} else {
#if ARCH(I386)
regs.eax = result.value();
#else
regs.rax = result.value();
#endif
regs.set_return_reg(result.value());
}

process.big_lock().unlock();
7 changes: 1 addition & 6 deletions Kernel/Syscalls/get_stack_bounds.cpp
@@ -13,12 +13,7 @@ namespace Kernel {
KResultOr<FlatPtr> Process::sys$get_stack_bounds(Userspace<FlatPtr*> user_stack_base, Userspace<size_t*> user_stack_size)
{
auto& regs = Thread::current()->get_register_dump_from_stack();
FlatPtr stack_pointer;
#if ARCH(I386)
stack_pointer = regs.userspace_esp;
#else
stack_pointer = regs.userspace_rsp;
#endif
FlatPtr stack_pointer = regs.userspace_sp();
auto* stack_region = space().find_region_containing(Range { VirtualAddress(stack_pointer), 1 });

// The syscall handler should have killed us if we had an invalid stack pointer.
10 changes: 3 additions & 7 deletions Kernel/Thread.cpp
@@ -1028,16 +1028,12 @@ DispatchSignalResult Thread::dispatch_signal(u8 signal)
auto& regs = get_register_dump_from_stack();
setup_stack(regs);
auto signal_trampoline_addr = process.signal_trampoline().get();
#if ARCH(I386)
regs.eip = signal_trampoline_addr;
#else
regs.rip = signal_trampoline_addr;
#endif
regs.set_ip_reg(signal_trampoline_addr);

#if ARCH(I386)
dbgln_if(SIGNAL_DEBUG, "Thread in state '{}' has been primed with signal handler {:04x}:{:08x} to deliver {}", state_string(), m_regs.cs, m_regs.eip, signal);
dbgln_if(SIGNAL_DEBUG, "Thread in state '{}' has been primed with signal handler {:04x}:{:08x} to deliver {}", state_string(), m_regs.cs, m_regs.ip(), signal);
#else
dbgln_if(SIGNAL_DEBUG, "Thread in state '{}' has been primed with signal handler {:04x}:{:16x} to deliver {}", state_string(), m_regs.cs, m_regs.rip, signal);
dbgln_if(SIGNAL_DEBUG, "Thread in state '{}' has been primed with signal handler {:04x}:{:16x} to deliver {}", state_string(), m_regs.cs, m_regs.ip(), signal);
#endif

return DispatchSignalResult::Continue;
18 changes: 18 additions & 0 deletions Kernel/Thread.h
@@ -108,6 +108,24 @@ struct ThreadRegisters {
FlatPtr rflags;
#endif
FlatPtr cr3;

FlatPtr ip() const
{
#if ARCH(I386)
return eip;
#else
return rip;
#endif
}

FlatPtr sp() const
{
#if ARCH(I386)
return esp;
#else
return rsp;
#endif
}
};

class Thread
