diff --git a/Kernel/Thread.h b/Kernel/Thread.h
index 6dea7b95ec3425..af2cbcb5769a29 100644
--- a/Kernel/Thread.h
+++ b/Kernel/Thread.h
@@ -710,6 +710,7 @@ class Thread
     template<typename T, class... Args>
     [[nodiscard]] BlockResult block(const BlockTimeout& timeout, Args&&... args)
     {
+        ScopedSpinLock scheduler_lock(g_scheduler_lock);
         ScopedSpinLock lock(m_lock);
         // We need to hold m_lock so that nobody can unblock a blocker as soon
         // as it is constructed and registered elsewhere
@@ -718,7 +719,6 @@ class Thread
         bool did_timeout = false;
         RefPtr<Timer> timer;
         {
-            ScopedSpinLock scheduler_lock(g_scheduler_lock);
             // We should never be blocking a blocked (or otherwise non-active) thread.
             ASSERT(state() == Thread::Running);
             ASSERT(m_blocker == nullptr);
@@ -762,15 +762,16 @@ class Thread
         }
         lock.unlock();
+        scheduler_lock.unlock();
 
         // Yield to the scheduler, and wait for us to resume unblocked.
         yield_without_holding_big_lock();
 
+        scheduler_lock.lock();
         lock.lock();
 
         bool is_stopped = false;
         {
-            ScopedSpinLock scheduler_lock(g_scheduler_lock);
             if (t.was_interrupted_by_signal())
                 dispatch_one_pending_signal();
@@ -787,17 +788,21 @@ class Thread
                 did_timeout = true;
         }
 
+        // Notify the blocker that we are no longer blocking. It may need
+        // to clean up now while we're still holding m_lock
+        auto result = t.end_blocking({}, did_timeout); // calls was_unblocked internally
+
         if (timer && !did_timeout) {
             // Cancel the timer while not holding any locks. This allows
             // the timer function to complete before we remove it
             // (e.g. if it's on another processor)
+            lock.unlock();
+            scheduler_lock.unlock();
             TimerQueue::the().cancel_timer(timer.release_nonnull());
+        } else {
+            scheduler_lock.unlock();
         }
 
-        // Notify the blocker that we are no longer blocking. It may need
-        // to clean up now while we're still holding m_lock
-        auto result = t.end_blocking({}, did_timeout); // calls was_unblocked internally
-
         if (is_stopped) {
             // If we're stopped we need to yield
             yield_without_holding_big_lock();
diff --git a/Kernel/TimerQueue.cpp b/Kernel/TimerQueue.cpp
index 229691def6ea56..3a7bbb9feb2ce8 100644
--- a/Kernel/TimerQueue.cpp
+++ b/Kernel/TimerQueue.cpp
@@ -61,7 +61,7 @@ RefPtr<Timer> TimerQueue::add_timer_without_id(const timespec& deadline, Function<void()>&& callback)
     auto timer = adopt(*new Timer(time_to_ticks(deadline), move(callback)));
 
     ScopedSpinLock lock(g_timerqueue_lock);
-    timer->id = 0; // Don't generate a timer id
+    timer->m_id = 0; // Don't generate a timer id
     add_timer_locked(timer);
     return timer;
 }
@@ -70,31 +70,38 @@ TimerId TimerQueue::add_timer(NonnullRefPtr<Timer>&& timer)
 {
     ScopedSpinLock lock(g_timerqueue_lock);
 
-    timer->id = ++m_timer_id_count;
-    ASSERT(timer->id != 0); // wrapped
+    timer->m_id = ++m_timer_id_count;
+    ASSERT(timer->m_id != 0); // wrapped
     add_timer_locked(move(timer));
     return m_timer_id_count;
 }
 
 void TimerQueue::add_timer_locked(NonnullRefPtr<Timer> timer)
 {
-    u64 timer_expiration = timer->expires;
+    u64 timer_expiration = timer->m_expires;
     ASSERT(timer_expiration >= time_to_ticks(TimeManagement::the().monotonic_time()));
 
+    ASSERT(!timer->is_queued());
+
     if (m_timer_queue.is_empty()) {
-        m_timer_queue.append(move(timer));
+        m_timer_queue.append(&timer.leak_ref());
         m_next_timer_due = timer_expiration;
     } else {
-        auto following_timer = m_timer_queue.find([&timer_expiration](auto& other) { return other->expires > timer_expiration; });
-
-        if (following_timer.is_end()) {
-            m_timer_queue.append(move(timer));
-        } else {
-            auto next_timer_needs_update = following_timer.is_begin();
-            m_timer_queue.insert_before(following_timer, move(timer));
-
+        Timer* following_timer = nullptr;
+        m_timer_queue.for_each([&](Timer& t) {
+            if (t.m_expires > timer_expiration) {
+                following_timer = &t;
+                return IterationDecision::Break;
+            }
+            return IterationDecision::Continue;
+        });
+        if (following_timer) {
+            bool next_timer_needs_update = m_timer_queue.head() == following_timer;
+            m_timer_queue.insert_before(following_timer, &timer.leak_ref());
             if (next_timer_needs_update)
                 m_next_timer_due = timer_expiration;
+        } else {
+            m_timer_queue.append(&timer.leak_ref());
         }
     }
 }
@@ -125,51 +132,106 @@ u64 TimerQueue::time_to_ticks(const timespec& tspec) const
 bool TimerQueue::cancel_timer(TimerId id)
 {
     ScopedSpinLock lock(g_timerqueue_lock);
-    auto it = m_timer_queue.find([id](auto& timer) { return timer->id == id; });
-    if (it.is_end())
+    Timer* found_timer = nullptr;
+    if (m_timer_queue.for_each([&](Timer& timer) {
+            if (timer.m_id == id) {
+                found_timer = &timer;
+                return IterationDecision::Break;
+            }
+            return IterationDecision::Continue;
+        })
+        != IterationDecision::Break) {
+        // The timer may be executing right now, if it is then it should
+        // be in m_timers_executing. If it is then release the lock
+        // briefly to allow it to finish by removing itself
+        // NOTE: This can only happen with multiple processors!
+        while (m_timers_executing.for_each([&](Timer& timer) {
+            if (timer.m_id == id)
+                return IterationDecision::Break;
+            return IterationDecision::Continue;
+        }) == IterationDecision::Break) {
+            // NOTE: This isn't the most efficient way to wait, but
+            // it should only happen when multiple processors are used.
+            // Also, the timers should execute pretty quickly, so it
+            // should not loop here for very long. But we can't yield.
+            lock.unlock();
+            Processor::wait_check();
+            lock.lock();
+        }
+        // We were not able to cancel the timer, but at this point
+        // the handler should have completed if it was running!
         return false;
+    }
+
+    ASSERT(found_timer);
+    bool was_next_timer = (m_timer_queue.head() == found_timer);
 
-    auto was_next_timer = it.is_begin();
-    m_timer_queue.remove(it);
+    m_timer_queue.remove(found_timer);
+    found_timer->set_queued(false);
 
     if (was_next_timer)
         update_next_timer_due();
 
+    lock.unlock();
+    found_timer->unref();
     return true;
 }
 
-bool TimerQueue::cancel_timer(const NonnullRefPtr<Timer>& timer)
+bool TimerQueue::cancel_timer(Timer& timer)
 {
     ScopedSpinLock lock(g_timerqueue_lock);
-    auto it = m_timer_queue.find([timer](auto& t) { return t.ptr() == timer.ptr(); });
-    if (it.is_end())
+    if (!m_timer_queue.contains_slow(&timer)) {
+        // The timer may be executing right now, if it is then it should
+        // be in m_timers_executing. If it is then release the lock
+        // briefly to allow it to finish by removing itself
+        // NOTE: This can only happen with multiple processors!
+        while (m_timers_executing.contains_slow(&timer)) {
+            // NOTE: This isn't the most efficient way to wait, but
+            // it should only happen when multiple processors are used.
+            // Also, the timers should execute pretty quickly, so it
+            // should not loop here for very long. But we can't yield.
+            lock.unlock();
+            Processor::wait_check();
+            lock.lock();
+        }
+        // We were not able to cancel the timer, but at this point
+        // the handler should have completed if it was running!
         return false;
+    }
 
-    auto was_next_timer = it.is_begin();
-    m_timer_queue.remove(it);
+    bool was_next_timer = (m_timer_queue.head() == &timer);
+    m_timer_queue.remove(&timer);
+    timer.set_queued(false);
 
     if (was_next_timer)
         update_next_timer_due();
-
     return true;
 }
 
 void TimerQueue::fire()
 {
     ScopedSpinLock lock(g_timerqueue_lock);
-    if (m_timer_queue.is_empty())
+    auto* timer = m_timer_queue.head();
+    if (!timer)
         return;
 
-    ASSERT(m_next_timer_due == m_timer_queue.first()->expires);
+    ASSERT(m_next_timer_due == timer->m_expires);
 
-    while (!m_timer_queue.is_empty() && TimeManagement::the().monotonic_ticks() > m_timer_queue.first()->expires) {
-        auto timer = m_timer_queue.take_first();
+    while (timer && TimeManagement::the().monotonic_ticks() > timer->m_expires) {
+        m_timer_queue.remove(timer);
+        m_timers_executing.append(timer);
 
         update_next_timer_due();
 
         lock.unlock();
-        timer->callback();
+        timer->m_callback();
         lock.lock();
+
+        m_timers_executing.remove(timer);
+        timer->set_queued(false);
+        timer->unref();
+
+        timer = m_timer_queue.head();
     }
 }
 
@@ -177,10 +239,10 @@ void TimerQueue::update_next_timer_due()
 {
     ASSERT(g_timerqueue_lock.is_locked());
 
-    if (m_timer_queue.is_empty())
-        m_next_timer_due = 0;
+    if (auto* next_timer = m_timer_queue.head())
+        m_next_timer_due = next_timer->m_expires;
     else
-        m_next_timer_due = m_timer_queue.first()->expires;
+        m_next_timer_due = 0;
 }
 
 }
diff --git a/Kernel/TimerQueue.h b/Kernel/TimerQueue.h
index 920486c7095a58..a6c3ae335643db 100644
--- a/Kernel/TimerQueue.h
+++ b/Kernel/TimerQueue.h
@@ -27,41 +27,59 @@
 #pragma once
 
 #include <AK/Function.h>
+#include <AK/InlineLinkedList.h>
 #include <AK/NonnullRefPtr.h>
 #include <AK/OwnPtr.h>
 #include <AK/RefCounted.h>
-#include <AK/SinglyLinkedList.h>
 #include <Kernel/Time/TimeManagement.h>
 
 namespace Kernel {
 
 typedef u64 TimerId;
 
-struct Timer : public RefCounted<Timer> {
-    TimerId id;
-    u64 expires;
-    Function<void()> callback;
+class Timer : public RefCounted<Timer>
+    , public InlineLinkedListNode<Timer> {
+    friend class TimerQueue;
+    friend class InlineLinkedListNode<Timer>;
+
+public:
+    Timer(u64 expires, Function<void()>&& callback)
+        : m_expires(expires)
+        , m_callback(move(callback))
+    {
+    }
+    ~Timer()
+    {
+        ASSERT(!is_queued());
+    }
+
+private:
+    TimerId m_id;
+    u64 m_expires;
+    Function<void()> m_callback;
+    Timer* m_next { nullptr };
+    Timer* m_prev { nullptr };
+    Atomic<bool> m_queued { false };
+
     bool operator<(const Timer& rhs) const
     {
-        return expires < rhs.expires;
+        return m_expires < rhs.m_expires;
     }
     bool operator>(const Timer& rhs) const
    {
-        return expires > rhs.expires;
+        return m_expires > rhs.m_expires;
    }
     bool operator==(const Timer& rhs) const
     {
-        return id == rhs.id;
-    }
-
-    Timer(u64 expires, Function<void()>&& callback)
-        : expires(expires)
-        , callback(move(callback))
-    {
+        return m_id == rhs.m_id;
     }
+    bool is_queued() const { return m_queued.load(AK::MemoryOrder::memory_order_relaxed); }
+    void set_queued(bool queued) { m_queued.store(queued, AK::MemoryOrder::memory_order_relaxed); }
 };
 
 class TimerQueue {
+    friend class Timer;
+
 public:
     TimerQueue();
     static TimerQueue& the();
@@ -70,10 +88,14 @@ class TimerQueue {
     RefPtr<Timer> add_timer_without_id(const timespec& timeout, Function<void()>&& callback);
     TimerId add_timer(timeval& timeout, Function<void()>&& callback);
     bool cancel_timer(TimerId id);
-    bool cancel_timer(const NonnullRefPtr<Timer>&);
+    bool cancel_timer(NonnullRefPtr<Timer>&& timer)
+    {
+        return cancel_timer(timer.leak_ref());
+    }
     void fire();
 
 private:
+    bool cancel_timer(Timer&);
     void update_next_timer_due();
     void add_timer_locked(NonnullRefPtr<Timer>);
 
@@ -83,7 +105,8 @@ class TimerQueue {
     u64 m_next_timer_due { 0 };
     u64 m_timer_id_count { 0 };
    u64 m_ticks_per_second { 0 };
-    SinglyLinkedList<NonnullRefPtr<Timer>> m_timer_queue;
+    InlineLinkedList<Timer> m_timer_queue;
+    InlineLinkedList<Timer> m_timers_executing;
 };
 
 }
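
The Thread.h hunks change block() from taking g_scheduler_lock in two short-lived inner scopes to holding it across the whole setup and teardown, always nested outside m_lock: both locks are acquired up front, dropped in reverse order around the yield, and reacquired in the same fixed order afterwards. Below is a minimal userspace sketch of that lock-ordering discipline, with std::mutex standing in for the kernel's SpinLock; every name in it is a hypothetical stand-in, not a kernel API.

    #include <cstdio>
    #include <mutex>

    std::mutex g_scheduler_lock; // outer lock: always acquired first
    std::mutex g_thread_lock;    // inner lock: always acquired second

    void block_sketch()
    {
        // Acquire in the fixed order: scheduler lock, then thread lock.
        std::unique_lock scheduler_lock(g_scheduler_lock);
        std::unique_lock thread_lock(g_thread_lock);

        // ... construct and register the blocker while both are held ...

        // Drop in reverse order before blocking, so other threads can
        // be scheduled while we sleep.
        thread_lock.unlock();
        scheduler_lock.unlock();

        // yield_without_holding_big_lock() would run here.

        // Reacquire in the same fixed order once we are unblocked.
        scheduler_lock.lock();
        thread_lock.lock();

        std::printf("resumed with both locks held\n");
        // ... check signals/timeout, then notify the blocker ...
    }

    int main()
    {
        block_sketch();
    }

Acquiring the two locks in one global order and releasing them in reverse is what keeps the pair free of lock-order inversions between the scheduler lock and the per-thread lock.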
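The new m_timers_executing list in TimerQueue addresses a window that only exists with multiple processors: fire() drops g_timerqueue_lock while it runs a timer's callback, so during that window a concurrent canceller no longer finds the timer in m_timer_queue even though its handler is still running. Both cancel_timer() overloads therefore treat "not in m_timer_queue but in m_timers_executing" as "handler is running right now" and spin, releasing the lock between checks, until the handler has removed itself. Below is a self-contained sketch of the same protocol, assuming std::mutex and std::vector in place of SpinLock and InlineLinkedList, and std::this_thread::yield() in place of Processor::wait_check(); all names are invented for illustration.

    #include <algorithm>
    #include <mutex>
    #include <thread>
    #include <vector>

    struct Timer { };

    std::mutex g_lock;
    std::vector<Timer*> g_queue;     // pending timers
    std::vector<Timer*> g_executing; // timers whose callback is running

    static bool contains(std::vector<Timer*> const& list, Timer* t)
    {
        return std::find(list.begin(), list.end(), t) != list.end();
    }

    // Modeled on fire(): move the timer to the executing list, then run
    // the callback without holding the lock.
    void fire_one_sketch(Timer* timer, void (*callback)(Timer*))
    {
        std::unique_lock lock(g_lock);
        g_queue.erase(std::remove(g_queue.begin(), g_queue.end(), timer), g_queue.end());
        g_executing.push_back(timer);
        lock.unlock();
        callback(timer); // runs while cancellers can observe g_executing
        lock.lock();
        g_executing.erase(std::remove(g_executing.begin(), g_executing.end(), timer), g_executing.end());
    }

    // Modeled on cancel_timer(): if the timer is not queued, wait for a
    // possibly in-flight handler to finish before reporting failure.
    bool cancel_timer_sketch(Timer* timer)
    {
        std::unique_lock lock(g_lock);
        auto it = std::find(g_queue.begin(), g_queue.end(), timer);
        if (it == g_queue.end()) {
            // The handler may be running on another CPU; drop the lock
            // briefly so it can finish and remove itself.
            while (contains(g_executing, timer)) {
                lock.unlock();
                std::this_thread::yield(); // stand-in for Processor::wait_check()
                lock.lock();
            }
            // Cancellation failed, but the handler has completed by now.
            return false;
        }
        g_queue.erase(it);
        return true;
    }

    int main()
    {
        Timer t;
        {
            std::unique_lock lock(g_lock);
            g_queue.push_back(&t);
        }
        fire_one_sketch(&t, [](Timer*) { /* handler body */ });
        return cancel_timer_sketch(&t) ? 1 : 0; // false: already fired
    }

A false return thus carries a useful guarantee: by the time cancel_timer() gives up, the callback is no longer executing, which is what Thread::block() relies on when it cancels its timeout timer without holding any locks.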
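TimerQueue.h swaps SinglyLinkedList<NonnullRefPtr<Timer>> for an intrusive InlineLinkedList<Timer>, which links timers through their own m_next/m_prev members and can only hold raw pointers, so reference counting becomes manual: insertion paths call leak_ref() to transfer one reference into the list, and removal paths are expected to drop that reference again with unref(), as fire() and cancel_timer(TimerId) do. The Atomic m_queued flag (relaxed ordering, which appears sufficient here since the flag is only written under g_timerqueue_lock) backs the ~Timer() assertion that a timer never dies while still linked. Here is a standalone sketch of that ownership convention, modeling AK::RefCounted with std::atomic; the Sketch-prefixed names are invented for illustration.

    #include <atomic>
    #include <cassert>

    class SketchTimer {
    public:
        ~SketchTimer() { assert(!is_queued()); } // mirrors ASSERT(!is_queued())

        void ref() { m_ref_count.fetch_add(1, std::memory_order_relaxed); }
        void unref()
        {
            // Drop one reference; the last one destroys the timer.
            if (m_ref_count.fetch_sub(1, std::memory_order_acq_rel) == 1)
                delete this;
        }

        bool is_queued() const { return m_queued.load(std::memory_order_relaxed); }
        void set_queued(bool queued) { m_queued.store(queued, std::memory_order_relaxed); }

    private:
        std::atomic<unsigned> m_ref_count { 1 }; // starts at 1, like AK::RefCounted
        std::atomic<bool> m_queued { false };
    };

    // Insertion: the raw pointer handed to the list carries the reference
    // the caller "leaked", analogous to m_timer_queue.append(&timer.leak_ref()).
    SketchTimer* enqueue_sketch(SketchTimer* timer)
    {
        timer->set_queued(true);
        return timer;
    }

    // Removal: clear the flag first, then drop the list's reference,
    // analogous to the remove() + set_queued(false) + unref() sequence in fire().
    void dequeue_sketch(SketchTimer* timer)
    {
        timer->set_queued(false);
        timer->unref(); // may be the last reference
    }

    int main()
    {
        auto* t = enqueue_sketch(new SketchTimer);
        dequeue_sketch(t); // last reference: destructor runs, assertion holds
    }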