Kernel: TimerQueue::cancel_timer needs to wait if timer is executing
We need to be able to guarantee that a timer won't be executing after
TimerQueue::cancel_timer returns. In the case of multiple processors
this means that we may need to wait while the timer handler finishes
execution on another core.

This also fixes a problem in Thread::block and Thread::wait_on where,
theoretically, the timer could still execute after the function had
returned and the Thread had been destroyed.
tomuta authored and awesomekling committed Dec 2, 2020
1 parent c6230b7 commit 601a688
Showing 3 changed files with 143 additions and 53 deletions.
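
The heart of this change is that cancellation now has to synchronize with a handler that may already be running on another processor. The snippet below is a standalone user-space analogue of that guarantee (it is not SerenityOS code; ToyTimer, fire() and cancel() are invented names): the canceller clears its "armed" flag and then spins until any in-flight handler has finished, mirroring what TimerQueue::cancel_timer does in the diff below with m_timers_executing and Processor::wait_check().

#include <atomic>
#include <thread>

struct ToyTimer {
    std::atomic<bool> armed { true };
    std::atomic<bool> executing { false };

    // Called from the "timer tick" context, possibly on another core.
    // Assumes fire() is invoked at most once.
    void fire(void (*handler)())
    {
        executing.store(true);
        if (armed.exchange(false))
            handler();
        executing.store(false);
    }

    // Returns true if the handler was prevented from running at all.
    // Either way, once cancel() returns the handler is not running.
    bool cancel()
    {
        bool cancelled = armed.exchange(false);
        while (executing.load())
            std::this_thread::yield();
        return cancelled;
    }
};

The same reasoning is why Thread::block now drops both spinlocks before calling cancel_timer(): the call may have to wait for the handler to complete, so it must not be made while holding locks the handler could need.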
17 changes: 11 additions & 6 deletions Kernel/Thread.h
@@ -710,6 +710,7 @@ class Thread
template<typename T, class... Args>
[[nodiscard]] BlockResult block(const BlockTimeout& timeout, Args&&... args)
{
ScopedSpinLock scheduler_lock(g_scheduler_lock);
ScopedSpinLock lock(m_lock);
// We need to hold m_lock so that nobody can unblock a blocker as soon
// as it is constructed and registered elsewhere
@@ -718,7 +719,6 @@
bool did_timeout = false;
RefPtr<Timer> timer;
{
ScopedSpinLock scheduler_lock(g_scheduler_lock);
// We should never be blocking a blocked (or otherwise non-active) thread.
ASSERT(state() == Thread::Running);
ASSERT(m_blocker == nullptr);
@@ -762,15 +762,16 @@
}

lock.unlock();
scheduler_lock.unlock();

// Yield to the scheduler, and wait for us to resume unblocked.
yield_without_holding_big_lock();

scheduler_lock.lock();
lock.lock();

bool is_stopped = false;
{
ScopedSpinLock scheduler_lock(g_scheduler_lock);
if (t.was_interrupted_by_signal())
dispatch_one_pending_signal();

@@ -787,17 +788,21 @@
did_timeout = true;
}

// Notify the blocker that we are no longer blocking. It may need
// to clean up now while we're still holding m_lock
auto result = t.end_blocking({}, did_timeout); // calls was_unblocked internally

if (timer && !did_timeout) {
// Cancel the timer while not holding any locks. This allows
// the timer function to complete before we remove it
// (e.g. if it's on another processor)
lock.unlock();
scheduler_lock.unlock();
TimerQueue::the().cancel_timer(timer.release_nonnull());
} else {
scheduler_lock.unlock();
}

// Notify the blocker that we are no longer blocking. It may need
// to clean up now while we're still holding m_lock
auto result = t.end_blocking({}, did_timeout); // calls was_unblocked internally

if (is_stopped) {
// If we're stopped we need to yield
yield_without_holding_big_lock();
124 changes: 93 additions & 31 deletions Kernel/TimerQueue.cpp
@@ -61,7 +61,7 @@ RefPtr<Timer> TimerQueue::add_timer_without_id(const timespec& deadline, Function<void()>&& callback)
auto timer = adopt(*new Timer(time_to_ticks(deadline), move(callback)));

ScopedSpinLock lock(g_timerqueue_lock);
timer->id = 0; // Don't generate a timer id
timer->m_id = 0; // Don't generate a timer id
add_timer_locked(timer);
return timer;
}
@@ -70,31 +70,38 @@ TimerId TimerQueue::add_timer(NonnullRefPtr<Timer>&& timer)
{
ScopedSpinLock lock(g_timerqueue_lock);

timer->id = ++m_timer_id_count;
ASSERT(timer->id != 0); // wrapped
timer->m_id = ++m_timer_id_count;
ASSERT(timer->m_id != 0); // wrapped
add_timer_locked(move(timer));
return m_timer_id_count;
}

void TimerQueue::add_timer_locked(NonnullRefPtr<Timer> timer)
{
u64 timer_expiration = timer->expires;
u64 timer_expiration = timer->m_expires;
ASSERT(timer_expiration >= time_to_ticks(TimeManagement::the().monotonic_time()));

ASSERT(!timer->is_queued());

if (m_timer_queue.is_empty()) {
m_timer_queue.append(move(timer));
m_timer_queue.append(&timer.leak_ref());
m_next_timer_due = timer_expiration;
} else {
auto following_timer = m_timer_queue.find([&timer_expiration](auto& other) { return other->expires > timer_expiration; });

if (following_timer.is_end()) {
m_timer_queue.append(move(timer));
} else {
auto next_timer_needs_update = following_timer.is_begin();
m_timer_queue.insert_before(following_timer, move(timer));

Timer* following_timer = nullptr;
m_timer_queue.for_each([&](Timer& t) {
if (t.m_expires > timer_expiration) {
following_timer = &t;
return IterationDecision::Break;
}
return IterationDecision::Continue;
});
if (following_timer) {
bool next_timer_needs_update = m_timer_queue.head() == following_timer;
m_timer_queue.insert_before(following_timer, &timer.leak_ref());
if (next_timer_needs_update)
m_next_timer_due = timer_expiration;
} else {
m_timer_queue.append(&timer.leak_ref());
}
}
}
@@ -125,62 +132,117 @@ u64 TimerQueue::time_to_ticks(const timespec& tspec) const
bool TimerQueue::cancel_timer(TimerId id)
{
ScopedSpinLock lock(g_timerqueue_lock);
auto it = m_timer_queue.find([id](auto& timer) { return timer->id == id; });
if (it.is_end())
Timer* found_timer = nullptr;
if (m_timer_queue.for_each([&](Timer& timer) {
if (timer.m_id == id) {
found_timer = &timer;
return IterationDecision::Break;
}
return IterationDecision::Continue;
})
!= IterationDecision::Break) {
// The timer may be executing right now, if it is then it should
// be in m_timers_executing. If it is then release the lock
// briefly to allow it to finish by removing itself
// NOTE: This can only happen with multiple processors!
while (m_timers_executing.for_each([&](Timer& timer) {
if (timer.m_id == id)
return IterationDecision::Break;
return IterationDecision::Continue;
}) == IterationDecision::Break) {
// NOTE: This isn't the most efficient way to wait, but
// it should only happen when multiple processors are used.
// Also, the timers should execute pretty quickly, so it
// should not loop here for very long. But we can't yield.
lock.unlock();
Processor::wait_check();
lock.lock();
}
// We were not able to cancel the timer, but at this point
// the handler should have completed if it was running!
return false;
}

ASSERT(found_timer);
bool was_next_timer = (m_timer_queue.head() == found_timer);

auto was_next_timer = it.is_begin();
m_timer_queue.remove(it);
m_timer_queue.remove(found_timer);
found_timer->set_queued(false);

if (was_next_timer)
update_next_timer_due();

lock.unlock();
found_timer->unref();
return true;
}

bool TimerQueue::cancel_timer(const NonnullRefPtr<Timer>& timer)
bool TimerQueue::cancel_timer(Timer& timer)
{
ScopedSpinLock lock(g_timerqueue_lock);
auto it = m_timer_queue.find([timer](auto& t) { return t.ptr() == timer.ptr(); });
if (it.is_end())
if (!m_timer_queue.contains_slow(&timer)) {
// The timer may be executing right now, if it is then it should
// be in m_timers_executing. If it is then release the lock
// briefly to allow it to finish by removing itself
// NOTE: This can only happen with multiple processors!
while (m_timers_executing.contains_slow(&timer)) {
// NOTE: This isn't the most efficient way to wait, but
// it should only happen when multiple processors are used.
// Also, the timers should execute pretty quickly, so it
// should not loop here for very long. But we can't yield.
lock.unlock();
Processor::wait_check();
lock.lock();
}
// We were not able to cancel the timer, but at this point
// the handler should have completed if it was running!
return false;
}

auto was_next_timer = it.is_begin();
m_timer_queue.remove(it);
bool was_next_timer = (m_timer_queue.head() == &timer);
m_timer_queue.remove(&timer);
timer.set_queued(false);

if (was_next_timer)
update_next_timer_due();

return true;
}

void TimerQueue::fire()
{
ScopedSpinLock lock(g_timerqueue_lock);
if (m_timer_queue.is_empty())
auto* timer = m_timer_queue.head();
if (!timer)
return;

ASSERT(m_next_timer_due == m_timer_queue.first()->expires);
ASSERT(m_next_timer_due == timer->m_expires);

while (!m_timer_queue.is_empty() && TimeManagement::the().monotonic_ticks() > m_timer_queue.first()->expires) {
auto timer = m_timer_queue.take_first();
while (timer && TimeManagement::the().monotonic_ticks() > timer->m_expires) {
m_timer_queue.remove(timer);
m_timers_executing.append(timer);

update_next_timer_due();

lock.unlock();
timer->callback();
timer->m_callback();
lock.lock();

m_timers_executing.remove(timer);
timer->set_queued(false);
timer->unref();

timer = m_timer_queue.head();
}
}

void TimerQueue::update_next_timer_due()
{
ASSERT(g_timerqueue_lock.is_locked());

if (m_timer_queue.is_empty())
m_next_timer_due = 0;
if (auto* next_timer = m_timer_queue.head())
m_next_timer_due = next_timer->m_expires;
else
m_next_timer_due = m_timer_queue.first()->expires;
m_next_timer_due = 0;
}

}
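
A note on the fire() side of this handshake: the callback runs with g_timerqueue_lock released, and the timer stays on m_timers_executing until the callback returns; that window is exactly what cancel_timer()'s wait loop observes. The sketch below is a generic user-space rendering of the same pattern and is illustrative only (Entry, fire_expired and the std:: containers are not part of the kernel code):

#include <functional>
#include <list>
#include <mutex>

struct Entry {
    std::function<void()> callback;
};

std::mutex queue_lock;
std::list<Entry*> queue;     // pending entries, like m_timer_queue
std::list<Entry*> executing; // in-flight entries, like m_timers_executing

void fire_expired(Entry* entry)
{
    std::unique_lock lock(queue_lock);
    queue.remove(entry);
    executing.push_back(entry);

    lock.unlock(); // never hold the queue lock across the callback
    entry->callback();
    lock.lock();

    executing.remove(entry); // only now can a concurrent canceller stop waiting
}

Keeping the entry on a separate "executing" list, rather than simply dropping it, is what gives cancellers something to poll without the queue lock being held across the callback.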
55 changes: 39 additions & 16 deletions Kernel/TimerQueue.h
@@ -27,41 +27,59 @@
#pragma once

#include <AK/Function.h>
#include <AK/InlineLinkedList.h>
#include <AK/NonnullRefPtr.h>
#include <AK/OwnPtr.h>
#include <AK/RefCounted.h>
#include <AK/SinglyLinkedList.h>
#include <Kernel/Time/TimeManagement.h>

namespace Kernel {

typedef u64 TimerId;

struct Timer : public RefCounted<Timer> {
TimerId id;
u64 expires;
Function<void()> callback;
class Timer : public RefCounted<Timer>
, public InlineLinkedListNode<Timer> {
friend class TimerQueue;
friend class InlineLinkedListNode<Timer>;

public:
Timer(u64 expires, Function<void()>&& callback)
: m_expires(expires)
, m_callback(move(callback))
{
}
~Timer()
{
ASSERT(!is_queued());
}

private:
TimerId m_id;
u64 m_expires;
Function<void()> m_callback;
Timer* m_next { nullptr };
Timer* m_prev { nullptr };
Atomic<bool> m_queued { false };

bool operator<(const Timer& rhs) const
{
return expires < rhs.expires;
return m_expires < rhs.m_expires;
}
bool operator>(const Timer& rhs) const
{
return expires > rhs.expires;
return m_expires > rhs.m_expires;
}
bool operator==(const Timer& rhs) const
{
return id == rhs.id;
}

Timer(u64 expires, Function<void()>&& callback)
: expires(expires)
, callback(move(callback))
{
return m_id == rhs.m_id;
}
bool is_queued() const { return m_queued.load(AK::MemoryOrder::memory_order_relaxed); }
void set_queued(bool queued) { m_queued.store(queued, AK::MemoryOrder::memory_order_relaxed); }
};

class TimerQueue {
friend class Timer;

public:
TimerQueue();
static TimerQueue& the();
@@ -70,10 +88,14 @@ class TimerQueue {
RefPtr<Timer> add_timer_without_id(const timespec& timeout, Function<void()>&& callback);
TimerId add_timer(timeval& timeout, Function<void()>&& callback);
bool cancel_timer(TimerId id);
bool cancel_timer(const NonnullRefPtr<Timer>&);
bool cancel_timer(NonnullRefPtr<Timer>&& timer)
{
return cancel_timer(timer.leak_ref());
}
void fire();

private:
bool cancel_timer(Timer&);
void update_next_timer_due();
void add_timer_locked(NonnullRefPtr<Timer>);

@@ -83,7 +105,8 @@ class TimerQueue {
u64 m_next_timer_due { 0 };
u64 m_timer_id_count { 0 };
u64 m_ticks_per_second { 0 };
SinglyLinkedList<NonnullRefPtr<Timer>> m_timer_queue;
InlineLinkedList<Timer> m_timer_queue;
InlineLinkedList<Timer> m_timers_executing;
};

}
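
For callers, the practical effect is in what cancel_timer()'s return value now means. Below is a hedged usage sketch in kernel context; the deadline arithmetic and the empty handler are placeholders, not code from this commit:

timespec deadline = TimeManagement::the().monotonic_time();
deadline.tv_sec += 2; // hypothetical two-second timeout

RefPtr<Timer> timer = TimerQueue::the().add_timer_without_id(deadline, [] {
    // Timer handler; may run on another processor.
});

// ... later, when the timeout is no longer wanted ...
if (timer && TimerQueue::the().cancel_timer(timer.release_nonnull())) {
    // true: the timer was still queued, so the handler never ran and never will.
} else {
    // false: the handler already fired or was mid-flight, but it is guaranteed
    // to have finished by the time cancel_timer() returned, so any state it
    // touches can now safely be torn down.
}

Thread::block relies on exactly this guarantee: it calls cancel_timer() only after dropping its spinlocks, and once the call returns it knows the timer callback can no longer touch the Thread.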
