Commit f9c2b639 authored by Philippe Gerum

evl/clock: group core tick handling code


Signed-off-by: Philippe Gerum <rpm@xenomai.org>
parent 5effd51f
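
In short, the tick handling previously split between the old evl_announce_tick() and clock_event_handler() is grouped behind a single helper. A condensed sketch of the resulting structure (names are taken from the hunks below; bodies are abridged, see the diff for the real ones):

/*
 * Shared per-timer-base tick processing, factored out of the old
 * evl_announce_tick().
 */
static void do_clock_tick(struct evl_clock *clock, struct evl_timerbase *tmb);

void evl_core_tick(struct clock_event_device *dummy) /* hard irqs off */
{
	/* Core tick: always this CPU's timer base of the core clock. */
	do_clock_tick(&evl_mono_clock, evl_this_cpu_timers(&evl_mono_clock));
	/* ...then kick the in-band tick if the root thread was preempted. */
}

void evl_announce_tick(struct evl_clock *clock) /* hard irqs off */
{
	/* Non-affine external clocks may fall back to CPU0's base (SMP). */
	do_clock_tick(clock, evl_this_cpu_timers(clock));
}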
@@ -20,6 +20,7 @@
 struct evl_rq;
 struct evl_timerbase;
+struct clock_event_device;
 
 struct evl_clock_gravity {
 	ktime_t irq;
@@ -72,6 +73,8 @@ int evl_init_clock(struct evl_clock *clock,
 int evl_init_slave_clock(struct evl_clock *clock,
 			struct evl_clock *master);
 
+void evl_core_tick(struct clock_event_device *dummy);
+
 void evl_announce_tick(struct evl_clock *clock);
 
 void evl_adjust_timers(struct evl_clock *clock,
......
@@ -272,30 +272,16 @@ static inline bool timer_needs_enqueuing(struct evl_timer *timer)
 		EVL_TIMER_RUNNING);
 }
 
-/* Announce a tick from a master clock. */
-void evl_announce_tick(struct evl_clock *clock)
+/* hard irqs off */
+static void do_clock_tick(struct evl_clock *clock, struct evl_timerbase *tmb)
 {
 	struct evl_rq *rq = this_evl_rq();
-	struct evl_timerbase *tmb;
 	struct evl_timer *timer;
 	struct evl_tqueue *tq;
 	struct evl_tnode *tn;
 	unsigned long flags;
 	ktime_t now;
 
-#ifdef CONFIG_SMP
-	/*
-	 * Some external clock devices may be global without any
-	 * particular IRQ affinity, in which case the associated
-	 * timers will be queued to CPU0.
-	 */
-	if (clock != &evl_mono_clock &&
-	    !cpumask_test_cpu(evl_rq_cpu(rq), &clock->affinity))
-		tmb = evl_percpu_timers(clock, 0);
-	else
-#endif
-		tmb = evl_this_cpu_timers(clock);
-
 	tq = &tmb->q;
 	raw_spin_lock_irqsave(&tmb->lock, flags);
@@ -340,6 +326,48 @@ void evl_announce_tick(struct evl_clock *clock)
 	raw_spin_unlock_irqrestore(&tmb->lock, flags);
 }
 
+void evl_core_tick(struct clock_event_device *dummy) /* hard irqs off */
+{
+	struct evl_rq *this_rq = this_evl_rq();
+	struct evl_timerbase *tmb;
+
+	if (EVL_WARN_ON_ONCE(CORE, !is_evl_cpu(evl_rq_cpu(this_rq))))
+		return;
+
+	tmb = evl_this_cpu_timers(&evl_mono_clock);
+	do_clock_tick(&evl_mono_clock, tmb);
+
+	/*
+	 * If an EVL thread was preempted by this clock event, any
+	 * transition to the root thread will cause a pending in-band
+	 * tick to be propagated by evl_schedule() from
+	 * exit_oob_irq(), so we may have to propagate the in-band
+	 * tick immediately only if the root thread was preempted.
+	 */
+	if ((this_rq->lflags & RQ_TPROXY) && (this_rq->curr->state & T_ROOT))
+		evl_notify_proxy_tick(this_rq);
+}
+
+void evl_announce_tick(struct evl_clock *clock) /* hard irqs off */
+{
+	struct evl_rq *this_rq = this_evl_rq();
+	struct evl_timerbase *tmb;
+
+#ifdef CONFIG_SMP
+	/*
+	 * Some external clock devices may be global without any
+	 * particular IRQ affinity, in which case the associated
+	 * timers will be queued to CPU0.
+	 */
+	if (!cpumask_test_cpu(evl_rq_cpu(this_rq), &clock->affinity))
+		tmb = evl_percpu_timers(clock, 0);
+	else
+#endif
+		tmb = evl_this_cpu_timers(clock);
+
+	do_clock_tick(clock, tmb);
+}
+EXPORT_SYMBOL_GPL(evl_announce_tick);
 
 void evl_stop_timers(struct evl_clock *clock)
......
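
Since evl_announce_tick() is now exported, an out-of-band driver for an external clock device can feed its ticks into the core from its own interrupt handler. A minimal sketch of such a caller, assuming the clock was set up beforehand with evl_init_clock() from the header above; my_ext_clock, my_clock_irq and my_clock_setup are hypothetical names, and IRQF_OOB is the Dovetail flag requesting out-of-band delivery:

#include <linux/interrupt.h>
#include <evl/clock.h>

static struct evl_clock my_ext_clock;	/* hypothetical external clock */

/* Runs from the oob stage, hard irqs off, as evl_announce_tick() expects. */
static irqreturn_t my_clock_irq(int irq, void *dev_id)
{
	evl_announce_tick(&my_ext_clock);	/* process timers queued on it */
	return IRQ_HANDLED;
}

static int my_clock_setup(unsigned int irq)
{
	/* IRQF_OOB: have Dovetail deliver this IRQ out-of-band. */
	return request_irq(irq, my_clock_irq, IRQF_OOB, "my-ext-clock", NULL);
}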
@@ -80,41 +80,6 @@ static int proxy_set_oneshot_stopped(struct clock_event_device *proxy_dev)
 	return 0;
 }
 
-/*
- * This is our high-precision clock tick handler. We only have two
- * possible callers, each of them may only run over a CPU which is a
- * member of the real-time set:
- *
- * - our TIMER_OOB_IPI handler, such IPI is directed to members of our
-
- *   real-time CPU set exclusively.
- *
- * - our clock_event_handler() routine. The IRQ pipeline
- *   guarantees that such handler always runs over a CPU which is a
- *   member of the CPU set passed to enable_clock_devices() (i.e. our
- *   real-time CPU set).
- *
- * hard IRQs are off.
- */
-static void clock_event_handler(struct clock_event_device *dummy)
-{
-	struct evl_rq *this_rq = this_evl_rq();
-
-	if (EVL_WARN_ON_ONCE(CORE, !is_evl_cpu(evl_rq_cpu(this_rq))))
-		return;
-
-	evl_announce_tick(&evl_mono_clock);
-
-	/*
-	 * If an EVL thread was preempted by this clock event, any
-	 * transition to the root thread will cause a pending in-band
-	 * tick to be propagated by evl_schedule() from
-	 * exit_oob_irq(), so we may have to propagate the in-band
-	 * tick immediately only if the root thread was preempted.
-	 */
-	if ((this_rq->lflags & RQ_TPROXY) && (this_rq->curr->state & T_ROOT))
-		evl_notify_proxy_tick(this_rq);
-}
-
 void evl_notify_proxy_tick(struct evl_rq *this_rq) /* hard IRQs off. */
 {
 	/*
@@ -131,7 +96,7 @@ void evl_notify_proxy_tick(struct evl_rq *this_rq) /* hard IRQs off. */
 
 static irqreturn_t clock_ipi_handler(int irq, void *dev_id)
 {
-	clock_event_handler(NULL);
+	evl_core_tick(NULL);
 
 	return IRQ_HANDLED;
 }
@@ -142,7 +107,7 @@ static void setup_proxy(struct clock_proxy_device *dev)
 {
 	struct clock_event_device *proxy_dev = &dev->proxy_device;
 
-	dev->handle_oob_event = clock_event_handler;
+	dev->handle_oob_event = evl_core_tick;
 	proxy_dev->features |= CLOCK_EVT_FEAT_KTIME;
 	proxy_dev->set_next_ktime = proxy_set_next_ktime;
 	if (proxy_dev->set_state_oneshot_stopped)
......
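
Some context for the last two hunks: clock_ipi_handler() catches the TIMER_OOB_IPI directed at members of the real-time CPU set, and handle_oob_event is the hook the IRQ pipeline invokes, hard irqs off, when the real clock event device fires behind the proxy; both paths now land directly in evl_core_tick(). A rough sketch of the Dovetail proxy descriptor as this code uses it (field list abridged and assumed; check the Dovetail sources for the authoritative definition):

struct clock_proxy_device {
	struct clock_event_device proxy_device;	/* device the in-band kernel drives */
	struct clock_event_device *real_device;	/* underlying hardware timer */
	/* Invoked from the oob stage when the real device fires: */
	void (*handle_oob_event)(struct clock_event_device *dev);
	/* ... */
};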