/*
 * SPDX-License-Identifier: GPL-2.0
 *
 * Copyright (C) 2016, 2018 Philippe Gerum  <rpm@xenomai.org>
 */

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/clockchips.h>
#include <linux/tick.h>
#include <linux/irqdomain.h>
#include <linux/ktime.h>
#include <linux/kernel.h>
#include <linux/timekeeping.h>
#include <linux/irq_pipeline.h>
#include <linux/slab.h>
#include <evl/sched.h>
#include <evl/timer.h>
#include <evl/clock.h>
#include <evl/tick.h>
#include <evl/control.h>
#include <trace/events/evl.h>

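/* Per-CPU proxy tick device installed by setup_proxy(). */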
static DEFINE_PER_CPU(struct clock_proxy_device *, proxy_tick_device);

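/*
 * Proxy handler the in-band kernel calls to program its next tick;
 * we emulate that tick with the per-CPU inband_timer instead of
 * touching the real hardware.
 */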
static int proxy_set_next_ktime(ktime_t expires,
				struct clock_event_device *proxy_dev)
{
	struct evl_rq *rq;
	unsigned long flags;
	ktime_t delta;

	/*
	 * Negative deltas have been observed. evl_start_timer()
	 * will trigger an immediate shot in such an event.
	 */
	delta = ktime_sub(expires, ktime_get());

	flags = hard_local_irq_save(); /* Prevent CPU migration. */
	rq = this_evl_rq();
	evl_start_timer(&rq->inband_timer,
			evl_abs_timeout(&rq->inband_timer, delta),
			EVL_INFINITE);
	hard_local_irq_restore(flags);

	return 0;
}

static int proxy_set_oneshot_stopped(struct clock_event_device *proxy_dev)
{
	struct clock_event_device *real_dev;
	struct clock_proxy_device *dev;
	unsigned long flags;
	struct evl_rq *rq;

	dev = container_of(proxy_dev, struct clock_proxy_device, proxy_device);

	/*
	 * In-band wants to disable the clock hardware on entering a
	 * tickless state, so we have to stop our in-band tick
	 * emulation. Propagate the request for shutting down the
	 * hardware to the real device only if we have no outstanding
	 * OOB timers. CAUTION: the in-band timer is counted when
	 * assessing the RQ_IDLE condition, so we need to stop it
	 * prior to testing the latter.
	 */
	flags = hard_local_irq_save();

	rq = this_evl_rq();
	evl_stop_timer(&rq->inband_timer);
	rq->local_flags |= RQ_TSTOPPED;

	if (rq->local_flags & RQ_IDLE) {
		real_dev = dev->real_device;
		real_dev->set_state_oneshot_stopped(real_dev);
	}

	hard_local_irq_restore(flags);

	return 0;
}

#ifdef CONFIG_SMP

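/*
 * Out-of-band timer IPI handler: a remote CPU asked us to run the
 * core tick locally (see evl_send_timer_ipi()).
 */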
static irqreturn_t clock_ipi_handler(int irq, void *dev_id)
{
	evl_core_tick(NULL);

	return IRQ_HANDLED;
}

#endif

static void setup_proxy(struct clock_proxy_device *dev)
{
	struct clock_event_device *proxy_dev = &dev->proxy_device;

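	/*
	 * Ticks received by the proxy device are redirected to the EVL
	 * core tick handler, and in-band requests for (re)programming
	 * or stopping the tick go through our proxy handlers.
	 */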
	dev->handle_oob_event = evl_core_tick;
	proxy_dev->features |= CLOCK_EVT_FEAT_KTIME;
	proxy_dev->set_next_ktime = proxy_set_next_ktime;
	if (proxy_dev->set_state_oneshot_stopped)
		proxy_dev->set_state_oneshot_stopped = proxy_set_oneshot_stopped;

	__this_cpu_write(proxy_tick_device, dev);
}

int evl_enable_tick(void)
{
	int ret;

#ifdef CONFIG_SMP
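	/* The out-of-band timer IPI lets remote CPUs kick our local tick. */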
	ret = __request_percpu_irq(TIMER_OOB_IPI,
				clock_ipi_handler,
				IRQF_OOB, "EVL timer IPI",
				&evl_machine_cpudata);
	if (ret)
		return ret;
#endif

	/*
	 * CAUTION:
	 *
	 * - EVL timers may be started only _after_ the proxy clock
	 * device has been set up for the target CPU.
	 *
	 * - do not hold any lock across calls to evl_enable_tick().
	 *
	 * - tick_install_proxy() guarantees that the real clock
	 * device supports oneshot mode, or fails.
	 */
	ret = tick_install_proxy(setup_proxy, &evl_oob_cpus);
	if (ret) {
#ifdef CONFIG_SMP
		free_percpu_irq(TIMER_OOB_IPI,
				&evl_machine_cpudata);
#endif
		return ret;
	}

	return 0;
}

void evl_disable_tick(void)
{
	tick_uninstall_proxy(&evl_oob_cpus);
#ifdef CONFIG_SMP
	free_percpu_irq(TIMER_OOB_IPI, &evl_machine_cpudata);
#endif
	/*
	 * When the kernel is swapping clock event devices on behalf
	 * of enable_clock_devices(), it may end up calling
	 * program_timer() via the synthetic device's
	 * ->set_next_event() handler for resuming the in-band timer.
	 * Therefore, no timer should remain queued before
	 * enable_clock_devices() is called, or unpleasant hangs may
	 * happen if the in-band timer is not at the front of the
	 * queue. You have been warned.
	 */
	evl_stop_timers(&evl_mono_clock);
}

/* per-cpu timer queue locked. */
void evl_program_proxy_tick(struct evl_clock *clock)
{
	struct clock_proxy_device *dev = __this_cpu_read(proxy_tick_device);
	struct clock_event_device *real_dev = dev->real_device;
	struct evl_rq *this_rq = this_evl_rq();
	struct evl_timerbase *tmb;
	struct evl_timer *timer;
	struct evl_tnode *tn;
	int64_t delta;
	u64 cycles;
	ktime_t t;
	int ret;

	/*
	 * Do not reprogram locally when inside the tick handler -
	 * will be done on exit anyway. Also exit if there is no
	 * pending timer.
	 */
	if (this_rq->local_flags & RQ_TIMER)
		return;

	tmb = evl_this_cpu_timers(clock);
	tn = evl_get_tqueue_head(&tmb->q);
	if (tn == NULL) {
		this_rq->local_flags |= RQ_IDLE;
		return;
	}

	/*
	 * Try to defer the next in-band tick, so that it does not
	 * preempt an OOB activity uselessly, in two cases:
	 *
	 * 1) a rescheduling is pending for the current CPU. We may
	 * assume that an EVL thread is about to resume, so we want to
	 * move the in-band tick out of the way until in-band activity
	 * resumes, unless there are no other outstanding timers.
	 *
	 * 2) the current EVL thread is running OOB, in which case we
	 * may defer the in-band tick until the in-band activity
	 * resumes.
	 *
	 * The in-band tick deferral is cleared whenever EVL is about
	 * to yield control to the in-band code (see
	 * __evl_schedule()), or a timer with an earlier timeout date
	 * is scheduled, whichever comes first.
	 */
	this_rq->local_flags &= ~(RQ_TDEFER|RQ_IDLE|RQ_TSTOPPED);
	timer = container_of(tn, struct evl_timer, node);
	if (timer == &this_rq->inband_timer) {
		if (evl_need_resched(this_rq) ||
			!(this_rq->curr->state & T_ROOT)) {
			tn = evl_get_tqueue_next(&tmb->q, tn);
			if (tn) {
				this_rq->local_flags |= RQ_TDEFER;
				timer = container_of(tn, struct evl_timer, node);
			}
		}
	}

	t = evl_tdate(timer);
	delta = ktime_to_ns(ktime_sub(t, evl_read_clock(clock)));

	if (real_dev->features & CLOCK_EVT_FEAT_KTIME) {
		real_dev->set_next_ktime(t, real_dev);
		trace_evl_timer_shot(timer, delta, t);
	} else {
		if (delta <= 0)
			delta = real_dev->min_delta_ns;
		else {
			delta = min(delta, (int64_t)real_dev->max_delta_ns);
			delta = max(delta, (int64_t)real_dev->min_delta_ns);
		}
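		/* Convert the delay to hardware ticks using the device scale. */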
		cycles = ((u64)delta * real_dev->mult) >> real_dev->shift;
		ret = real_dev->set_next_event(cycles, real_dev);
		trace_evl_timer_shot(timer, delta, cycles);
		if (ret) {
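			/* Programming failed; retry with the shortest delay the device accepts. */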
			real_dev->set_next_event(real_dev->min_delta_ticks,
						real_dev);
			trace_evl_timer_shot(timer, real_dev->min_delta_ns,
					real_dev->min_delta_ticks);
		}
	}
}

#ifdef CONFIG_SMP
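/*
 * Kick a remote CPU so that it reprograms its local proxy tick device
 * (handled by clock_ipi_handler()).
 */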
void evl_send_timer_ipi(struct evl_clock *clock, struct evl_rq *rq)
{
	irq_pipeline_send_remote(TIMER_OOB_IPI,
				cpumask_of(evl_rq_cpu(rq)));
}
#endif