/*
 * SPDX-License-Identifier: GPL-2.0
 *
 * Copyright (C) 2016, 2018 Philippe Gerum  <rpm@xenomai.org>
 */

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/clockchips.h>
#include <linux/tick.h>
#include <linux/irqdomain.h>
#include <linux/ktime.h>
#include <linux/kernel.h>
#include <linux/timekeeping.h>
#include <linux/irq_pipeline.h>
#include <linux/slab.h>
#include <evl/sched.h>
#include <evl/timer.h>
#include <evl/clock.h>
#include <evl/tick.h>
#include <evl/control.h>
#include <trace/events/evl.h>

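/* The proxy tick device serving each out-of-band CPU (see setup_proxy()). */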
static DEFINE_PER_CPU(struct clock_proxy_device *, proxy_tick_device);

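/*
 * ->set_next_ktime() handler of the proxy device. The in-band kernel
 * calls this to program its next tick, which we emulate by arming the
 * per-runqueue in-band timer via our core timing services.
 */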
static int proxy_set_next_ktime(ktime_t expires,
				struct clock_event_device *proxy_dev)
{
	struct evl_rq *rq;
	unsigned long flags;
	ktime_t delta;

	/*
	 * Negative deltas have been observed. evl_start_timer()
	 * will trigger an immediate shot in that case.
	 */
	delta = ktime_sub(expires, ktime_get());

	flags = hard_local_irq_save(); /* Prevent CPU migration. */
	rq = this_evl_rq();
	evl_start_timer(&rq->inband_timer,
			evl_abs_timeout(&rq->inband_timer, delta),
			EVL_INFINITE);
	hard_local_irq_restore(flags);

	return 0;
}

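/*
 * ->set_state_oneshot_stopped() handler of the proxy device, invoked
 * when the in-band kernel wants to stop the tick on entering a
 * tickless state.
 */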
static int proxy_set_oneshot_stopped(struct clock_event_device *proxy_dev)
{
	struct clock_event_device *real_dev;
	struct clock_proxy_device *dev;
	unsigned long flags;
	struct evl_rq *rq;

	dev = container_of(proxy_dev, struct clock_proxy_device, proxy_device);

	/*
	 * In-band wants to disable the clock hardware on entering a
	 * tickless state, so we have to stop our in-band tick
	 * emulation. Propagate the request for shutting down the
	 * hardware to the real device only if we have no outstanding
	 * OOB timers. CAUTION: the in-band timer is counted when
	 * assessing the RQ_IDLE condition, so we need to stop it
	 * prior to testing the latter.
	 */
	flags = hard_local_irq_save();

	rq = this_evl_rq();
	evl_stop_timer(&rq->inband_timer);
	rq->lflags |= RQ_TSTOPPED;

	if (rq->lflags & RQ_IDLE) {
		real_dev = dev->real_device;
		real_dev->set_state_oneshot_stopped(real_dev);
	}

	hard_local_irq_restore(flags);

	return 0;
}

/*
 * This is our high-precision clock tick handler. We only have two
 * possible callers, each of which may only run on a CPU which is a
 * member of the real-time set:
 *
 * - our TIMER_OOB_IPI handler; such an IPI is directed to members of
 * our real-time CPU set exclusively.
 *
 * - the proxy tick device, which has this routine installed as its
 * out-of-band event handler (see setup_proxy()). The IRQ pipeline
 * guarantees that such a handler always runs on a CPU which is a
 * member of the CPU set passed to enable_clock_devices() (i.e. our
 * real-time CPU set).
 *
 * hard IRQs are off.
 */
static void clock_event_handler(struct clock_event_device *dummy)
{
	struct evl_rq *this_rq = this_evl_rq();

	if (EVL_WARN_ON_ONCE(CORE, !is_evl_cpu(evl_rq_cpu(this_rq))))
		return;

	evl_announce_tick(&evl_mono_clock);

	/*
	 * If a real-time thread was preempted by this clock
	 * interrupt, any transition to the root thread will cause an
	 * in-band tick to be propagated by evl_schedule() from
	 * irq_finish_head(), so we only need to propagate the in-band
	 * tick in case the root thread was preempted.
	 */
	if ((this_rq->lflags & RQ_TPROXY) && (this_rq->curr->state & T_ROOT))
		evl_notify_proxy_tick(this_rq);
}

void evl_notify_proxy_tick(struct evl_rq *this_rq) /* hard IRQs off. */
{
	/*
	 * A proxy clock event device is active on this CPU, make it
	 * tick asap when the in-band code resumes; this will honour a
	 * previous set_next_ktime() request received from the kernel,
	 * which we carried out using our core timing services.
	 */
	this_rq->lflags &= ~RQ_TPROXY;
	tick_notify_proxy();
}

#ifdef CONFIG_SMP

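/*
 * TIMER_OOB_IPI handler; evl_send_timer_ipi() directs this IPI to
 * remote members of the real-time CPU set exclusively.
 */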
static irqreturn_t clock_ipi_handler(int irq, void *dev_id)
{
	clock_event_handler(NULL);

	return IRQ_HANDLED;
}

#endif

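/*
 * Per-CPU setup call run by tick_install_proxy(): interpose our
 * handlers into the proxy device, then remember the latter for
 * evl_program_proxy_tick().
 */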
static void setup_proxy(struct clock_proxy_device *dev)
{
	struct clock_event_device *proxy_dev = &dev->proxy_device;

	dev->handle_oob_event = clock_event_handler;
	proxy_dev->features |= CLOCK_EVT_FEAT_KTIME;
	proxy_dev->set_next_ktime = proxy_set_next_ktime;
	if (proxy_dev->set_state_oneshot_stopped)
		proxy_dev->set_state_oneshot_stopped = proxy_set_oneshot_stopped;

	__this_cpu_write(proxy_tick_device, dev);
}

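/*
 * Take control of the tick on every out-of-band CPU by having a proxy
 * clock event device installed there, after setting up the IPI used
 * for remote tick programming (SMP).
 */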
int evl_enable_tick(void)
{
	int ret;

#ifdef CONFIG_SMP
	ret = __request_percpu_irq(TIMER_OOB_IPI,
				clock_ipi_handler,
				IRQF_OOB, "EVL timer IPI",
				&evl_machine_cpudata);
	if (ret)
		return ret;
#endif

	/*
	 * CAUTION:
	 *
	 * - EVL timers may be started only _after_ the proxy clock
	 * device has been set up for the target CPU.
	 *
	 * - do not hold any lock across calls to evl_enable_tick().
	 *
	 * - tick_install_proxy() guarantees that the real clock
	 * device supports oneshot mode, or fails.
	 */
	ret = tick_install_proxy(setup_proxy, &evl_oob_cpus);
	if (ret) {
#ifdef CONFIG_SMP
		free_percpu_irq(TIMER_OOB_IPI,
				&evl_machine_cpudata);
#endif
		return ret;
	}

	return 0;
}

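/*
 * Converse of evl_enable_tick(): give the tick devices back to the
 * in-band kernel, release the timer IPI, then stop all core timers
 * running on the monotonic clock.
 */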
void evl_disable_tick(void)
{
	tick_uninstall_proxy(&evl_oob_cpus);
#ifdef CONFIG_SMP
	free_percpu_irq(TIMER_OOB_IPI, &evl_machine_cpudata);
#endif
	/*
	 * When the kernel is swapping clock event devices on behalf
	 * of enable_clock_devices(), it may end up calling
	 * program_timer() via the synthetic device's
	 * ->set_next_event() handler for resuming the in-band timer.
	 * Therefore, no timer should remain queued before
	 * enable_clock_devices() is called, or unpleasant hangs may
	 * happen if the in-band timer is not at the front of the
	 * queue. You have been warned.
	 */
	evl_stop_timers(&evl_mono_clock);
}

/* per-cpu timer queue locked. */
void evl_program_proxy_tick(struct evl_clock *clock)
{
	struct clock_proxy_device *dev = __this_cpu_read(proxy_tick_device);
	struct clock_event_device *real_dev = dev->real_device;
	struct evl_rq *this_rq = this_evl_rq();
	struct evl_timerbase *tmb;
	struct evl_timer *timer;
	struct evl_tnode *tn;
	int64_t delta;
	u64 cycles;
	ktime_t t;
	int ret;

	/*
	 * Do not reprogram locally when running inside the tick
	 * handler - this will be done on exit anyway. Also bail out
	 * early if there is no pending timer.
	 */
	if (this_rq->lflags & RQ_TIMER)
		return;

	tmb = evl_this_cpu_timers(clock);
	tn = evl_get_tqueue_head(&tmb->q);
	if (tn == NULL) {
		this_rq->lflags |= RQ_IDLE;
		return;
	}

	/*
	 * Try to defer the next in-band tick, so that it does not
	 * preempt an OOB activity uselessly, in two cases:
	 *
	 * 1) a rescheduling is pending for the current CPU. We may
	 * assume that an EVL thread is about to resume, so we want to
	 * move the in-band tick out of the way until in-band activity
	 * resumes, unless there are no other outstanding timers.
	 *
	 * 2) the current EVL thread is running OOB, in which case we
	 * may defer the in-band tick until the in-band activity
	 * resumes.
	 *
	 * The in-band tick deferral is cleared whenever EVL is about
	 * to yield control to the in-band code (see
	 * __evl_schedule()), or a timer with an earlier timeout date
	 * is scheduled, whichever comes first.
	 */
	this_rq->lflags &= ~(RQ_TDEFER|RQ_IDLE|RQ_TSTOPPED);
	timer = container_of(tn, struct evl_timer, node);
	if (timer == &this_rq->inband_timer) {
		if (evl_need_resched(this_rq) ||
			!(this_rq->curr->state & T_ROOT)) {
			tn = evl_get_tqueue_next(&tmb->q, tn);
			if (tn) {
				this_rq->lflags |= RQ_TDEFER;
				timer = container_of(tn, struct evl_timer, node);
			}
		}
	}

	t = evl_tdate(timer);
	delta = ktime_to_ns(ktime_sub(t, evl_read_clock(clock)));

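	/*
	 * Program the next shot on the real device. If it accepts
	 * absolute ktime values, pass the expiry date directly;
	 * otherwise convert the delta to hardware cycles using the
	 * device's mult/shift scale, clamped to its programmable
	 * range.
	 */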
	if (real_dev->features & CLOCK_EVT_FEAT_KTIME) {
		real_dev->set_next_ktime(t, real_dev);
		trace_evl_timer_shot(delta, t);
	} else {
		if (delta <= 0)
			delta = real_dev->min_delta_ns;
		else {
			delta = min(delta, (int64_t)real_dev->max_delta_ns);
			delta = max(delta, (int64_t)real_dev->min_delta_ns);
		}
		cycles = ((u64)delta * real_dev->mult) >> real_dev->shift;
		ret = real_dev->set_next_event(cycles, real_dev);
		trace_evl_timer_shot(delta, cycles);
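		/*
		 * If programming failed (e.g. the delay was too short
		 * for the device), retry with the minimum delay it
		 * supports.
		 */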
		if (ret) {
			real_dev->set_next_event(real_dev->min_delta_ticks, real_dev);
			trace_evl_timer_shot(real_dev->min_delta_ns, real_dev->min_delta_ticks);
		}
	}
}

#ifdef CONFIG_SMP
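/*
 * Kick the clock tick handler on a remote CPU by sending it
 * TIMER_OOB_IPI (see clock_ipi_handler()).
 */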
void evl_send_timer_ipi(struct evl_clock *clock, struct evl_rq *rq)
{
	irq_pipeline_send_remote(TIMER_OOB_IPI,
				cpumask_of(evl_rq_cpu(rq)));
}
#endif