irqstage.h

/*
 * SPDX-License-Identifier: GPL-2.0
 *
 * Copyright (C) 2016, 2019 Philippe Gerum  <rpm@xenomai.org>.
 */
#ifndef _LINUX_IRQSTAGE_H
#define _LINUX_IRQSTAGE_H

#ifdef CONFIG_IRQ_PIPELINE

#include <linux/percpu.h>
#include <linux/bitops.h>
#include <linux/preempt.h>
#include <linux/sched.h>
#include <asm/irq_pipeline.h>

struct kvm_oob_notifier;

struct irq_stage {
	int index;
	const char *name;
};

extern struct irq_stage inband_stage;

extern struct irq_stage oob_stage;

struct irq_event_map;

struct irq_log {
	unsigned long index_0;
	struct irq_event_map *map;
};

/* Per-CPU, per-stage data. */
struct irq_stage_data {
	struct irq_log log;
	struct irq_stage *stage;
#ifdef CONFIG_DEBUG_IRQ_PIPELINE
	int cpu;
#endif
};

/* Per-CPU pipeline descriptor. */
struct irq_pipeline_data {
	struct irq_stage_data stages[2];
	struct pt_regs tick_regs;
#ifdef CONFIG_DOVETAIL
	struct task_struct *task_inflight;
	struct task_struct *rqlock_owner;
#ifdef CONFIG_KVM
	struct kvm_oob_notifier *vcpu_notify;
#endif
#endif
};

DECLARE_PER_CPU(struct irq_pipeline_data, irq_pipeline);

/*
 * The low-level stall bit accessors. These should be used by the
 * Dovetail core implementation exclusively; the inband_irq_*() and
 * oob_irq_*() accessors are available to common code.
 */

#define INBAND_STALL_BIT  0
#define OOB_STALL_BIT     1

static __always_inline void init_task_stall_bits(struct task_struct *p)
{
	__set_bit(INBAND_STALL_BIT, &p->stall_bits);
	__clear_bit(OOB_STALL_BIT, &p->stall_bits);
}

static __always_inline void stall_inband_nocheck(void)
{
	__set_bit(INBAND_STALL_BIT, &current->stall_bits);
	barrier();
}

static __always_inline void stall_inband(void)
{
	WARN_ON_ONCE(irq_pipeline_debug() && running_oob());
	stall_inband_nocheck();
}

static __always_inline void unstall_inband_nocheck(void)
{
	barrier();
	__clear_bit(INBAND_STALL_BIT, &current->stall_bits);
}

static __always_inline void unstall_inband(void)
{
	WARN_ON_ONCE(irq_pipeline_debug() && running_oob());
	unstall_inband_nocheck();
}

static __always_inline int test_and_stall_inband_nocheck(void)
{
	return __test_and_set_bit(INBAND_STALL_BIT, &current->stall_bits);
}

static __always_inline int test_and_stall_inband(void)
{
	WARN_ON_ONCE(irq_pipeline_debug() && running_oob());
	return test_and_stall_inband_nocheck();
}

static __always_inline int test_inband_stall(void)
{
	return test_bit(INBAND_STALL_BIT, &current->stall_bits);
}

static __always_inline void stall_oob(void)
{
	__set_bit(OOB_STALL_BIT, &current->stall_bits);
	barrier();
}

static __always_inline void unstall_oob(void)
{
	barrier();
	__clear_bit(OOB_STALL_BIT, &current->stall_bits);
}

static __always_inline int test_and_stall_oob(void)
{
	return __test_and_set_bit(OOB_STALL_BIT, &current->stall_bits);
}

static __always_inline int test_oob_stall(void)
{
	return test_bit(OOB_STALL_BIT, &current->stall_bits);
}
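
/*
 * Illustrative sketch (not part of the original header): pairing the
 * low-level stall accessors, assuming the caller belongs to the
 * Dovetail core and runs with hard IRQs off. The checked inband
 * variants only add a debug assertion against running_oob() on top of
 * their _nocheck counterparts; the state itself lives in
 * current->stall_bits.
 *
 *	stall_inband();
 *	... section virtually shielded from inband interrupts ...
 *	unstall_inband();
 *
 *	if (!test_and_stall_oob()) {
 *		... the oob stage was not stalled on entry ...
 *		unstall_oob();
 *	}
 */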

/**
 * this_staged - IRQ stage data on the current CPU
 *
 * Return the address of @stage's data on the current CPU. IRQs must
 * be hard disabled to prevent CPU migration.
 */
static __always_inline
struct irq_stage_data *this_staged(struct irq_stage *stage)
{
	return &raw_cpu_ptr(irq_pipeline.stages)[stage->index];
}
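
/*
 * Illustrative sketch (not part of the original header): since
 * this_staged() requires hard IRQs to be off so the CPU cannot
 * migrate, a caller would typically bracket the access, e.g. assuming
 * the hard_local_irq_save()/hard_local_irq_restore() helpers from the
 * pipeline support code:
 *
 *	unsigned long flags = hard_local_irq_save();
 *	struct irq_stage_data *pd = this_staged(&inband_stage);
 *	... inspect pd->log ...
 *	hard_local_irq_restore(flags);
 */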

/**
 * percpu_inband_staged - IRQ stage data on specified CPU
 *
 * Return the address of @stage's data on @cpu.
 *
 * This is the slowest accessor; use it carefully. Prefer
 * this_staged() for requests referring to the current
 * CPU. Additionally, if the target stage is known at build time,
 * consider using this_{inband, oob}_staged() instead.
 */
static __always_inline
struct irq_stage_data *percpu_inband_staged(struct irq_stage *stage, int cpu)
{
	return &per_cpu(irq_pipeline.stages, cpu)[stage->index];
}
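
/*
 * Illustrative sketch (not part of the original header): peeking at
 * another CPU's inband state from a debug or stat path (the cpu
 * variable is hypothetical; the remote log may change underneath us
 * unless that CPU is quiescent):
 *
 *	struct irq_stage_data *pd = percpu_inband_staged(&inband_stage, cpu);
 *	bool pending = stage_irqs_pending(pd);
 */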

/**
 * this_inband_staged - return the address of the pipeline context
 * data for the inband stage on the current CPU. CPU migration must be
 * disabled.
 *
 * This accessor is recommended when the stage we refer to is known at
 * build time to be the inband one.
 */
static __always_inline struct irq_stage_data *this_inband_staged(void)
{
	return raw_cpu_ptr(&irq_pipeline.stages[0]);
}

/**
 * this_oob_staged - return the address of the pipeline context data
 * for the registered oob stage on the current CPU. CPU migration must
 * be disabled.
 *
 * This accessor is recommended when the stage we refer to is known at
 * build time to be the registered oob stage. This address is always
 * different from the context data of the inband stage, even in the
 * absence of a registered oob stage.
 */
static __always_inline struct irq_stage_data *this_oob_staged(void)
{
	return raw_cpu_ptr(&irq_pipeline.stages[1]);
}

static __always_inline struct irq_stage_data *__current_irq_staged(void)
{
	return &raw_cpu_ptr(irq_pipeline.stages)[stage_level()];
}

/**
 * current_irq_staged - return the address of the pipeline context
 * data for the current stage. CPU migration must be disabled.
 */
#define current_irq_staged __current_irq_staged()

static __always_inline
void check_staged_locality(struct irq_stage_data *pd)
{
#ifdef CONFIG_DEBUG_IRQ_PIPELINE
	/*
	 * Setting our context to another processor's stage data is a
	 * really bad idea; our caller definitely went loopy.
	 */
	WARN_ON_ONCE(raw_smp_processor_id() != pd->cpu);
#endif
}

/**
 * switch_oob(), switch_inband() - switch the current CPU to the
 * specified stage context. CPU migration must be disabled.
 *
 * Calling these routines is the only sane and safe way to change the
 * interrupt stage for the current CPU. Don't bypass them, ever.
 * Really.
 */
static __always_inline
void switch_oob(struct irq_stage_data *pd)
{
	check_staged_locality(pd);
	if (!(preempt_count() & STAGE_MASK))
		preempt_count_add(STAGE_OFFSET);
}

static __always_inline
void switch_inband(struct irq_stage_data *pd)
{
	check_staged_locality(pd);
	if (preempt_count() & STAGE_MASK)
		preempt_count_sub(STAGE_OFFSET);
}

static __always_inline
void set_current_irq_staged(struct irq_stage_data *pd)
{
	if (pd->stage == &inband_stage)
		switch_inband(pd);
	else
		switch_oob(pd);
}
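
/*
 * Illustrative sketch (not part of the original header): how the
 * pipeline core might move the current CPU to the oob stage and back,
 * with hard IRQs off. switch_oob() sets the STAGE bits in the preempt
 * count, which stage_level() then reflects when the per-CPU stage
 * array is indexed.
 *
 *	switch_oob(this_oob_staged());
 *	... current_irq_stage == &oob_stage from here on ...
 *	switch_inband(this_inband_staged());
 */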

static __always_inline struct irq_stage *__current_irq_stage(void)
{
	/*
	 * We don't have to hard disable irqs while accessing the
	 * per-CPU stage data here, because there is no way we could
	 * switch stage and CPU at the same time.
	 */
	return __current_irq_staged()->stage;
}

#define current_irq_stage	__current_irq_stage()

static __always_inline bool oob_stage_present(void)
{
	return oob_stage.index != 0;
}

/**
 * stage_irqs_pending() - Whether we have interrupts pending
 * (i.e. logged) on the current CPU for the given stage. Hard IRQs
 * must be disabled.
 */
static __always_inline int stage_irqs_pending(struct irq_stage_data *pd)
{
	return pd->log.index_0 != 0;
}

void sync_current_irq_stage(void);

void sync_irq_stage(struct irq_stage *top);

void irq_post_stage(struct irq_stage *stage,
		    unsigned int irq);

static __always_inline void irq_post_oob(unsigned int irq)
{
	irq_post_stage(&oob_stage, irq);
}

static __always_inline void irq_post_inband(unsigned int irq)
{
	irq_post_stage(&inband_stage, irq);
}
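
/*
 * Illustrative sketch (not part of the original header): an oob
 * handler which wants the rest of the work done by the regular kernel
 * could log the event for the inband stage; it is then played when
 * that stage is synchronized (see sync_current_irq_stage() above).
 * The irq value is hypothetical.
 *
 *	irq_post_inband(irq);
 */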

static __always_inline void oob_irq_disable(void)
{
	hard_local_irq_disable();
	stall_oob();
}

static __always_inline unsigned long oob_irq_save(void)
{
	hard_local_irq_disable();
	return test_and_stall_oob();
}

static __always_inline int oob_irqs_disabled(void)
{
	return test_oob_stall();
}

void oob_irq_enable(void);

void __oob_irq_restore(unsigned long x);

static __always_inline void oob_irq_restore(unsigned long x)
{
	if ((x ^ test_oob_stall()) & 1)
		__oob_irq_restore(x);
}
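
/*
 * Illustrative sketch (not part of the original header): the usual
 * save/restore bracket for oob code, the oob-stage counterpart of
 * local_irq_save()/local_irq_restore():
 *
 *	unsigned long flags = oob_irq_save();
 *	... oob critical section ...
 *	oob_irq_restore(flags);
 */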

bool stage_disabled(void);

unsigned long test_and_lock_stage(int *irqsoff);

void unlock_stage(unsigned long irqstate);

#define stage_save_flags(__irqstate)					\
	do {								\
	  unsigned long __flags = hard_local_save_flags();		\
	  (__irqstate) = irqs_merge_flags(__flags,			\
					  irqs_disabled());		\
	} while (0)

int enable_oob_stage(const char *name);

int arch_enable_oob_stage(void);

void disable_oob_stage(void);
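
/*
 * Illustrative sketch (not part of the original header): a companion
 * core would typically register the oob stage once at init time and
 * drop it on exit ("my-core" is a hypothetical name):
 *
 *	if (!oob_stage_present()) {
 *		int ret = enable_oob_stage("my-core");
 *		if (ret)
 *			return ret;
 *	}
 *	...
 *	disable_oob_stage();
 */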

#else /* !CONFIG_IRQ_PIPELINE */

#include <linux/irqflags.h>

void call_is_nop_without_pipelining(void);

static __always_inline void stall_inband(void) { }

static __always_inline void unstall_inband(void) { }

static __always_inline int test_and_stall_inband(void)
{
	return false;
}

static __always_inline int test_inband_stall(void)
{
	return false;
}

static __always_inline bool oob_stage_present(void)
{
	return false;
}

static __always_inline bool stage_disabled(void)
{
	return irqs_disabled();
}

static __always_inline void irq_post_inband(unsigned int irq)
{
	call_is_nop_without_pipelining();
}

#define test_and_lock_stage(__irqsoff)				\
	({							\
		unsigned long __flags;				\
		raw_local_irq_save(__flags);			\
		*(__irqsoff) = irqs_disabled_flags(__flags);	\
		__flags;					\
	})

#define unlock_stage(__flags)		raw_local_irq_restore(__flags)

#define stage_save_flags(__flags)	raw_local_save_flags(__flags)

static __always_inline void stall_inband_nocheck(void)
{ }

static __always_inline void unstall_inband_nocheck(void)
{ }

static __always_inline int test_and_stall_inband_nocheck(void)
{
	return irqs_disabled();
}

#endif /* !CONFIG_IRQ_PIPELINE */

#endif	/* !_LINUX_IRQSTAGE_H */