Commit 35a2e95f authored by Philippe Gerum's avatar Philippe Gerum
Browse files

evl/wait: split mutex / wait queue support


Signed-off-by: Philippe Gerum <rpm@xenomai.org>
parent 4ef68215
......@@ -14,9 +14,8 @@
#include <linux/uaccess.h>
#include <linux/semaphore.h>
#include <linux/irq_work.h>
#include <evenless/synch.h>
#include <evenless/thread.h>
#include <evenless/wait.h>
#include <evenless/flag.h>
#include <evenless/file.h>
#include <asm/evenless/fptest.h>
#include <uapi/evenless/devices/hectic.h>
......@@ -29,7 +28,7 @@ struct rtswitch_context;
struct rtswitch_task {
struct hectic_task_index base;
struct evl_wait_flag rt_synch;
struct evl_flag rt_synch;
struct semaphore nrt_synch;
struct evl_kthread kthread; /* For kernel-space real-time tasks. */
unsigned int last_switch;
......
......@@ -16,7 +16,7 @@
#include <linux/fcntl.h>
#include <linux/uaccess.h>
#include <evenless/file.h>
#include <evenless/wait.h>
#include <evenless/flag.h>
#include <evenless/clock.h>
#include <evenless/thread.h>
#include <evenless/xbuf.h>
......@@ -67,7 +67,7 @@ struct latmus_runner {
int (*run)(struct latmus_runner *runner, struct latmus_result *result);
void (*cleanup)(struct latmus_runner *runner);
struct runner_state state;
struct evl_wait_flag done;
struct evl_flag done;
int status;
int verbosity;
ktime_t period;
......@@ -94,14 +94,14 @@ struct irq_runner {
struct kthread_runner {
struct evl_kthread kthread;
struct evl_wait_flag barrier;
struct evl_flag barrier;
ktime_t start_time;
struct latmus_runner runner;
};
struct uthread_runner {
struct evl_timer timer;
struct evl_wait_flag pulse;
struct evl_flag pulse;
struct latmus_runner runner;
};
......
......@@ -41,13 +41,13 @@ struct evl_clock {
const struct timespec *ts);
void (*program_local_shot)(struct evl_clock *clock);
void (*program_remote_shot)(struct evl_clock *clock,
struct evl_rq *rq);
struct evl_rq *rq);
int (*set_gravity)(struct evl_clock *clock,
const struct evl_clock_gravity *p);
const struct evl_clock_gravity *p);
void (*reset_gravity)(struct evl_clock *clock);
void (*adjust)(struct evl_clock *clock);
int (*adjust_time)(struct evl_clock *clock,
struct timex *tx);
struct timex *tx);
} ops;
struct evl_timerbase *timerdata;
struct evl_clock *master;
......@@ -67,15 +67,15 @@ extern struct evl_clock evl_mono_clock;
extern struct evl_clock evl_realtime_clock;
int evl_init_clock(struct evl_clock *clock,
const struct cpumask *affinity);
const struct cpumask *affinity);
int evl_init_slave_clock(struct evl_clock *clock,
struct evl_clock *master);
struct evl_clock *master);
void evl_announce_tick(struct evl_clock *clock);
void evl_adjust_timers(struct evl_clock *clock,
ktime_t delta);
ktime_t delta);
void evl_stop_timers(struct evl_clock *clock);
......@@ -105,7 +105,7 @@ static inline ktime_t evl_read_clock(struct evl_clock *clock)
static inline int
evl_set_clock_time(struct evl_clock *clock,
const struct timespec *ts)
const struct timespec *ts)
{
if (clock->ops.set_time)
return clock->ops.set_time(clock, ts);
......@@ -121,14 +121,14 @@ ktime_t evl_get_clock_resolution(struct evl_clock *clock)
static inline
void evl_set_clock_resolution(struct evl_clock *clock,
ktime_t resolution)
ktime_t resolution)
{
clock->resolution = resolution;
}
static inline
int evl_set_clock_gravity(struct evl_clock *clock,
const struct evl_clock_gravity *gravity)
const struct evl_clock_gravity *gravity)
{
if (clock->ops.set_gravity)
return clock->ops.set_gravity(clock, gravity);
......@@ -158,7 +158,7 @@ int evl_clock_init(void);
void evl_clock_cleanup(void);
int evl_register_clock(struct evl_clock *clock,
const struct cpumask *affinity);
const struct cpumask *affinity);
void evl_unregister_clock(struct evl_clock *clock);
......
......@@ -38,9 +38,9 @@ struct evl_factory {
const struct file_operations *fops;
unsigned int nrdev;
struct evl_element *(*build)(struct evl_factory *fac,
const char *name,
void __user *u_attrs,
u32 *state_offp);
const char *name,
void __user *u_attrs,
u32 *state_offp);
void (*dispose)(struct evl_element *e);
const struct attribute_group **attrs;
int flags;
......@@ -80,7 +80,7 @@ evl_element_name(struct evl_element *e)
}
int evl_init_element(struct evl_element *e,
struct evl_factory *fac);
struct evl_factory *fac);
void evl_destroy_element(struct evl_element *e);
......@@ -88,7 +88,7 @@ void evl_get_element(struct evl_element *e);
struct evl_element *
__evl_get_element_by_fundle(struct evl_factory *fac,
fundle_t fundle);
fundle_t fundle);
#define evl_get_element_by_fundle(__fac, __fundle, __type) \
({ \
......@@ -111,21 +111,21 @@ __evl_get_element_by_fundle(struct evl_factory *fac,
void evl_put_element(struct evl_element *e);
int evl_open_element(struct inode *inode,
struct file *filp);
struct file *filp);
int evl_close_element(struct inode *inode,
struct file *filp);
struct file *filp);
int evl_create_element_device(struct evl_element *e,
struct evl_factory *fac,
const char *devname);
struct evl_factory *fac,
const char *devname);
void evl_remove_element_device(struct evl_element *e);
void evl_index_element(struct evl_element *e);
int evl_index_element_at(struct evl_element *e,
fundle_t fundle);
fundle_t fundle);
void evl_unindex_element(struct evl_element *e);
......
......@@ -37,7 +37,7 @@ struct evl_file_binding {
};
int evl_open_file(struct evl_file *sfilp,
struct file *filp);
struct file *filp);
void evl_release_file(struct evl_file *sfilp);
......
/*
* SPDX-License-Identifier: GPL-2.0
*
* Copyright (C) 2017 Philippe Gerum <rpm@xenomai.org>
*/
#ifndef _EVENLESS_FLAG_H
#define _EVENLESS_FLAG_H
#include <evenless/wait.h>
#include <evenless/sched.h>
/*
 * Event flag: a boolean condition threads can sleep on until another
 * context raises it. The condition is latched in @signaled, so a
 * raise issued while nobody sleeps is not lost.
 */
struct evl_flag {
	struct evl_wait_queue wait;	/* Queue of sleeping threads. */
	bool signaled;			/* Latched event state. */
};

/* Static initializer: flag starts cleared, with an empty wait queue. */
#define DEFINE_EVL_FLAG(__name) \
struct evl_flag __name = { \
	.wait = EVL_WAIT_INITIALIZER((__name).wait), \
	.signaled = false, \
}
/*
 * Runtime counterpart of DEFINE_EVL_FLAG(): reset @wf to its
 * pristine, non-signaled state with an empty wait queue.
 */
static inline void evl_init_flag(struct evl_flag *wf)
{
	wf->signaled = false;
	wf->wait = (struct evl_wait_queue)EVL_WAIT_INITIALIZER(wf->wait);
}
/* Tear down @wf by destroying its underlying wait queue. */
static inline void evl_destroy_flag(struct evl_flag *wf)
{
	evl_destroy_wait(&wf->wait);
}
/*
 * Wait until @wf is signaled, or @timeout elapses (interpreted
 * according to @timeout_mode), whichever comes first.
 *
 * Returns 0 on success — the latched event is consumed — or a
 * negative status: -EINTR if the sleep was forcibly broken,
 * -ETIMEDOUT on timeout, -EIDRM if the flag went stale while
 * waiting.
 */
static inline
int evl_wait_flag_timeout(struct evl_flag *wf,
			ktime_t timeout, enum evl_tmode timeout_mode)
{
	unsigned long flags;
	int ret = 0, info;

	xnlock_get_irqsave(&nklock, flags);

	while (!wf->signaled) {
		/*
		 * evl_wait_timeout() hands back a bitmask of wakeup
		 * conditions; translate it into a single errno-style
		 * status. Keep the raw bits in @info and chain the
		 * tests with else-if: the previous code re-tested
		 * T_TIMEO/T_RMID against the already-negated errno
		 * value, whose two's complement representation has
		 * most bits set, so a broken wait cascaded into a
		 * bogus -EIDRM.
		 */
		info = evl_wait_timeout(&wf->wait, timeout, timeout_mode);
		if (info & T_BREAK)
			ret = -EINTR;
		else if (info & T_TIMEO)
			ret = -ETIMEDOUT;
		else if (info & T_RMID)
			ret = -EIDRM;
		if (ret)
			break;
	}

	if (ret == 0)
		wf->signaled = false;	/* Consume the event. */

	xnlock_put_irqrestore(&nklock, flags);

	return ret;
}
/* Wait for @wf with no time limit (EVL_INFINITE, relative mode). */
static inline int evl_wait_flag(struct evl_flag *wf)
{
	return evl_wait_flag_timeout(wf, EVL_INFINITE, EVL_REL);
}
static inline /* nklock held. */
struct evl_thread *evl_wait_flag_head(struct evl_flag *wf)
{
	/*
	 * Peek at the thread heading the wait queue of @wf —
	 * presumably NULL when nobody sleeps there; evl_wait_head()
	 * semantics are not visible in this header, confirm.
	 */
	return evl_wait_head(&wf->wait);
}
/*
 * Signal @wf, waking up the thread heading its wait queue if any.
 * Returns true if a waiter was actually readied, false if the event
 * was merely latched for a future waiter.
 */
static inline bool evl_raise_flag(struct evl_flag *wf)
{
	struct evl_thread *waiter;
	unsigned long flags;

	xnlock_get_irqsave(&nklock, flags);

	wf->signaled = true;	/* Latch first so late sleepers see it. */
	waiter = evl_wake_up_head(&wf->wait);
	/*
	 * NOTE(review): evl_schedule() is invoked with nklock held —
	 * assumed legal in this core (the wakeup path above runs under
	 * the same lock); confirm against the scheduler's contract.
	 */
	evl_schedule();

	xnlock_put_irqrestore(&nklock, flags);

	return waiter != NULL;
}
#endif /* _EVENLESS_FLAG_H */
......@@ -8,20 +8,20 @@
#define _EVENLESS_KSEM_H
#include <linux/ktime.h>
#include <evenless/synch.h>
#include <evenless/wait.h>
struct evl_ksem {
unsigned int value;
struct evl_syn wait_queue;
struct evl_wait_queue wait_queue;
};
void evl_init_sem(struct evl_ksem *sem,
unsigned int value);
unsigned int value);
void evl_destroy_sem(struct evl_ksem *sem);
int evl_down_timeout(struct evl_ksem *sem,
ktime_t timeout);
ktime_t timeout);
int evl_down(struct evl_ksem *sem);
......
......@@ -95,7 +95,7 @@ static inline void *evl_get_heap_base(const struct evl_heap *heap)
}
int evl_init_heap(struct evl_heap *heap, void *membase,
size_t size);
size_t size);
void evl_destroy_heap(struct evl_heap *heap);
......
......@@ -9,16 +9,19 @@
#include <evenless/factory.h>
#include <evenless/thread.h>
#include <evenless/sched.h>
int evl_signal_monitor_targeted(struct evl_thread *target,
int monfd);
void __evl_commit_monitor_ceiling(struct evl_thread *curr);
void __evl_commit_monitor_ceiling(void);
static inline void evl_commit_monitor_ceiling(struct evl_thread *curr)
/*
 * Apply any pending priority-protection (PP) ceiling request for the
 * current thread. Fast path: skip the out-of-line commit entirely
 * when no request is pending in the thread's shared user window.
 */
static inline void evl_commit_monitor_ceiling(void)
{
	struct evl_thread *curr = evl_current_thread();

	/*
	 * The stale pre-split call taking @curr was dropped: the
	 * prototype of __evl_commit_monitor_ceiling() is now void of
	 * parameters, and the helper fetches the current thread
	 * itself.
	 */
	if (curr->u_window->pp_pending != EVL_NO_HANDLE)
		__evl_commit_monitor_ceiling();
}
#endif /* !_EVENLESS_MONITOR_H */
......@@ -7,13 +7,130 @@
#ifndef _EVENLESS_MUTEX_H
#define _EVENLESS_MUTEX_H
#include <linux/types.h>
#include <linux/ktime.h>
#include <linux/atomic.h>
#include <evenless/synch.h>
#include <evenless/list.h>
#include <evenless/assert.h>
#include <evenless/timer.h>
#include <evenless/thread.h>
#include <uapi/evenless/mutex.h>
struct evl_clock;
struct evl_thread;
#define EVL_MUTEX_PI BIT(0)
#define EVL_MUTEX_PP BIT(1)
#define EVL_MUTEX_CLAIMED BIT(2)
#define EVL_MUTEX_CEILING BIT(3)
/*
 * Core mutex descriptor, supporting both priority inheritance
 * (EVL_MUTEX_PI) and priority protection/ceiling (EVL_MUTEX_PP)
 * protocols, with a user-visible atomic fast path.
 */
struct evl_mutex {
	/*
	 * NOTE(review): wait_queue looks like a leftover of the old
	 * evl_syn-based implementation this commit splits away —
	 * EVL_KMUTEX_INITIALIZER below never initializes it, and the
	 * new wchan/wait_list pair seems to replace it; confirm it
	 * was meant to be removed.
	 */
	struct evl_syn wait_queue;
	int wprio;		/* Presumably the weighted priority used for boosting — confirm. */
	int flags;		/* EVL_MUTEX_{PI,PP,CLAIMED,CEILING} bits. */
	struct evl_thread *owner;	/* Current owner, NULL if unlocked. */
	struct evl_clock *clock;	/* Clock for timed lock attempts. */
	atomic_t *fastlock;	/* Shared word for the uncontended fast path. */
	u32 *ceiling_ref;	/* PP: reference to the ceiling priority value. */
	struct evl_wait_channel wchan;	/* Abort/reorder hooks for sleepers (see initializer). */
	struct list_head wait_list;	/* Waiters (see evl_for_each_mutex_waiter). */
	struct list_head next; /* thread->boosters */
};
#define evl_for_each_mutex_waiter(__pos, __mutex) \
list_for_each_entry(__pos, &(__mutex)->wait_list, wait_next)
void evl_init_mutex_pi(struct evl_mutex *mutex,
struct evl_clock *clock,
atomic_t *fastlock);
void evl_init_mutex_pp(struct evl_mutex *mutex,
struct evl_clock *clock,
atomic_t *fastlock,
u32 *ceiling_ref);
bool evl_destroy_mutex(struct evl_mutex *mutex);
int evl_trylock_mutex(struct evl_mutex *mutex);
int evl_lock_mutex_timeout(struct evl_mutex *mutex, ktime_t timeout,
enum evl_tmode timeout_mode);
static inline int evl_lock_mutex(struct evl_mutex *mutex)
{
return evl_lock_mutex_timeout(mutex, EVL_INFINITE, EVL_REL);
}
void __evl_unlock_mutex(struct evl_mutex *mutex);
void evl_unlock_mutex(struct evl_mutex *mutex);
void evl_commit_mutex_ceiling(struct evl_mutex *mutex);
#ifdef CONFIG_EVENLESS_DEBUG_MUTEX_INBAND
void evl_detect_boost_drop(struct evl_thread *owner);
#else
static inline
void evl_detect_boost_drop(struct evl_thread *owner) { }
#endif
void evl_abort_mutex_wait(struct evl_thread *thread);
void evl_reorder_mutex_wait(struct evl_thread *thread);
/*
 * Self-contained kernel mutex: a core descriptor bundled with its
 * own in-kernel fastlock word, for users with no shared memory path.
 */
struct evl_kmutex {
	struct evl_mutex mutex;
	atomic_t fastlock;	/* Backing store for mutex.fastlock. */
};
/*
 * Static initializer for a kernel mutex: PI-enabled, unlocked
 * (fastlock == 0), no owner, no PP ceiling, monotonic clock for
 * timed waits, empty wait list, and a wait channel wired to the
 * mutex abort/reorder handlers.
 * NOTE(review): struct evl_mutex's wait_queue member is left
 * uninitialized here — consistent with it being retired; confirm.
 */
#define EVL_KMUTEX_INITIALIZER(__name) { \
	.mutex = { \
		.fastlock = &(__name).fastlock, \
		.flags = EVL_MUTEX_PI, \
		.owner = NULL, \
		.wprio = -1, \
		.ceiling_ref = NULL, \
		.clock = &evl_mono_clock, \
		.wait_list = LIST_HEAD_INIT((__name).mutex.wait_list), \
		.wchan = { \
			.abort_wait = evl_abort_mutex_wait, \
			.reorder_wait = evl_reorder_mutex_wait, \
			.lock = __HARD_SPIN_LOCK_INITIALIZER((__name).wchan.lock), \
		}, \
	}, \
	.fastlock = ATOMIC_INIT(0), \
}

/* Define and statically initialize a kernel mutex in one go. */
#define DEFINE_EVL_MUTEX(__name) \
	struct evl_kmutex __name = EVL_KMUTEX_INITIALIZER(__name)
/* Runtime counterpart of DEFINE_EVL_MUTEX(): reset @kmutex to pristine, unlocked state. */
static inline
void evl_init_kmutex(struct evl_kmutex *kmutex)
{
	*kmutex = (struct evl_kmutex)EVL_KMUTEX_INITIALIZER(*kmutex);
}
/* Tear down the embedded core mutex; its boolean result is dropped here. */
static inline
void evl_destroy_kmutex(struct evl_kmutex *kmutex)
{
	evl_destroy_mutex(&kmutex->mutex);
}
/* Non-blocking lock attempt; forwards the core mutex status verbatim. */
static inline
int evl_trylock_kmutex(struct evl_kmutex *kmutex)
{
	return evl_trylock_mutex(&kmutex->mutex);
}
/* Blocking lock with no time limit; forwards the core mutex status verbatim. */
static inline
int evl_lock_kmutex(struct evl_kmutex *kmutex)
{
	return evl_lock_mutex(&kmutex->mutex);
}
/*
 * Release @kmutex. Both this wrapper and evl_unlock_mutex() return
 * void; the original `return evl_unlock_mutex(...)` carried an
 * expression in a void function, which is an ISO C constraint
 * violation (C11 6.8.6.4) — drop the `return`.
 */
static inline
void evl_unlock_kmutex(struct evl_kmutex *kmutex)
{
	evl_unlock_mutex(&kmutex->mutex);
}
#endif /* !_EVENLESS_MUTEX_H */
......@@ -12,13 +12,13 @@
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/poll.h>
#include <evenless/synch.h>
#include <evenless/wait.h>
#include <evenless/factory.h>
#include <uapi/evenless/poller.h>
#define EVL_POLLHEAD_INITIALIZER(__name) { \
.watchpoints = LIST_HEAD_INIT((__name).watchpoints), \
lock = __HARD_SPIN_LOCK_INITIALIZER(__name), \
lock = __HARD_SPIN_LOCK_INITIALIZER((__name).lock), \
}
struct evl_poll_head {
......
......@@ -12,6 +12,7 @@
#include <linux/list.h>
#include <evenless/lock.h>
#include <evenless/thread.h>
#include <evenless/wait.h>
#include <evenless/sched/queue.h>
#include <evenless/sched/weak.h>
#include <evenless/sched/quota.h>
......@@ -102,7 +103,7 @@ struct evl_rq {
/* Currently active account */
struct evl_account *current_account;
#endif
struct evl_syn yield_sync;
struct evl_wait_queue yield_sync;
};
DECLARE_PER_CPU(struct evl_rq, evl_runqueues);
......
......@@ -32,13 +32,13 @@ struct evl_thread;
void evl_init_schedq(struct evl_multilevel_queue *q);
void evl_add_schedq(struct evl_multilevel_queue *q,
struct evl_thread *thread);
struct evl_thread *thread);
void evl_add_schedq_tail(struct evl_multilevel_queue *q,
struct evl_thread *thread);
struct evl_thread *thread);
void evl_del_schedq(struct evl_multilevel_queue *q,
struct evl_thread *thread);
struct evl_thread *thread);
struct evl_thread *evl_get_schedq(struct evl_multilevel_queue *q);
......
......@@ -16,7 +16,7 @@
#define EVL_QUOTA_MIN_PRIO 1
#define EVL_QUOTA_MAX_PRIO 255
#define EVL_QUOTA_NR_PRIO \
#define EVL_QUOTA_NR_PRIO \
(EVL_QUOTA_MAX_PRIO - EVL_QUOTA_MIN_PRIO + 1)
extern struct evl_sched_class evl_sched_quota;
......@@ -54,16 +54,16 @@ static inline int evl_quota_init_thread(struct evl_thread *thread)
}
int evl_quota_create_group(struct evl_quota_group *tg,
struct evl_rq *rq,
int *quota_sum_r);
struct evl_rq *rq,
int *quota_sum_r);
int evl_quota_destroy_group(struct evl_quota_group *tg,
int force,
int *quota_sum_r);
int force,
int *quota_sum_r);
void evl_quota_set_limit(struct evl_quota_group *tg,
int quota_percent, int quota_peak_percent,
int *quota_sum_r);
int quota_percent, int quota_peak_percent,
int *quota_sum_r);
struct evl_quota_group *
evl_quota_find_group(struct evl_rq *rq, int tgid);
......
......@@ -48,10 +48,10 @@ static inline void __evl_dequeue_rt_thread(struct evl_thread *thread)
static inline
int __evl_chk_rt_schedparam(struct evl_thread *thread,
const union evl_sched_param *p)
const union evl_sched_param *p)
{
if (p->rt.prio < EVL_CORE_MIN_PRIO ||
p->rt.prio > EVL_CORE_MAX_PRIO)
p->rt.prio > EVL_CORE_MAX_PRIO)
return -EINVAL;
return 0;
......@@ -59,7 +59,7 @@ int __evl_chk_rt_schedparam(struct evl_thread *thread,
static inline
bool __evl_set_rt_schedparam(struct evl_thread *thread,
const union evl_sched_param *p)
const union evl_sched_param *p)
{
bool ret = evl_set_effective_thread_priority(thread, p->rt.prio);
......@@ -71,14 +71,14 @@ bool __evl_set_rt_schedparam(struct evl_thread *thread,
static inline
void __evl_get_rt_schedparam(struct evl_thread *thread,
union evl_sched_param *p)
union evl_sched_param *p)
{
p->rt.prio = thread->cprio;
}
static inline
void __evl_track_rt_priority(struct evl_thread *thread,
const union evl_sched_param *p)
const union evl_sched_param *p)
{
if (p)
thread->cprio = p->rt.prio; /* Force update. */
......
......@@ -17,7 +17,7 @@
#define EVL_WEAK_NR_PRIO (EVL_WEAK_MAX_PRIO - EVL_WEAK_MIN_PRIO + 1)
#if EVL_WEAK_NR_PRIO > EVL_CLASS_WEIGHT_FACTOR || \
EVL_WEAK_NR_PRIO > EVL_MLQ_LEVELS
EVL_WEAK_NR_PRIO > EVL_MLQ_LEVELS
#error "WEAK class has too many priority levels"
#endif
......
......@@ -70,7 +70,7 @@ static inline void evl_reset_account(struct evl_account *account)
struct evl_account *__prev; \
__prev = (struct evl_account *) \
atomic_long_xchg(&(__rq)->current_account, \
(long)(__new_account)); \
(long)(__new_account)); \
__prev; \
})
......@@ -85,9 +85,9 @@ static inline void evl_reset_account(struct evl_account *account)
(__rq)->current_account = (__new_account); \
} while (0)
struct evl_counter {
unsigned long counter;
};
struct evl_counter {
unsigned long counter;
};