Commit 2e89b304 authored by Philippe Gerum

evl/wait: convert wait queue lock to hard lock



Out-of-band IRQs and EVL thread contexts usually compete for such a
lock, which requires hard irqs to be disabled while holding it.
Therefore we would not generally benefit from the preemption-disabling
feature we are going to add to the EVL-specific spinlock. Make it a
hard lock to clarify the intent.
Signed-off-by: Philippe Gerum <rpm@xenomai.org>
parent d6c21cb8
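
For reference, the hunks below all converge on the same locking pattern: the wait queue now embeds a hard spinlock taken with the raw spinlock API, using the irqsave variants from EVL thread contexts and the plain variants from out-of-band IRQ handlers, where hard irqs are already off. A minimal sketch of that pattern follows; the example_* names are illustrative only and not part of this patch, and error handling is omitted.

/*
 * Sketch of the post-conversion usage. The example_* names are made
 * up; the lock/wait calls mirror those used in the hunks below.
 */
static struct evl_wait_queue example_wq = EVL_WAIT_INITIALIZER(example_wq);

/* EVL thread context: hard irqs go off while the wait-queue lock is held. */
static void example_wait_for_event(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&example_wq.lock, flags);
	evl_add_wait_queue(&example_wq, EVL_INFINITE, EVL_REL);
	raw_spin_unlock_irqrestore(&example_wq.lock, flags);
	evl_wait_schedule(&example_wq);	/* sketch: return value ignored */
}

/* Out-of-band IRQ context: hard irqs already off, no irqsave variant needed. */
static void example_oob_irq_handler(void)
{
	raw_spin_lock(&example_wq.lock);
	evl_wake_up_head(&example_wq);
	raw_spin_unlock(&example_wq.lock);
	evl_schedule();
}

Since hard irqs are disabled whenever this lock is held, disabling preemption on top of it would buy nothing, which is the point the commit message makes.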
@@ -1714,11 +1714,11 @@ static irqreturn_t lineevent_oob_irq_handler(int irq, void *p)
if (lineevent_read_pin(le, &ge, false) == IRQ_NONE)
return IRQ_NONE;
evl_spin_lock(&le->oob_state.wait.lock);
raw_spin_lock(&le->oob_state.wait.lock);
kfifo_put(&le->events, ge);
evl_wake_up_head(&le->oob_state.wait);
evl_signal_poll_events(&le->oob_state.poll_head, POLLIN|POLLRDNORM);
evl_spin_unlock(&le->oob_state.wait.lock);
raw_spin_unlock(&le->oob_state.wait.lock);
return IRQ_HANDLED;
}
@@ -1732,12 +1732,12 @@ static __poll_t lineevent_oob_poll(struct file *file,
evl_poll_watch(&le->oob_state.poll_head, wait, NULL);
evl_spin_lock_irqsave(&le->oob_state.wait.lock, flags);
raw_spin_lock_irqsave(&le->oob_state.wait.lock, flags);
if (!kfifo_is_empty(&le->events))
ready |= POLLIN|POLLRDNORM;
evl_spin_unlock_irqrestore(&le->oob_state.wait.lock, flags);
raw_spin_unlock_irqrestore(&le->oob_state.wait.lock, flags);
return ready;
}
@@ -1758,7 +1758,7 @@ static ssize_t lineevent_oob_read(struct file *file,
return -EPERM;
do {
evl_spin_lock_irqsave(&le->oob_state.wait.lock, flags);
raw_spin_lock_irqsave(&le->oob_state.wait.lock, flags);
ret = kfifo_get(&le->events, &ge);
/*
@@ -1768,7 +1768,7 @@ static ssize_t lineevent_oob_read(struct file *file,
if (!ret)
ret = 0;
evl_spin_unlock_irqrestore(&le->oob_state.wait.lock, flags);
raw_spin_unlock_irqrestore(&le->oob_state.wait.lock, flags);
if (ret) {
ret = raw_copy_to_user(buf, &ge, sizeof(ge));
@@ -10,7 +10,6 @@
#ifdef CONFIG_EVL
#include <evl/file.h>
#include <evl/lock.h>
#include <evl/mutex.h>
#include <evl/wait.h>
#include <evl/poll.h>
@@ -51,10 +51,10 @@ static inline bool evl_read_flag(struct evl_flag *wf)
}
#define evl_lock_flag(__wf, __flags) \
evl_spin_lock_irqsave(&(__wf)->wait.lock, __flags)
raw_spin_lock_irqsave(&(__wf)->wait.lock, __flags)
#define evl_unlock_flag(__wf, __flags) \
evl_spin_unlock_irqrestore(&(__wf)->wait.lock, __flags)
raw_spin_unlock_irqrestore(&(__wf)->wait.lock, __flags)
static inline
int evl_wait_flag_timeout(struct evl_flag *wf,
@@ -11,7 +11,6 @@
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <evl/lock.h>
#include <evl/list.h>
#include <evl/timer.h>
#include <evl/clock.h>
@@ -32,13 +31,13 @@ struct evl_wait_queue {
int flags;
struct evl_clock *clock;
struct evl_wait_channel wchan;
evl_spinlock_t lock;
hard_spinlock_t lock;
};
#define EVL_WAIT_INITIALIZER(__name) { \
.flags = EVL_WAIT_PRIO, \
.clock = &evl_mono_clock, \
.lock = __EVL_SPIN_LOCK_INITIALIZER((__name).lock), \
.lock = __HARD_SPIN_LOCK_INITIALIZER((__name).lock), \
.wchan = { \
.reorder_wait = evl_reorder_wait, \
.follow_depend = evl_follow_wait_depend, \
@@ -62,7 +61,7 @@ struct evl_wait_queue {
int __ret = 0, __bcast; \
unsigned long __flags; \
\
evl_spin_lock_irqsave(&(__wq)->lock, __flags); \
raw_spin_lock_irqsave(&(__wq)->lock, __flags); \
if (!(__cond)) { \
if (timeout_nonblock(__timeout)) \
__ret = -EAGAIN; \
@@ -70,14 +69,14 @@ struct evl_wait_queue {
do { \
evl_add_wait_queue(__wq, __timeout, \
__timeout_mode); \
evl_spin_unlock_irqrestore(&(__wq)->lock, __flags); \
raw_spin_unlock_irqrestore(&(__wq)->lock, __flags); \
__ret = evl_wait_schedule(__wq); \
__bcast = evl_current()->info & T_BCAST; \
evl_spin_lock_irqsave(&(__wq)->lock, __flags); \
raw_spin_lock_irqsave(&(__wq)->lock, __flags); \
} while (!__ret && !__bcast && !(__cond)); \
} \
} \
evl_spin_unlock_irqrestore(&(__wq)->lock, __flags); \
raw_spin_unlock_irqrestore(&(__wq)->lock, __flags); \
__ret; \
})
@@ -92,7 +91,7 @@ int evl_wait_schedule(struct evl_wait_queue *wq);
static inline bool evl_wait_active(struct evl_wait_queue *wq)
{
assert_evl_lock(&wq->lock);
assert_hard_lock(&wq->lock);
return !list_empty(&wq->wchan.wait_list);
}
@@ -80,7 +80,7 @@ int evl_signal_monitor_targeted(struct evl_thread *target, int monfd)
* loosing events. Too bad.
*/
if (target->wchan == &event->wait_queue.wchan) {
evl_spin_lock_irqsave(&event->wait_queue.lock, flags);
raw_spin_lock_irqsave(&event->wait_queue.lock, flags);
event->state->flags |= (EVL_MONITOR_TARGETED|
EVL_MONITOR_SIGNALED);
raw_spin_lock(&target->lock);
@@ -88,7 +88,7 @@ int evl_signal_monitor_targeted(struct evl_thread *target, int monfd)
target->info |= T_SIGNAL;
raw_spin_unlock(&target->rq->lock);
raw_spin_unlock(&target->lock);
evl_spin_unlock_irqrestore(&event->wait_queue.lock, flags);
raw_spin_unlock_irqrestore(&event->wait_queue.lock, flags);
}
out:
evl_put_file(efilp);
@@ -139,9 +139,9 @@ static void untrack_event(struct evl_monitor *event)
unsigned long flags;
evl_spin_lock_irqsave(&gate->lock, flags);
evl_spin_lock(&event->wait_queue.lock);
raw_spin_lock(&event->wait_queue.lock);
__untrack_event(event);
evl_spin_unlock(&event->wait_queue.lock);
raw_spin_unlock(&event->wait_queue.lock);
evl_spin_unlock_irqrestore(&gate->lock, flags);
}
@@ -270,10 +270,10 @@ static int exit_monitor(struct evl_monitor *gate)
*/
state->flags &= ~EVL_MONITOR_SIGNALED;
list_for_each_entry_safe(event, n, &gate->events, next) {
evl_spin_lock(&event->wait_queue.lock);
raw_spin_lock(&event->wait_queue.lock);
if (event->state->flags & EVL_MONITOR_SIGNALED)
wakeup_waiters(event);
evl_spin_unlock(&event->wait_queue.lock);
raw_spin_unlock(&event->wait_queue.lock);
}
}
@@ -337,17 +337,17 @@ static int wait_monitor_ungated(struct file *filp,
}
} while (!atomic_try_cmpxchg(at, &val, val - 1));
} else {
evl_spin_lock_irqsave(&event->wait_queue.lock, flags);
raw_spin_lock_irqsave(&event->wait_queue.lock, flags);
if (atomic_dec_return(at) < 0) {
evl_add_wait_queue(&event->wait_queue,
timeout, tmode);
evl_spin_unlock_irqrestore(&event->wait_queue.lock,
raw_spin_unlock_irqrestore(&event->wait_queue.lock,
flags);
ret = evl_wait_schedule(&event->wait_queue);
if (ret) /* Rollback decrement if failed. */
atomic_inc(at);
} else
evl_spin_unlock_irqrestore(&event->wait_queue.lock,
raw_spin_unlock_irqrestore(&event->wait_queue.lock,
flags);
}
break;
@@ -407,21 +407,21 @@ static int signal_monitor_ungated(struct evl_monitor *event, s32 sigval)
case EVL_EVENT_COUNT:
if (!sigval)
break;
evl_spin_lock_irqsave(&event->wait_queue.lock, flags);
raw_spin_lock_irqsave(&event->wait_queue.lock, flags);
if (atomic_inc_return(&state->u.event.value) <= 0) {
evl_wake_up_head(&event->wait_queue);
pollable = false;
}
evl_spin_unlock_irqrestore(&event->wait_queue.lock, flags);
raw_spin_unlock_irqrestore(&event->wait_queue.lock, flags);
break;
case EVL_EVENT_MASK:
evl_spin_lock_irqsave(&event->wait_queue.lock, flags);
raw_spin_lock_irqsave(&event->wait_queue.lock, flags);
val = set_event_mask(state, (int)sigval);
if (val)
evl_flush_wait_locked(&event->wait_queue, 0);
else
pollable = false;
evl_spin_unlock_irqrestore(&event->wait_queue.lock, flags);
raw_spin_unlock_irqrestore(&event->wait_queue.lock, flags);
break;
default:
return -EINVAL;
@@ -484,7 +484,7 @@ static int wait_monitor(struct file *filp,
}
evl_spin_lock_irqsave(&gate->lock, flags);
evl_spin_lock(&event->wait_queue.lock);
raw_spin_lock(&event->wait_queue.lock);
/*
* Track event monitors the gate protects. When multiple
@@ -498,7 +498,7 @@ static int wait_monitor(struct file *filp,
event->gate = gate;
event->state->u.event.gate_offset = evl_shared_offset(gate->state);
} else if (event->gate != gate) {
evl_spin_unlock(&event->wait_queue.lock);
raw_spin_unlock(&event->wait_queue.lock);
evl_spin_unlock_irqrestore(&gate->lock, flags);
op_ret = -EBADFD;
goto put;
@@ -511,7 +511,7 @@ static int wait_monitor(struct file *filp,
curr->info &= ~T_SIGNAL;
raw_spin_unlock(&curr->rq->lock);
raw_spin_unlock(&curr->lock);
evl_spin_unlock(&event->wait_queue.lock);
raw_spin_unlock(&event->wait_queue.lock);
__exit_monitor(gate, curr);
evl_spin_unlock_irqrestore(&gate->lock, flags);
@@ -189,9 +189,9 @@ static int add_subscription(struct evl_observable *observable,
*/
evl_spin_lock_irqsave(&observable->lock, flags);
list_add_tail(&observer->next, &observable->observers);
evl_spin_lock(&observable->oob_wait.lock);
raw_spin_lock(&observable->oob_wait.lock);
observable->writable_observers++;
evl_spin_unlock(&observable->oob_wait.lock);
raw_spin_unlock(&observable->oob_wait.lock);
evl_spin_unlock_irqrestore(&observable->lock, flags);
evl_signal_poll_events(&observable->poll_head, POLLOUT|POLLWRNORM);
@@ -220,12 +220,12 @@ static void detach_observer_locked(struct evl_observer *observer,
{
list_del(&observer->next);
evl_spin_lock(&observable->oob_wait.lock);
raw_spin_lock(&observable->oob_wait.lock);
if (!list_empty(&observer->free_list))
decrease_writability(observable);
evl_spin_unlock(&observable->oob_wait.lock);
raw_spin_unlock(&observable->oob_wait.lock);
}
static inline void get_observer(struct evl_observer *observer)
@@ -351,12 +351,12 @@ static void wake_oob_threads(struct evl_observable *observable)
{
unsigned long flags;
evl_spin_lock_irqsave(&observable->oob_wait.lock, flags);
raw_spin_lock_irqsave(&observable->oob_wait.lock, flags);
if (evl_wait_active(&observable->oob_wait))
evl_flush_wait_locked(&observable->oob_wait, 0);
evl_spin_unlock_irqrestore(&observable->oob_wait.lock, flags);
raw_spin_unlock_irqrestore(&observable->oob_wait.lock, flags);
evl_schedule();
}
@@ -385,7 +385,7 @@ void evl_flush_observable(struct evl_observable *observable)
* some events might still being pushed to the observable via
* the thread file descriptor, so locking is required.
*/
evl_spin_lock_irqsave(&observable->oob_wait.lock, flags);
raw_spin_lock_irqsave(&observable->oob_wait.lock, flags);
list_for_each_entry_safe(observer, tmp, &observable->observers, next) {
list_del_init(&observer->next);
@@ -393,7 +393,7 @@ void evl_flush_observable(struct evl_observable *observable)
decrease_writability(observable);
}
evl_spin_unlock_irqrestore(&observable->oob_wait.lock, flags);
raw_spin_unlock_irqrestore(&observable->oob_wait.lock, flags);
wake_oob_threads(observable);
}
@@ -459,7 +459,7 @@ static bool notify_one_observer(struct evl_observable *observable,
struct evl_notice last_notice;
unsigned long flags;
evl_spin_lock_irqsave(&observable->oob_wait.lock, flags);
raw_spin_lock_irqsave(&observable->oob_wait.lock, flags);
if (observer->flags & EVL_NOTIFY_ONCHANGE) {
last_notice = observer->last_notice;
@@ -473,7 +473,7 @@ static bool notify_one_observer(struct evl_observable *observable,
}
if (list_empty(&observer->free_list)) {
evl_spin_unlock_irqrestore(&observable->oob_wait.lock, flags);
raw_spin_unlock_irqrestore(&observable->oob_wait.lock, flags);
return false;
}
@@ -486,7 +486,7 @@ static bool notify_one_observer(struct evl_observable *observable,
if (list_empty(&observer->free_list))
decrease_writability(observable);
done:
evl_spin_unlock_irqrestore(&observable->oob_wait.lock, flags);
raw_spin_unlock_irqrestore(&observable->oob_wait.lock, flags);
return true;
}
@@ -733,7 +733,7 @@ pull_from_oob(struct evl_observable *observable,
unsigned long flags;
int ret;
evl_spin_lock_irqsave(&observable->oob_wait.lock, flags);
raw_spin_lock_irqsave(&observable->oob_wait.lock, flags);
/*
* observable->wait.lock guards the pending and free
@@ -752,17 +752,17 @@ pull_from_oob(struct evl_observable *observable,
goto out;
}
evl_add_wait_queue(&observable->oob_wait, EVL_INFINITE, EVL_REL);
evl_spin_unlock_irqrestore(&observable->oob_wait.lock, flags);
raw_spin_unlock_irqrestore(&observable->oob_wait.lock, flags);
ret = evl_wait_schedule(&observable->oob_wait);
if (ret)
return ERR_PTR(ret);
evl_spin_lock_irqsave(&observable->oob_wait.lock, flags);
raw_spin_lock_irqsave(&observable->oob_wait.lock, flags);
}
nfr = list_get_entry(&observer->pending_list,
struct evl_notification_record, next);
out:
evl_spin_unlock_irqrestore(&observable->oob_wait.lock, flags);
raw_spin_unlock_irqrestore(&observable->oob_wait.lock, flags);
return nfr;
}
@@ -784,7 +784,7 @@ pull_from_inband(struct evl_observable *observable,
* an explanation.
*/
spin_lock_irqsave(&observable->inband_wait.lock, ib_flags);
evl_spin_lock_irqsave(&observable->oob_wait.lock, oob_flags);
raw_spin_lock_irqsave(&observable->oob_wait.lock, oob_flags);
for (;;) {
/*
@@ -805,11 +805,11 @@ pull_from_inband(struct evl_observable *observable,
break;
}
set_current_state(TASK_INTERRUPTIBLE);
evl_spin_unlock_irqrestore(&observable->oob_wait.lock, oob_flags);
raw_spin_unlock_irqrestore(&observable->oob_wait.lock, oob_flags);
spin_unlock_irqrestore(&observable->inband_wait.lock, ib_flags);
schedule();
spin_lock_irqsave(&observable->inband_wait.lock, ib_flags);
evl_spin_lock_irqsave(&observable->oob_wait.lock, oob_flags);
raw_spin_lock_irqsave(&observable->oob_wait.lock, oob_flags);
if (signal_pending(current)) {
ret = -ERESTARTSYS;
@@ -819,7 +819,7 @@ pull_from_inband(struct evl_observable *observable,
list_del(&wq_entry.entry);
evl_spin_unlock_irqrestore(&observable->oob_wait.lock, oob_flags);
raw_spin_unlock_irqrestore(&observable->oob_wait.lock, oob_flags);
spin_unlock_irqrestore(&observable->inband_wait.lock, ib_flags);
return ret ? ERR_PTR(ret) : nfr;
@@ -859,7 +859,7 @@ static int pull_notification(struct evl_observable *observable,
*/
ret = raw_copy_to_user(u_buf, &nf, sizeof(nf));
evl_spin_lock_irqsave(&observable->oob_wait.lock, flags);
raw_spin_lock_irqsave(&observable->oob_wait.lock, flags);
if (ret) {
list_add(&nfr->next, &observer->pending_list);
@@ -872,7 +872,7 @@ static int pull_notification(struct evl_observable *observable,
list_add(&nfr->next, &observer->free_list);
}
evl_spin_unlock_irqrestore(&observable->oob_wait.lock, flags);
raw_spin_unlock_irqrestore(&observable->oob_wait.lock, flags);
if (unlikely(sigpoll))
evl_signal_poll_events(&observable->poll_head,
@@ -944,7 +944,7 @@ static __poll_t poll_observable(struct evl_observable *observable)
if (observer == NULL)
return POLLERR;
evl_spin_lock_irqsave(&observable->oob_wait.lock, flags);
raw_spin_lock_irqsave(&observable->oob_wait.lock, flags);
/* Only subscribers can inquire about readability. */
if (observer && !list_empty(&observer->pending_list))
@@ -953,7 +953,7 @@ static __poll_t poll_observable(struct evl_observable *observable)
if (observable->writable_observers > 0)
ret |= POLLOUT|POLLWRNORM;
evl_spin_unlock_irqrestore(&observable->oob_wait.lock, flags);
raw_spin_unlock_irqrestore(&observable->oob_wait.lock, flags);
return ret;
}
@@ -38,9 +38,9 @@ int evl_trydown(struct evl_ksem *ksem)
unsigned long flags;
bool ret;
evl_spin_lock_irqsave(&ksem->wait.lock, flags);
raw_spin_lock_irqsave(&ksem->wait.lock, flags);
ret = down_ksem(ksem);
evl_spin_unlock_irqrestore(&ksem->wait.lock, flags);
raw_spin_unlock_irqrestore(&ksem->wait.lock, flags);
return ret ? 0 : -EAGAIN;
}
@@ -50,10 +50,10 @@ void evl_up(struct evl_ksem *ksem)
{
unsigned long flags;
evl_spin_lock_irqsave(&ksem->wait.lock, flags);
raw_spin_lock_irqsave(&ksem->wait.lock, flags);
ksem->value++;
evl_wake_up_head(&ksem->wait);
evl_spin_unlock_irqrestore(&ksem->wait.lock, flags);
raw_spin_unlock_irqrestore(&ksem->wait.lock, flags);
evl_schedule();
}
EXPORT_SYMBOL_GPL(evl_up);
@@ -62,10 +62,10 @@ void evl_broadcast(struct evl_ksem *ksem)
{
unsigned long flags;
evl_spin_lock_irqsave(&ksem->wait.lock, flags);
raw_spin_lock_irqsave(&ksem->wait.lock, flags);
ksem->value = 0;
evl_flush_wait_locked(&ksem->wait, T_BCAST);
evl_spin_unlock_irqrestore(&ksem->wait.lock, flags);
raw_spin_unlock_irqrestore(&ksem->wait.lock, flags);
evl_schedule();
}
EXPORT_SYMBOL_GPL(evl_broadcast);
@@ -53,7 +53,7 @@ static int claim_stax_from_oob(struct evl_stax *stax, int gateval)
unsigned long flags;
bool notify = false;
evl_spin_lock_irqsave(&stax->oob_wait.lock, flags);
raw_spin_lock_irqsave(&stax->oob_wait.lock, flags);
if (gateval & STAX_CLAIMED_BIT) {
prev = atomic_read(&stax->gate);
@@ -79,9 +79,9 @@ static int claim_stax_from_oob(struct evl_stax *stax, int gateval)
if (oob_may_access(atomic_read(&stax->gate)))
break;
evl_add_wait_queue(&stax->oob_wait, EVL_INFINITE, EVL_REL);
evl_spin_unlock_irqrestore(&stax->oob_wait.lock, flags);
raw_spin_unlock_irqrestore(&stax->oob_wait.lock, flags);
ret = evl_wait_schedule(&stax->oob_wait);
evl_spin_lock_irqsave(&stax->oob_wait.lock, flags);
raw_spin_lock_irqsave(&stax->oob_wait.lock, flags);
} while (!ret);
/* Clear the claim bit if nobody contends anymore. */
@@ -94,7 +94,7 @@ static int claim_stax_from_oob(struct evl_stax *stax, int gateval)
} while (prev != old);
}
out:
evl_spin_unlock_irqrestore(&stax->oob_wait.lock, flags);
raw_spin_unlock_irqrestore(&stax->oob_wait.lock, flags);
if (notify) {
evl_notify_thread(curr, EVL_HMDIAG_STAGEX, evl_nil);
@@ -161,7 +161,7 @@ static int claim_stax_from_inband(struct evl_stax *stax, int gateval)
* this sequence is legit.
*/
spin_lock_irqsave(&stax->inband_wait.lock, ib_flags);
evl_spin_lock_irqsave(&stax->oob_wait.lock, oob_flags);
raw_spin_lock_irqsave(&stax->oob_wait.lock, oob_flags);
if (gateval & STAX_CLAIMED_BIT) {
prev = atomic_read(&stax->gate);
@@ -188,11 +188,11 @@ static int claim_stax_from_inband(struct evl_stax *stax, int gateval)
break;
set_current_state(TASK_INTERRUPTIBLE);
evl_spin_unlock_irqrestore(&stax->oob_wait.lock, oob_flags);
raw_spin_unlock_irqrestore(&stax->oob_wait.lock, oob_flags);
spin_unlock_irqrestore(&stax->inband_wait.lock, ib_flags);
schedule();
spin_lock_irqsave(&stax->inband_wait.lock, ib_flags);
evl_spin_lock_irqsave(&stax->oob_wait.lock, oob_flags);
raw_spin_lock_irqsave(&stax->oob_wait.lock, oob_flags);
if (signal_pending(current)) {
ret = -ERESTARTSYS;
@@ -211,7 +211,7 @@ static int claim_stax_from_inband(struct evl_stax *stax, int gateval)
} while (prev != old);
}
out:
evl_spin_unlock_irqrestore(&stax->oob_wait.lock, oob_flags);
raw_spin_unlock_irqrestore(&stax->oob_wait.lock, oob_flags);
spin_unlock_irqrestore(&stax->inband_wait.lock, ib_flags);
return ret;
@@ -314,7 +314,7 @@ static void unlock_from_oob(struct evl_stax *stax)
* stax is claimed by inband, we have to take the slow path
* under lock.
*/
evl_spin_lock_irqsave(&stax->oob_wait.lock, flags);
raw_spin_lock_irqsave(&stax->oob_wait.lock, flags);
do {
old = prev;
@@ -324,7 +324,7 @@ static void unlock_from_oob(struct evl_stax *stax)
prev = atomic_cmpxchg(&stax->gate, old, new);
} while (prev != old);
evl_spin_unlock_irqrestore(&stax->oob_wait.lock, flags);
raw_spin_unlock_irqrestore(&stax->oob_wait.lock, flags);
if (!(new & STAX_CONCURRENCY_MASK))
irq_work_queue(&stax->irq_work);
@@ -363,7 +363,7 @@ static void unlock_from_inband(struct evl_stax *stax)
* Converse to unlock_from_oob(): stax is claimed by oob, we
* have to take the slow path under lock.
*/
evl_spin_lock_irqsave(&stax->oob_wait.lock, flags);
raw_spin_lock_irqsave(&stax->oob_wait.lock, flags);
do {
old = prev;
@@ -378,7 +378,7 @@ static void unlock_from_inband(struct evl_stax *stax)
if (!(new & STAX_CONCURRENCY_MASK))
evl_flush_wait_locked(&stax->oob_wait, 0);
out:
evl_spin_unlock_irqrestore(&stax->oob_wait.lock, flags);
raw_spin_unlock_irqrestore(&stax->oob_wait.lock, flags);
evl_schedule();
}
@@ -1451,7 +1451,7 @@ int activate_oob_mm_state(struct oob_mm_state *p)
/*
* A bit silly but we need a dynamic allocation for the EVL
* wait queue only to work around some inclusion hell when
* defining EVL's version of struct oob_mm_state.
* defining EVL's version of struct oob_mm_state. Revisit?
*/
p->ptsync_barrier = kmalloc(sizeof(*p->ptsync_barrier), GFP_KERNEL);
if (p->ptsync_barrier == NULL)
@@ -1691,14 +1691,14 @@ static void join_ptsync(struct evl_thread *curr)
{
struct oob_mm_state *oob_mm = curr->oob_mm;
evl_spin_lock(&oob_mm->ptsync_barrier->lock);
raw_spin_lock(&oob_mm->ptsync_barrier->lock);
/* In non-stop mode, no ptsync sequence is started. */
if (test_bit(EVL_MM_PTSYNC_BIT, &oob_mm->flags) &&
list_empty(&curr->ptsync_next))
list_add_tail(&curr->ptsync_next, &oob_mm->ptrace_sync);
evl_spin_unlock(&oob_mm->ptsync_barrier->lock);
raw_spin_unlock(&oob_mm->ptsync_barrier->lock);
}
static int leave_ptsync(struct evl_thread *leaver)
@@ -1707,7 +1707,7 @@ static int leave_ptsync(struct evl_thread *leaver)
unsigned long flags;
int ret = 0;
evl_spin_lock_irqsave(&oob_mm->ptsync_barrier->lock, flags);
raw_spin_lock_irqsave(&oob_mm->ptsync_barrier->lock, flags);
if (!test_bit(EVL_MM_PTSYNC_BIT, &oob_mm->flags))
goto out;
@@ -1721,7 +1721,7 @@ static int leave_ptsync(struct evl_thread *leaver)
ret = 1;
}
out:
evl_spin_unlock_irqrestore(&oob_mm->ptsync_barrier->lock, flags);
raw_spin_unlock_irqrestore(&oob_mm->ptsync_barrier->lock, flags);
return ret;
}
@@ -18,11 +18,11 @@ void __evl_init_wait(struct evl_wait_queue *wq,
{
wq->flags = flags;
wq->clock = clock;
evl_spin_lock_init(&wq->lock);
raw_spin_lock_init(&wq->lock);
wq->wchan.reorder_wait = evl_reorder_wait;
wq->wchan.follow_depend = evl_follow_wait_depend;
INIT_LIST_HEAD(&wq->wchan.wait_list);
lockdep_set_class_and_name(&wq->lock._lock, key, name);
lockdep_set_class_and_name(&wq->lock, key, name);
}
EXPORT_SYMBOL_GPL(__evl_init_wait);
@@ -33,13 +33,13 @@ void evl_destroy_wait(struct evl_wait_queue *wq)
}
EXPORT_SYMBOL_GPL(evl_destroy_wait);
/* wq->lock held, irqs off */
/* wq->lock held, hard irqs off */
void evl_add_wait_queue(struct evl_wait_queue *wq, ktime_t timeout,
enum evl_tmode timeout_mode)
{
struct evl_thread *curr = evl_current();
assert_evl_lock(&wq->lock);
assert_hard_lock(&wq->lock);
trace_evl_wait(wq);