{ \
unsigned long flags; \
\
- raw_local_irq_save(flags); \
+ flags = hard_local_irq_save(); \
v->counter = v->counter c_op i; \
- raw_local_irq_restore(flags); \
+ hard_local_irq_restore(flags); \
}
#define ATOMIC_OP_RETURN(op, c_op) \
unsigned long flags; \
int ret; \
\
- raw_local_irq_save(flags); \
+ flags = hard_local_irq_save(); \
ret = (v->counter = v->counter c_op i); \
- raw_local_irq_restore(flags); \
+ hard_local_irq_restore(flags); \
\
return ret; \
}
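
With the interrupt pipeline, raw_local_irq_save() only stalls the root domain's virtual interrupt state, so an out-of-band (head domain) interrupt can still preempt the read-modify-write sequence; hard_local_irq_save() masks the CPU itself. Assuming the usual asm-generic ATOMIC_OP() wrapper around these bodies, the converted macro expands roughly as follows for ATOMIC_OP(add, +) (illustrative sketch, not part of the patch):

static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long flags;

	flags = hard_local_irq_save();	/* real CPU masking, not a virtual stall */
	v->counter = v->counter + i;
	hard_local_irq_restore(flags);
}
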
* this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do { \
arch_spinlock_t *s = ATOMIC_HASH(l); \
- local_irq_save(f); \
+ (f) = hard_local_irq_save(); \
arch_spin_lock(s); \
} while(0)
#define _atomic_spin_unlock_irqrestore(l,f) do { \
arch_spinlock_t *s = ATOMIC_HASH(l); \
arch_spin_unlock(s); \
- local_irq_restore(f); \
+ hard_local_irq_restore(f); \
} while(0)
#else
-# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
-# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
+# define _atomic_spin_lock_irqsave(l,f) do { (f) = hard_local_irq_save(); } while (0)
+# define _atomic_spin_unlock_irqrestore(l,f) do { hard_local_irq_restore(f); } while (0)
#endif
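
These helpers hash the atomic's address onto a small array of arch spinlocks (ATOMIC_HASH) on SMP, and fall back to plain interrupt masking on UP; in both cases the critical section is now entered with hard interrupts off. A typical emulated operation brackets its update like this (sketch modeled on the usual callers; the function name is a placeholder):

static __inline__ void atomic_add_emulated(int i, atomic_t *v)
{
	unsigned long flags;

	_atomic_spin_lock_irqsave(v, flags);	/* hard IRQs off + hashed lock */
	v->counter += i;
	_atomic_spin_unlock_irqrestore(v, flags);
}
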
/*
if (size == 8 && sizeof(unsigned long) != 8)
wrong_size_cmpxchg(ptr);
- raw_local_irq_save(flags);
+ flags = hard_local_irq_save();
switch (size) {
case 1: prev = *(u8 *)ptr;
if (prev == old)
default:
wrong_size_cmpxchg(ptr);
}
- raw_local_irq_restore(flags);
+ hard_local_irq_restore(flags);
return prev;
}
u64 prev;
unsigned long flags;
- raw_local_irq_save(flags);
+ flags = hard_local_irq_save();
prev = *(u64 *)ptr;
if (prev == old)
*(u64 *)ptr = new;
- raw_local_irq_restore(flags);
+ hard_local_irq_restore(flags);
return prev;
}
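
The generic cmpxchg emulation is only atomic because nothing can run between the load, the compare and the store; once the kernel is pipelined, virtual masking no longer guarantees that, hence the hard masking above. Caller-side usage is unchanged, e.g. a CPU-local counter bumped with cmpxchg_local() (sketch; sample_counter is a placeholder):

static unsigned long sample_counter;

static void sample_counter_inc(void)
{
	unsigned long old, new;

	do {
		old = sample_counter;
		new = old + 1;
	} while (cmpxchg_local(&sample_counter, old, new) != old);
}
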
* helpers are enough to protect RCU uses inside the exception. So
* just return immediately if we detect we are in an IRQ.
*/
- if (in_interrupt())
+ if (!ipipe_root_p || in_interrupt())
return;
local_irq_save(flags);
{
unsigned long flags;
- if (in_interrupt())
+ if (!ipipe_root_p || in_interrupt())
return;
local_irq_save(flags);
*/
atomic_t kgdb_active = ATOMIC_INIT(-1);
EXPORT_SYMBOL_GPL(kgdb_active);
-static DEFINE_RAW_SPINLOCK(dbg_master_lock);
-static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
+static IPIPE_DEFINE_RAW_SPINLOCK(dbg_master_lock);
+static IPIPE_DEFINE_RAW_SPINLOCK(dbg_slave_lock);
/*
* We use NR_CPUs not PERCPU, in case kgdb is used to debug early
static void dbg_touch_watchdogs(void)
{
touch_softlockup_watchdog_sync();
+#ifndef CONFIG_IPIPE
clocksource_touch_watchdog();
+#endif
rcu_cpu_stall_reset();
}
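
kgdb's serialization locks can be taken while the CPU is hard-masked and, potentially, with out-of-band activity pending, so they are switched to the I-pipe aware spinlock flavor; the assumption here is that IPIPE_DEFINE_RAW_SPINLOCK keeps the regular raw_spin_lock*() API while having the lock operations mask the CPU as needed. Declaration and use then look like the stock code (sketch; dbg_sample_lock is a placeholder):

static IPIPE_DEFINE_RAW_SPINLOCK(dbg_sample_lock);

static void dbg_sample_critical(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&dbg_sample_lock, flags);
	/* ... debugger state shared with the hard-masked entry path ... */
	raw_spin_unlock_irqrestore(&dbg_sample_lock, flags);
}
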
* Interrupts will be restored by the 'trap return' code, except when
* single stepping.
*/
- local_irq_save(flags);
+ flags = hard_local_irq_save();
cpu = ks->cpu;
kgdb_info[cpu].debuggerinfo = regs;
smp_mb__before_atomic();
atomic_dec(&slaves_in_kgdb);
dbg_touch_watchdogs();
- local_irq_restore(flags);
+ hard_local_irq_restore(flags);
return 0;
}
cpu_relax();
atomic_set(&kgdb_active, -1);
raw_spin_unlock(&dbg_master_lock);
dbg_touch_watchdogs();
- local_irq_restore(flags);
+ hard_local_irq_restore(flags);
goto acquirelock;
}
atomic_set(&kgdb_active, -1);
raw_spin_unlock(&dbg_master_lock);
dbg_touch_watchdogs();
- local_irq_restore(flags);
+ hard_local_irq_restore(flags);
return kgdb_info[cpu].ret_state;
}
if (!kgdb_connected || atomic_read(&kgdb_active) != -1 || dbg_kdb_mode)
return;
- local_irq_save(flags);
+ flags = hard_local_irq_save();
gdbstub_msg_write(s, count);
- local_irq_restore(flags);
+ hard_local_irq_restore(flags);
}
static struct console kgdbcons = {
bool ret = true;
if (module) {
- preempt_disable();
+ unsigned long flags = hard_preempt_disable();
/* Note: here, we can fail to get a reference */
if (likely(module_is_live(module) &&
atomic_inc_not_zero(&module->refcnt) != 0))
else
ret = false;
- preempt_enable();
+ hard_preempt_enable(flags);
}
return ret;
}
int ret;
if (module) {
- preempt_disable();
+ unsigned long flags = hard_preempt_disable();
ret = atomic_dec_if_positive(&module->refcnt);
WARN_ON(ret < 0); /* Failed to put refcount */
trace_module_put(module, _RET_IP_);
- preempt_enable();
+ hard_preempt_enable(flags);
}
}
EXPORT_SYMBOL(module_put);
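
try_module_get() and module_put() now rely on hard_preempt_disable()/hard_preempt_enable(); the assumption here is that this pair disables preemption while also hard-masking the CPU, handing back the previous hardware flags so they can be restored afterwards. The resulting pattern on its own (usage sketch; sample_stat is a placeholder):

static unsigned long sample_stat;

static void update_sample_stat(void)
{
	unsigned long flags;

	flags = hard_preempt_disable();	/* no migration, no out-of-band preemption */
	sample_stat++;
	hard_preempt_enable(flags);
}
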
goto Enable_cpus;
local_irq_disable();
+ hard_cond_local_irq_disable();
error = syscore_suspend();
if (error) {
goto Enable_cpus;
local_irq_disable();
+ hard_cond_local_irq_disable();
error = syscore_suspend();
if (error)
goto Enable_cpus;
local_irq_disable();
+ hard_cond_local_irq_disable();
syscore_suspend();
if (pm_wakeup_pending()) {
error = -EAGAIN;
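
local_irq_disable() only stalls the root domain once the kernel is pipelined, so the suspend paths follow it with hard_cond_local_irq_disable(); the assumption is that this helper hard-masks the CPU when CONFIG_IPIPE is enabled and compiles away otherwise, leaving the stock behaviour untouched. The pattern in isolation (sketch; the function is a placeholder):

static void sample_enter_lowpower(void)
{
	local_irq_disable();		/* virtual masking: root domain only */
	hard_cond_local_irq_disable();	/* also mask the CPU when CONFIG_IPIPE=y */

	/* ... syscore_suspend() and the rest of the platform sequence ... */
}
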
* Ensure each lock is in a separate cacheline.
*/
static union {
- raw_spinlock_t lock;
+ ipipe_spinlock_t lock;
char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
[0 ... (NR_LOCKS - 1)] = {
- .lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
+ .lock = IPIPE_SPIN_LOCK_UNLOCKED,
},
};
-static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
+static inline ipipe_spinlock_t *lock_addr(const atomic64_t *v)
{
unsigned long addr = (unsigned long) v;
long long atomic64_read(const atomic64_t *v)
{
unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
+ ipipe_spinlock_t *lock = lock_addr(v);
long long val;
raw_spin_lock_irqsave(lock, flags);
void atomic64_set(atomic64_t *v, long long i)
{
unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
+ ipipe_spinlock_t *lock = lock_addr(v);
raw_spin_lock_irqsave(lock, flags);
v->counter = i;
void atomic64_##op(long long a, atomic64_t *v) \
{ \
unsigned long flags; \
- raw_spinlock_t *lock = lock_addr(v); \
+ ipipe_spinlock_t *lock = lock_addr(v); \
\
raw_spin_lock_irqsave(lock, flags); \
v->counter c_op a; \
long long atomic64_##op##_return(long long a, atomic64_t *v) \
{ \
unsigned long flags; \
- raw_spinlock_t *lock = lock_addr(v); \
+ ipipe_spinlock_t *lock = lock_addr(v); \
long long val; \
\
raw_spin_lock_irqsave(lock, flags); \
long long atomic64_fetch_##op(long long a, atomic64_t *v) \
{ \
unsigned long flags; \
- raw_spinlock_t *lock = lock_addr(v); \
+ ipipe_spinlock_t *lock = lock_addr(v); \
long long val; \
\
raw_spin_lock_irqsave(lock, flags); \
long long atomic64_dec_if_positive(atomic64_t *v)
{
unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
+ ipipe_spinlock_t *lock = lock_addr(v);
long long val;
raw_spin_lock_irqsave(lock, flags);
long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
{
unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
+ ipipe_spinlock_t *lock = lock_addr(v);
long long val;
raw_spin_lock_irqsave(lock, flags);
long long atomic64_xchg(atomic64_t *v, long long new)
{
unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
+ ipipe_spinlock_t *lock = lock_addr(v);
long long val;
raw_spin_lock_irqsave(lock, flags);
int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
+ ipipe_spinlock_t *lock = lock_addr(v);
int ret = 0;
raw_spin_lock_irqsave(lock, flags);
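
The spinlock-based atomic64 emulation keeps its hashed, cacheline-padded lock array, but the lock type becomes ipipe_spinlock_t so that raw_spin_lock_irqsave() on it masks the CPU rather than just the root domain (that is the working assumption for the I-pipe lock flavor). For example, ATOMIC64_OP(add, +=) now expands roughly to (sketch, not part of the patch):

void atomic64_add(long long a, atomic64_t *v)
{
	unsigned long flags;
	ipipe_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter += a;
	raw_spin_unlock_irqrestore(lock, flags);
}
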
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/sched.h>
+#include <linux/ipipe.h>
notrace static unsigned int check_preemption_disabled(const char *what1,
const char *what2)
{
int this_cpu = raw_smp_processor_id();
+ if (hard_irqs_disabled())
+ goto out;
+
+ if (!ipipe_root_p)
+ goto out;
+
if (likely(preempt_count()))
goto out;
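
Two early exits are added before the usual preempt-count test: if hard interrupts are masked the CPU cannot change under the caller, and if the CPU is not running the root domain the context is not subject to Linux scheduling at all, so the smp_processor_id() sanity check would only produce noise. Condensed, the check now reads (sketch; the stock checks and the warning report are elided):

static unsigned int check_preemption_disabled_sketch(void)
{
	int this_cpu = raw_smp_processor_id();

	if (hard_irqs_disabled())	/* CPU pinned: no IRQs, no migration */
		goto out;
	if (!ipipe_root_p)		/* out-of-band context, not Linux-scheduled */
		goto out;
	if (likely(preempt_count()))
		goto out;
	/* ... remaining checks and the preemption warning ... */
out:
	return this_cpu;
}
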