return files;
}
-void fastcall put_files_struct(struct files_struct *files)
+void put_files_struct(struct files_struct *files)
{
struct fdtable *fdt;
zap_pid_ns_processes(tsk->nsproxy->pid_ns);
}
-fastcall NORET_TYPE void do_exit(long code)
+NORET_TYPE void do_exit(long code)
{
struct task_struct *tsk = current;
int group_dead;
* is dropped: either by a lazy thread or by
* mmput. Free the page directory and the mm.
*/
-void fastcall __mmdrop(struct mm_struct *mm)
+void __mmdrop(struct mm_struct *mm)
{
BUG_ON(mm == &init_mm);
mm_free_pgd(mm);
* Note: The caller is expected to handle the ack, clear, mask and
* unmask issues if necessary.
*/
-void fastcall
+void
handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
struct irqaction *action;
* it after the associated handler has acknowledged the device, so the
* interrupt line is back to inactive.
*/
-void fastcall
+void
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
unsigned int cpu = smp_processor_id();
* for modern forms of interrupt handlers, which handle the flow
* details in hardware, transparently.
*/
-void fastcall
+void
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
unsigned int cpu = smp_processor_id();
* the handler was running. If all pending interrupts are handled, the
* loop is left.
*/
-void fastcall
+void
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
const unsigned int cpu = smp_processor_id();
*
* Per CPU interrupts on SMP machines without locking requirements
*/
-void fastcall
+void
handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
{
irqreturn_t action_ret;
*
* Handles spurious and unhandled IRQs. It also prints a debug message.
*/
-void fastcall
+void
handle_bad_irq(unsigned int irq, struct irq_desc *desc)
{
print_irq_desc(irq, desc);
* This is the original x86 implementation which is used for every
* interrupt type.
*/
-fastcall unsigned int __do_IRQ(unsigned int irq)
+unsigned int __do_IRQ(unsigned int irq)
{
struct irq_desc *desc = irq_desc + irq;
struct irqaction *action;
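/*
 * Illustrative sketch (editor's addition, not part of the patch): drivers
 * do not call the flow handlers above directly; they register an ISR with
 * request_irq() and the core dispatches it through whichever flow handler
 * the platform installed.  MY_IRQ, my_isr and "mydev" are hypothetical.
 */
#include <linux/interrupt.h>

#define MY_IRQ 16	/* hypothetical interrupt line */

static irqreturn_t my_isr(int irq, void *dev_id)
{
	/* acknowledge and service the device here */
	return IRQ_HANDLED;
}

static int my_probe(void)
{
	/* no IRQF_SHARED, so a NULL dev_id is acceptable */
	return request_irq(MY_IRQ, my_isr, 0, "mydev", NULL);
}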
* use of the mutex is forbidden. The mutex must not be locked when
* this function is called.
*/
-void fastcall mutex_destroy(struct mutex *lock)
+void mutex_destroy(struct mutex *lock)
{
DEBUG_LOCKS_WARN_ON(mutex_is_locked(lock));
lock->magic = NULL;
* We also put the fastpath first in the kernel image, to make sure the
* branch is predicted by the CPU as default-untaken.
*/
-static void fastcall noinline __sched
+static void noinline __sched
__mutex_lock_slowpath(atomic_t *lock_count);
/***
*
* This function is similar to (but not equivalent to) down().
*/
-void inline fastcall __sched mutex_lock(struct mutex *lock)
+void inline __sched mutex_lock(struct mutex *lock)
{
might_sleep();
/*
EXPORT_SYMBOL(mutex_lock);
#endif
-static void fastcall noinline __sched
-__mutex_unlock_slowpath(atomic_t *lock_count);
+static noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
/***
* mutex_unlock - release the mutex
*
* This function is similar to (but not equivalent to) up().
*/
-void fastcall __sched mutex_unlock(struct mutex *lock)
+void __sched mutex_unlock(struct mutex *lock)
{
/*
* The unlocking fastpath is the 0->1 transition from 'locked'
/*
* Release the lock, slowpath:
*/
-static fastcall inline void
+static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
struct mutex *lock = container_of(lock_count, struct mutex, count);
/*
* Release the lock, slowpath:
*/
-static fastcall noinline void
+static noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
__mutex_unlock_common_slowpath(lock_count, 1);
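/*
 * Illustrative sketch (editor's addition, not part of the patch): basic
 * mutex usage.  Removing fastcall changes no call sites; callers keep the
 * plain C calling convention.  my_mutex and my_count are hypothetical.
 */
#include <linux/mutex.h>

static DEFINE_MUTEX(my_mutex);
static int my_count;

static void my_update(void)
{
	mutex_lock(&my_mutex);		/* may sleep; never use in IRQ context */
	my_count++;
	mutex_unlock(&my_mutex);	/* must be released by the acquiring task */
}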
* Here come the less common (and hence less performance-critical) APIs:
* mutex_lock_interruptible() and mutex_trylock().
*/
-static int fastcall noinline __sched
+static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count);
-static noinline int fastcall __sched
+static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count);
/***
*
* This function is similar to (but not equivalent to) down_interruptible().
*/
-int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
+int __sched mutex_lock_interruptible(struct mutex *lock)
{
might_sleep();
return __mutex_fastpath_lock_retval
EXPORT_SYMBOL(mutex_lock_interruptible);
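/*
 * Illustrative sketch (editor's addition, not part of the patch):
 * mutex_lock_interruptible() returns 0 on success and -EINTR if a signal
 * arrived while waiting, so the return value must be checked (my_mutex and
 * my_count as in the sketch above).
 */
static int my_update_interruptible(void)
{
	int ret = mutex_lock_interruptible(&my_mutex);

	if (ret)
		return ret;		/* interrupted, lock not held */
	my_count++;
	mutex_unlock(&my_mutex);
	return 0;
}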
-int fastcall __sched mutex_lock_killable(struct mutex *lock)
+int __sched mutex_lock_killable(struct mutex *lock)
{
might_sleep();
return __mutex_fastpath_lock_retval
}
EXPORT_SYMBOL(mutex_lock_killable);
-static void fastcall noinline __sched
+static noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
struct mutex *lock = container_of(lock_count, struct mutex, count);
__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, _RET_IP_);
}
-static int fastcall noinline __sched
+static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count)
{
struct mutex *lock = container_of(lock_count, struct mutex, count);
return __mutex_lock_common(lock, TASK_KILLABLE, 0, _RET_IP_);
}
-static noinline int fastcall __sched
+static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
{
struct mutex *lock = container_of(lock_count, struct mutex, count);
* This function must not be used in interrupt context. The
* mutex must be released by the same task that acquired it.
*/
-int fastcall __sched mutex_trylock(struct mutex *lock)
+int __sched mutex_trylock(struct mutex *lock)
{
return __mutex_fastpath_trylock(&lock->count,
__mutex_trylock_slowpath);
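/*
 * Illustrative sketch (editor's addition, not part of the patch):
 * mutex_trylock() never sleeps; it returns 1 if the lock was acquired and
 * 0 if it is contended (my_mutex and my_count as in the sketch above).
 */
static void my_try_update(void)
{
	if (mutex_trylock(&my_mutex)) {
		my_count++;
		mutex_unlock(&my_mutex);
	}
	/* else: someone else holds the lock, skip the update */
}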
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);
-static fastcall void free_pidmap(struct pid_namespace *pid_ns, int pid)
+static void free_pidmap(struct pid_namespace *pid_ns, int pid)
{
struct pidmap *map = pid_ns->pidmap + pid / BITS_PER_PAGE;
int offset = pid & BITS_PER_PAGE_MASK;
return -1;
}
-fastcall void put_pid(struct pid *pid)
+void put_pid(struct pid *pid)
{
struct pid_namespace *ns;
put_pid(pid);
}
-fastcall void free_pid(struct pid *pid)
+void free_pid(struct pid *pid)
{
/* We can be called with write_lock_irq(&tasklist_lock) held */
int i;
goto out;
}
-struct pid * fastcall find_pid_ns(int nr, struct pid_namespace *ns)
+struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
{
struct hlist_node *elem;
struct upid *pnr;
/*
* attach_pid() must be called with the tasklist_lock write-held.
*/
-int fastcall attach_pid(struct task_struct *task, enum pid_type type,
+int attach_pid(struct task_struct *task, enum pid_type type,
struct pid *pid)
{
struct pid_link *link;
return 0;
}
-void fastcall detach_pid(struct task_struct *task, enum pid_type type)
+void detach_pid(struct task_struct *task, enum pid_type type)
{
struct pid_link *link;
struct pid *pid;
}
/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
-void fastcall transfer_pid(struct task_struct *old, struct task_struct *new,
+void transfer_pid(struct task_struct *old, struct task_struct *new,
enum pid_type type)
{
new->pids[type].pid = old->pids[type].pid;
old->pids[type].pid = NULL;
}
-struct task_struct * fastcall pid_task(struct pid *pid, enum pid_type type)
+struct task_struct *pid_task(struct pid *pid, enum pid_type type)
{
struct task_struct *result = NULL;
if (pid) {
return pid;
}
-struct task_struct *fastcall get_pid_task(struct pid *pid, enum pid_type type)
+struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
{
struct task_struct *result;
rcu_read_lock();
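/*
 * Illustrative sketch (editor's addition, not part of the patch):
 * get_pid_task() returns the attached task with its reference count raised
 * (or NULL), so the caller drops it with put_task_struct().  The struct pid
 * is assumed to have been obtained and pinned elsewhere.
 */
#include <linux/pid.h>
#include <linux/sched.h>

static void my_inspect_pid(struct pid *pid)
{
	struct task_struct *task = get_pid_task(pid, PIDTYPE_PID);

	if (task) {
		/* safe to dereference task here */
		put_task_struct(task);
	}
}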
return success;
}
-int fastcall wake_up_process(struct task_struct *p)
+int wake_up_process(struct task_struct *p)
{
return try_to_wake_up(p, TASK_ALL, 0);
}
EXPORT_SYMBOL(wake_up_process);
-int fastcall wake_up_state(struct task_struct *p, unsigned int state)
+int wake_up_state(struct task_struct *p, unsigned int state)
{
return try_to_wake_up(p, state, 0);
}
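/*
 * Illustrative sketch (editor's addition, not part of the patch): the
 * classic sleep/wakeup pairing behind wake_up_process().  The sleeper
 * re-checks its condition to avoid lost wakeups; my_task and my_event are
 * hypothetical, and my_task is assumed to be set to the sleeper's task.
 */
static struct task_struct *my_task;
static int my_event;

static void my_sleeper(void)
{
	set_current_state(TASK_INTERRUPTIBLE);
	while (!my_event) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
}

static void my_waker(void)
{
	my_event = 1;
	wake_up_process(my_task);	/* no-op if the sleeper is already running */
}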
* that must be done for every newly created context, then puts the task
* on the runqueue and wakes it.
*/
-void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
+void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
{
unsigned long flags;
struct rq *rq;
#if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT)
-void fastcall add_preempt_count(int val)
+void add_preempt_count(int val)
{
/*
* Underflow?
}
EXPORT_SYMBOL(add_preempt_count);
-void fastcall sub_preempt_count(int val)
+void sub_preempt_count(int val)
{
/*
* Underflow?
* @nr_exclusive: how many wake-one or wake-many threads to wake up
* @key: is directly passed to the wakeup function
*/
-void fastcall __wake_up(wait_queue_head_t *q, unsigned int mode,
+void __wake_up(wait_queue_head_t *q, unsigned int mode,
int nr_exclusive, void *key)
{
unsigned long flags;
/*
* Same as __wake_up but called with the spinlock in wait_queue_head_t held.
*/
-void fastcall __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
+void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
{
__wake_up_common(q, mode, 1, 0, NULL);
}
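/*
 * Illustrative sketch (editor's addition, not part of the patch): most code
 * reaches __wake_up() through the wake_up*() macros paired with the
 * wait_event*() family.  my_wq and my_cond are hypothetical.
 */
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_wq);
static int my_cond;

static int my_wait(void)
{
	/* sleeps until my_cond is set; returns -ERESTARTSYS if interrupted */
	return wait_event_interruptible(my_wq, my_cond);
}

static void my_signal(void)
{
	my_cond = 1;
	wake_up(&my_wq);	/* ends up in __wake_up() with nr_exclusive == 1 */
}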
*
* On UP it can prevent extra preemption.
*/
-void fastcall
+void
__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
{
unsigned long flags;
/*
* This function must run with irqs disabled!
*/
-inline fastcall void raise_softirq_irqoff(unsigned int nr)
+inline void raise_softirq_irqoff(unsigned int nr)
{
__raise_softirq_irqoff(nr);
wakeup_softirqd();
}
-void fastcall raise_softirq(unsigned int nr)
+void raise_softirq(unsigned int nr)
{
unsigned long flags;
static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec) = { NULL };
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec) = { NULL };
-void fastcall __tasklet_schedule(struct tasklet_struct *t)
+void __tasklet_schedule(struct tasklet_struct *t)
{
unsigned long flags;
EXPORT_SYMBOL(__tasklet_schedule);
-void fastcall __tasklet_hi_schedule(struct tasklet_struct *t)
+void __tasklet_hi_schedule(struct tasklet_struct *t)
{
unsigned long flags;
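/*
 * Illustrative sketch (editor's addition, not part of the patch):
 * __tasklet_schedule() is the slow path behind tasklet_schedule().  A
 * driver typically declares a tasklet and schedules it from its interrupt
 * handler.  Names are hypothetical.
 */
static void my_tasklet_fn(unsigned long data)
{
	/* runs in softirq context and must not sleep */
}

static DECLARE_TASKLET(my_tasklet, my_tasklet_fn, 0);

static void my_kick_bottom_half(void)
{
	/* calls __tasklet_schedule() only if the tasklet is not already pending */
	tasklet_schedule(&my_tasklet);
}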
* init_timer() must be done to a timer prior to calling *any* of the
* other timer functions.
*/
-void fastcall init_timer(struct timer_list *timer)
+void init_timer(struct timer_list *timer)
{
timer->entry.next = NULL;
timer->base = __raw_get_cpu_var(tvec_bases);
}
EXPORT_SYMBOL(init_timer);
-void fastcall init_timer_deferrable(struct timer_list *timer)
+void init_timer_deferrable(struct timer_list *timer)
{
init_timer(timer);
timer_set_deferrable(timer);
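/*
 * Illustrative sketch (editor's addition, not part of the patch):
 * init_timer() only initialises the structure; the caller fills in the
 * expiry function, data and expiry time before mod_timer()/add_timer().
 * Names are hypothetical.
 */
#include <linux/timer.h>

static struct timer_list my_timer;

static void my_timer_fn(unsigned long data)
{
	/* runs in softirq context when the timer expires */
}

static void my_start_timer(void)
{
	init_timer(&my_timer);
	my_timer.function = my_timer_fn;
	my_timer.data = 0;
	mod_timer(&my_timer, jiffies + HZ);	/* fire in roughly one second */
}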
*
* In all cases the return value is guaranteed to be non-negative.
*/
-fastcall signed long __sched schedule_timeout(signed long timeout)
+signed long __sched schedule_timeout(signed long timeout)
{
struct timer_list timer;
unsigned long expire;
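/*
 * Illustrative sketch (editor's addition, not part of the patch):
 * schedule_timeout() only sleeps if the caller has already moved itself out
 * of TASK_RUNNING, and it returns the jiffies remaining (0 on timeout,
 * never negative, as the comment above notes).
 */
static void my_sleep_about_100ms(void)
{
	signed long remaining;

	set_current_state(TASK_INTERRUPTIBLE);
	remaining = schedule_timeout(msecs_to_jiffies(100));
	/* remaining > 0 means we were woken early, e.g. by a signal */
}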
EXPORT_SYMBOL(init_waitqueue_head);
-void fastcall add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
+void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
unsigned long flags;
}
EXPORT_SYMBOL(add_wait_queue);
-void fastcall add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
+void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
unsigned long flags;
}
EXPORT_SYMBOL(add_wait_queue_exclusive);
-void fastcall remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
+void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
unsigned long flags;
* stops them from bleeding out - it would still allow subsequent
* loads to move into the critical region).
*/
-void fastcall
+void
prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
unsigned long flags;
}
EXPORT_SYMBOL(prepare_to_wait);
-void fastcall
+void
prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
unsigned long flags;
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);
-void fastcall finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
+void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
{
unsigned long flags;
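/*
 * Illustrative sketch (editor's addition, not part of the patch): the
 * canonical open-coded wait loop built on prepare_to_wait()/finish_wait()
 * (my_wq and my_cond as in the waitqueue sketch above).
 */
static void my_wait_for_cond(void)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&my_wq, &wait, TASK_UNINTERRUPTIBLE);
		if (my_cond)
			break;
		schedule();
	}
	finish_wait(&my_wq, &wait);
}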
* waiting, the return codes of the actions passed to __wait_on_bit() and
* __wait_on_bit_lock() are honored. Nonzero return codes halt waiting and return.
*/
-int __sched fastcall
+int __sched
__wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
int (*action)(void *), unsigned mode)
{
}
EXPORT_SYMBOL(__wait_on_bit);
-int __sched fastcall out_of_line_wait_on_bit(void *word, int bit,
+int __sched out_of_line_wait_on_bit(void *word, int bit,
int (*action)(void *), unsigned mode)
{
wait_queue_head_t *wq = bit_waitqueue(word, bit);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit);
-int __sched fastcall
+int __sched
__wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
int (*action)(void *), unsigned mode)
{
}
EXPORT_SYMBOL(__wait_on_bit_lock);
-int __sched fastcall out_of_line_wait_on_bit_lock(void *word, int bit,
+int __sched out_of_line_wait_on_bit_lock(void *word, int bit,
int (*action)(void *), unsigned mode)
{
wait_queue_head_t *wq = bit_waitqueue(word, bit);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);
-void fastcall __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
+void __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
{
struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
if (waitqueue_active(wq))
* may need to use a less regular barrier, such as fs/inode.c's smp_mb(),
* because spin_unlock() does not guarantee a memory barrier.
*/
-void fastcall wake_up_bit(void *word, int bit)
+void wake_up_bit(void *word, int bit)
{
__wake_up_bit(bit_waitqueue(word, bit), word, bit);
}
EXPORT_SYMBOL(wake_up_bit);
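/*
 * Illustrative sketch (editor's addition, not part of the patch): a waiter
 * sleeps on a bit with wait_on_bit(); the waker clears the bit, issues the
 * barrier the comment above calls for, then calls wake_up_bit().  In this
 * kernel the caller supplies its own @action; my_flags, MY_BIT and
 * my_bit_wait are hypothetical.
 */
static unsigned long my_flags;
#define MY_BIT 0

static int my_bit_wait(void *word)
{
	schedule();		/* task state was set by __wait_on_bit() */
	return 0;		/* nonzero would abort the wait */
}

static void my_wait_for_bit(void)
{
	wait_on_bit(&my_flags, MY_BIT, my_bit_wait, TASK_UNINTERRUPTIBLE);
}

static void my_release_bit(void)
{
	clear_bit(MY_BIT, &my_flags);
	smp_mb__after_clear_bit();	/* barrier before the waitqueue_active() check */
	wake_up_bit(&my_flags, MY_BIT);
}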
-fastcall wait_queue_head_t *bit_waitqueue(void *word, int bit)
+wait_queue_head_t *bit_waitqueue(void *word, int bit)
{
const int shift = BITS_PER_LONG == 32 ? 5 : 6;
const struct zone *zone = page_zone(virt_to_page(word));
* We queue the work to the CPU on which it was submitted, but there is no
* guarantee that it will be processed by that CPU.
*/
-int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
+int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
int ret = 0;
*
* Returns 0 if @work was already on a queue, non-zero otherwise.
*/
-int fastcall queue_delayed_work(struct workqueue_struct *wq,
+int queue_delayed_work(struct workqueue_struct *wq,
struct delayed_work *dwork, unsigned long delay)
{
timer_stats_timer_set_start_info(&dwork->timer);
* This function used to run the workqueues itself. Now we just wait for the
* helper threads to do it.
*/
-void fastcall flush_workqueue(struct workqueue_struct *wq)
+void flush_workqueue(struct workqueue_struct *wq)
{
const cpumask_t *cpu_map = wq_cpu_map(wq);
int cpu;
*
* This puts a job in the kernel-global workqueue.
*/
-int fastcall schedule_work(struct work_struct *work)
+int schedule_work(struct work_struct *work)
{
return queue_work(keventd_wq, work);
}
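/*
 * Illustrative sketch (editor's addition, not part of the patch):
 * schedule_work() queues onto the kernel-global keventd workqueue; the work
 * function later runs in process context and may sleep.  Names are
 * hypothetical.
 */
#include <linux/workqueue.h>

static void my_work_fn(struct work_struct *work)
{
	/* process context: sleeping, mutexes, etc. are allowed here */
}

static DECLARE_WORK(my_work, my_work_fn);

static void my_defer(void)
{
	schedule_work(&my_work);	/* i.e. queue_work(keventd_wq, &my_work) */
}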
* After waiting for a given time this puts a job in the kernel-global
* workqueue.
*/
-int fastcall schedule_delayed_work(struct delayed_work *dwork,
+int schedule_delayed_work(struct delayed_work *dwork,
unsigned long delay)
{
timer_stats_timer_set_start_info(&dwork->timer);