* the task-switch, and shows up in ret_from_fork in entry.S,
* for example.
*/
-struct task_struct * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
struct thread_struct *prev = &prev_p->thread,
*next = &next_p->thread;
* want to handle. Thus you cannot kill init even with a SIGKILL even by
* mistake.
*/
-int do_signal(struct pt_regs *regs, sigset_t *oldset)
+int fastcall do_signal(struct pt_regs *regs, sigset_t *oldset)
{
siginfo_t info;
int signr;
apic_write_around(APIC_ICR, cfg);
}
-void send_IPI_self(int vector)
+void fastcall send_IPI_self(int vector)
{
__send_IPI_shortcut(APIC_DEST_SELF, vector);
}
#define VM86_REGS_SIZE2 (sizeof(struct kernel_vm86_regs) - VM86_REGS_SIZE1)
struct pt_regs * FASTCALL(save_v86_state(struct kernel_vm86_regs * regs));
-struct pt_regs * save_v86_state(struct kernel_vm86_regs * regs)
+struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
{
struct tss_struct *tss;
struct pt_regs *ret;
}
static void FASTCALL(rx_refill_atomic(struct net_device *ndev));
-static void rx_refill_atomic(struct net_device *ndev)
+static void fastcall rx_refill_atomic(struct net_device *ndev)
{
rx_refill(ndev, GFP_ATOMIC);
}
}
static void FASTCALL(phy_intr(struct net_device *ndev));
-static void phy_intr(struct net_device *ndev)
+static void fastcall phy_intr(struct net_device *ndev)
{
struct ns83820 *dev = PRIV(ndev);
static char *speeds[] = { "10", "100", "1000", "1000(?)", "1000F" };
}
static void FASTCALL(ns83820_rx_kick(struct net_device *ndev));
-static void ns83820_rx_kick(struct net_device *ndev)
+static void fastcall ns83820_rx_kick(struct net_device *ndev)
{
struct ns83820 *dev = PRIV(ndev);
/*if (nr_rx_empty(dev) >= NR_RX_DESC/4)*/ {
*
*/
static void FASTCALL(rx_irq(struct net_device *ndev));
-static void rx_irq(struct net_device *ndev)
+static void fastcall rx_irq(struct net_device *ndev)
{
struct ns83820 *dev = PRIV(ndev);
struct rx_info *info = &dev->rx_info;
/* wait_on_sync_kiocb:
* Waits on the given sync kiocb to complete.
*/
-ssize_t wait_on_sync_kiocb(struct kiocb *iocb)
+ssize_t fastcall wait_on_sync_kiocb(struct kiocb *iocb)
{
while (iocb->ki_users) {
set_current_state(TASK_UNINTERRUPTIBLE);
* go away, they will call put_ioctx and release any pinned memory
* associated with the request (held via struct page * references).
*/
-void exit_aio(struct mm_struct *mm)
+void fastcall exit_aio(struct mm_struct *mm)
{
struct kioctx *ctx = mm->ioctx_list;
mm->ioctx_list = NULL;
* Called when the last user of an aio context has gone away,
* and the struct needs to be freed.
*/
-void __put_ioctx(struct kioctx *ctx)
+void fastcall __put_ioctx(struct kioctx *ctx)
{
unsigned nr_events = ctx->max_reqs;
* req (after submitting it) and aio_complete() freeing the req.
*/
static struct kiocb *FASTCALL(__aio_get_req(struct kioctx *ctx));
-static struct kiocb *__aio_get_req(struct kioctx *ctx)
+static struct kiocb fastcall *__aio_get_req(struct kioctx *ctx)
{
struct kiocb *req = NULL;
struct aio_ring *ring;
* Returns true if this put was the last user of the kiocb,
* false if the request is still in use.
*/
-int aio_put_req(struct kiocb *req)
+int fastcall aio_put_req(struct kiocb *req)
{
struct kioctx *ctx = req->ki_ctx;
int ret;
unuse_mm(ctx->mm);
}
-void kick_iocb(struct kiocb *iocb)
+void fastcall kick_iocb(struct kiocb *iocb)
{
struct kioctx *ctx = iocb->ki_ctx;
* Returns true if this is the last user of the request. The
* only other user of the request can be the cancellation code.
*/
-int aio_complete(struct kiocb *iocb, long res, long res2)
+int fastcall aio_complete(struct kiocb *iocb, long res, long res2)
{
struct kioctx *ctx = iocb->ki_ctx;
struct aio_ring_info *info;
return -EINVAL;
}
-int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
+int fastcall io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
struct iocb *iocb)
{
struct kiocb *req;
}
EXPORT_SYMBOL(wake_up_buffer);
-void unlock_buffer(struct buffer_head *bh)
+void fastcall unlock_buffer(struct buffer_head *bh)
{
/*
* unlock_buffer against a zero-count bh is a bug, if the page
* mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
* mapping->page_lock and the global inode_lock.
*/
-void mark_buffer_dirty(struct buffer_head *bh)
+void fastcall mark_buffer_dirty(struct buffer_head *bh)
{
if (!buffer_uptodate(bh))
buffer_error();
#include <asm/siginfo.h>
#include <asm/uaccess.h>
-void set_close_on_exec(unsigned int fd, int flag)
+void fastcall set_close_on_exec(unsigned int fd, int flag)
{
struct files_struct *files = current->files;
spin_lock(&files->file_lock);
EXPORT_SYMBOL(close_private_file);
-void fput(struct file *file)
+void fastcall fput(struct file *file)
{
if (atomic_dec_and_test(&file->f_count))
__fput(file);
/* __fput is called from task context when aio completion releases the last
 * use of a struct file *. Do not use otherwise.
*/
-void __fput(struct file *file)
+void fastcall __fput(struct file *file)
{
struct dentry *dentry = file->f_dentry;
struct vfsmount *mnt = file->f_vfsmnt;
mntput(mnt);
}
-struct file *fget(unsigned int fd)
+struct file fastcall *fget(unsigned int fd)
{
struct file *file;
struct files_struct *files = current->files;
* and a flag is returned to be passed to the corresponding fput_light().
* There must not be a cloning between an fget_light/fput_light pair.
*/
-struct file *fget_light(unsigned int fd, int *fput_needed)
+struct file fastcall *fget_light(unsigned int fd, int *fput_needed)
{
struct file *file;
struct files_struct *files = current->files;
*
* We expect 'base' to be positive and a directory.
*/
-int link_path_walk(const char * name, struct nameidata *nd)
+int fastcall link_path_walk(const char * name, struct nameidata *nd)
{
struct path next;
struct inode *inode;
return err;
}
-int path_walk(const char * name, struct nameidata *nd)
+int fastcall path_walk(const char * name, struct nameidata *nd)
{
current->total_link_count = 0;
return link_path_walk(name, nd);
return 1;
}
-int path_lookup(const char *name, unsigned int flags, struct nameidata *nd)
+int fastcall path_lookup(const char *name, unsigned int flags, struct nameidata *nd)
{
nd->last_type = LAST_ROOT; /* if there are only slashes... */
nd->flags = flags;
* that namei follows links, while lnamei does not.
* SMP-safe
*/
-int __user_walk(const char __user *name, unsigned flags, struct nameidata *nd)
+int fastcall __user_walk(const char __user *name, unsigned flags, struct nameidata *nd)
{
char *tmp = getname(name);
int err = PTR_ERR(tmp);
files->next_fd = fd;
}
-void put_unused_fd(unsigned int fd)
+void fastcall put_unused_fd(unsigned int fd)
{
struct files_struct *files = current->files;
spin_lock(&files->file_lock);
* will follow.
*/
-void fd_install(unsigned int fd, struct file * file)
+void fastcall fd_install(unsigned int fd, struct file * file)
{
struct files_struct *files = current->files;
spin_lock(&files->file_lock);
#define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0)))
#define FASTCALL(x) x __attribute__((regparm(3)))
+#define fastcall __attribute__((regparm(3)))
#ifdef CONFIG_X86_ALIGNMENT_16
#define __ALIGN .align 16,0x90
extern void smp_flush_tlb(void);
extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
-extern void smp_send_reschedule(int cpu);
extern void smp_invalidate_rcv(void); /* Process an NMI */
extern void (*mtrr_hook) (void);
extern void zap_low_mappings (void);
#define __ASM_LINKAGE_H
#define FASTCALL(x) x __attribute__((regparm(3)))
+#define fastcall __attribute__((regparm(3)))
#endif
#ifndef FASTCALL
#define FASTCALL(x) x
+#define fastcall
#endif
#endif
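
For reference, a minimal sketch (hypothetical code, not part of the patch) of how the two spellings pair up: FASTCALL() wraps a prototype, the bare fastcall qualifier marks the matching definition, and on i386 both expand to __attribute__((regparm(3))) so up to three arguments are passed in registers; on architectures without the i386 linkage.h definitions both expand to nothing.

/* Hypothetical example of the convention used throughout this patch:
 * the declaration and the definition carry the same calling convention. */
extern int FASTCALL(example_add(int a, int b));	/* prototype form */

int fastcall example_add(int a, int b)		/* definition form */
{
	/* with regparm(3), a and b arrive in registers rather than on the stack */
	return a + b;
}

If only one side carried the attribute, callers and callee would disagree about where the arguments live; this is why each definition below gains fastcall wherever its prototype already used FASTCALL().
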
extern int FASTCALL(wake_up_state(struct task_struct * tsk, unsigned int state));
extern int FASTCALL(wake_up_process(struct task_struct * tsk));
#ifdef CONFIG_SMP
- extern void FASTCALL(kick_process(struct task_struct * tsk));
+ extern void kick_process(struct task_struct *tsk);
#else
static inline void kick_process(struct task_struct *tsk) { }
#endif
/*
* sends a 'reschedule' event to another CPU:
*/
-extern void FASTCALL(smp_send_reschedule(int cpu));
+extern void smp_send_reschedule(int cpu);
/*
}
}
-void put_files_struct(struct files_struct *files)
+void fastcall put_files_struct(struct files_struct *files)
{
if (atomic_dec_and_test(&files->count)) {
close_files(files);
do_exit((error_code&0xff)<<8);
}
-task_t *next_thread(task_t *p)
+task_t fastcall *next_thread(task_t *p)
{
struct pid_link *link = p->pids + PIDTYPE_TGID;
struct list_head *tmp, *head = &link->pidptr->task_list;
free_task(tsk);
}
-void add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
+void fastcall add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
{
unsigned long flags;
EXPORT_SYMBOL(add_wait_queue);
-void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait)
+void fastcall add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait)
{
unsigned long flags;
EXPORT_SYMBOL(add_wait_queue_exclusive);
-void remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
+void fastcall remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
{
unsigned long flags;
* stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
*/
-void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
+void fastcall prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
unsigned long flags;
EXPORT_SYMBOL(prepare_to_wait);
-void
+void fastcall
prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
unsigned long flags;
EXPORT_SYMBOL(prepare_to_wait_exclusive);
-void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
+void fastcall finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
{
unsigned long flags;
* is dropped: either by a lazy thread or by
* mmput. Free the page directory and the mm.
*/
-void __mmdrop(struct mm_struct *mm)
+void fastcall __mmdrop(struct mm_struct *mm)
{
BUG_ON(mm == &init_mm);
mm_free_pgd(mm);
static spinlock_t pidmap_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
-inline void free_pidmap(int pid)
+fastcall void free_pidmap(int pid)
{
pidmap_t *map = pidmap_array + pid / BITS_PER_PAGE;
int offset = pid & BITS_PER_PAGE_MASK;
return -1;
}
-inline struct pid *find_pid(enum pid_type type, int nr)
+fastcall struct pid *find_pid(enum pid_type type, int nr)
{
struct list_head *elem, *bucket = &pid_hash[type][pid_hashfn(nr)];
struct pid *pid;
return NULL;
}
-void link_pid(task_t *task, struct pid_link *link, struct pid *pid)
+void fastcall link_pid(task_t *task, struct pid_link *link, struct pid *pid)
{
atomic_inc(&pid->count);
list_add_tail(&link->pid_chain, &pid->task_list);
link->pidptr = pid;
}
-int attach_pid(task_t *task, enum pid_type type, int nr)
+int fastcall attach_pid(task_t *task, enum pid_type type, int nr)
{
struct pid *pid = find_pid(type, nr);
__detach_pid(task, type);
}
-void detach_pid(task_t *task, enum pid_type type)
+void fastcall detach_pid(task_t *task, enum pid_type type)
{
int nr = __detach_pid(task, type);
 * Read-side critical sections that use call_rcu() for updates must
* be protected by rcu_read_lock()/rcu_read_unlock().
*/
-void call_rcu(struct rcu_head *head, void (*func)(void *arg), void *arg)
+void fastcall call_rcu(struct rcu_head *head, void (*func)(void *arg), void *arg)
{
int cpu;
unsigned long flags;
#include <linux/suspend.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
+#include <linux/smp.h>
#include <linux/timer.h>
#include <linux/rcupdate.h>
#include <linux/cpu.h>
return success;
}
-int wake_up_process(task_t * p)
+int fastcall wake_up_process(task_t * p)
{
return try_to_wake_up(p, TASK_STOPPED |
TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
EXPORT_SYMBOL(wake_up_process);
-int wake_up_state(task_t *p, unsigned int state)
+int fastcall wake_up_state(task_t *p, unsigned int state)
{
return try_to_wake_up(p, state, 0);
}
* Perform scheduler related setup for a newly forked process p.
* p is forked by current.
*/
-void sched_fork(task_t *p)
+void fastcall sched_fork(task_t *p)
{
/*
* We mark the process as running here, but have not actually
* This function will do some initial scheduler statistics housekeeping
* that must be done for every newly created process.
*/
-void wake_up_forked_process(task_t * p)
+void fastcall wake_up_forked_process(task_t * p)
{
unsigned long flags;
runqueue_t *rq = task_rq_lock(current, &flags);
* artificially, because any timeslice recovered here
* was given away by the parent in the first place.)
*/
-void sched_exit(task_t * p)
+void fastcall sched_exit(task_t * p)
{
unsigned long flags;
runqueue_t *rq;
* @mode: which threads
* @nr_exclusive: how many wake-one or wake-many threads to wake up
*/
-void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
+void fastcall __wake_up(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
{
unsigned long flags;
/*
* Same as __wake_up but called with the spinlock in wait_queue_head_t held.
*/
-void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
+void fastcall __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
{
__wake_up_common(q, mode, 1, 0);
}
*
* On UP it can prevent extra preemption.
*/
-void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
+void fastcall __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
{
unsigned long flags;
EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
-void complete(struct completion *x)
+void fastcall complete(struct completion *x)
{
unsigned long flags;
EXPORT_SYMBOL(complete);
-void complete_all(struct completion *x)
+void fastcall complete_all(struct completion *x)
{
unsigned long flags;
spin_unlock_irqrestore(&x->wait.lock, flags);
}
-void wait_for_completion(struct completion *x)
+void fastcall wait_for_completion(struct completion *x)
{
might_sleep();
spin_lock_irq(&x->wait.lock);
__remove_wait_queue(q, &wait); \
spin_unlock_irqrestore(&q->lock, flags);
-void interruptible_sleep_on(wait_queue_head_t *q)
+void fastcall interruptible_sleep_on(wait_queue_head_t *q)
{
SLEEP_ON_VAR
EXPORT_SYMBOL(interruptible_sleep_on);
-long interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
+long fastcall interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
{
SLEEP_ON_VAR
EXPORT_SYMBOL(interruptible_sleep_on_timeout);
-void sleep_on(wait_queue_head_t *q)
+void fastcall sleep_on(wait_queue_head_t *q)
{
SLEEP_ON_VAR
EXPORT_SYMBOL(sleep_on);
-long sleep_on_timeout(wait_queue_head_t *q, long timeout)
+long fastcall sleep_on_timeout(wait_queue_head_t *q, long timeout)
{
SLEEP_ON_VAR
#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
-inline void recalc_sigpending_tsk(struct task_struct *t)
+fastcall void recalc_sigpending_tsk(struct task_struct *t)
{
if (t->signal->group_stop_count > 0 ||
PENDING(&t->pending, &t->blocked) ||
/*
* This function must run with irqs disabled!
*/
-inline void raise_softirq_irqoff(unsigned int nr)
+inline fastcall void raise_softirq_irqoff(unsigned int nr)
{
__raise_softirq_irqoff(nr);
EXPORT_SYMBOL(raise_softirq_irqoff);
-void raise_softirq(unsigned int nr)
+void fastcall raise_softirq(unsigned int nr)
{
unsigned long flags;
static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec) = { NULL };
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec) = { NULL };
-void __tasklet_schedule(struct tasklet_struct *t)
+void fastcall __tasklet_schedule(struct tasklet_struct *t)
{
unsigned long flags;
EXPORT_SYMBOL(__tasklet_schedule);
-void __tasklet_hi_schedule(struct tasklet_struct *t)
+void fastcall __tasklet_hi_schedule(struct tasklet_struct *t)
{
unsigned long flags;
*
* In all cases the return value is guaranteed to be non-negative.
*/
-signed long schedule_timeout(signed long timeout)
+fastcall signed long schedule_timeout(signed long timeout)
{
struct timer_list timer;
unsigned long expire;
 * We queue the work on the CPU it was submitted on, but there is no
* guarantee that it will be processed by that CPU.
*/
-int queue_work(struct workqueue_struct *wq, struct work_struct *work)
+int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
int ret = 0, cpu = get_cpu();
__queue_work(wq->cpu_wq + smp_processor_id(), work);
}
-int queue_delayed_work(struct workqueue_struct *wq,
+int fastcall queue_delayed_work(struct workqueue_struct *wq,
struct work_struct *work, unsigned long delay)
{
int ret = 0;
* This function used to run the workqueues itself. Now we just wait for the
* helper threads to do it.
*/
-void flush_workqueue(struct workqueue_struct *wq)
+void fastcall flush_workqueue(struct workqueue_struct *wq)
{
struct cpu_workqueue_struct *cwq;
int cpu;
static struct workqueue_struct *keventd_wq;
-int schedule_work(struct work_struct *work)
+int fastcall schedule_work(struct work_struct *work)
{
return queue_work(keventd_wq, work);
}
-int schedule_delayed_work(struct work_struct *work, unsigned long delay)
+int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay)
{
return queue_delayed_work(keventd_wq, work, delay);
}
/*
* initialise the semaphore
*/
-void init_rwsem(struct rw_semaphore *sem)
+void fastcall init_rwsem(struct rw_semaphore *sem)
{
sem->activity = 0;
spin_lock_init(&sem->wait_lock);
/*
* get a read lock on the semaphore
*/
-void __down_read(struct rw_semaphore *sem)
+void fastcall __down_read(struct rw_semaphore *sem)
{
struct rwsem_waiter waiter;
struct task_struct *tsk;
/*
* trylock for reading -- returns 1 if successful, 0 if contention
*/
-int __down_read_trylock(struct rw_semaphore *sem)
+int fastcall __down_read_trylock(struct rw_semaphore *sem)
{
int ret = 0;
rwsemtrace(sem,"Entering __down_read_trylock");
* get a write lock on the semaphore
* - note that we increment the waiting count anyway to indicate an exclusive lock
*/
-void __down_write(struct rw_semaphore *sem)
+void fastcall __down_write(struct rw_semaphore *sem)
{
struct rwsem_waiter waiter;
struct task_struct *tsk;
/*
* trylock for writing -- returns 1 if successful, 0 if contention
*/
-int __down_write_trylock(struct rw_semaphore *sem)
+int fastcall __down_write_trylock(struct rw_semaphore *sem)
{
int ret = 0;
rwsemtrace(sem,"Entering __down_write_trylock");
/*
* release a read lock on the semaphore
*/
-void __up_read(struct rw_semaphore *sem)
+void fastcall __up_read(struct rw_semaphore *sem)
{
rwsemtrace(sem,"Entering __up_read");
/*
* release a write lock on the semaphore
*/
-void __up_write(struct rw_semaphore *sem)
+void fastcall __up_write(struct rw_semaphore *sem)
{
rwsemtrace(sem,"Entering __up_write");
* downgrade a write lock into a read lock
* - just wake up any readers at the front of the queue
*/
-void __downgrade_write(struct rw_semaphore *sem)
+void fastcall __downgrade_write(struct rw_semaphore *sem)
{
rwsemtrace(sem,"Entering __downgrade_write");
/*
* wait for the read lock to be granted
*/
-struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem)
+struct rw_semaphore fastcall *rwsem_down_read_failed(struct rw_semaphore *sem)
{
struct rwsem_waiter waiter;
/*
* wait for the write lock to be granted
*/
-struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem)
+struct rw_semaphore fastcall *rwsem_down_write_failed(struct rw_semaphore *sem)
{
struct rwsem_waiter waiter;
* handle waking up a waiter on the semaphore
* - up_read has decremented the active part of the count if we come here
*/
-struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
+struct rw_semaphore fastcall *rwsem_wake(struct rw_semaphore *sem)
{
rwsemtrace(sem,"Entering rwsem_wake");
* - caller incremented waiting part of count, and discovered it to be still negative
* - just wake up any readers at the front of the queue
*/
-struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
+struct rw_semaphore fastcall *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
rwsemtrace(sem,"Entering rwsem_downgrade_wake");
return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
}
-void wait_on_page_bit(struct page *page, int bit_nr)
+void fastcall wait_on_page_bit(struct page *page, int bit_nr)
{
wait_queue_head_t *waitqueue = page_waitqueue(page);
DEFINE_WAIT(wait);
* the clear_bit and the read of the waitqueue (to avoid SMP races with a
* parallel wait_on_page_locked()).
*/
-void unlock_page(struct page *page)
+void fastcall unlock_page(struct page *page)
{
wait_queue_head_t *waitqueue = page_waitqueue(page);
smp_mb__before_clear_bit();
* chances are that on the second loop, the block layer's plug list is empty,
* so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
*/
-void __lock_page(struct page *page)
+void fastcall __lock_page(struct page *page)
{
wait_queue_head_t *wqh = page_waitqueue(page);
DEFINE_WAIT(wait);
* and schedules an I/O to read in its contents from disk.
*/
static int FASTCALL(page_cache_read(struct file * file, unsigned long offset));
-static int page_cache_read(struct file * file, unsigned long offset)
+static int fastcall page_cache_read(struct file * file, unsigned long offset)
{
struct address_space *mapping = file->f_mapping;
struct page *page;
return vaddr;
}
-void *kmap_high(struct page *page)
+void fastcall *kmap_high(struct page *page)
{
unsigned long vaddr;
EXPORT_SYMBOL(kmap_high);
-void kunmap_high(struct page *page)
+void fastcall kunmap_high(struct page *page)
{
unsigned long vaddr;
unsigned long nr;
} while (--nr);
}
-pte_t * pte_alloc_map(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
+pte_t fastcall * pte_alloc_map(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
{
if (!pmd_present(*pmd)) {
struct page *new;
return pte_offset_map(pmd, address);
}
-pte_t * pte_alloc_kernel(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
+pte_t fastcall * pte_alloc_kernel(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
{
if (!pmd_present(*pmd)) {
pte_t *new;
* On a two-level page table, this ends up actually being entirely
* optimized away.
*/
-pmd_t *__pmd_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+pmd_t fastcall *__pmd_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
{
pmd_t *new;
* Free a 0-order page
*/
static void FASTCALL(free_hot_cold_page(struct page *page, int cold));
-static void free_hot_cold_page(struct page *page, int cold)
+static void fastcall free_hot_cold_page(struct page *page, int cold)
{
struct zone *zone = page_zone(page);
struct per_cpu_pages *pcp;
put_cpu();
}
-void free_hot_page(struct page *page)
+void fastcall free_hot_page(struct page *page)
{
free_hot_cold_page(page, 0);
}
-void free_cold_page(struct page *page)
+void fastcall free_cold_page(struct page *page)
{
free_hot_cold_page(page, 1);
}
* sized machine, GFP_HIGHMEM and GFP_KERNEL requests basically leave the DMA
* zone untouched.
*/
-struct page *
+struct page * fastcall
__alloc_pages(unsigned int gfp_mask, unsigned int order,
struct zonelist *zonelist)
{
/*
* Common helper functions.
*/
-unsigned long __get_free_pages(unsigned int gfp_mask, unsigned int order)
+fastcall unsigned long __get_free_pages(unsigned int gfp_mask, unsigned int order)
{
struct page * page;
EXPORT_SYMBOL(__get_free_pages);
-unsigned long get_zeroed_page(unsigned int gfp_mask)
+fastcall unsigned long get_zeroed_page(unsigned int gfp_mask)
{
struct page * page;
free_hot_cold_page(pvec->pages[i], pvec->cold);
}
-void __free_pages(struct page *page, unsigned int order)
+fastcall void __free_pages(struct page *page, unsigned int order)
{
if (!PageReserved(page) && put_page_testzero(page)) {
if (order == 0)
EXPORT_SYMBOL(__free_pages);
-void free_pages(unsigned long addr, unsigned int order)
+fastcall void free_pages(unsigned long addr, unsigned int order)
{
if (addr != 0) {
BUG_ON(!virt_addr_valid(addr));
* If the page has a single-entry pte_chain, collapse that back to a PageDirect
* representation. This way, it's only done under memory pressure.
*/
-int page_referenced(struct page * page)
+int fastcall page_referenced(struct page * page)
{
struct pte_chain *pc;
int referenced = 0;
* Add a new pte reverse mapping to a page.
* The caller needs to hold the mm->page_table_lock.
*/
-struct pte_chain *
+struct pte_chain * fastcall
page_add_rmap(struct page *page, pte_t *ptep, struct pte_chain *pte_chain)
{
pte_addr_t pte_paddr = ptep_to_paddr(ptep);
* the page.
* Caller needs to hold the mm->page_table_lock.
*/
-void page_remove_rmap(struct page *page, pte_t *ptep)
+void fastcall page_remove_rmap(struct page *page, pte_t *ptep)
{
pte_addr_t pte_paddr = ptep_to_paddr(ptep);
struct pte_chain *pc;
* mm->page_table_lock try_to_unmap_one(), trylock
*/
static int FASTCALL(try_to_unmap_one(struct page *, pte_addr_t));
-static int try_to_unmap_one(struct page * page, pte_addr_t paddr)
+static int fastcall try_to_unmap_one(struct page * page, pte_addr_t paddr)
{
pte_t *ptep = rmap_ptep_map(paddr);
unsigned long address = ptep_to_address(ptep);
* SWAP_AGAIN - we missed a trylock, try again later
* SWAP_FAIL - the page is unswappable
*/
-int try_to_unmap(struct page * page)
+int fastcall try_to_unmap(struct page * page)
{
struct pte_chain *pc, *next_pc, *start;
int ret = SWAP_SUCCESS;
*
* Currently only used for dentry validation.
*/
-int kmem_ptr_validate(kmem_cache_t *cachep, void *ptr)
+int fastcall kmem_ptr_validate(kmem_cache_t *cachep, void *ptr)
{
unsigned long addr = (unsigned long) ptr;
unsigned long min_addr = PAGE_OFFSET;
/*
* FIXME: speed this up?
*/
-void activate_page(struct page *page)
+void fastcall activate_page(struct page *page)
{
struct zone *zone = page_zone(page);
* inactive,referenced -> active,unreferenced
* active,unreferenced -> active,referenced
*/
-void mark_page_accessed(struct page *page)
+void fastcall mark_page_accessed(struct page *page)
{
if (!PageActive(page) && PageReferenced(page) && PageLRU(page)) {
activate_page(page);
static DEFINE_PER_CPU(struct pagevec, lru_add_pvecs) = { 0, };
static DEFINE_PER_CPU(struct pagevec, lru_add_active_pvecs) = { 0, };
-void lru_cache_add(struct page *page)
+void fastcall lru_cache_add(struct page *page)
{
struct pagevec *pvec = &get_cpu_var(lru_add_pvecs);
put_cpu_var(lru_add_pvecs);
}
-void lru_cache_add_active(struct page *page)
+void fastcall lru_cache_add_active(struct page *page)
{
struct pagevec *pvec = &get_cpu_var(lru_add_active_pvecs);
* This path almost never happens for VM activity - pages are normally
* freed via pagevecs. But it gets used by networking.
*/
-void __page_cache_release(struct page *page)
+void fastcall __page_cache_release(struct page *page)
{
unsigned long flags;
struct zone *zone = page_zone(page);
return len;
}
-void __rfcomm_dlc_throttle(struct rfcomm_dlc *d)
+void fastcall __rfcomm_dlc_throttle(struct rfcomm_dlc *d)
{
BT_DBG("dlc %p state %ld", d, d->state);
rfcomm_schedule(RFCOMM_SCHED_TX);
}
-void __rfcomm_dlc_unthrottle(struct rfcomm_dlc *d)
+void fastcall __rfcomm_dlc_unthrottle(struct rfcomm_dlc *d)
{
BT_DBG("dlc %p state %ld", d, d->state);