Locking was added where necessary.
All processors take timer interrupts, but only CPU 0 runs the timer
IRQ handler. The others just call update_process_times to keep the
accounting straight.
The timer interrupt is blocked along with the other signals.
errno);
}
+/* Called only by the tracing thread during initialization */
+
void setup_tracer_winch(void)
{
int err;
MODULE_LICENSE("GPL");
+/* Locked by the BKL in harddog_open and harddog_release */
static int timer_alive;
-
static int harddog_in_fd = -1;
static int harddog_out_fd = -1;
int err;
char *sock = NULL;
+ lock_kernel();
if(timer_alive)
return -EBUSY;
#ifdef CONFIG_HARDDOG_NOWAYOUT
if(err) return(err);
timer_alive = 1;
+ unlock_kernel();
return 0;
}
#include "init.h"
#include "hostaudio.h"
+/* Only changed from linux_main at boot time */
char *dsp = HOSTAUDIO_DEV_DSP;
char *mixer = HOSTAUDIO_DEV_MIXER;
i = minor(tty->device) - tty->driver.minor_start;
line = &lines[i];
+ down(&line->sem);
if(line->head != line->tail){
local_irq_save(flags);
buffer_data(line, buf, len);
err = flush_buffer(line);
local_irq_restore(flags);
- if(err <= 0) return(len);
+ if(err <= 0)
+ goto out;
}
else {
n = write_chan(&line->chan_list, buf, len,
line->driver->write_irq);
- if(n < 0) return(n);
- if(n < len) buffer_data(line, buf + n, len - n);
+ if(n < 0){
+ len = n;
+ goto out;
+ }
+ if(n < len)
+ buffer_data(line, buf + n, len - n);
}
+ out:
+ up(&line->sem);
return(len);
}
else n = minor(tty->device) - tty->driver.minor_start;
line = &lines[n];
+ down(&line->sem);
line->count--;
/* I don't like this, but I can't think of anything better. What's
line->tty = NULL;
if(line->count == 0)
line_disable(line, -1);
+ up(&line->sem);
}
void close_lines(struct line *lines, int nlines)
reactivate_fd(winch->fd, WINCH_IRQ);
}
+DECLARE_MUTEX(winch_handler_sem);
LIST_HEAD(winch_handlers);
void register_winch_irq(int fd, int tty_fd, int pid, void *line)
{
struct winch *winch;
+ down(&winch_handler_sem);
winch = kmalloc(sizeof(*winch), GFP_KERNEL);
if(winch == NULL){
printk("register_winch_irq - kmalloc failed\n");
- return;
+ goto out;
}
*winch = ((struct winch) { list : LIST_HEAD_INIT(winch->list),
fd : fd,
SA_INTERRUPT | SA_SHIRQ | SA_SAMPLE_RANDOM,
"winch", winch) < 0)
printk("register_winch_irq - failed to register IRQ\n");
+ out:
+ up(&winch_handler_sem);
}
static void winch_cleanup(void)
priority: 0,
};
+/* Safe without explicit locking for now. Tasklets provide their own
+ * locking, and the interrupt handler is safe because it can't interrupt
+ * itself and it can only happen on CPU 0.
+ */
+
LIST_HEAD(mc_requests);
void mc_work_proc(void *unused)
int done;
do {
- save_flags(flags);
+ local_save_flags(flags);
req = list_entry(mc_requests.next, struct mconsole_entry,
list);
list_del(&req->list);
done = list_empty(&mc_requests);
- restore_flags(flags);
+ local_irq_restore(flags);
req->request.cmd->handler(&req->request);
kfree(req);
} while(!done);
mconsole_reply(req, "", 0, 0);
}
+/* This list is populated by __initcall routines. */
+
LIST_HEAD(mconsole_devices);
void mconsole_register_dev(struct mc_device *new)
}
#endif
-static char *notify_socket = NULL;
+/* Changed by mconsole_setup, which is __setup, and called before SMP is
+ * active.
+ */
+static char *notify_socket = NULL;
int mconsole_init(void)
{
return(0);
}
+static spinlock_t notify_spinlock = SPIN_LOCK_UNLOCKED;
+
+/* Serializes the lazy creation of notify_sock in mconsole_notify */
+void lock_notify(void)
+{
+ spin_lock(&notify_spinlock);
+}
+
+void unlock_notify(void)
+{
+ spin_unlock(&notify_spinlock);
+}
+
__initcall(create_proc_mconsole);
#define NOTIFY "=notify:"
{ "go", mconsole_go, 1 },
};
+/* Initialized in mconsole_init, which is an initcall */
char mconsole_socket_name[256];
int mconsole_reply_v0(struct mc_request *req, char *reply)
{
struct sockaddr_un target;
struct mconsole_notify packet;
- int n, err;
+ int n, err = 0;
+ lock_notify();
if(notify_sock < 0){
notify_sock = socket(PF_UNIX, SOCK_DGRAM, 0);
if(notify_sock < 0){
printk("mconsole_notify - socket failed, errno = %d\n",
errno);
- return(-errno);
+ err = -errno;
}
}
+ unlock_notify();
+
+ if(err)
+ return(err);
target.sun_family = AF_UNIX;
strcpy(target.sun_path, sock_name);
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/init.h>
+#include <linux/smp_lock.h>
#include <asm/uaccess.h>
#include <asm/irq.h>
-#include <asm/smplock.h>
#include <asm/pgtable.h>
#include "mem_user.h"
#include "user_util.h"
+/* These are set in mmapper_init, which is called at boot time */
static unsigned long mmapper_size;
static unsigned long p_buf = 0;
static char *v_buf = NULL;
#include "init.h"
#include "irq_user.h"
+static spinlock_t opened_lock = SPIN_LOCK_UNLOCKED;
LIST_HEAD(opened);
static int uml_net_rx(struct net_device *dev)
lp->tl.data = (unsigned long) &lp->user;
netif_start_queue(dev);
+ spin_lock(&opened_lock);
list_add(&lp->list, &opened);
+ spin_unlock(&opened_lock);
MOD_INC_USE_COUNT;
out:
spin_unlock(&lp->lock);
free_irq(dev->irq, dev);
if(lp->close != NULL) (*lp->close)(lp->fd, &lp->user);
lp->fd = -1;
+ spin_lock(&opened_lock);
list_del(&lp->list);
-
+ spin_unlock(&opened_lock);
+
MOD_DEC_USE_COUNT;
spin_unlock(&lp->lock);
return 0;
#endif
}
+static spinlock_t devices_lock = SPIN_LOCK_UNLOCKED;
static struct list_head devices = LIST_HEAD_INIT(devices);
static int eth_configure(int n, void *init, char *mac,
return(1);
}
+ spin_lock(&devices_lock);
list_add(&device->list, &devices);
+ spin_unlock(&devices_lock);
+
device->index = n;
size = transport->private_size + sizeof(struct uml_net_private) +
struct uml_net *device;
struct list_head *ele;
+ spin_lock(&devices_lock);
list_for_each(ele, &devices){
device = list_entry(ele, struct uml_net, list);
if(device->index == n)
- return(device);
+ goto out;
}
- return(NULL);
+ device = NULL;
+ out:
+ spin_unlock(&devices_lock);
+ return(device);
}
static int eth_parse(char *str, int *index_out, char **str_out)
int index;
};
+/* Filled in at boot time. Will need locking if the transports become
+ * modular.
+ */
struct list_head transports = LIST_HEAD_INIT(transports);
+/* Filled in during early boot */
struct list_head eth_cmd_line = LIST_HEAD_INIT(eth_cmd_line);
static int check_transport(struct transport *transport, char *eth, int n,
up(&conn->port->sem);
}
-struct list_head ports = LIST_HEAD_INIT(ports);
-
static void port_interrupt(int irq, void *data, struct pt_regs *regs)
{
struct port_list *port = data;
reactivate_fd(port->fd, ACCEPT_IRQ);
}
+DECLARE_MUTEX(ports_sem);
+struct list_head ports = LIST_HEAD_INIT(ports);
+
void *port_data(int port_num)
{
struct list_head *ele;
struct port_dev *dev;
int fd;
+ down(&ports_sem);
list_for_each(ele, &ports){
port = list_entry(ele, struct port_list, list);
if(port->port == port_num) goto found;
port = kmalloc(sizeof(struct port_list), GFP_KERNEL);
if(port == NULL){
printk(KERN_ERR "Allocation of port list failed\n");
- return(NULL);
+ goto out;
}
fd = port_listen_fd(port_num);
dev = kmalloc(sizeof(struct port_dev), GFP_KERNEL);
if(dev == NULL){
printk(KERN_ERR "Allocation of port device entry failed\n");
- return(NULL);
+ goto out;
}
*dev = ((struct port_dev) { port : port,
fd : -1,
helper_pid : -1 });
+ up(&ports_sem);
return(dev);
out_free:
kfree(port);
out_close:
os_close_file(fd);
+ out:
+ up(&ports_sem);
return(NULL);
}
symlink_to : "tts",
};
-static struct line serial_lines[NR_PORTS] =
+/* The array is initialized by line_init, which is an initcall. The
+ * individual elements are protected by individual semaphores.
+ */
+static struct line serial_lines[NR_PORTS] =
{ [0 ... NR_PORTS - 1] = LINE_INIT(CONFIG_SSL_CHAN, &driver) };
static struct lines lines = LINES_INIT(NR_PORTS);
#define MAX_TTYS (8)
+/* Referenced only by tty_driver below - presumably it's locked correctly
+ * by the tty driver.
+ */
+
static struct tty_driver console_driver;
+static int console_refcount = 0;
+
static struct chan_ops init_console_ops = {
init : NULL,
open : NULL,
static struct lines console_lines = LINES_INIT(MAX_TTYS);
+/* The array is initialized by line_init, which is an initcall. The
+ * individual elements are protected by individual semaphores.
+ */
struct line vts[MAX_TTYS] = { LINE_INIT(CONFIG_CON_ZERO_CHAN, &driver),
[ 1 ... MAX_TTYS - 1 ] =
LINE_INIT(CONFIG_CON_CHAN, &driver) };
#include "linux/vmalloc.h"
#include "linux/blkpg.h"
#include "linux/genhd.h"
+#include "linux/spinlock.h"
#include "asm/segment.h"
#include "asm/uaccess.h"
#include "asm/irq.h"
#include "2_5compat.h"
#include "os.h"
-static spinlock_t ubd_lock;
+static spinlock_t ubd_io_lock = SPIN_LOCK_UNLOCKED;
+static spinlock_t ubd_lock = SPIN_LOCK_UNLOCKED;
+
static void (*do_ubd)(void);
static int ubd_open(struct inode * inode, struct file * filp);
.revalidate = ubd_revalidate,
};
+/* Protected by the queue_lock */
static request_queue_t *ubd_queue;
+/* Protected by ubd_lock */
static int fake_major = 0;
+
static struct gendisk *ubd_gendisk[MAX_DEV];
static struct gendisk *fake_gendisk[MAX_DEV];
#define OPEN_FLAGS ((struct openflags) { .r = 1, .w = 1, .s = 0, .c = 0 })
#endif
+/* Not protected - changed only in ubd_setup_common and then only to
+ * to enable O_SYNC.
+ */
static struct openflags global_openflags = OPEN_FLAGS;
struct cow {
char name[64];
if(!fake_ide) return;
+
if(proc_ide_root == NULL) make_proc_ide();
+
dir = proc_mkdir(dev_name, proc_ide);
ent = create_proc_entry("media", S_IFREG|S_IRUGO, dir);
if(!ent) return;
{
struct openflags flags = global_openflags;
char *backing_file;
- int n;
+ int n, err;
if(index_out) *index_out = -1;
n = *str++;
return(1);
}
- fake_major = major;
+ err = 1;
+ spin_lock(&ubd_lock);
+ if(!fake_major_allowed){
+ printk(KERN_ERR "Can't assign a fake major twice\n");
+ goto out1;
+ }
+
+ fake_major = major;
fake_major_allowed = 0;
printk(KERN_INFO "Setting extra ubd major number to %d\n",
major);
- return(0);
+ err = 0;
+ out1:
+ spin_unlock(&ubd_lock);
+ return(err);
}
if(n < '0'){
return(1);
}
+ err = 1;
+ spin_lock(&ubd_lock);
+
if(ubd_dev[n].file != NULL){
printk(KERN_ERR "ubd_setup : device already configured\n");
- return(1);
+ goto out2;
}
if(index_out) *index_out = n;
}
if(*str++ != '='){
printk(KERN_ERR "ubd_setup : Expected '='\n");
- return(1);
+ goto out2;
}
+
+ err = 0;
backing_file = strchr(str, ',');
if(backing_file){
*backing_file = '\0';
ubd_dev[n].is_dir = 1;
ubd_dev[n].cow.file = backing_file;
ubd_dev[n].boot_openflags = flags;
- return(0);
+ out2:
+ spin_unlock(&ubd_lock);
+ return(err);
}
static int ubd_setup(char *str)
static void do_ubd_request(request_queue_t * q);
+/* Only changed by ubd_init, which is an initcall. */
int thread_fd = -1;
+/* Changed by ubd_handler, which is serialized because interrupts only
+ * happen on CPU 0.
+ */
int intr_count = 0;
static void ubd_finish(int error)
int nsect;
if(error){
+ spin_lock(&ubd_io_lock);
end_request(CURRENT, 0);
+ spin_unlock(&ubd_io_lock);
return;
}
nsect = CURRENT->current_nr_sectors;
CURRENT->errors = 0;
CURRENT->nr_sectors -= nsect;
CURRENT->current_nr_sectors = 0;
+ spin_lock(&ubd_io_lock);
end_request(CURRENT, 1);
+ spin_unlock(&ubd_io_lock);
}
static void ubd_handler(void)
if(n != sizeof(req)){
printk(KERN_ERR "Pid %d - spurious interrupt in ubd_handler, "
"errno = %d\n", os_getpid(), -n);
- spin_lock(&ubd_lock);
+ spin_lock(&ubd_io_lock);
end_request(CURRENT, 0);
- spin_unlock(&ubd_lock);
+ spin_unlock(&ubd_io_lock);
return;
}
(req.length != (CURRENT->current_nr_sectors) << 9))
panic("I/O op mismatch");
- spin_lock(&ubd_lock);
ubd_finish(req.error);
reactivate_fd(thread_fd, UBD_IRQ);
do_ubd_request(ubd_queue);
- spin_unlock(&ubd_lock);
}
static void ubd_intr(int irq, void *dev, struct pt_regs *unused)
ubd_handler();
}
+/* Only changed by ubd_init, which is an initcall. */
static int io_pid = -1;
void kill_io_thread(void)
return(os_file_size(file, size_out));
}
+/* Initialized in an initcall, and unchanged thereafter */
devfs_handle_t ubd_dir_handle;
devfs_handle_t ubd_fake_dir_handle;
u64 size;
if (!dev->file)
- return -1;
+ goto out;
disk = alloc_disk();
if (!disk)
MAJOR_NR, n << UBD_SHIFT,
S_IFBLK | S_IRUSR | S_IWUSR | S_IRGRP |S_IWGRP,
&ubd_blops, NULL);
- add_disk(disk);
+ if(real == NULL)
+ goto out;
+ ubd_dev[n].real = real;
+
if (fake_major) {
fake = devfs_register(ubd_fake_dir_handle, name,
DEVFS_FL_REMOVABLE, fake_major,
n << UBD_SHIFT,
S_IFBLK | S_IRUSR | S_IWUSR | S_IRGRP |
S_IWGRP, &ubd_blops, NULL);
- add_disk(fake_disk);
- if(fake == NULL) return(-1);
+ if(fake == NULL)
+ goto out_unregister;
+
ubd_dev[n].fake = fake;
+ add_disk(fake_disk);
}
- if(real == NULL) return(-1);
- ubd_dev[n].real = real;
-
+ add_disk(disk);
make_ide_entries(disk->disk_name);
return(0);
+
+ out_unregister:
+ devfs_unregister(real);
+ ubd_dev[n].real = NULL;
+ out:
+ return(-1);
}
static int ubd_config(char *str)
}
if(n == -1) return(0);
+ spin_lock(&ubd_lock);
err = ubd_add(n);
- if(err){
+ if(err)
ubd_dev[n].file = NULL;
- return(err);
- }
+ spin_unlock(&ubd_lock);
- return(0);
+ return(err);
}
static int ubd_remove(char *str)
{
struct ubd *dev;
- int n;
+ int n, err;
- if(!isdigit(*str)) return(-1);
+ if(!isdigit(*str))
+ return(-1);
n = *str - '0';
- if(n > MAX_DEV) return(-1);
+ if(n > MAX_DEV)
+ return(-1);
dev = &ubd_dev[n];
+
+ err = 0;
+ spin_lock(&ubd_lock);
del_gendisk(ubd_gendisk[n]);
put_disk(ubd_gendisk[n]);
ubd_gendisk[n] = NULL;
put_disk(fake_gendisk[n]);
fake_gendisk[n] = NULL;
}
- if(dev->file == NULL) return(0);
- if(dev->count > 0) return(-1);
- if(dev->real != NULL) devfs_unregister(dev->real);
- if(dev->fake != NULL) devfs_unregister(dev->fake);
+ if(dev->file == NULL)
+ goto out;
+ err = -1;
+ if(dev->count > 0)
+ goto out;
+ if(dev->real != NULL)
+ devfs_unregister(dev->real);
+ if(dev->fake != NULL)
+ devfs_unregister(dev->fake);
*dev = ((struct ubd) DEFAULT_UBD);
- return(0);
+ err = 0;
+ out:
+ spin_unlock(&ubd_lock);
+ return(err);
}
static struct mc_device ubd_mc = {
return -1;
}
ubd_queue = BLK_DEFAULT_QUEUE(MAJOR_NR);
- INIT_QUEUE(ubd_queue, do_ubd_request, &ubd_lock);
+ blk_init_queue(ubd_queue, do_ubd_request, &ubd_io_lock);
elevator_init(ubd_queue, &elevator_noop);
if(fake_major != 0){
char name[sizeof("ubd_nnn\0")];
}
return(0);
error:
- close_fd(dev->fd);
+ os_close_file(dev->fd);
return(err);
}
static int ubd_open(struct inode *inode, struct file *filp)
{
struct ubd *dev;
- int n, offset, err;
+ int n, offset, err = 0;
n = DEVICE_NR(inode->i_rdev);
dev = &ubd_dev[n];
if(n > MAX_DEV)
return -ENODEV;
+
offset = n << UBD_SHIFT;
if(dev->is_dir == 1)
- return(0);
+ goto out;
if(dev->count == 0){
dev->openflags = dev->boot_openflags;
if(err){
printk(KERN_ERR "ubd%d: Can't open \"%s\": "
"errno = %d\n", n, dev->file, -err);
- return(err);
+ goto out;
}
- if(err) return(err);
}
dev->count++;
if((filp->f_mode & FMODE_WRITE) && !dev->openflags.w){
if(--dev->count == 0) ubd_close(dev);
- return -EROFS;
+ err = -EROFS;
}
- return(0);
+ out:
+ return(err);
}
static int ubd_release(struct inode * inode, struct file * file)
if(dev->is_dir){
strcpy(req->buffer, "HOSTFS:");
strcat(req->buffer, dev->file);
+ spin_lock(&ubd_io_lock);
end_request(req, 1);
+ spin_unlock(&ubd_io_lock);
return(1);
}
if((rq_data_dir(req) == WRITE) && !dev->openflags.w){
printk("Write attempted on readonly ubd device %d\n", n);
+ spin_lock(&ubd_io_lock);
end_request(req, 0);
+ spin_unlock(&ubd_io_lock);
return(1);
}
n = minor(rdev) >> UBD_SHIFT;
dev = &ubd_dev[n];
+
+ err = 0;
+ spin_lock(&ubd_lock);
if(dev->is_dir)
- return(0);
+ goto out;
err = ubd_file_size(dev, &size);
if (!err) {
set_capacity(fake_gendisk[n], size / 512);
dev->size = size;
}
-
+ spin_unlock(&ubd_lock);
+ out:
return err;
}
return;
}
+/* Changed in start_io_thread, which is serialized by being called only
+ * from ubd_init, which is an initcall.
+ */
int kernel_fd = -1;
+/* Only changed by the io thread */
int io_count = 0;
int io_thread(void *arg)
return(data);
}
+/* Only changed by xterm_setup, which is a setup */
static char *terminal_emulator = "xterm";
static char *title_switch = "-T";
static char *exec_switch = "-e";
next : NULL \
}
-#define INIT_QUEUE(queue, request, lock) blk_init_queue(queue, request, lock)
-
#define INIT_HARDSECT(arr, maj, sizes)
#define SET_PRI(task) do ; while(0)
extern void init_irq_signals(int on_sigstack);
extern void forward_ipi(int fd, int pid);
extern void free_irq_later(int irq, void *dev_id);
-
+extern int activate_ipi(int fd, int pid);
+extern unsigned long irq_lock(void);
+extern void irq_unlock(unsigned long flags);
#endif
/*
extern void block_signals(void);
extern void unblock_signals(void);
extern void deliver_signals(void *t);
-extern void lock_syscall(void);
-extern void unlock_syscall(void);
-extern void lock_trap(void);
-extern void unlock_trap(void);
-extern void lock_pid(void);
-extern void unlock_pid(void);
+extern int next_syscall_index(int max);
+extern int next_trap_index(int max);
extern void default_idle(void);
extern void finish_fork(void);
extern void paging_init(void);
extern int is_valid_pid(int pid);
extern void free_irq(unsigned int, void *);
extern int um_in_interrupt(void);
-
+extern int cpu(void);
#endif
/*
extern int mconsole_notify(char *sock_name, int type, const void *data,
int len);
extern char *mconsole_notify_socket(void);
+extern void lock_notify(void);
+extern void unlock_notify(void);
#endif
extern int read_sigio_fd(int fd);
extern int add_sigio_fd(int fd, int read);
extern int ignore_sigio_fd(int fd);
+extern void sigio_lock(void);
+extern void sigio_unlock(void);
#endif
#define __TIME_USER_H__
extern void timer(void);
-extern void get_profile_timer(void);
-extern void disable_profile_timer(void);
extern void switch_timers(int to_real);
extern void user_time_init(void);
-extern void set_timers(int set_signal);
extern void idle_sleep(int secs);
+extern void enable_timer(void);
+extern void time_lock(void);
+extern void time_unlock(void);
#endif
#include "tlb.h"
#include "2_5compat.h"
#include "os.h"
+#include "time_user.h"
/* See comment above fork_tramp for why sigstop is defined and used like
* this
{
int sig = sigstop;
- block_signals();
init_new_thread(sig_stack, NULL);
kill(os_getpid(), sig);
return(0);
unprotect_stack((unsigned long) current->thread_info);
os_usr1_process(os_getpid());
+ enable_timer();
free_page(stack);
protect(uml_reserved, high_physmem - uml_reserved, 1, 1, 0, 1);
task_protections((unsigned long) current->thread_info);
#include "linux/proc_fs.h"
#include "asm/uaccess.h"
+/* If read and write race, the read will still atomically read a valid
+ * value.
+ */
int uml_exitcode = 0;
static int read_proc_exitcode(char *page, char **start, off_t off,
os_stop_process(os_getpid());
}
+/* Changed only during early boot */
struct sc_frame signal_frame_sc;
struct sc_frame_raw {
struct arch_frame_data_raw arch;
};
+/* Changed only during early boot */
static struct sc_frame_raw *raw_sc = NULL;
static void sc_handler(int sig, struct sigcontext sc)
return(-1);
}
+/* Changed only during early boot */
struct si_frame signal_frame_si;
struct si_frame_raw {
unsigned long sp;
};
+/* Changed only during early boot */
static struct si_frame_raw *raw_si = NULL;
static void si_handler(int sig, siginfo_t *si)
int fd;
};
+/* Debugging aid, changed only from gdb */
int helper_pause = 0;
static void helper_hup(int sig)
#include "init.h"
#include "os.h"
+/* Changed by uml_initrd_setup, which is a setup */
static char *initrd __initdata = NULL;
static int __init read_initrd(void)
end_none
};
+/* Not changed */
volatile unsigned long irq_err_count;
/*
int get_irq_list(char *buf)
{
int i, j;
+ unsigned long flags;
struct irqaction * action;
char *p = buf;
*p++ = '\n';
for (i = 0 ; i < NR_IRQS ; i++) {
+ spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
- continue;
+ goto end;
p += sprintf(p, "%3d: ",i);
#ifndef CONFIG_SMP
p += sprintf(p, "%10u ", kstat_irqs(i));
for (action=action->next; action; action = action->next)
p += sprintf(p, ", %s", action->name);
*p++ = '\n';
+ end:
+ spin_unlock_irqrestore(&irq_desc[i].lock, flags);
}
p += sprintf(p, "\n");
#ifdef notdef
}
}
+/* These are initialized by sysctl_init, which is called from init/main.c */
static struct proc_dir_entry * root_irq_dir;
static struct proc_dir_entry * irq_dir [NR_IRQS];
static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];
-unsigned long irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = ~0UL };
+/* These are read and written as longs, so a read won't see a partial write
+ * even during a race.
+ */
+static unsigned long irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = ~0UL };
#define HEX_DIGITS 8
smp_affinity_entry[irq] = entry;
}
+/* Read and written as a long */
unsigned long prof_cpu_mask = -1;
void __init init_irq_proc (void)
register_irq_proc(i);
}
+static spinlock_t irq_spinlock = SPIN_LOCK_UNLOCKED;
+
+/* Acquire the global IRQ-registration lock with local interrupts
+ * disabled. Returns the saved interrupt flags, which the caller must
+ * hand back to irq_unlock. Used to guard the active_fds list and the
+ * pollfds array, which are manipulated from activate_fd, reactivate_fd,
+ * deactivate_fd, free_irq_by_cb, free_irq_later, and forward_interrupts.
+ */
+unsigned long irq_lock(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&irq_spinlock, flags);
+ return(flags);
+}
+
+/* Release the lock taken by irq_lock, restoring the saved flags. */
+void irq_unlock(unsigned long flags)
+{
+ spin_unlock_irqrestore(&irq_spinlock, flags);
+}
+
unsigned long probe_irq_on(void)
{
return(0);
int activate_fd(int irq, int fd, int type, void *dev_id)
{
- struct irq_fd *new_fd;
- int pid, retval, events, err;
+ struct pollfd *tmp_pfd;
+ struct irq_fd *new_fd, *irq_fd;
+ unsigned long flags;
+ int pid, events, err, n, size;
+
+ pid = os_getpid();
+ err = os_set_fd_async(fd, pid);
+ if(err < 0)
+ goto out;
- for(new_fd = active_fds;new_fd;new_fd = new_fd->next){
- if((new_fd->fd == fd) && (new_fd->type == type)){
- printk("Registering fd %d twice\n", fd);
- printk("Irqs : %d, %d\n", new_fd->irq, irq);
- printk("Ids : 0x%x, 0x%x\n", new_fd->id, dev_id);
- return(-EIO);
- }
- }
- pid = cpu_tasks[0].pid;
- if((retval = os_set_fd_async(fd, pid)) != 0)
- return(retval);
new_fd = um_kmalloc(sizeof(*new_fd));
err = -ENOMEM;
- if(new_fd == NULL) return(err);
- pollfds_num++;
- if(pollfds_num > pollfds_size){
- struct pollfd *tmp_pfd;
-
- tmp_pfd = um_kmalloc(pollfds_num * sizeof(pollfds[0]));
- if(tmp_pfd == NULL){
- pollfds_num--;
- goto out_irq;
- }
- if(pollfds != NULL){
- memcpy(tmp_pfd, pollfds,
- sizeof(pollfds[0]) * pollfds_size);
- kfree(pollfds);
- }
- pollfds = tmp_pfd;
- pollfds_size = pollfds_num;
- }
+ if(new_fd == NULL)
+ goto out;
if(type == IRQ_READ) events = POLLIN | POLLPRI;
else events = POLLOUT;
current_events: 0,
freed : 0 } );
- *last_irq_ptr = new_fd;
- last_irq_ptr = &new_fd->next;
+ /* Critical section - locked by a spinlock because this stuff can
+ * be changed from interrupt handlers. The stuff above is done
+ * outside the lock because it allocates memory.
+ */
+
+ /* Actually, it only looks like it can be called from interrupt
+ * context. The culprit is reactivate_fd, which calls
+ * maybe_sigio_broken, which calls write_sigio_workaround,
+ * which calls activate_fd. However, write_sigio_workaround should
+ * only be called once, at boot time. That would make it clear that
+ * this is called only from process context, and can be locked with
+ * a semaphore.
+ */
+ flags = irq_lock();
+ for(irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next){
+ if((irq_fd->fd == fd) && (irq_fd->type == type)){
+ printk("Registering fd %d twice\n", fd);
+ printk("Irqs : %d, %d\n", irq_fd->irq, irq);
+ printk("Ids : 0x%x, 0x%x\n", irq_fd->id, dev_id);
+ goto out_free;
+ }
+ }
+
+ n = pollfds_num;
+ if(n == pollfds_size){
+ while(1){
+ /* Here we have to drop the lock in order to call
+ * kmalloc, which might sleep. If something else
+ * came in and changed the pollfds array, we free
+ * the buffer and try again.
+ */
+ irq_unlock(flags);
+ size = (pollfds_num + 1) * sizeof(pollfds[0]);
+ tmp_pfd = um_kmalloc(size);
+ flags = irq_lock();
+ if(tmp_pfd == NULL)
+ goto out_unlock;
+ if(n == pollfds_size)
+ break;
+ kfree(tmp_pfd);
+ }
+ if(pollfds != NULL){
+ memcpy(tmp_pfd, pollfds,
+ sizeof(pollfds[0]) * pollfds_size);
+ kfree(pollfds);
+ }
+ pollfds = tmp_pfd;
+ pollfds_size++;
+ }
if(type == IRQ_WRITE) events = 0;
- pollfds[pollfds_num - 1] = ((struct pollfd) { fd : fd,
- events : events,
- revents : 0 });
+ pollfds[pollfds_num] = ((struct pollfd) { fd : fd,
+ events : events,
+ revents : 0 });
+ pollfds_num++;
+
+ *last_irq_ptr = new_fd;
+ last_irq_ptr = &new_fd->next;
+ irq_unlock(flags);
+
+ /* This calls activate_fd, so it has to be outside the critical
+ * section.
+ */
maybe_sigio_broken(fd, type);
return(0);
- out_irq:
+ out_unlock:
+ irq_unlock(flags);
+ out_free:
kfree(new_fd);
+ out:
return(err);
}
static void free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg)
{
struct irq_fd **prev;
+ unsigned long flags;
int i = 0;
+ flags = irq_lock();
prev = &active_fds;
while(*prev != NULL){
if((*test)(*prev, arg)){
printk("free_irq_by_cb - mismatch between "
"active_fds and pollfds, fd %d vs %d\n",
(*prev)->fd, pollfds[i].fd);
- return;
+ goto out;
}
memcpy(&pollfds[i], &pollfds[i + 1],
(pollfds_num - i - 1) * sizeof(pollfds[0]));
prev = &(*prev)->next;
i++;
}
+ out:
+ irq_unlock(flags);
}
struct irq_and_dev {
{
struct irq_fd *irq;
int i = 0;
-
+
for(irq=active_fds; irq != NULL; irq = irq->next){
if((irq->fd == fd) && (irq->irq == irqnum)) break;
i++;
}
if(irq == NULL){
printk("find_irq_by_fd doesn't have descriptor %d\n", fd);
- return(NULL);
+ goto out;
}
if((pollfds[i].fd != -1) && (pollfds[i].fd != fd)){
printk("find_irq_by_fd - mismatch between active_fds and "
"pollfds, fd %d vs %d, need %d\n", irq->fd,
pollfds[i].fd, fd);
- return(NULL);
+ irq = NULL;
+ goto out;
}
*index_out = i;
+ out:
return(irq);
}
void free_irq_later(int irq, void *dev_id)
{
struct irq_fd *irq_fd;
+ unsigned long flags;
+ flags = irq_lock();
for(irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next){
if((irq_fd->irq == irq) && (irq_fd->id == dev_id))
break;
if(irq_fd == NULL){
printk("free_irq_later found no irq, irq = %d, "
"dev_id = 0x%p\n", irq, dev_id);
- return;
+ goto out;
}
irq_fd->freed = 1;
+ out:
+ irq_unlock(flags);
}
void reactivate_fd(int fd, int irqnum)
{
struct irq_fd *irq;
+ unsigned long flags;
int i;
+ flags = irq_lock();
irq = find_irq_by_fd(fd, irqnum, &i);
- if(irq == NULL) return;
+ if(irq == NULL)
+ goto out;
pollfds[i].fd = irq->fd;
+
+ irq_unlock(flags);
+
+ /* This calls activate_fd, so it has to be outside the critical
+ * section.
+ */
maybe_sigio_broken(fd, irq->type);
+ out:
+ irq_unlock(flags);
}
void deactivate_fd(int fd, int irqnum)
{
struct irq_fd *irq;
+ unsigned long flags;
int i;
+ flags = irq_lock();
irq = find_irq_by_fd(fd, irqnum, &i);
- if(irq == NULL) return;
+ if(irq == NULL)
+ goto out;
pollfds[i].fd = -1;
+ out:
+ irq_unlock(flags);
}
void forward_ipi(int fd, int pid)
void forward_interrupts(int pid)
{
struct irq_fd *irq;
+ unsigned long flags;
+ flags = irq_lock();
for(irq=active_fds;irq != NULL;irq = irq->next){
if(fcntl(irq->fd, F_SETOWN, pid) < 0){
int save_errno = errno;
}
irq->pid = pid;
}
+ irq_unlock(flags);
}
void init_irq_signals(int on_sigstack)
if(timer_irq_inited) h = (__sighandler_t) alarm_handler;
else h = boot_timer_handler;
- set_handler(SIGVTALRM, h, flags | SA_NODEFER | SA_RESTART,
- SIGUSR1, SIGIO, SIGWINCH, -1);
+ set_handler(SIGVTALRM, h, flags | SA_RESTART,
+ SIGUSR1, SIGIO, SIGWINCH, SIGALRM, -1);
set_handler(SIGIO, (__sighandler_t) sig_handler, flags | SA_RESTART,
- SIGUSR1, SIGIO, SIGWINCH, -1);
+ SIGUSR1, SIGIO, SIGWINCH, SIGALRM, SIGVTALRM, -1);
signal(SIGWINCH, SIG_IGN);
}
#include "kern.h"
#include "init.h"
+/* Changed during early boot */
+pgd_t swapper_pg_dir[1024];
unsigned long high_physmem;
-unsigned long low_physmem;
-
unsigned long vm_start;
unsigned long vm_end;
-
unsigned long highmem;
-
-pgd_t swapper_pg_dir[1024];
-
unsigned long *empty_zero_page = NULL;
-
unsigned long *empty_bad_page = NULL;
+/* Not modified */
const char bad_pmd_string[] = "Bad pmd in pte_alloc: %08lx\n";
extern char __init_begin, __init_end;
extern long physmem_size;
+/* Not changed by UML */
mmu_gather_t mmu_gathers[NR_CPUS];
+/* Changed during early boot */
int kmalloc_ok = 0;
#define NREGIONS (phys_region_index(0xffffffff) - phys_region_index(0x0) + 1)
struct mem_region *regions[NREGIONS] = { [ 0 ... NREGIONS - 1 ] = NULL };
#define REGION_SIZE ((0xffffffff & ~REGION_MASK) + 1)
+/* Changed during early boot */
static unsigned long brk_end;
static void map_cb(void *unused)
}
#if CONFIG_HIGHMEM
+/* Changed during early boot */
pte_t *kmap_pte;
pgprot_t kmap_prot;
return(0);
}
+DECLARE_MUTEX(regions_sem);
+
static int setup_one_range(int fd, char *driver, unsigned long start,
unsigned long pfn, int len,
struct mem_region *region)
{
int i;
+ down(®ions_sem);
for(i = 0; i < NREGIONS; i++){
if(regions[i] == NULL) break;
}
if(i == NREGIONS){
printk("setup_range : no free regions\n");
- return(-1);
+ i = -1;
+ goto out;
}
if(fd == -1)
len : len,
fd : fd } );
regions[i] = region;
+ out:
+ up(®ions_sem);
return(i);
}
printk("%d pages swap cached\n", cached);
}
-unsigned long kmem_top = 0;
+/* Changed during early boot */
+static unsigned long kmem_top = 0;
unsigned long get_kmem_end(void)
{
goto again;
}
+DECLARE_MUTEX(vm_reserved_sem);
static struct list_head vm_reserved = LIST_HEAD_INIT(vm_reserved);
+/* Static structures, linked in to the list in early boot */
static struct vm_reserved head = {
list : LIST_HEAD_INIT(head.list),
start : 0,
{
struct vm_reserved *entry = e, *reserved, *prev;
struct list_head *ele;
+ int err;
+ down(&vm_reserved_sem);
list_for_each(ele, &vm_reserved){
reserved = list_entry(ele, struct vm_reserved, list);
if(reserved->start >= end) goto found;
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if(entry == NULL){
printk("reserve_vm : Failed to allocate entry\n");
- return(-ENOMEM);
+ err = -ENOMEM;
+ goto out;
}
*entry = ((struct vm_reserved)
{ list : LIST_HEAD_INIT(entry->list),
start : start,
end : end });
list_add(&entry->list, &prev->list);
+ err = 0;
+ out:
+ up(&vm_reserved_sem);
- return(0);
+ /* Return err, not 0 - otherwise the -ENOMEM path above reports
+ * success to the caller.
+ */
+ return(err);
}
unsigned long start;
int err;
+ down(&vm_reserved_sem);
list_for_each(ele, &vm_reserved){
this = list_entry(ele, struct vm_reserved, list);
next = list_entry(ele->next, struct vm_reserved, list);
(this->end + len + PAGE_SIZE <= next->start))
goto found;
}
+ up(&vm_reserved_sem);
return(0);
found:
+ up(&vm_reserved_sem);
start = (unsigned long) ROUND_UP(this->end) + PAGE_SIZE;
err = reserve_vm(start, start + len, NULL);
if(err) return(0);
unsigned long size;
};
-struct iomem iomem_regions[NREGIONS] = { [ 0 ... NREGIONS - 1 ] =
+/* iomem regions can only be added on the command line at the moment.
+ * Locking will be needed when they can be added via mconsole.
+ */
+
+struct iomem iomem_regions[NREGIONS] = { [ 0 ... NREGIONS - 1 ] =
{ name : NULL,
fd : -1,
size : 0 } };
#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
+/* Changed during early boot */
static struct mem_region physmem_region;
static struct vm_reserved physmem_reserved;
#include "mem_user.h"
#include "init.h"
#include "os.h"
+#include "tempfile.h"
-struct mem_region physmem_region;
-
-struct mem_region *mem_list = &physmem_region;
+extern struct mem_region physmem_region;
#define TEMPNAME_TEMPLATE "vm_file-XXXXXX"
flags = SA_ONSTACK;
}
set_handler(SIGSEGV, (__sighandler_t) sig_handler, flags,
- SIGUSR1, SIGIO, SIGWINCH, -1);
+ SIGUSR1, SIGIO, SIGWINCH, SIGALRM, SIGVTALRM, -1);
set_handler(SIGTRAP, (__sighandler_t) sig_handler, flags,
- SIGUSR1, SIGIO, SIGWINCH, -1);
+ SIGUSR1, SIGIO, SIGWINCH, SIGALRM, SIGVTALRM, -1);
set_handler(SIGFPE, (__sighandler_t) sig_handler, flags,
- SIGUSR1, SIGIO, SIGWINCH, -1);
+ SIGUSR1, SIGIO, SIGWINCH, SIGALRM, SIGVTALRM, -1);
set_handler(SIGILL, (__sighandler_t) sig_handler, flags,
- SIGUSR1, SIGIO, SIGWINCH, -1);
+ SIGUSR1, SIGIO, SIGWINCH, SIGALRM, SIGVTALRM, -1);
set_handler(SIGBUS, (__sighandler_t) sig_handler, flags,
- SIGUSR1, SIGIO, SIGWINCH, -1);
+ SIGUSR1, SIGIO, SIGWINCH, SIGALRM, SIGVTALRM, -1);
set_handler(SIGWINCH, (__sighandler_t) sig_handler, flags,
- SIGUSR1, SIGIO, SIGWINCH, -1);
+ SIGUSR1, SIGIO, SIGWINCH, SIGALRM, SIGVTALRM, -1);
set_handler(SIGUSR2, (__sighandler_t) sig_handler,
SA_NOMASK | flags, -1);
if(usr1_handler) set_handler(SIGUSR1, usr1_handler, flags, -1);
signal(SIGCHLD, SIG_IGN);
signal(SIGHUP, SIG_IGN);
- set_timers(1); /* XXX A bit of a race here */
+
init_irq_signals(sig_stack != NULL);
}
#include "2_5compat.h"
#include "os.h"
+/* This is a per-cpu array. A processor only modifies its entry and it only
+ * cares about its entry, so it's OK if another processor is modifying its
+ * entry.
+ */
struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };
struct task_struct *get_task(int pid, int require)
{
int i;
- for(i = 0; i < num_online_cpus(); i++){
+ for(i = 0; i < ncpus; i++){
if(cpu_tasks[i].pid == pid) return(i);
}
return(-1);
current->thread.regs.regs.sc = (void *) (&sig + 1);
suspend_new_thread(current->thread.switch_pipe[0]);
+ block_signals();
+#ifdef CONFIG_SMP
+ schedule_tail(NULL);
+#endif
+ enable_timer();
free_page(current->thread.temp_stack);
set_cmdline("(kernel thread)");
force_flush_all();
current->thread.prev_sched = NULL;
change_sig(SIGUSR1, 1);
+ change_sig(SIGVTALRM, 1);
+ change_sig(SIGPROF, 1);
unblock_signals();
	if(!run_kernel_thread(fn, arg, &current->thread.jmp))
do_exit(0);
static int new_thread_proc(void *stack)
{
- block_signals();
+ change_sig(SIGIO, 0);
+ change_sig(SIGVTALRM, 0);
+ change_sig(SIGPROF, 0);
init_new_thread(stack, new_thread_handler);
os_usr1_process(os_getpid());
return(0);
unsigned long flags;
int vtalrm, alrm, prof, err, cpu;
char c;
+ /* jailing and SMP are incompatible, so this doesn't need to be
+ * made per-cpu
+ */
static int reading;
from = prev;
* onto the signal frame.
*/
-extern int hit_me;
-
void finish_fork_handler(int sig)
{
current->thread.regs.regs.sc = (void *) (&sig + 1);
suspend_new_thread(current->thread.switch_pipe[0]);
+ schedule_tail(NULL);
+ enable_timer();
+ change_sig(SIGVTALRM, 1);
force_flush_all();
if(current->mm != current->parent->mm)
protect(uml_reserved, high_physmem - uml_reserved, 1, 1, 0, 1);
current->thread.prev_sched = NULL;
free_page(current->thread.temp_stack);
- block_signals();
change_sig(SIGUSR1, 0);
set_user_mode(current);
}
{
int sig = sigusr1;
- block_signals();
+ change_sig(SIGIO, 0);
+ change_sig(SIGVTALRM, 0);
+ change_sig(SIGPROF, 0);
init_new_thread(stack, finish_fork_handler);
kill(os_getpid(), sig);
void default_idle(void)
{
- if(current->thread_info->cpu == 0) idle_timer();
+ idle_timer();
atomic_inc(&init_mm.mm_count);
current->mm = &init_mm;
return(new);
}
+/* Changed by jail_setup, which is a setup */
int jail = 0;
int __init jail_setup(char *line, int *add)
mprotect_kernel_vm(w);
}
-int jail_timer_off = 0;
-
+/* No SMP problems since jailing and SMP are incompatible */
void unprotect_kernel_mem(void)
{
mprotect_kernel_mem(1);
- jail_timer_off = 0;
}
void protect_kernel_mem(void)
{
- jail_timer_off = 1;
mprotect_kernel_mem(0);
}
int smp_sigio_handler(void)
{
+ int cpu = current->thread_info->cpu;
#ifdef CONFIG_SMP
- IPI_handler(hard_smp_processor_id());
- if (hard_smp_processor_id() != 0) return(1);
+ IPI_handler(cpu);
+ if(cpu != 0)
+ return(1);
#endif
return(0);
}
return(in_interrupt());
}
+/* Return the number of the CPU this task is currently running on
+ * (reads current->thread_info->cpu).
+ */
+int cpu(void)
+{
+	return(current->thread_info->cpu);
+}
+
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
* Emacs will notice this stuff at the end of the file and automatically
#include "sigio.h"
#include "irq_user.h"
+/* Protected by sigio_lock() called from write_sigio_workaround */
static int sigio_irq_fd = -1;
void sigio_interrupt(int irq, void *data, struct pt_regs *unused)
return(0);
}
+/* Protects the SIGIO bookkeeping noted elsewhere in this patch as
+ * "Protected by sigio_lock()" (e.g. sigio_irq_fd, current_poll,
+ * write_sigio_pid and the sigio pipe descriptors).
+ */
+static spinlock_t sigio_spinlock = SPIN_LOCK_UNLOCKED;
+
+/* Acquire the global SIGIO lock. */
+void sigio_lock(void)
+{
+	spin_lock(&sigio_spinlock);
+}
+
+/* Release the global SIGIO lock. */
+void sigio_unlock(void)
+{
+	spin_unlock(&sigio_spinlock);
+}
+
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
* Emacs will notice this stuff at the end of the file and automatically
#include "helper.h"
#include "os.h"
+/* Changed during early boot */
int pty_output_sigio = 0;
int pty_close_sigio = 0;
+/* Used as a flag during SIGIO testing early in boot */
static int got_sigio = 0;
void __init handler(int sig)
check_one_sigio(tty_close);
}
+/* Protected by sigio_lock(), also used by sigio_cleanup, which is an
+ * exitcall.
+ */
static int write_sigio_pid = -1;
+
+/* These arrays are initialized before the sigio thread is started, and
+ * the descriptors closed after it is killed. So, it can't see them change.
+ * On the UML side, they are changed under the sigio_lock.
+ */
static int write_sigio_fds[2] = { -1, -1 };
static int sigio_private[2] = { -1, -1 };
int used;
};
+/* Protected by sigio_lock(). Used by the sigio thread, but the UML thread
+ * synchronizes with it.
+ */
struct pollfds current_poll = {
poll : NULL,
size : 0,
}
}
-/* XXX SMP locking needed here too */
-
static int need_poll(int n)
{
if(n <= next_poll.size){
set_signals(flags);
return;
fail:
+ sigio_lock();
if(write_sigio_pid != -1) kill(write_sigio_pid, SIGKILL);
write_sigio_pid = -1;
close(sigio_private[0]);
close(sigio_private[1]);
close(write_sigio_fds[0]);
close(write_sigio_fds[1]);
+ sigio_unlock();
set_signals(flags);
}
int add_sigio_fd(int fd, int read)
{
- int err, i, n, events;
+ int err = 0, i, n, events;
- for(i = 0; i < current_poll.used; i++)
- if(current_poll.poll[i].fd == fd) return(0);
+ sigio_lock();
+ for(i = 0; i < current_poll.used; i++){
+ if(current_poll.poll[i].fd == fd)
+ goto out;
+ }
n = current_poll.used + 1;
err = need_poll(n);
- if(err) return(err);
+ if(err)
+ goto out;
for(i = 0; i < current_poll.used; i++)
next_poll.poll[i] = current_poll.poll[i];
events : events,
revents : 0 });
update_thread();
- return(0);
+ out:
+ sigio_unlock();
+ return(err);
}
int ignore_sigio_fd(int fd)
{
struct pollfd *p;
- int err, i, n = 0;
+ int err = 0, i, n = 0;
+ sigio_lock();
for(i = 0; i < current_poll.used; i++){
if(current_poll.poll[i].fd == fd) break;
}
- if(i == current_poll.used) return(0);
+ if(i == current_poll.used)
+ goto out;
err = need_poll(current_poll.used - 1);
- if(err) return(err);
+ if(err)
+ goto out;
for(i = 0; i < current_poll.used; i++){
		p = &current_poll.poll[i];
}
if(n == i){
printk("ignore_sigio_fd : fd %d not found\n", fd);
- return(-1);
+ err = -1;
+ goto out;
}
update_thread();
- return(0);
+ out:
+ sigio_unlock();
+ return(err);
}
static int setup_initial_poll(int fd)
unsigned long stack;
int err;
- if(write_sigio_pid != -1) return;
+ sigio_lock();
+ if(write_sigio_pid != -1)
+ goto out;
- /* XXX This needs SMP locking */
err = os_pipe(write_sigio_fds, 1, 1);
if(err){
printk("write_sigio_workaround - os_pipe 1 failed, "
"errno = %d\n", -err);
- return;
+ goto out;
}
err = os_pipe(sigio_private, 1, 1);
if(err){
if(write_sigio_irq(write_sigio_fds[0]))
goto out_kill;
+ out:
+ sigio_unlock();
return;
out_kill:
out_close1:
close(write_sigio_fds[0]);
close(write_sigio_fds[1]);
+ sigio_unlock();
}
int read_sigio_fd(int fd)
#include "sysdep/sigcontext.h"
#include "sigcontext.h"
-extern int kern_timer_on;
-
void set_sigstack(void *sig_stack, int size)
{
stack_t stack;
sigset_t mask;
sigemptyset(&mask);
- if(type == SIG_BLOCK) kern_timer_on = 0;
- else {
- kern_timer_on = 1;
- sigaddset(&mask, SIGVTALRM);
- sigaddset(&mask, SIGALRM);
- }
+ sigaddset(&mask, SIGVTALRM);
+ sigaddset(&mask, SIGALRM);
sigaddset(&mask, SIGIO);
sigaddset(&mask, SIGPROF);
if(sigprocmask(type, &mask, NULL) < 0)
sigs = sigismember(mask, SIGIO) ? 1 << SIGIO_BIT : 0;
sigs |= sigismember(mask, SIGVTALRM) ? 1 << SIGVTALRM_BIT : 0;
sigs |= sigismember(mask, SIGALRM) ? 1 << SIGVTALRM_BIT : 0;
- if(!kern_timer_on) sigs |= 1 << SIGVTALRM_BIT;
return(sigs);
}
int ret;
sigemptyset(&mask);
- if(!(disable & (1 << SIGIO_BIT))) sigaddset(&mask, SIGIO);
+ if(!(disable & (1 << SIGIO_BIT)))
+ sigaddset(&mask, SIGIO);
if(!(disable & (1 << SIGVTALRM_BIT))){
- kern_timer_on = 1;
sigaddset(&mask, SIGVTALRM);
sigaddset(&mask, SIGALRM);
}
if(sigprocmask(SIG_UNBLOCK, &mask, &mask) < 0)
panic("Failed to enable signals");
+
ret = disable_mask(&mask);
+
sigemptyset(&mask);
- if(disable & (1 << SIGIO_BIT)) sigaddset(&mask, SIGIO);
- if(disable & (1 << SIGVTALRM_BIT))
- kern_timer_on = 0;
+ if(disable & (1 << SIGIO_BIT))
+ sigaddset(&mask, SIGIO);
+ if(disable & (1 << SIGVTALRM_BIT)){
+ sigaddset(&mask, SIGVTALRM);
+ sigaddset(&mask, SIGALRM);
+ }
if(sigprocmask(SIG_BLOCK, &mask, NULL) < 0)
panic("Failed to block signals");
+
return(ret);
}
#include "linux/config.h"
-/* CPU online map */
+/* CPU online map, set by smp_boot_cpus */
unsigned long cpu_online_map = 1;
#ifdef CONFIG_SMP
#include "user_util.h"
#include "kern_util.h"
#include "kern.h"
+#include "irq_user.h"
#include "os.h"
-/* The 'big kernel lock' */
-spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
-
-/* Per CPU bogomips and other parameters */
+/* Per CPU bogomips and other parameters
+ * The only piece used here is the ipi pipe, which is set before SMP is
+ * started and never changed.
+ */
struct cpuinfo_um cpu_data[NR_CPUS];
spinlock_t um_bh_lock = SPIN_LOCK_UNLOCKED;
atomic_t global_bh_count;
+/* Not used by UML */
unsigned char global_irq_holder = NO_PROC_ID;
unsigned volatile long global_irq_lock;
/* Set when the idlers are all forked */
int smp_threads_ready = 0;
+
+/* A statistic, can be a little off */
int num_reschedules_sent = 0;
+/* Small, random number, never changed */
+unsigned long cache_decay_ticks = 5;
+
void smp_send_reschedule(int cpu)
{
write(cpu_data[cpu].ipi_pipe[1], "R", 1);
+/* Write an "S" IPI to every other online CPU's ipi_pipe; the IPI
+ * handler added elsewhere in this patch responds to 'S' by pausing
+ * forever, so only the sending CPU keeps running.
+ */
void smp_send_stop(void)
{
-	printk(KERN_INFO "Stopping all CPUs\n");
-}
+	int i;
+	printk(KERN_INFO "Stopping all CPUs...");
+	for(i = 0; i < num_online_cpus(); i++){
+		/* Don't send the stop IPI to the CPU doing the stopping */
+		if(i == current->thread_info->cpu)
+			continue;
+		write(cpu_data[i].ipi_pipe[1], "S", 1);
+	}
+	printk("done\n");
+}
-static atomic_t smp_commenced = ATOMIC_INIT(0);
+static unsigned long smp_commenced_mask;
static volatile unsigned long smp_callin_map = 0;
-void smp_commence(void)
+static int idle_proc(void *cpup)
{
- printk("All CPUs are go!\n");
-
- wmb();
- atomic_set(&smp_commenced, 1);
-}
-
-static int idle_proc(void *unused)
-{
- int cpu, err;
-
- set_current(current);
- del_from_runqueue(current);
- unhash_process(current);
+ int cpu = (int) cpup, err;
- cpu = current->processor;
err = os_pipe(cpu_data[cpu].ipi_pipe, 1, 1);
if(err)
panic("CPU#%d failed to create IPI pipe, errno = %d", cpu,
activate_ipi(cpu_data[cpu].ipi_pipe[0], current->thread.extern_pid);
wmb();
- if (test_and_set_bit(current->processor, &smp_callin_map)) {
- printk("huh, CPU#%d already present??\n", current->processor);
+ if (test_and_set_bit(cpu, &smp_callin_map)) {
+ printk("huh, CPU#%d already present??\n", cpu);
BUG();
}
- while (!atomic_read(&smp_commenced))
+ while (!test_bit(cpu, &smp_commenced_mask))
cpu_relax();
- init_idle();
+ set_bit(cpu, &cpu_online_map);
default_idle();
return(0);
}
-int inited_cpus = 1;
-
-static int idle_thread(int (*fn)(void *), int cpu)
+static struct task_struct *idle_thread(int cpu)
{
- struct task_struct *p;
- int pid;
+ struct task_struct *new_task;
unsigned char c;
- current->thread.request.u.thread.proc = fn;
- current->thread.request.u.thread.arg = NULL;
- p = do_fork(CLONE_VM | CLONE_PID, 0, NULL, 0);
- if(IS_ERR(p)) panic("do_fork failed in idle_thread");
-
- cpu_tasks[cpu].pid = p->thread.extern_pid;
- cpu_tasks[cpu].task = p;
- inited_cpus++;
- init_tasks[cpu] = p;
- p->processor = cpu;
- p->cpus_allowed = 1 << cpu;
- p->cpus_runnable = p->cpus_allowed;
- write(p->thread.switch_pipe[1], &c, sizeof(c));
- return(p->thread.extern_pid);
+ current->thread.request.u.thread.proc = idle_proc;
+ current->thread.request.u.thread.arg = (void *) cpu;
+ new_task = do_fork(CLONE_VM | CLONE_IDLETASK, 0, NULL, 0, NULL);
+ if(IS_ERR(new_task)) panic("do_fork failed in idle_thread");
+
+ cpu_tasks[cpu] = ((struct cpu_task)
+ { .pid = new_task->thread.extern_pid,
+ .task = new_task } );
+ write(new_task->thread.switch_pipe[1], &c, sizeof(c));
+ return(new_task);
}
-void smp_boot_cpus(void)
+void smp_prepare_cpus(unsigned int maxcpus)
{
- int err;
+ struct task_struct *idle;
+ unsigned long waittime;
+ int err, cpu;
set_bit(0, &cpu_online_map);
set_bit(0, &smp_callin_map);
activate_ipi(cpu_data[0].ipi_pipe[0], current->thread.extern_pid);
- if(ncpus < 1){
- printk(KERN_INFO "ncpus set to 1\n");
- ncpus = 1;
- }
- else if(ncpus > NR_CPUS){
- printk(KERN_INFO
- "ncpus can't be greater than NR_CPUS, set to %d\n",
- NR_CPUS);
- ncpus = NR_CPUS;
- }
-
- if(ncpus > 1){
- int i, pid;
-
- printk(KERN_INFO "Starting up other processors:\n");
- for(i=1;i<ncpus;i++){
- int waittime;
-
- /* Do this early, for hard_smp_processor_id() */
- cpu_tasks[i].pid = -1;
- set_bit(i, &cpu_online_map);
+ for(cpu = 1; cpu < ncpus; cpu++){
+ printk("Booting processor %d...\n", cpu);
+
+ idle = idle_thread(cpu);
- pid = idle_thread(idle_proc, i);
- printk(KERN_INFO "\t#%d - idle thread pid = %d.. ",
- i, pid);
+ init_idle(idle, cpu);
+ unhash_process(idle);
- waittime = 200000000;
- while (waittime-- && !test_bit(i, &smp_callin_map))
- cpu_relax();
+ waittime = 200000000;
+ while (waittime-- && !test_bit(cpu, &smp_callin_map))
+ cpu_relax();
- if (test_bit(i, &smp_callin_map))
- printk("online\n");
- else {
- printk("failed\n");
- clear_bit(i, &cpu_online_map);
- }
- }
+ if (test_bit(cpu, &smp_callin_map))
+ printk("done\n");
+ else printk("failed\n");
}
}
+/* Release the given CPU past the smp_commenced_mask barrier that its
+ * idle_proc spins on, then busy-wait until idle_proc marks the CPU
+ * online in cpu_online_map.  Always returns 0.
+ */
+int __cpu_up(unsigned int cpu)
+{
+	set_bit(cpu, &smp_commenced_mask);
+	while (!test_bit(cpu, &cpu_online_map))
+		mb();
+	return(0);
+}
+
int setup_profiling_timer(unsigned int multiplier)
{
printk(KERN_INFO "setup_profiling_timer\n");
break;
case 'R':
- current->need_resched = 1;
+ set_tsk_need_resched(current);
+ break;
+
+ case 'S':
+ printk("CPU#%d stopping\n", cpu);
+ while(1)
+ pause();
break;
default:
info = _info;
for (i=0;i<NR_CPUS;i++)
- if (i != current->processor && test_bit(i, &cpu_online_map))
+ if((i != current->thread_info->cpu) &&
+ test_bit(i, &cpu_online_map))
write(cpu_data[i].ipi_pipe[1], "C", 1);
while (atomic_read(&scf_started) != cpus)
return(0);
}
+/* Unlocked, I don't care if this is a bit off */
int nsyscalls = 0;
extern syscall_handler_t *sys_call_table[];
spinlock_t syscall_lock = SPIN_LOCK_UNLOCKED;
-void lock_syscall(void)
-{
- spin_lock(&syscall_lock);
-}
+static int syscall_index = 0;
-void unlock_syscall(void)
+int next_syscall_index(int limit)
{
+ int ret;
+
+ spin_lock(&syscall_lock);
+ ret = syscall_index;
+ if(++syscall_index == limit)
+ syscall_index = 0;
spin_unlock(&syscall_lock);
+ return(ret);
}
/*
struct timeval end;
} syscall_record[1024];
-int syscall_index = 0;
-
-extern int kern_timer_on;
-
void syscall_handler(int sig, struct uml_pt_regs *regs)
{
void *sc;
long result;
- int index, syscall;
+ int index, max, syscall;
- lock_syscall();
- if(syscall_index == 1024) syscall_index = 0;
- index = syscall_index;
- syscall_index++;
- unlock_syscall();
+ max = sizeof(syscall_record)/sizeof(syscall_record[0]);
+ index = next_syscall_index(max);
syscall = regs->syscall;
sc = regs->sc;
#include "user.h"
#include "process.h"
#include "signal_user.h"
+#include "time_user.h"
extern struct timeval xtime;
-void timer_handler(int sig, struct uml_pt_regs *regs)
-{
- timer_irq(regs);
-}
-
void timer(void)
{
gettimeofday(&xtime, NULL);
}
-static struct itimerval profile_interval;
-
-void get_profile_timer(void)
-{
- getitimer(ITIMER_PROF, &profile_interval);
- profile_interval.it_value = profile_interval.it_interval;
-}
-
-void disable_profile_timer(void)
-{
- struct itimerval interval = ((struct itimerval) { { 0, 0 }, { 0, 0 }});
- setitimer(ITIMER_PROF, &interval, NULL);
-}
-
static void set_interval(int timer_type)
{
struct itimerval interval;
panic("setitimer failed - errno = %d\n", errno);
}
+/* (Re)arm the host virtual interval timer to tick at hz().  A failure
+ * is only logged, not fatal, since the caller can keep running without
+ * the timer.
+ */
+void enable_timer(void)
+{
+	struct itimerval enable = ((struct itimerval) { { 0, 1000000/hz() },
+						        { 0, 1000000/hz() }});
+	if(setitimer(ITIMER_VIRTUAL, &enable, NULL))
+		printk("enable_timer - setitimer failed, errno = %d\n",
+		       errno);
+}
+
void switch_timers(int to_real)
{
struct itimerval disable = ((struct itimerval) { { 0, 0 }, { 0, 0 }});
{
if(signal(SIGVTALRM, SIG_IGN) == SIG_ERR)
panic("Couldn't unset SIGVTALRM handler");
+
set_handler(SIGALRM, (__sighandler_t) alarm_handler,
- SA_NODEFER | SA_RESTART, SIGUSR1, SIGIO, SIGWINCH, -1);
+ SA_RESTART, SIGUSR1, SIGIO, SIGWINCH, SIGVTALRM, -1);
set_interval(ITIMER_REAL);
}
set_interval(ITIMER_VIRTUAL);
}
-void set_timers(int set_signal)
-{
- if(set_signal)
- set_interval(ITIMER_VIRTUAL);
- if(setitimer(ITIMER_PROF, &profile_interval, NULL) == -1)
- panic("setitimer ITIMER_PROF failed - errno = %d\n", errno);
-}
-
struct timeval local_offset = { 0, 0 };
void do_gettimeofday(struct timeval *tv)
{
+ time_lock();
gettimeofday(tv, NULL);
timeradd(tv, &local_offset, tv);
+ time_unlock();
}
void do_settimeofday(struct timeval *tv)
{
struct timeval now;
+ time_lock();
gettimeofday(&now, NULL);
timersub(tv, &now, &local_offset);
+ time_unlock();
}
void idle_sleep(int secs)
return(HZ);
}
+/* Changed at early boot */
int timer_irq_inited = 0;
-/* kern_timer_on and missed_ticks are modified after kernel memory has been
+/* missed_ticks will be modified after kernel memory has been
* write-protected, so this puts it in a section which will be left
* write-enabled.
*/
-int __attribute__ ((__section__ (".unprotected"))) kern_timer_on = 0;
-int __attribute__ ((__section__ (".unprotected"))) missed_ticks = 0;
+int __attribute__ ((__section__ (".unprotected"))) missed_ticks[NR_CPUS];
void timer_irq(struct uml_pt_regs *regs)
{
- int ticks = missed_ticks;
+ int cpu = current->thread_info->cpu, ticks = missed_ticks[cpu];
if(!timer_irq_inited) return;
- missed_ticks = 0;
+ missed_ticks[cpu] = 0;
while(ticks--) do_IRQ(TIMER_IRQ, regs);
}
for(i=0;i<n;i++) ;
}
+/* Per-CPU timer tick.  All processors take timer interrupts, but only
+ * CPU 0 drives the timer IRQ; on SMP the others just call
+ * update_process_times to keep the accounting straight.
+ */
+void timer_handler(int sig, struct uml_pt_regs *regs)
+{
+#ifdef CONFIG_SMP
+	update_process_times(user_context(UPT_SP(regs)));
+#endif
+	if(current->thread_info->cpu == 0)
+		timer_irq(regs);
+}
+
+/* Serializes do_gettimeofday against do_settimeofday (protects
+ * local_offset, which settimeofday writes and gettimeofday reads).
+ */
+static spinlock_t timer_spinlock = SPIN_LOCK_UNLOCKED;
+
+/* Acquire the time-of-day lock. */
+void time_lock(void)
+{
+	spin_lock(&timer_spinlock);
+}
+
+/* Release the time-of-day lock. */
+void time_unlock(void)
+{
+	spin_unlock(&timer_spinlock);
+}
+
int __init timer_init(void)
{
int err;
spinlock_t trap_lock = SPIN_LOCK_UNLOCKED;
-void lock_trap(void)
-{
- spin_lock(&trap_lock);
-}
+static int trap_index = 0;
-void unlock_trap(void)
+int next_trap_index(int limit)
{
+ int ret;
+
+ spin_lock(&trap_lock);
+ ret = trap_index;
+ if(++trap_index == limit)
+ trap_index = 0;
spin_unlock(&trap_lock);
+ return(ret);
}
extern int debugger_pid;
tramp_stack : 0,
};
+/* Accessed by the tracing thread, which automatically serializes access */
static void *xterm_data;
static int xterm_fd;
while(waitpid(pid, NULL, 0) > 0) kill(pid, SIGCONT);
}
+/* Changed early in boot, and then only read */
int debug = 0;
int debug_stop = 1;
int debug_parent = 0;
-
int honeypot = 0;
static int signal_tramp(void *arg)
signal(SIGUSR1, SIG_IGN);
change_sig(SIGCHLD, 0);
signal(SIGSEGV, (__sighandler_t) sig_handler);
- set_timers(0);
set_cmdline("(idle thread)");
set_init_pid(os_getpid());
proc = arg;
}
}
-#ifdef CONFIG_SMP
-#error need to make these arrays
-#endif
-
+/* Accessed only by the tracing thread */
int debugger_pid = -1;
int debugger_parent = -1;
int debugger_fd = -1;
int gdb_pid = -1;
-struct {
- unsigned long address;
- int is_write;
- int pid;
- unsigned long sp;
- int is_user;
-} segfault_record[1024];
-
-int segfault_index = 0;
-
struct {
int pid;
int signal;
unsigned long addr;
struct timeval time;
-} signal_record[1024];
+} signal_record[1024][32];
-int signal_index = 0;
+int signal_index[32];
int nsignals = 0;
int debug_trace = 0;
extern int io_nsignals, io_count, intr_count;
if(WIFEXITED(status)) ;
#ifdef notdef
{
- printk("Child %d exited with status %d\n", pid,
+ printf("Child %d exited with status %d\n", pid,
WEXITSTATUS(status));
}
#endif
else if(WIFSIGNALED(status)){
sig = WTERMSIG(status);
if(sig != 9){
- printk("Child %d exited with signal %d\n", pid,
+ printf("Child %d exited with signal %d\n", pid,
sig);
}
}
else if(WIFSTOPPED(status)){
+ proc_id = pid_to_processor_id(pid);
sig = WSTOPSIG(status);
- if(signal_index == 1024){
- signal_index = 0;
+ if(signal_index[proc_id] == 1024){
+ signal_index[proc_id] = 0;
last_index = 1023;
}
- else last_index = signal_index - 1;
+ else last_index = signal_index[proc_id] - 1;
if(((sig == SIGPROF) || (sig == SIGVTALRM) ||
(sig == SIGALRM)) &&
- (signal_record[last_index].signal == sig) &&
- (signal_record[last_index].pid == pid))
- signal_index = last_index;
- signal_record[signal_index].pid = pid;
- gettimeofday(&signal_record[signal_index].time, NULL);
+ (signal_record[proc_id][last_index].signal == sig)&&
+ (signal_record[proc_id][last_index].pid == pid))
+ signal_index[proc_id] = last_index;
+ signal_record[proc_id][signal_index[proc_id]].pid = pid;
+ gettimeofday(&signal_record[proc_id][signal_index[proc_id]].time, NULL);
eip = ptrace(PTRACE_PEEKUSER, pid, PT_IP_OFFSET, 0);
- signal_record[signal_index].addr = eip;
- signal_record[signal_index++].signal = sig;
+ signal_record[proc_id][signal_index[proc_id]].addr = eip;
+ signal_record[proc_id][signal_index[proc_id]++].signal = sig;
- proc_id = pid_to_processor_id(pid);
if(proc_id == -1){
sleeping_process_signal(pid, sig);
continue;
" UML. This implies 'jail'.\n\n"
);
+/* Unlocked - don't care if this is a bit off */
int nsegfaults = 0;
+struct {
+ unsigned long address;
+ int is_write;
+ int pid;
+ unsigned long sp;
+ int is_user;
+} segfault_record[1024];
+
void segv_handler(int sig, struct uml_pt_regs *regs)
{
struct sigcontext *context = regs->sc;
- int index;
+ int index, max;
if(regs->is_user && !SEGV_IS_FIXABLE(context)){
bad_segv(SC_FAULT_ADDR(context), SC_IP(context),
SC_FAULT_WRITE(context));
return;
}
- lock_trap();
- index = segfault_index++;
- if(segfault_index == 1024) segfault_index = 0;
- unlock_trap();
+ max = sizeof(segfault_record)/sizeof(segfault_record[0]);
+ index = next_trap_index(max);
+
nsegfaults++;
segfault_record[index].address = SC_FAULT_ADDR(context);
segfault_record[index].pid = os_getpid();
regs->is_user, context);
}
-extern int kern_timer_on;
-
struct signal_info {
void (*handler)(int, struct uml_pt_regs *);
int is_irq;
{
struct uml_pt_regs save_regs, *r;
struct signal_info *info;
- int save_errno = errno, save_timer = kern_timer_on, is_user;
+ int save_errno = errno, is_user;
unprotect_kernel_mem();
(*info->handler)(sig, r);
- kern_timer_on = save_timer;
if(is_user){
interrupt_end();
block_signals();
sig_handler_common(sig, &sc);
}
-extern int timer_irq_inited, missed_ticks;
-
-extern int jail_timer_off;
+extern int timer_irq_inited, missed_ticks[];
void alarm_handler(int sig, struct sigcontext sc)
{
int user;
if(!timer_irq_inited) return;
- missed_ticks++;
+ missed_ticks[cpu()]++;
user = user_context(SC_SP(&sc));
- if(!user && !kern_timer_on) return;
- if(!user && jail_timer_off) return;
if(sig == SIGALRM)
switch_timers(0);
#define TTY_LOG_DIR "./"
-char *tty_log_dir = TTY_LOG_DIR;
-
+/* Set early in boot and then unchanged */
+static char *tty_log_dir = TTY_LOG_DIR;
static int tty_log_fd = -1;
#define TTY_LOG_OPEN 1
#define DEFAULT_COMMAND_LINE "root=6200"
+struct cpuinfo_um boot_cpu_data = {
+ .loops_per_jiffy = 0,
+ .ipi_pipe = { -1, -1 }
+};
+
unsigned long thread_saved_pc(struct task_struct *task)
{
return(os_process_pc(task->thread.extern_pid));
#define SIZE ((CONFIG_NEST_LEVEL + CONFIG_KERNEL_HALF_GIGS) * 0x20000000)
#define START (TOP - SIZE)
+/* Set in main */
unsigned long host_task_size;
unsigned long task_size;
task_size = START;
}
+/* Set in early boot */
unsigned long uml_physmem;
unsigned long uml_reserved;
-
unsigned long start_vm;
unsigned long end_vm;
-
int ncpus = 1;
+/* Pointer set in linux_main, the array itself is private to each thread,
+ * and changed at address space creation time so this poses no concurrency
+ * problems.
+ */
static char *argv1_begin = NULL;
static char *argv1_end = NULL;
+/* Set in early boot */
static int have_root __initdata = 0;
long physmem_size = 32 * 1024 * 1024;
}
extern int debug_trace;
-unsigned long brk_start;
+/* Set during early boot */
+unsigned long brk_start;
static struct vm_reserved kernel_vm_reserved;
#define MIN_VMALLOC (32 * 1024 * 1024)
check_sigio();
}
-spinlock_t pid_lock = SPIN_LOCK_UNLOCKED;
-
-void lock_pid(void)
-{
- spin_lock(&pid_lock);
-}
-
-void unlock_pid(void)
-{
- spin_unlock(&pid_lock);
-}
-
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
* Emacs will notice this stuff at the end of the file and automatically
#define UMID_LEN 64
#define UML_DIR "~/.uml/"
+/* Changed by set_umid and make_umid, which are run early in boot */
static char umid[UMID_LEN] = { 0 };
+
+/* Changed by set_uml_dir and make_uml_dir, which are run early in boot */
static char *uml_dir = UML_DIR;
+/* Changed by set_umid */
static int umid_is_random = 1;
static int umid_inited = 0;
#define COMMAND_LINE_SIZE _POSIX_ARG_MAX
+/* Changed in linux_main and setup_arch, which run before SMP is started */
char saved_command_line[COMMAND_LINE_SIZE] = { 0 };
char command_line[COMMAND_LINE_SIZE] = { 0 };
#include "user.h"
#include "init.h"
+/* Set in set_stklim, which is called from main and __wrap_malloc.
+ * __wrap_malloc only calls it if main hasn't started.
+ */
unsigned long stacksizelim;
+/* Set in main */
char *linux_prog;
#define PGD_BOUND (4 * 1024 * 1024)
#define STACKSIZE (8 * 1024 * 1024)
#define THREAD_NAME_LEN (256)
-char padding[THREAD_NAME_LEN] = { [ 0 ... THREAD_NAME_LEN - 2] = ' ', '\0' };
+/* Never changed */
+static char padding[THREAD_NAME_LEN] = {
+ [ 0 ... THREAD_NAME_LEN - 2] = ' ', '\0'
+};
static void set_stklim(void)
{
return(uml_exitcode);
}
-int allocating_monbuf = 0;
+/* Changed in __wrap___monstartup and __wrap_malloc very early */
+static int allocating_monbuf = 0;
#ifdef PROFILING
extern void __real___monstartup (unsigned long, unsigned long);
extern void *__real_malloc(int);
extern unsigned long host_task_size;
+/* Set in __wrap_malloc early */
static void *gmon_buf = NULL;
void *__wrap_malloc(int size)
return(0);
}
+/* Used by the tracing thread */
static debugger_state parent;
static int parent_syscall(debugger_state *debugger, int pid);
syscall_continue(debugger->pid);
}
-#ifdef CONFIG_SMP
-#error need to make these arrays
-#endif
-
+/* Used by the tracing thread */
static debugger_state debugger;
static debugee_state debugee;
#define MAXTOKEN 64
+/* Set during early boot */
int cpu_has_cmov = 1;
int cpu_has_xmm = 0;
}
}
+/* Accessed only by the tracing thread */
static unsigned long kernel_debugregs[8] = { [ 0 ... 7 ] = 0 };
static int debugregs_seq = 0;
#include "linux/stddef.h" // for NULL
#include "linux/elf.h" // for AT_NULL
-/* unsigned int local_bh_count[NR_CPUS]; */
-unsigned long isa_io_base = 0;
-
/* The following function nicked from arch/ppc/kernel/process.c and
* adapted slightly */
/*
__setup_start = .;
.setup.init : { *(.setup.init) }
__setup_end = .;
+ __per_cpu_start = . ;
+ .data.percpu : { *(.data.percpu) }
+ __per_cpu_end = . ;
__initcall_start = .;
.initcall.init : {
*(.initcall1.init)
#ifndef __UM_CACHE_H
#define __UM_CACHE_H
+/* These are x86 numbers */
#define L1_CACHE_SHIFT 5
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+#define L1_CACHE_SHIFT_MAX 7 /* largest L1 which this arch supports */
+
#endif
#ifdef CONFIG_SMP
#include "linux/config.h"
+#include "linux/bitops.h"
#include "asm/current.h"
-#define smp_processor_id() (current->processor)
+#define smp_processor_id() (current->thread_info->cpu)
#define cpu_logical_map(n) (n)
#define cpu_number_map(n) (n)
#define PROC_CHANGE_PENALTY 15 /* Pick a number, any number */
extern int hard_smp_processor_id(void);
#define NO_PROC_ID -1
+#define cpu_online(cpu) (cpu_online_map & (1<<(cpu)))
+
+extern int ncpus;
+#define cpu_possible(cpu) (cpu < ncpus)
+
+extern inline unsigned int num_online_cpus(void)
+{
+ return(hweight32(cpu_online_map));
+}
+
+extern inline void smp_cpus_done(unsigned int maxcpus)
+{
+}
+
#endif
#endif
#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
#define TIF_SIGPENDING 1 /* signal pending */
#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
+#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling
+ * TIF_NEED_RESCHED
+ */
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
#endif