VERSION = 2
PATCHLEVEL = 3
-SUBLEVEL = 0
+SUBLEVEL = 1
EXTRAVERSION =
ARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ -e s/arm.*/arm/ -e s/sa110/arm/)
static struct file * init_fd_array[NR_OPEN] = { NULL, };
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS;
-struct mm_struct init_mm = INIT_MM;
+struct mm_struct init_mm = INIT_MM(init_mm);
/*
* Initial task structure.
* "init_task" linker map entry..
*/
union task_union init_task_union
- __attribute__((__section__(".data.init_task"))) = { INIT_TASK };
+ __attribute__((__section__(".data.init_task"))) =
+ { INIT_TASK(init_task_union.task) };
#define FD_COMMAND_OKAY 3
static volatile int command_status = FD_COMMAND_NONE, fdc_busy = 0;
-static struct wait_queue *fdc_wait = NULL, *command_done = NULL;
+static DECLARE_WAIT_QUEUE_HEAD(fdc_wait);
+static DECLARE_WAIT_QUEUE_HEAD(command_done);
+
#define NO_SIGNAL (!interruptible || !signal_pending(current))
#define CALL(x) if ((x) == -EINTR) return -EINTR
#define ECALL(x) if ((ret = (x))) return ret;
static char special_op[MAX_HD] = { 0, };
static int access_count[MAX_HD] = {0, };
static char busy[MAX_HD] = {0, };
-static struct wait_queue * busy_wait = NULL;
+static DECLARE_WAIT_QUEUE_HEAD(busy_wait);
static int reset = 0;
static int hd_error = 0;
} else {
dma_base = dev->base_address[4] & PCI_BASE_ADDRESS_IO_MASK;
if (!dma_base || dma_base == PCI_BASE_ADDRESS_IO_MASK) {
- printk("%s: dma_base is invalid (0x%04lx, BIOS problem), please report to <mj@ucw.cz>\n", name, dma_base);
+ printk("%s: dma_base is invalid (0x%04lx)\n", name, dma_base);
dma_base = 0;
}
}
ide_hwgroup_t *hwgroup = HWGROUP(drive);
unsigned int major = HWIF(drive)->major;
struct request *cur_rq;
- struct semaphore sem = MUTEX_LOCKED;
+ DECLARE_MUTEX_LOCKED(sem);
if (IS_PDC4030_DRIVE && rq->buffer != NULL)
return -ENOSYS; /* special drive cmds not supported */
unsigned short cyl; /* "real" number of cyls */
unsigned int drive_data; /* for use by tuneproc/selectproc as needed */
void *hwif; /* actually (ide_hwif_t *) */
- struct wait_queue *wqueue; /* used to wait for drive in open() */
+ wait_queue_head_t wqueue; /* used to wait for drive in open() */
struct hd_driveid *id; /* drive model identification info */
struct hd_struct *part; /* drive partition table */
char name[4]; /* drive name, such as "hda" */
/*
* used to wait on when there are no free requests
*/
-struct wait_queue * wait_for_request = NULL;
+DECLARE_WAIT_QUEUE_HEAD(wait_for_request);
/* This specifies how many sectors to read ahead on the disk. */
static struct request * __get_request_wait(int n, kdev_t dev)
{
register struct request *req;
- struct wait_queue wait = { current, NULL };
+ DECLARE_WAITQUEUE(wait, current);
unsigned long flags;
add_wait_queue(&wait_for_request, &wait);
if (!thread) return NULL;
memset(thread, 0, sizeof(struct md_thread));
- init_waitqueue(&thread->wqueue);
+ init_waitqueue_head(&thread->wqueue);
thread->sem = &sem;
thread->run = run;
memset(&inode, 0, sizeof(inode));
memset(&in_dentry, 0, sizeof(in_dentry));
inode.i_rdev = device;
+ init_waitqueue_head(&inode.i_wait);
infile.f_mode = 1; /* read only */
infile.f_dentry = &in_dentry;
in_dentry.d_inode = &inode;
memset(&out_inode, 0, sizeof(out_inode));
memset(&out_dentry, 0, sizeof(out_dentry));
out_inode.i_rdev = ram_device;
+ init_waitqueue_head(&out_inode.i_wait);
outfile.f_mode = 3; /* read/write */
outfile.f_dentry = &out_dentry;
out_dentry.d_inode = &out_inode;
def_color = 0x07; /* white */
ulcolor = 0x0f; /* bold white */
halfcolor = 0x08; /* grey */
- vt_cons[currcons]->paste_wait = 0;
+ init_waitqueue_head(&vt_cons[currcons]->paste_wait);
reset_terminal(currcons, do_clear);
}
extern void ctrl_alt_del(void);
-struct wait_queue * keypress_wait = NULL;
+DECLARE_WAIT_QUEUE_HEAD(keypress_wait);
struct console;
int keyboard_wait_for_keypress(struct console *co)
tty->canon_data++;
if (tty->fasync)
kill_fasync(tty->fasync, SIGIO);
- if (tty->read_wait)
+ if (waitqueue_active(&tty->read_wait))
wake_up_interruptible(&tty->read_wait);
return;
}
if (!tty->icanon && (tty->read_cnt >= tty->minimum_to_wake)) {
if (tty->fasync)
kill_fasync(tty->fasync, SIGIO);
- if (tty->read_wait)
+ if (waitqueue_active(&tty->read_wait))
wake_up_interruptible(&tty->read_wait);
}
unsigned char *buf, size_t nr)
{
unsigned char *b = buf;
- struct wait_queue wait = { current, NULL };
+ DECLARE_WAITQUEUE(wait, current);
int c;
int minimum, time;
ssize_t retval = 0;
const unsigned char * buf, size_t nr)
{
const unsigned char *b = buf;
- struct wait_queue wait = { current, NULL };
+ DECLARE_WAITQUEUE(wait, current);
int c;
ssize_t retval = 0;
static ssize_t read_aux(struct file * file, char * buffer,
size_t count, loff_t *ppos)
{
- struct wait_queue wait = { current, NULL };
+ DECLARE_WAITQUEUE(wait, current);
ssize_t i = count;
unsigned char c;
queue = (struct aux_queue *) kmalloc(sizeof(*queue), GFP_KERNEL);
memset(queue, 0, sizeof(*queue));
queue->head = queue->tail = 0;
- queue->proc_list = NULL;
+ init_waitqueue_head(&queue->proc_list);
#ifdef INITIALIZE_MOUSE
kbd_write(KBD_CNTL_REG, KBD_CCMD_MOUSE_ENABLE); /* Enable Aux. */
struct aux_queue {
unsigned long head;
unsigned long tail;
- struct wait_queue *proc_list;
+ wait_queue_head_t proc_list;
struct fasync_struct *fasync;
unsigned char buf[AUX_BUF_SIZE];
};
struct pty_struct {
int magic;
- struct wait_queue * open_wait;
+ wait_queue_head_t open_wait;
};
#define PTY_MAGIC 0x5001
__initfunc(int pty_init(void))
{
-#ifdef CONFIG_UNIX98_PTYS
int i;
-#endif
/* Traditional BSD devices */
memset(&pty_state, 0, sizeof(pty_state));
+ for (i = 0; i < NR_PTYS; i++)
+ init_waitqueue_head(&pty_state[i].open_wait);
memset(&pty_driver, 0, sizeof(struct tty_driver));
pty_driver.magic = TTY_DRIVER_MAGIC;
pty_driver.driver_name = "pty_master";
struct qp_queue {
unsigned long head;
unsigned long tail;
- struct wait_queue *proc_list;
+ wait_queue_head_t proc_list;
struct fasync_struct *fasync;
unsigned char buf[QP_BUF_SIZE];
};
static ssize_t read_qp(struct file * file, char * buffer,
size_t count, loff_t *ppos)
{
- struct wait_queue wait = { current, NULL };
+ DECLARE_WAITQUEUE(wait, current);
ssize_t i = count;
unsigned char c;
queue = (struct qp_queue *) kmalloc(sizeof(*queue), GFP_KERNEL);
memset(queue, 0, sizeof(*queue));
queue->head = queue->tail = 0;
- queue->proc_list = NULL;
+ init_waitqueue_head(&queue->proc_list);
return 0;
}
static struct timer_rand_state extract_timer_state;
static struct timer_rand_state *irq_timer_state[NR_IRQS];
static struct timer_rand_state *blkdev_timer_state[MAX_BLKDEV];
-static struct wait_queue *random_read_wait;
-static struct wait_queue *random_write_wait;
+static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
+static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
static ssize_t random_read(struct file * file, char * buf,
size_t nbytes, loff_t *ppos);
initialize_benchmark(&timer_benchmark, "timer", 0);
#endif
extract_timer_state.dont_count_entropy = 1;
- random_read_wait = NULL;
- random_write_wait = NULL;
}
void rand_initialize_irq(int irq)
static ssize_t
random_read(struct file * file, char * buf, size_t nbytes, loff_t *ppos)
{
- struct wait_queue wait = { current, NULL };
+ DECLARE_WAITQUEUE(wait, current);
ssize_t n, retval = 0, count = 0;
if (nbytes == 0)
* ioctls.
*/
-static struct wait_queue *rtc_wait;
+static DECLARE_WAIT_QUEUE_HEAD(rtc_wait);
static struct timer_list rtc_irq_timer;
static ssize_t rtc_read(struct file *file, char *buf,
size_t count, loff_t *ppos)
{
- struct wait_queue wait = { current, NULL };
+ DECLARE_WAITQUEUE(wait, current);
unsigned long data;
ssize_t retval;
#endif
init_timer(&rtc_irq_timer);
rtc_irq_timer.function = rtc_dropped_irq;
- rtc_wait = NULL;
save_flags(flags);
cli();
/* Initialize periodic freq. to CMOS reset default, which is 1024Hz */
{
struct vt_struct *vt = (struct vt_struct *) tty->driver_data;
int pasted = 0, count;
- struct wait_queue wait = { current, NULL };
+ DECLARE_WAITQUEUE(wait, current);
poke_blanked_console();
add_wait_queue(&vt->paste_wait, &wait);
* memory if large numbers of serial ports are open.
*/
static unsigned char *tmp_buf;
-static struct semaphore tmp_buf_sem = MUTEX;
+static DECLARE_MUTEX(tmp_buf_sem);
static inline int serial_paranoia_check(struct async_struct *info,
kdev_t device, const char *routine)
static int block_til_ready(struct tty_struct *tty, struct file * filp,
struct async_struct *info)
{
- struct wait_queue wait = { current, NULL };
+ DECLARE_WAITQUEUE(wait, current);
struct serial_state *state = info->state;
int retval;
int do_clocal = 0, extra_count = 0;
}
/* Semaphore to protect creating and releasing a tty */
-static struct semaphore tty_sem = MUTEX;
+static DECLARE_MUTEX(tty_sem);
static void down_tty_sem(int index)
{
tty->flip.flag_buf_ptr = tty->flip.flag_buf;
tty->flip.tqueue.routine = flush_to_ldisc;
tty->flip.tqueue.data = tty;
- tty->flip.pty_sem = MUTEX;
+ init_MUTEX(&tty->flip.pty_sem);
+ init_waitqueue_head(&tty->write_wait);
+ init_waitqueue_head(&tty->read_wait);
tty->tq_hangup.routine = do_tty_hangup;
tty->tq_hangup.data = tty;
sema_init(&tty->atomic_read, 1);
void tty_wait_until_sent(struct tty_struct * tty, long timeout)
{
- struct wait_queue wait = { current, NULL };
+ DECLARE_WAITQUEUE(wait, current);
#ifdef TTY_DEBUG_WAIT_UNTIL_SENT
char buf[64];
* while those not ready go back to sleep. Seems overkill to add a wait
* to each vt just for this - usually this does nothing!
*/
-static struct wait_queue *vt_activate_queue = NULL;
+static DECLARE_WAIT_QUEUE_HEAD(vt_activate_queue);
/*
* Sleeps until a vt is activated, or the task is interrupted. Returns
int vt_waitactive(int vt)
{
int retval;
- struct wait_queue wait = { current, NULL };
+ DECLARE_WAITQUEUE(wait, current);
add_wait_queue(&vt_activate_queue, &wait);
for (;;) {
static int falcon_got_lock = 0;
-static struct wait_queue *falcon_fairness_wait = NULL;
+static DECLARE_WAIT_QUEUE_HEAD(falcon_fairness_wait);
static int falcon_trying_lock = 0;
-static struct wait_queue *falcon_try_wait = NULL;
+static DECLARE_WAIT_QUEUE_HEAD(falcon_try_wait);
static int falcon_dont_release = 0;
/* This function releases the lock on the DMA chip if there is no
retval->host_no = max_scsi_hosts++; /* never reuse host_no (DB) */
next_scsi_host++;
retval->host_queue = NULL;
- retval->host_wait = NULL;
+ init_waitqueue_head(&retval->host_wait);
retval->resetting = 0;
retval->last_reset = 0;
retval->irq = 0;
*/
static void launch_error_handler_thread(struct Scsi_Host * shpnt)
{
- struct semaphore sem = MUTEX_LOCKED;
+ DECLARE_MUTEX_LOCKED(sem);
shpnt->eh_notify = &sem;
host. */
unsigned int eh_active:1; /* Indicates the eh thread is awake and active if
this is true. */
- struct wait_queue * host_wait;
+ wait_queue_head_t host_wait;
Scsi_Host_Template * hostt;
atomic_t host_active; /* commands checked out */
volatile unsigned short host_busy; /* commands actually active on low-level */
*----------------------------------------------------------------------*/
volatile static int internal_done_flag = 0;
volatile static int internal_done_errcode = 0;
-static struct wait_queue *internal_wait = NULL;
+static DECLARE_WAIT_QUEUE_HEAD(internal_wait);
static void internal_done (Scsi_Cmnd * SCpnt)
{
SDpnt->host = shpnt;
SDpnt->online = TRUE;
+ init_waitqueue_head(&SDpnt->device_wait);
+
/*
* Next, hook the device to the host in question.
*/
SCpnt->lun = SDpnt->lun;
SCpnt->channel = SDpnt->channel;
{
- struct semaphore sem = MUTEX_LOCKED;
+ DECLARE_MUTEX_LOCKED(sem);
SCpnt->request.sem = &sem;
SCpnt->request.rq_status = RQ_SCSI_BUSY;
spin_lock_irq(&io_request_lock);
scsi_cmd[5] = 0;
SCpnt->cmd_len = 0;
{
- struct semaphore sem = MUTEX_LOCKED;
+ DECLARE_MUTEX_LOCKED(sem);
SCpnt->request.sem = &sem;
SCpnt->request.rq_status = RQ_SCSI_BUSY;
spin_lock_irq(&io_request_lock);
scsi_cmd[5] = 0;
SCpnt->cmd_len = 0;
{
- struct semaphore sem = MUTEX_LOCKED;
+ DECLARE_MUTEX_LOCKED(sem);
SCpnt->request.rq_status = RQ_SCSI_BUSY;
SCpnt->request.sem = &sem;
spin_lock_irq(&io_request_lock);
SDpnt->device_queue = SCpnt;
SDpnt->online = TRUE;
+ init_waitqueue_head(&SDpnt->device_wait);
+
/*
* Since we just found one device, there had damn well better be one in the list
* already.
{
if( shpnt->hostt == tpnt && shpnt->hostt->use_new_eh_code )
{
- struct semaphore sem = MUTEX_LOCKED;
+ DECLARE_MUTEX_LOCKED(sem);
shpnt->eh_notify = &sem;
kernel_thread((int (*)(void *))scsi_error_handler,
&& shpnt->hostt->use_new_eh_code
&& shpnt->ehandler != NULL )
{
- struct semaphore sem = MUTEX_LOCKED;
+ DECLARE_MUTEX_LOCKED(sem);
shpnt->eh_notify = &sem;
send_sig(SIGKILL, shpnt->ehandler, 1);
*/
struct scsi_device * next; /* Used for linked list */
struct scsi_device * prev; /* Used for linked list */
- struct wait_queue * device_wait;/* Used to wait if
+ wait_queue_head_t device_wait;/* Used to wait if
device is busy */
struct Scsi_Host * host;
volatile unsigned short device_busy; /* commands actually active on low-level */
#define SCSI_SLEEP(QUEUE, CONDITION) { \
if (CONDITION) { \
- struct wait_queue wait = { current, NULL}; \
+ DECLARE_WAITQUEUE(wait, current); \
add_wait_queue(QUEUE, &wait); \
for(;;) { \
current->state = TASK_UNINTERRUPTIBLE; \
void scsi_sleep (int timeout)
{
- struct semaphore sem = MUTEX_LOCKED;
+ DECLARE_MUTEX_LOCKED(sem);
struct timer_list timer;
init_timer(&timer);
if (host->can_queue)
{
- struct semaphore sem = MUTEX_LOCKED;
+ DECLARE_MUTEX_LOCKED(sem);
SCpnt->eh_state = SCSI_STATE_QUEUED;
{
struct Scsi_Host * host = (struct Scsi_Host *) data;
int rtn;
- struct semaphore sem = MUTEX_LOCKED;
+ DECLARE_MUTEX_LOCKED(sem);
unsigned long flags;
lock_kernel();
SCSI_LOG_IOCTL(1, printk("Trying ioctl with scsi command %d\n", cmd[0]));
SCpnt = scsi_allocate_device(NULL, dev, 1);
{
- struct semaphore sem = MUTEX_LOCKED;
+ DECLARE_MUTEX_LOCKED(sem);
SCpnt->request.sem = &sem;
scsi_do_cmd(SCpnt, cmd, NULL, 0, scsi_ioctl_done, timeout, retries);
spin_unlock_irqrestore(&io_request_lock, flags);
SCpnt = scsi_allocate_device(NULL, dev, 1);
{
- struct semaphore sem = MUTEX_LOCKED;
+ DECLARE_MUTEX_LOCKED(sem);
SCpnt->request.sem = &sem;
scsi_do_cmd(SCpnt, cmd, buf, needed, scsi_ioctl_done,
timeout, retries);
SCpnt->sense_buffer[2] = 0;
{
- struct semaphore sem = MUTEX_LOCKED;
+ DECLARE_MUTEX_LOCKED(sem);
/* Mark as really busy again */
SCpnt->request.rq_status = RQ_SCSI_BUSY;
SCpnt->request.sem = &sem;
SCpnt->sense_buffer[2] = 0;
{
- struct semaphore sem = MUTEX_LOCKED;
+ DECLARE_MUTEX_LOCKED(sem);
/* Mark as really busy again */
SCpnt->request.rq_status = RQ_SCSI_BUSY;
SCpnt->request.sem = &sem;
SCpnt->sense_buffer[2] = 0;
{
- struct semaphore sem = MUTEX_LOCKED;
+ DECLARE_MUTEX_LOCKED(sem);
/* Mark as really busy again */
SCpnt->request.rq_status = RQ_SCSI_BUSY;
SCpnt->request.sem = &sem;
/* same code as READCAPA !! */
{
- struct semaphore sem = MUTEX_LOCKED;
+ DECLARE_MUTEX_LOCKED(sem);
SCpnt->request.rq_status = RQ_SCSI_BUSY; /* Mark as really busy again */
SCpnt->request.sem = &sem;
scsi_do_cmd (SCpnt,
{
struct sg_fd * nextfp; /* NULL when last opened fd on this device */
struct sg_device * parentdp; /* owning device */
- struct wait_queue * read_wait; /* queue read until command done */
+ wait_queue_head_t read_wait; /* queue read until command done */
int timeout; /* defaults to SG_DEFAULT_TIMEOUT */
char * fst_buf; /* try to grab SG_SCATTER_SZ sized buffer on open */
int fb_size; /* actual size of allocated fst_buf */
typedef struct sg_device /* holds the state of each scsi generic device */
{
Scsi_Device * device;
- struct wait_queue * generic_wait;/* queue open if O_EXCL on prev. open */
+ wait_queue_head_t generic_wait;/* queue open if O_EXCL on prev. open */
int sg_tablesize; /* adapter's max scatter-gather table size */
Sg_fd * headfp; /* first open fd belonging to this device */
kdev_t i_rdev; /* holds device major+minor number */
SCSI_LOG_TIMEOUT(3, printk("sg_attach: dev=%d \n", k));
sdp->device = scsidp;
- sdp->generic_wait = NULL;
+ init_waitqueue_head(&sdp->generic_wait);
sdp->headfp= NULL;
sdp->exclude = 0;
sdp->merge_fd = 0; /* Cope with SG_DEF_MERGE_FD on open */
/* Do the command and wait.. */
{
- struct semaphore sem = MUTEX_LOCKED;
+ DECLARE_MUTEX_LOCKED(sem);
SCpnt->request.sem = &sem;
spin_lock_irqsave(&io_request_lock, flags);
scsi_do_cmd (SCpnt,
if( !scsi_block_when_processing_errors(SDev) )
return -ENODEV;
{
- struct semaphore sem = MUTEX_LOCKED;
+ DECLARE_MUTEX_LOCKED(sem);
SCpnt->request.sem = &sem;
spin_lock_irqsave(&io_request_lock, flags);
scsi_do_cmd(SCpnt,
}
cmd[1] |= (SCpnt->lun << 5) & 0xe0;
- STp->sem = MUTEX_LOCKED;
+ init_MUTEX_LOCKED(&STp->sem);
SCpnt->use_sg = (bytes > (STp->buffer)->sg[0].length) ?
(STp->buffer)->use_sg : 0;
if (SCpnt->use_sg) {
if [ "$CONFIG_ARCH_ACORN" = "y" ]; then
bool 'Acorn VIDC support' CONFIG_FB_ACORN
fi
+ if [ "$CONFIG_ARCH_NETWINDER" = "y" ]; then
+ tristate 'Cyber2000 support' CONFIG_FB_CYBER2000
+ fi
if [ "$CONFIG_APOLLO" = "y" ]; then
define_bool CONFIG_FB_APOLLO y
fi
"$CONFIG_FB_VIRGE" = "y" -o "$CONFIG_FB_CYBER" = "y" -o \
"$CONFIG_FB_VALKYRIE" = "y" -o "$CONFIG_FB_PLATINUM" = "y" -o \
"$CONFIG_FB_IGA" = "y" -o "$CONFIG_FB_MATROX" = "y" -o \
- "$CONFIG_FB_CT65550" = "y" -o "$CONFIG_FB_PM2" = "y" -o \
- "$CONFIG_FB_SGIVW" = "y" ]; then
+ "$CONFIG_FB_CT65550" = "y" -o "$CONFIG_FB_PM2" = "y" -o \
+ "$CONFIG_FB_SGIVW" = "y" -o "$CONFIG_FB_CYBER2000" = "y" ]; then
define_bool CONFIG_FBCON_CFB8 y
else
if [ "$CONFIG_FB_ACORN" = "m" -o "$CONFIG_FB_ATARI" = "m" -o \
"$CONFIG_FB_VIRGE" = "m" -o "$CONFIG_FB_CYBER" = "m" -o \
"$CONFIG_FB_VALKYRIE" = "m" -o "$CONFIG_FB_PLATINUM" = "m" -o \
"$CONFIG_FB_IGA" = "m" -o "$CONFIG_FB_MATROX" = "m" -o \
- "$CONFIG_FB_CT65550" = "m" -o "$CONFIG_FB_PM2" = "m" -o \
- "$CONFIG_FB_SGIVW" = "m" ]; then
+ "$CONFIG_FB_CT65550" = "m" -o "$CONFIG_FB_PM2" = "m" -o \
+ "$CONFIG_FB_SGIVW" = "m" -o "$CONFIG_FB_CYBER2000" = "m" ]; then
define_bool CONFIG_FBCON_CFB8 m
fi
fi
"$CONFIG_FB_VIRGE" = "y" -o "$CONFIG_FB_CYBER" = "y" -o \
"$CONFIG_FB_VALKYRIE" = "y" -o "$CONFIG_FB_PLATINUM" = "y" -o \
"$CONFIG_FB_CT65550" = "y" -o "$CONFIG_FB_MATROX" = "y" -o \
- "$CONFIG_FB_PM2" = "y" -o "$CONFIG_FB_SGIVW" = "y" ]; then
+ "$CONFIG_FB_PM2" = "y" -o "$CONFIG_FB_SGIVW" = "y" -o \
+ "$CONFIG_FB_CYBER2000" = "y" ]; then
define_bool CONFIG_FBCON_CFB16 y
else
if [ "$CONFIG_FB_ATARI" = "m" -o "$CONFIG_FB_ATY" = "m" -o \
"$CONFIG_FB_CONTROL" = "m" -o "$CONFIG_FB_CLGEN" = "m" -o \
"$CONFIG_FB_VIRGE" = "m" -o "$CONFIG_FB_CYBER" = "m" -o \
"$CONFIG_FB_VALKYRIE" = "m" -o "$CONFIG_FB_PLATINUM" = "m" -o \
- "$CONFIG_FB_CT65550" = "m" -o "$CONFIG_FB_MATROX" = "m" -o \
- "$CONFIG_FB_PM2" = "m" -o "$CONFIG_FB_SGIVW" = "m" ]; then
+ "$CONFIG_FB_CT65550" = "m" -o "$CONFIG_FB_MATROX" = "m" -o \
+	   "$CONFIG_FB_PM2" = "m" -o "$CONFIG_FB_SGIVW" = "m" -o \
+ "$CONFIG_FB_CYBER2000" = "m" ]; then
define_bool CONFIG_FBCON_CFB16 m
fi
fi
if [ "$CONFIG_FB_ATY" = "y" -o "$CONFIG_FB_VIRTUAL" = "y" -o \
"$CONFIG_FB_CLGEN" = "y" -o "$CONFIG_FB_VESA" = "y" -o \
- "$CONFIG_FB_MATROX" = "y" -o "$CONFIG_FB_PM2" = "y" ]; then
+ "$CONFIG_FB_MATROX" = "y" -o "$CONFIG_FB_PM2" = "y" -o \
+ "$CONFIG_FB_CYBER2000" = "y" ]; then
define_bool CONFIG_FBCON_CFB24 y
else
if [ "$CONFIG_FB_ATY" = "m" -o "$CONFIG_FB_VIRTUAL" = "m" -o \
"$CONFIG_FB_CLGEN" = "m" -o "$CONFIG_FB_VESA" = "m" -o \
- "$CONFIG_FB_MATROX" = "m" -o "$CONFIG_FB_PM2" = "m" ]; then
+ "$CONFIG_FB_MATROX" = "m" -o "$CONFIG_FB_PM2" = "m" -o \
+ "$CONFIG_FB_CYBER2000" = "m" ]; then
define_bool CONFIG_FBCON_CFB24 m
fi
fi
if [ "$CONFIG_AMIGA" = "y" ]; then
define_bool CONFIG_FONT_PEARL_8x8 y
fi
- if [ "$CONFIG_ARM" = "y" ]; then
+ if [ "$CONFIG_ARM" = "y" -a "$CONFIG_ARCH_ACORN" = "y" ]; then
define_bool CONFIG_FONT_ACORN_8x8 y
fi
fi
endif
endif
+ifeq ($(CONFIG_FB_CYBER2000),y)
+L_OBJS += cyber2000fb.o
+else
+ ifeq ($(CONFIG_FB_CYBER2000),m)
+ M_OBJS += cyber2000fb.o
+ endif
+endif
+
ifeq ($(CONFIG_FB_SGIVW),y)
L_OBJS += sgivwfb.o
else
/*
* linux/drivers/video/acorn.c
*
- * Copyright (C) 1998 Russell King
+ * Copyright (C) 1998,1999 Russell King
*
* Frame buffer code for Acorn platforms
+ *
+ * NOTE: Most of the modes with X!=640 will disappear shortly.
+ * NOTE: Startup setting of HS & VS polarity not supported.
+ * (do we need to support it if we're coming up in 640x480?)
*/
+
+#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
+#include <linux/ctype.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/malloc.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
+#include <video/fbcon.h>
#include <video/fbcon-mfb.h>
#include <video/fbcon-cfb2.h>
#include <video/fbcon-cfb4.h>
#include <video/fbcon-cfb8.h>
+#include <video/fbcon-cfb16.h>
+
+/*
+ * Default resolution.
+ * NOTE that it has to be supported in the table towards
+ * the end of this file.
+ */
+#define DEFAULT_XRES 640
+#define DEFAULT_YRES 480
+
+/*
+ * define this to debug the video mode selection
+ */
+#undef DEBUG_MODE_SELECTION
+
+#if defined(HAS_VIDC20)
+#define VIDC_PALETTE_SIZE 256
+#define VIDC_NAME "VIDC20"
+#elif defined(HAS_VIDC)
+#include <asm/memc.h>
+#define VIDC_PALETTE_SIZE 16
+#define VIDC_NAME "VIDC"
+#endif
-#define MAX_VIDC20_PALETTE 256
-#define MAX_VIDC_PALETTE 16
+#define EXTEND8(x) ((x)|(x)<<8)
+#define EXTEND4(x) ((x)|(x)<<4|(x)<<8|(x)<<16)
+
+struct vidc20_palette {
+ u_int red:8;
+ u_int green:8;
+ u_int blue:8;
+ u_int ext:4;
+ u_int unused:4;
+};
+
+struct vidc_palette {
+ u_int red:4;
+ u_int green:4;
+ u_int blue:4;
+ u_int trans:1;
+ u_int sbz1:13;
+ u_int reg:4;
+ u_int sbz2:2;
+};
+
+union palette {
+ struct vidc20_palette vidc20;
+ struct vidc_palette vidc;
+ u_int p;
+};
struct acornfb_par {
- unsigned long screen_base;
- unsigned int xres;
- unsigned int yres;
- unsigned char bits_per_pixel;
- unsigned int palette_size;
+ unsigned long screen_base;
+ unsigned long screen_base_p;
+ unsigned long screen_end;
+ unsigned long screen_size;
+ unsigned int dram_size;
+ unsigned int vram_half_sam;
+ unsigned int palette_size;
+ signed int montype;
+ signed int currcon;
+ unsigned int allow_modeset : 1;
+ unsigned int using_vram : 1;
+ unsigned int dpms : 1;
+
+ union palette palette[VIDC_PALETTE_SIZE];
union {
- union {
- struct {
- unsigned long red:8;
- unsigned long green:8;
- unsigned long blue:8;
- unsigned long ext:4;
- unsigned long unused:4;
- } d;
- unsigned long p;
- } vidc20[MAX_VIDC20_PALETTE];
- union {
- struct {
- unsigned long red:4;
- unsigned long green:4;
- unsigned long blue:4;
- unsigned long trans:1;
- unsigned long unused:19;
- } d;
- unsigned long p;
- } vidc[MAX_VIDC_PALETTE];
- } palette;
+ unsigned short cfb16[16];
+ unsigned long cfb32[16];
+ } cmap;
+};
+
+/*
+ * Translation from RISC OS monitor types to actual
+ * HSYNC and VSYNC frequency ranges. These are
+ * probably not right...
+ */
+#define NR_MONTYPES 6
+static struct fb_monspecs monspecs[NR_MONTYPES] __initdata = {
+ { 15625, 15625, 50, 50, 0 }, /* TV */
+ { 0, 99999, 0, 99, 0 }, /* Multi Freq */
+ { 58608, 58608, 64, 64, 0 }, /* Hi-res mono */
+ { 30000, 70000, 60, 60, 0 }, /* VGA */
+ { 30000, 70000, 56, 75, 0 }, /* SVGA */
+ { 30000, 70000, 60, 60, 0 }
};
-static int currcon = 0;
-static struct display disp;
+static struct display global_disp;
static struct fb_info fb_info;
static struct acornfb_par current_par;
+static struct fb_var_screeninfo __initdata init_var = {};
+
+extern int acornfb_depth; /* set by setup.c */
+extern unsigned int vram_size; /* set by setup.c */
+
+
+static struct vidc_timing {
+ u_int h_cycle;
+ u_int h_sync_width;
+ u_int h_border_start;
+ u_int h_display_start;
+ u_int h_display_end;
+ u_int h_border_end;
+ u_int h_interlace;
+
+ u_int v_cycle;
+ u_int v_sync_width;
+ u_int v_border_start;
+ u_int v_display_start;
+ u_int v_display_end;
+ u_int v_border_end;
+
+ u_int control;
+
+ /* VIDC20 only */
+ u_int pll_ctl;
+} current_vidc;
+
+#ifdef HAS_VIDC
+
+#define VID_CTL_VS_NVSYNC (1 << 3)
+#define VID_CTL_HS_NHSYNC (1 << 2)
+#define VID_CTL_24MHz (0)
+#define VID_CTL_25MHz (1)
+#define VID_CTL_36MHz (2)
+
+#define VIDC_CTRL_INTERLACE (1 << 6)
+#define VIDC_CTRL_FIFO_0_4 (0 << 4)
+#define VIDC_CTRL_FIFO_1_5 (1 << 4)
+#define VIDC_CTRL_FIFO_2_6 (2 << 4)
+#define VIDC_CTRL_FIFO_3_7 (3 << 4)
+#define VIDC_CTRL_1BPP (0 << 2)
+#define VIDC_CTRL_2BPP (1 << 2)
+#define VIDC_CTRL_4BPP (2 << 2)
+#define VIDC_CTRL_8BPP (3 << 2)
+#define VIDC_CTRL_DIV3 (0 << 0)
+#define VIDC_CTRL_DIV2 (1 << 0)
+#define VIDC_CTRL_DIV1_5 (2 << 0)
+#define VIDC_CTRL_DIV1 (3 << 0)
+
+/* CTL VIDC Actual
+ * 24.000 0 8.000
+ * 25.175 0 8.392
+ * 36.000 0 12.000
+ * 24.000 1 12.000
+ * 25.175 1 12.588
+ * 24.000 2 16.000
+ * 25.175 2 16.783
+ * 36.000 1 18.000
+ * 24.000 3 24.000
+ * 36.000 2 24.000
+ * 25.175 3 25.175
+ * 36.000 3 36.000
+ */
+static struct pixclock {
+ u_long min_clock;
+ u_long max_clock;
+ u_int vidc_ctl;
+ u_int vid_ctl;
+} pixclocks[] = {
+ /* we allow +/-1% on these */
+ { 123750, 126250, VIDC_CTRL_DIV3, VID_CTL_24MHz }, /* 8.000MHz */
+ { 82500, 84167, VIDC_CTRL_DIV2, VID_CTL_24MHz }, /* 12.000MHz */
+ { 61875, 63125, VIDC_CTRL_DIV1_5, VID_CTL_24MHz }, /* 16.000MHz */
+ { 41250, 42083, VIDC_CTRL_DIV1, VID_CTL_24MHz }, /* 24.000MHz */
+#ifdef CONFIG_ARCH_A5K
+ { 117974, 120357, VIDC_CTRL_DIV3, VID_CTL_25MHz }, /* 8.392MHz */
+ { 78649, 80238, VIDC_CTRL_DIV2, VID_CTL_25MHz }, /* 12.588MHz */
+ { 58987, 60178, VIDC_CTRL_DIV1_5, VID_CTL_25MHz }, /* 16.588MHz */
+ { 55000, 56111, VIDC_CTRL_DIV2, VID_CTL_36MHz }, /* 18.000MHz */
+ { 39325, 40119, VIDC_CTRL_DIV1, VID_CTL_25MHz }, /* 25.175MHz */
+ { 27500, 28055, VIDC_CTRL_DIV1, VID_CTL_36MHz }, /* 36.000MHz */
+#endif
+ { 0, }
+};
+
+static struct pixclock *
+acornfb_valid_pixrate(u_long pixclock)
+{
+ u_int i;
+
+ for (i = 0; pixclocks[i].min_clock; i++)
+ if (pixclock > pixclocks[i].min_clock &&
+ pixclock < pixclocks[i].max_clock)
+ return pixclocks + i;
+
+ return NULL;
+}
+
+/* VIDC Rules:
+ * hcr : must be even (interlace, hcr/2 must be even)
+ * hswr : must be even
+ * hdsr : must be odd
+ * hder : must be odd
+ *
+ * vcr : must be odd
+ * vswr : >= 1
+ * vdsr : >= 1
+ * vder : >= vdsr
+ * if interlaced, then hcr/2 must be even
+ */
+static void
+acornfb_set_timing(struct fb_var_screeninfo *var)
+{
+ struct pixclock *pclk;
+ struct vidc_timing vidc;
+ u_int horiz_correction;
+ u_int sync_len, display_start, display_end, cycle;
+ u_int is_interlaced;
+ u_int vid_ctl, vidc_ctl;
+ u_int bandwidth;
+
+ memset(&vidc, 0, sizeof(vidc));
+
+ pclk = acornfb_valid_pixrate(var->pixclock);
+ vidc_ctl = pclk->vidc_ctl;
+ vid_ctl = pclk->vid_ctl;
+
+ bandwidth = var->pixclock * 8 / var->bits_per_pixel;
+ /* 25.175, 4bpp = 79.444ns per byte, 317.776ns per word: fifo = 2,6 */
+ if (bandwidth > 71750)
+ vidc_ctl |= VIDC_CTRL_FIFO_2_6;
+ else if (bandwidth > 35875)
+ vidc_ctl |= VIDC_CTRL_FIFO_1_5;
+ else
+ vidc_ctl |= VIDC_CTRL_FIFO_0_4;
+
+ switch (var->bits_per_pixel) {
+ case 1:
+ horiz_correction = 19;
+ vidc_ctl |= VIDC_CTRL_1BPP;
+ break;
+
+ case 2:
+ horiz_correction = 11;
+ vidc_ctl |= VIDC_CTRL_2BPP;
+ break;
+
+ case 4:
+ horiz_correction = 7;
+ vidc_ctl |= VIDC_CTRL_4BPP;
+ break;
+
+ default:
+ case 8:
+ horiz_correction = 5;
+ vidc_ctl |= VIDC_CTRL_8BPP;
+ break;
+ }
+
+ if (!(var->sync & FB_SYNC_HOR_HIGH_ACT))
+ vid_ctl |= VID_CTL_HS_NHSYNC;
+
+ if (!(var->sync & FB_SYNC_VERT_HIGH_ACT))
+ vid_ctl |= VID_CTL_VS_NVSYNC;
+
+ sync_len = var->hsync_len;
+ display_start = sync_len + var->left_margin;
+ display_end = display_start + var->xres;
+ cycle = display_end + var->right_margin;
+
+ /* if interlaced, then hcr/2 must be even */
+ is_interlaced = (var->vmode & FB_VMODE_MASK) == FB_VMODE_INTERLACED;
+
+ if (is_interlaced) {
+ vidc_ctl |= VIDC_CTRL_INTERLACE;
+ if (cycle & 2) {
+ cycle += 2;
+ var->right_margin += 2;
+ }
+ }
+
+ vidc.h_cycle = (cycle - 2) / 2;
+ vidc.h_sync_width = (sync_len - 2) / 2;
+ vidc.h_border_start = (display_start - 1) / 2;
+ vidc.h_display_start = (display_start - horiz_correction) / 2;
+ vidc.h_display_end = (display_end - horiz_correction) / 2;
+ vidc.h_border_end = (display_end - 1) / 2;
+ vidc.h_interlace = (vidc.h_cycle + 1) / 2;
+
+ sync_len = var->vsync_len;
+ display_start = sync_len + var->upper_margin;
+ display_end = display_start + var->yres;
+ cycle = display_end + var->lower_margin;
+
+ if (is_interlaced)
+ cycle = (cycle - 3) / 2;
+ else
+ cycle = cycle - 1;
+
+ vidc.v_cycle = cycle;
+ vidc.v_sync_width = sync_len - 1;
+ vidc.v_border_start = display_start - 1;
+ vidc.v_display_start = vidc.v_border_start;
+ vidc.v_display_end = display_end - 1;
+ vidc.v_border_end = vidc.v_display_end;
+
+#ifdef CONFIG_ARCH_A5K
+ outb(vid_ctl, IOEB_VID_CTL);
+#endif
+	if (memcmp(&current_vidc, &vidc, sizeof(vidc))) {
+ current_vidc = vidc;
+
+ outl(0xe0000000 | vidc_ctl, IO_VIDC_BASE);
+ outl(0x80000000 | (vidc.h_cycle << 14), IO_VIDC_BASE);
+ outl(0x84000000 | (vidc.h_sync_width << 14), IO_VIDC_BASE);
+ outl(0x88000000 | (vidc.h_border_start << 14), IO_VIDC_BASE);
+ outl(0x8c000000 | (vidc.h_display_start << 14), IO_VIDC_BASE);
+ outl(0x90000000 | (vidc.h_display_end << 14), IO_VIDC_BASE);
+ outl(0x94000000 | (vidc.h_border_end << 14), IO_VIDC_BASE);
+ outl(0x98000000, IO_VIDC_BASE);
+ outl(0x9c000000 | (vidc.h_interlace << 14), IO_VIDC_BASE);
+ outl(0xa0000000 | (vidc.v_cycle << 14), IO_VIDC_BASE);
+ outl(0xa4000000 | (vidc.v_sync_width << 14), IO_VIDC_BASE);
+ outl(0xa8000000 | (vidc.v_border_start << 14), IO_VIDC_BASE);
+ outl(0xac000000 | (vidc.v_display_start << 14), IO_VIDC_BASE);
+ outl(0xb0000000 | (vidc.v_display_end << 14), IO_VIDC_BASE);
+ outl(0xb4000000 | (vidc.v_border_end << 14), IO_VIDC_BASE);
+ outl(0xb8000000, IO_VIDC_BASE);
+ outl(0xbc000000, IO_VIDC_BASE);
+ }
+#ifdef DEBUG_MODE_SELECTION
+ printk(KERN_DEBUG "VIDC registers for %dx%dx%d:\n", var->xres,
+ var->yres, var->bits_per_pixel);
+ printk(KERN_DEBUG " H-cycle : %d\n", vidc.h_cycle);
+ printk(KERN_DEBUG " H-sync-width : %d\n", vidc.h_sync_width);
+ printk(KERN_DEBUG " H-border-start : %d\n", vidc.h_border_start);
+ printk(KERN_DEBUG " H-display-start : %d\n", vidc.h_display_start);
+ printk(KERN_DEBUG " H-display-end : %d\n", vidc.h_display_end);
+ printk(KERN_DEBUG " H-border-end : %d\n", vidc.h_border_end);
+ printk(KERN_DEBUG " H-interlace : %d\n", vidc.h_interlace);
+ printk(KERN_DEBUG " V-cycle : %d\n", vidc.v_cycle);
+ printk(KERN_DEBUG " V-sync-width : %d\n", vidc.v_sync_width);
+ printk(KERN_DEBUG " V-border-start : %d\n", vidc.v_border_start);
+ printk(KERN_DEBUG " V-display-start : %d\n", vidc.v_display_start);
+ printk(KERN_DEBUG " V-display-end : %d\n", vidc.v_display_end);
+ printk(KERN_DEBUG " V-border-end : %d\n", vidc.v_border_end);
+ printk(KERN_DEBUG " VIDC Ctrl (E) : 0x%08X\n", vidc_ctl);
+ printk(KERN_DEBUG " IOEB Ctrl : 0x%08X\n", vid_ctl);
+#endif
+}
+
+static inline void
+acornfb_palette_write(u_int regno, union palette pal)
+{
+ outl(pal.p, IO_VIDC_BASE);
+}
+
+static inline union palette
+acornfb_palette_encode(u_int regno, u_int red, u_int green, u_int blue,
+ u_int trans)
+{
+ union palette pal;
+
+ pal.p = 0;
+ pal.vidc.reg = regno;
+ pal.vidc.red = red >> 12;
+ pal.vidc.green = green >> 12;
+ pal.vidc.blue = blue >> 12;
+ return pal;
+}
+
+static void
+acornfb_palette_decode(u_int regno, u_int *red, u_int *green, u_int *blue,
+ u_int *trans)
+{
+ *red = EXTEND4(current_par.palette[regno].vidc.red);
+ *green = EXTEND4(current_par.palette[regno].vidc.green);
+ *blue = EXTEND4(current_par.palette[regno].vidc.blue);
+ *trans = current_par.palette[regno].vidc.trans ? -1 : 0;
+}
+#endif
+
+#ifdef HAS_VIDC20
+/*
+ * VIDC20 registers
+ */
+#define VIDC20_CTRL 0xe0000000
+#define VIDC20_CTRL_PIX_VCLK (0 << 0)
+#define VIDC20_CTRL_PIX_HCLK (1 << 0)
+#define VIDC20_CTRL_PIX_RCLK (2 << 0)
+#define VIDC20_CTRL_PIX_CK (0 << 2)
+#define VIDC20_CTRL_PIX_CK2 (1 << 2)
+#define VIDC20_CTRL_PIX_CK3 (2 << 2)
+#define VIDC20_CTRL_PIX_CK4 (3 << 2)
+#define VIDC20_CTRL_PIX_CK5 (4 << 2)
+#define VIDC20_CTRL_PIX_CK6 (5 << 2)
+#define VIDC20_CTRL_PIX_CK7 (6 << 2)
+#define VIDC20_CTRL_PIX_CK8 (7 << 2)
+#define VIDC20_CTRL_1BPP (0 << 5)
+#define VIDC20_CTRL_2BPP (1 << 5)
+#define VIDC20_CTRL_4BPP (2 << 5)
+#define VIDC20_CTRL_8BPP (3 << 5)
+#define VIDC20_CTRL_16BPP (4 << 5)
+#define VIDC20_CTRL_32BPP (6 << 5)
+#define VIDC20_CTRL_FIFO_NS (0 << 8)
+#define VIDC20_CTRL_FIFO_4 (1 << 8)
+#define VIDC20_CTRL_FIFO_8 (2 << 8)
+#define VIDC20_CTRL_FIFO_12 (3 << 8)
+#define VIDC20_CTRL_FIFO_16 (4 << 8)
+#define VIDC20_CTRL_FIFO_20 (5 << 8)
+#define VIDC20_CTRL_FIFO_24 (6 << 8)
+#define VIDC20_CTRL_FIFO_28 (7 << 8)
+#define VIDC20_CTRL_INT (1 << 12)
+#define VIDC20_CTRL_DUP (1 << 13)
+#define VIDC20_CTRL_PDOWN (1 << 14)
+
+#define VIDC20_ECTL 0xc0000000
+#define VIDC20_ECTL_REG(x) ((x) & 0xf3)
+#define VIDC20_ECTL_ECK (1 << 2)
+#define VIDC20_ECTL_REDPED (1 << 8)
+#define VIDC20_ECTL_GREENPED (1 << 9)
+#define VIDC20_ECTL_BLUEPED (1 << 10)
+#define VIDC20_ECTL_DAC (1 << 12)
+#define VIDC20_ECTL_LCDGS (1 << 13)
+#define VIDC20_ECTL_HRM (1 << 14)
+
+#define VIDC20_ECTL_HS_MASK (3 << 16)
+#define VIDC20_ECTL_HS_HSYNC (0 << 16)
+#define VIDC20_ECTL_HS_NHSYNC (1 << 16)
+#define VIDC20_ECTL_HS_CSYNC (2 << 16)
+#define VIDC20_ECTL_HS_NCSYNC (3 << 16)
+
+#define VIDC20_ECTL_VS_MASK (3 << 18)
+#define VIDC20_ECTL_VS_VSYNC (0 << 18)
+#define VIDC20_ECTL_VS_NVSYNC (1 << 18)
+#define VIDC20_ECTL_VS_CSYNC (2 << 18)
+#define VIDC20_ECTL_VS_NCSYNC (3 << 18)
+
+#define VIDC20_DCTL 0xf0000000
+/* 0-9 = number of words in scanline */
+#define VIDC20_DCTL_SNA (1 << 12)
+#define VIDC20_DCTL_HDIS (1 << 13)
+#define VIDC20_DCTL_BUS_NS (0 << 16)
+#define VIDC20_DCTL_BUS_D31_0 (1 << 16)
+#define VIDC20_DCTL_BUS_D63_32 (2 << 16)
+#define VIDC20_DCTL_BUS_D63_0 (3 << 16)
+#define VIDC20_DCTL_VRAM_DIS (0 << 18)
+#define VIDC20_DCTL_VRAM_PXCLK (1 << 18)
+#define VIDC20_DCTL_VRAM_PXCLK2 (2 << 18)
+#define VIDC20_DCTL_VRAM_PXCLK4 (3 << 18)
+
+#define acornfb_valid_pixrate(rate) (1)
+
+/*
+ * Try to find the best PLL parameters for the pixel clock.
+ * This algorithm seems to give best predictable results,
+ * and produces the same values as detailed in the VIDC20
+ * data sheet.
+ */
+static inline u_int
+acornfb_vidc20_find_pll(u_int pixclk)
+{
+ u_int r, best_r = 2, best_v = 2;
+ int best_d = 0x7fffffff;
+
+ for (r = 2; r <= 32; r++) {
+ u_int rr, v, p;
+ int d;
+
+ rr = 41667 * r;
+
+ v = (rr + pixclk / 2) / pixclk;
+
+ if (v > 32 || v < 2)
+ continue;
+
+ p = (rr + v / 2) / v;
+
+ d = pixclk - p;
+
+ if (d < 0)
+ d = -d;
+
+ if (d < best_d) {
+ best_d = d;
+ best_v = v - 1;
+ best_r = r - 1;
+ }
+
+ if (d == 0)
+ break;
+ }
+
+ return best_v << 8 | best_r;
+}
+
+static inline void
+acornfb_vidc20_find_rates(struct vidc_timing *vidc,
+ struct fb_var_screeninfo *var)
+{
+ u_int div, bandwidth;
+
+ /* Select pixel-clock divisor to keep PLL in range */
+ div = var->pixclock / 9090; /*9921*/
+
+ /* Limit divisor */
+ if (div == 0)
+ div = 1;
+ if (div > 8)
+ div = 8;
+
+ /* Encode divisor to VIDC20 setting */
+ switch (div) {
+ case 1: vidc->control |= VIDC20_CTRL_PIX_CK; break;
+ case 2: vidc->control |= VIDC20_CTRL_PIX_CK2; break;
+ case 3: vidc->control |= VIDC20_CTRL_PIX_CK3; break;
+ case 4: vidc->control |= VIDC20_CTRL_PIX_CK4; break;
+ case 5: vidc->control |= VIDC20_CTRL_PIX_CK5; break;
+ case 6: vidc->control |= VIDC20_CTRL_PIX_CK6; break;
+ case 7: vidc->control |= VIDC20_CTRL_PIX_CK7; break;
+ case 8: vidc->control |= VIDC20_CTRL_PIX_CK8; break;
+ }
+
+ /* Calculate bandwidth */
+ bandwidth = var->pixclock * 8 / var->bits_per_pixel;
+
+ /* Encode bandwidth as VIDC20 setting */
+ if (bandwidth > 16667*2)
+ vidc->control |= VIDC20_CTRL_FIFO_16;
+ else if (bandwidth > 13333*2)
+ vidc->control |= VIDC20_CTRL_FIFO_20;
+ else if (bandwidth > 11111*2)
+ vidc->control |= VIDC20_CTRL_FIFO_24;
+ else
+ vidc->control |= VIDC20_CTRL_FIFO_28;
+
+ /* Find the PLL values */
+ vidc->pll_ctl = acornfb_vidc20_find_pll(var->pixclock / div);
+}
+
+/* VIDC20 has a different set of rules from the VIDC:
+ * hcr : must be multiple of 4
+ * hswr : must be even
+ * hdsr : must be even
+ * hder : must be even
+ * vcr : >= 2, (interlace, must be odd)
+ * vswr : >= 1
+ * vdsr : >= 1
+ * vder : >= vdsr
+ */
+static void
+acornfb_set_timing(struct fb_var_screeninfo *var)
+{
+ struct vidc_timing vidc;
+ u_int vcr, fsize;
+ u_int ext_ctl, dat_ctl;
+ u_int words_per_line;
+
+ memset(&vidc, 0, sizeof(vidc));
+
+ vidc.h_sync_width = var->hsync_len - 8;
+ vidc.h_border_start = vidc.h_sync_width + var->left_margin + 8 - 12;
+ vidc.h_display_start = vidc.h_border_start + 12 - 18;
+ vidc.h_display_end = vidc.h_display_start + var->xres;
+ vidc.h_border_end = vidc.h_display_end + 18 - 12;
+ vidc.h_cycle = vidc.h_border_end + var->right_margin + 12 - 8;
+ vidc.h_interlace = vidc.h_cycle / 2;
+ vidc.v_sync_width = var->vsync_len - 1;
+ vidc.v_border_start = vidc.v_sync_width + var->upper_margin;
+ vidc.v_display_start = vidc.v_border_start;
+ vidc.v_display_end = vidc.v_display_start + var->yres;
+ vidc.v_border_end = vidc.v_display_end;
+ vidc.control = VIDC20_CTRL_PIX_VCLK;
+
+ vcr = var->vsync_len + var->upper_margin + var->yres +
+ var->lower_margin;
+
+ if ((var->vmode & FB_VMODE_MASK) == FB_VMODE_INTERLACED) {
+ vidc.v_cycle = (vcr - 3) / 2;
+ vidc.control |= VIDC20_CTRL_INT;
+ } else
+ vidc.v_cycle = vcr - 2;
+
+ switch (var->bits_per_pixel) {
+ case 1: vidc.control |= VIDC20_CTRL_1BPP; break;
+ case 2: vidc.control |= VIDC20_CTRL_2BPP; break;
+ case 4: vidc.control |= VIDC20_CTRL_4BPP; break;
+ default:
+ case 8: vidc.control |= VIDC20_CTRL_8BPP; break;
+ case 16: vidc.control |= VIDC20_CTRL_16BPP; break;
+ case 32: vidc.control |= VIDC20_CTRL_32BPP; break;
+ }
+
+ acornfb_vidc20_find_rates(&vidc, var);
+ fsize = var->vsync_len + var->upper_margin + var->lower_margin - 1;
+
+ if (memcmp(&current_vidc, &vidc, sizeof(vidc))) {
+ current_vidc = vidc;
+
+ outl(VIDC20_CTRL| vidc.control, IO_VIDC_BASE);
+ outl(0xd0000000 | vidc.pll_ctl, IO_VIDC_BASE);
+ outl(0x80000000 | vidc.h_cycle, IO_VIDC_BASE);
+ outl(0x81000000 | vidc.h_sync_width, IO_VIDC_BASE);
+ outl(0x82000000 | vidc.h_border_start, IO_VIDC_BASE);
+ outl(0x83000000 | vidc.h_display_start, IO_VIDC_BASE);
+ outl(0x84000000 | vidc.h_display_end, IO_VIDC_BASE);
+ outl(0x85000000 | vidc.h_border_end, IO_VIDC_BASE);
+ outl(0x86000000, IO_VIDC_BASE);
+ outl(0x87000000 | vidc.h_interlace, IO_VIDC_BASE);
+ outl(0x90000000 | vidc.v_cycle, IO_VIDC_BASE);
+ outl(0x91000000 | vidc.v_sync_width, IO_VIDC_BASE);
+ outl(0x92000000 | vidc.v_border_start, IO_VIDC_BASE);
+ outl(0x93000000 | vidc.v_display_start, IO_VIDC_BASE);
+ outl(0x94000000 | vidc.v_display_end, IO_VIDC_BASE);
+ outl(0x95000000 | vidc.v_border_end, IO_VIDC_BASE);
+ outl(0x96000000, IO_VIDC_BASE);
+ outl(0x97000000, IO_VIDC_BASE);
+ }
+
+ outl(fsize, IOMD_FSIZE);
+
+ ext_ctl = VIDC20_ECTL_DAC | VIDC20_ECTL_REG(3);
+
+ if (var->sync & FB_SYNC_HOR_HIGH_ACT)
+ ext_ctl |= VIDC20_ECTL_HS_HSYNC;
+ else
+ ext_ctl |= VIDC20_ECTL_HS_NHSYNC;
+
+ if (var->sync & FB_SYNC_VERT_HIGH_ACT)
+ ext_ctl |= VIDC20_ECTL_VS_VSYNC;
+ else
+ ext_ctl |= VIDC20_ECTL_VS_NVSYNC;
+
+ outl(VIDC20_ECTL | ext_ctl, IO_VIDC_BASE);
+
+ words_per_line = var->xres * var->bits_per_pixel / 32;
+
+ if (current_par.using_vram && current_par.screen_size == 2048*1024)
+ words_per_line /= 2;
+
+ /* RiscPC doesn't use the VIDC's VRAM control. */
+ dat_ctl = VIDC20_DCTL_VRAM_DIS | VIDC20_DCTL_SNA | words_per_line;
+
+ /* The data bus width is dependent on both the type
+ * and amount of video memory.
+ * DRAM 32bit low
+ * 1MB VRAM 32bit
+ * 2MB VRAM 64bit
+ */
+ if (current_par.using_vram && current_par.vram_half_sam == 2048) {
+ dat_ctl |= VIDC20_DCTL_BUS_D63_0;
+ } else
+ dat_ctl |= VIDC20_DCTL_BUS_D31_0;
+
+ outl(VIDC20_DCTL | dat_ctl, IO_VIDC_BASE);
+
+#ifdef DEBUG_MODE_SELECTION
+ printk(KERN_DEBUG "VIDC registers for %dx%dx%d:\n", var->xres,
+ var->yres, var->bits_per_pixel);
+ printk(KERN_DEBUG " H-cycle : %d\n", vidc.h_cycle);
+ printk(KERN_DEBUG " H-sync-width : %d\n", vidc.h_sync_width);
+ printk(KERN_DEBUG " H-border-start : %d\n", vidc.h_border_start);
+ printk(KERN_DEBUG " H-display-start : %d\n", vidc.h_display_start);
+ printk(KERN_DEBUG " H-display-end : %d\n", vidc.h_display_end);
+ printk(KERN_DEBUG " H-border-end : %d\n", vidc.h_border_end);
+ printk(KERN_DEBUG " H-interlace : %d\n", vidc.h_interlace);
+ printk(KERN_DEBUG " V-cycle : %d\n", vidc.v_cycle);
+ printk(KERN_DEBUG " V-sync-width : %d\n", vidc.v_sync_width);
+ printk(KERN_DEBUG " V-border-start : %d\n", vidc.v_border_start);
+ printk(KERN_DEBUG " V-display-start : %d\n", vidc.v_display_start);
+ printk(KERN_DEBUG " V-display-end : %d\n", vidc.v_display_end);
+ printk(KERN_DEBUG " V-border-end : %d\n", vidc.v_border_end);
+ printk(KERN_DEBUG " Ext Ctrl (C) : 0x%08X\n", ext_ctl);
+ printk(KERN_DEBUG " PLL Ctrl (D) : 0x%08X\n", vidc.pll_ctl);
+ printk(KERN_DEBUG " Ctrl (E) : 0x%08X\n", vidc.control);
+ printk(KERN_DEBUG " Data Ctrl (F) : 0x%08X\n", dat_ctl);
+ printk(KERN_DEBUG " Fsize : 0x%08X\n", fsize);
+#endif
+}
+
+static inline void
+acornfb_palette_write(u_int regno, union palette pal)
+{
+ outl(0x10000000 | regno, IO_VIDC_BASE);
+ outl(pal.p, IO_VIDC_BASE);
+}
+
+static inline union palette
+acornfb_palette_encode(u_int regno, u_int red, u_int green, u_int blue,
+ u_int trans)
+{
+ union palette pal;
+
+ pal.p = 0;
+ pal.vidc20.red = red >> 8;
+ pal.vidc20.green = green >> 8;
+ pal.vidc20.blue = blue >> 8;
+ return pal;
+}
+
+static void
+acornfb_palette_decode(u_int regno, u_int *red, u_int *green, u_int *blue,
+ u_int *trans)
+{
+ *red = EXTEND8(current_par.palette[regno].vidc20.red);
+ *green = EXTEND8(current_par.palette[regno].vidc20.green);
+ *blue = EXTEND8(current_par.palette[regno].vidc20.blue);
+ *trans = EXTEND4(current_par.palette[regno].vidc20.ext);
+}
+#endif
+
+/*
+ * Before selecting the timing parameters, adjust
+ * the resolution to fit the rules.
+ */
+static void
+acornfb_pre_adjust_timing(struct fb_var_screeninfo *var, int con)
+{
+ u_int font_line_len;
+ u_int fontht;
+ u_int sam_size, min_size, size;
+ u_int nr_y;
+
+ /* xres must be even */
+ var->xres = (var->xres + 1) & ~1;
+
+ /*
+ * We don't allow xres_virtual to differ from xres
+ */
+ var->xres_virtual = var->xres;
+ var->xoffset = 0;
+
+ /*
+ * Find the font height
+ */
+ if (con == -1)
+ fontht = fontheight(&global_disp);
+ else
+ fontht = fontheight(fb_display + con);
+
+ if (fontht == 0)
+ fontht = 8;
+
+ if (current_par.using_vram)
+ sam_size = current_par.vram_half_sam * 2;
+ else
+ sam_size = 16;
+
+ /*
+ * Now, find a value for yres_virtual which allows
+ * us to do ywrap scrolling. The value of
+ * yres_virtual must be such that the end of the
+ * displayable frame buffer must be aligned with
+ * the start of a font line.
+ */
+ font_line_len = var->xres * var->bits_per_pixel * fontht / 8;
+ min_size = var->xres * var->yres * var->bits_per_pixel / 8;
+
+ /* Find int 'y', such that y * fll == s * sam < maxsize
+ * y = s * sam / fll; s = maxsize / sam
+ */
+ for (size = current_par.screen_size; min_size <= size;
+ size -= sam_size) {
+ nr_y = size / font_line_len;
+
+ if (nr_y * font_line_len == size)
+ break;
+ }
+
+ if (min_size > size) {
+ /*
+ * failed, use ypan
+ */
+ size = current_par.screen_size;
+ var->yres_virtual = size / (font_line_len / fontht);
+ } else
+ var->yres_virtual = nr_y * fontht;
+
+ current_par.screen_end = current_par.screen_base_p + size;
+
+ /*
+ * Fix yres & yoffset if needed.
+ */
+ if (var->yres > var->yres_virtual)
+ var->yres = var->yres_virtual;
+
+ if (var->vmode & FB_VMODE_YWRAP) {
+ if (var->yoffset > var->yres_virtual)
+ var->yoffset = var->yres_virtual;
+ } else {
+ if (var->yoffset + var->yres > var->yres_virtual)
+ var->yoffset = var->yres_virtual - var->yres;
+ }
+}
+
+/*
+ * After selecting the timing parameters, adjust
+ * the timing to suit the chip.
+ * NOTE! Only minor adjustments should be made here.
+ */
+static void
+acornfb_post_adjust_timing(struct fb_var_screeninfo *var)
+{
+ /* hsync_len must be even */
+ var->hsync_len = (var->hsync_len + 1) & ~1;
+
+#ifdef HAS_VIDC
+ /* left_margin must be odd */
+ if ((var->left_margin & 1) == 0) {
+ var->left_margin -= 1;
+ var->right_margin += 1;
+ }
+
+ /* right_margin must be odd */
+ var->right_margin |= 1;
+#elif defined(HAS_VIDC20)
+ /* left_margin must be even */
+ if (var->left_margin & 1) {
+ var->left_margin += 1;
+ var->right_margin -= 1;
+ }
+
+ /* right_margin must be even */
+ if (var->right_margin & 1)
+ var->right_margin += 1;
+#endif
+
+ if (var->vsync_len < 1)
+ var->vsync_len = 1;
+}
+
+static inline void
+acornfb_update_dma(struct fb_var_screeninfo *var)
+{
+ int off = (var->yoffset * var->xres_virtual *
+ var->bits_per_pixel) >> 3;
+
+#if defined(HAS_MEMC)
+ memc_write(VDMA_INIT, off >> 2);
+#elif defined(HAS_IOMD)
+ outl(current_par.screen_base_p + off, IOMD_VIDINIT);
+#endif
+}
static int
acornfb_open(struct fb_info *info, int user)
return 0;
}
-static int
-acornfb_release(struct fb_info *info, int user)
-{
- MOD_DEC_USE_COUNT;
- return 0;
-}
+static int
+acornfb_release(struct fb_info *info, int user)
+{
+ MOD_DEC_USE_COUNT;
+ return 0;
+}
+
+static int
+acornfb_getcolreg(u_int regno, u_int *red, u_int *green, u_int *blue,
+ u_int *trans, struct fb_info *info)
+{
+ if (regno >= current_par.palette_size)
+ return 1;
+
+ acornfb_palette_decode(regno, red, green, blue, trans);
+
+ return 0;
+}
+
+static int
+acornfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
+ u_int trans, struct fb_info *info)
+{
+ union palette pal;
+
+ if (regno >= current_par.palette_size)
+ return 1;
+
+ pal = acornfb_palette_encode(regno, red, green, blue, trans);
+ acornfb_palette_write(regno, pal);
+ current_par.palette[regno] = pal;
+
+ if (regno < 16) {
+ switch (info->disp->var.bits_per_pixel) {
+#ifdef FBCON_HAS_CFB16
+ case 16: /* RGB555 */
+ current_par.cmap.cfb16[regno] = (regno << 10) | (regno << 5) | regno;
+ break;
+#endif
+
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int
+acornfb_get_cmap(struct fb_cmap *cmap, int kspc, int con,
+ struct fb_info *info)
+{
+ int err = 0;
+
+ if (con == current_par.currcon)
+ err = fb_get_cmap(cmap, kspc, acornfb_getcolreg, info);
+ else if (fb_display[con].cmap.len)
+ fb_copy_cmap(&fb_display[con].cmap, cmap, kspc ? 0 : 2);
+ else
+ fb_copy_cmap(fb_default_cmap(current_par.palette_size),
+ cmap, kspc ? 0 : 2);
+ return err;
+}
+
+static int
+acornfb_set_cmap(struct fb_cmap *cmap, int kspc, int con,
+ struct fb_info *info)
+{
+ int err = 0;
+
+ if (!fb_display[con].cmap.len)
+ err = fb_alloc_cmap(&fb_display[con].cmap,
+ current_par.palette_size, 0);
+ if (!err) {
+ if (con == current_par.currcon)
+ err = fb_set_cmap(cmap, kspc, acornfb_setcolreg,
+ info);
+ else
+ fb_copy_cmap(cmap, &fb_display[con].cmap,
+ kspc ? 0 : 1);
+ }
+ return err;
+}
+
+static int
+acornfb_decode_var(struct fb_var_screeninfo *var, int con, int *visual)
+{
+ switch (var->bits_per_pixel) {
+#ifdef FBCON_HAS_MFB
+ case 1:
+ *visual = FB_VISUAL_MONO10;
+ break;
+#endif
+#ifdef FBCON_HAS_CFB8
+ case 8:
+#ifdef HAS_VIDC
+ *visual = FB_VISUAL_STATIC_PSEUDOCOLOR;
+#else
+ *visual = FB_VISUAL_PSEUDOCOLOR;
+#endif
+ break;
+#endif
+#ifdef FBCON_HAS_CFB4
+ case 4:
+ *visual = FB_VISUAL_PSEUDOCOLOR;
+ break;
+#endif
+#ifdef FBCON_HAS_CFB2
+ case 2:
+ *visual = FB_VISUAL_PSEUDOCOLOR;
+ break;
+#endif
+ case 16:
+ case 24:
+ case 32:
+ *visual = FB_VISUAL_TRUECOLOR;
+ default:
+ return -EINVAL;
+ }
+
+ if (!acornfb_valid_pixrate(var->pixclock))
+ return -EINVAL;
+
+ /*
+ * Adjust the resolution before using it.
+ */
+ acornfb_pre_adjust_timing(var, con);
+
+#if defined(HAS_VIDC20)
+ var->red.length = 8;
+ var->transp.length = 4;
+#elif defined(HAS_VIDC)
+ var->red.length = 4;
+ var->transp.length = 1;
+#endif
+ var->green = var->red;
+ var->blue = var->red;
+
+ /*
+ * Now adjust the timing parameters
+ */
+ acornfb_post_adjust_timing(var);
-static void
-acornfb_encode_var(struct fb_var_screeninfo *var, struct acornfb_par *par)
-{
- var->xres = par->xres;
- var->yres = par->yres;
- var->xres_virtual = par->xres;
- var->yres_virtual = par->yres;
- var->xoffset = 0;
- var->yoffset = 0;
- var->bits_per_pixel = par->bits_per_pixel;
- var->grayscale = 0;
- var->red.offset = 0;
- var->red.length = 8;
- var->red.msb_right = 0;
- var->green.offset = 0;
- var->green.length = 8;
- var->green.msb_right = 0;
- var->blue.offset = 0;
- var->blue.length = 8;
- var->blue.msb_right = 0;
- var->transp.offset = 0;
- var->transp.length = 4;
- var->transp.msb_right = 0;
- var->nonstd = 0;
- var->activate = FB_ACTIVATE_NOW;
- var->height = -1;
- var->width = -1;
- var->vmode = FB_VMODE_NONINTERLACED;
- var->pixclock = 1;
- var->sync = 0;
- var->left_margin = 0;
- var->right_margin = 0;
- var->upper_margin = 0;
- var->lower_margin = 0;
- var->hsync_len = 0;
- var->vsync_len = 0;
+ return 0;
}
static int
acornfb_get_fix(struct fb_fix_screeninfo *fix, int con, struct fb_info *info)
{
- struct acornfb_par *par = &current_par;
- unsigned int line_length;
+ struct display *display;
memset(fix, 0, sizeof(struct fb_fix_screeninfo));
strcpy(fix->id, "Acorn");
- line_length = par->xres * par->bits_per_pixel / 8;
+ if (con >= 0)
+ display = fb_display + con;
+ else
+ display = &global_disp;
- fix->smem_start = (char *)SCREEN2_BASE;
- fix->smem_len = (((line_length * par->yres) - 1) | (PAGE_SIZE - 1)) + 1;
- fix->type = FB_TYPE_PACKED_PIXELS;
- fix->type_aux = 0;
- fix->visual = FB_VISUAL_PSEUDOCOLOR;
+ fix->smem_start = (char *)current_par.screen_base_p;
+ fix->smem_len = current_par.screen_size;
+ fix->type = display->type;
+ fix->type_aux = display->type_aux;
fix->xpanstep = 0;
- fix->ypanstep = 0;
- fix->ywrapstep = 1;
- fix->line_length = line_length;
+ fix->ypanstep = display->ypanstep;
+ fix->ywrapstep = display->ywrapstep;
+ fix->visual = display->visual;
+ fix->line_length = display->line_length;
fix->accel = FB_ACCEL_NONE;
return 0;
acornfb_get_var(struct fb_var_screeninfo *var, int con, struct fb_info *info)
{
if (con == -1) {
- acornfb_encode_var(var, &current_par);
+ *var = global_disp.var;
} else
*var = fb_display[con].var;
+
return 0;
}
static int
acornfb_set_var(struct fb_var_screeninfo *var, int con, struct fb_info *info)
{
- return 0;
-}
-
-static void
-acornfb_set_disp(int con)
-{
- struct fb_fix_screeninfo fix;
struct display *display;
+ int err, chgvar = 0, visual;
if (con >= 0)
- display = &fb_display[con];
+ display = fb_display + con;
else
- display = &disp;
-
- current_par.xres = 8 * ORIG_VIDEO_COLS;
- current_par.yres = 8 * ORIG_VIDEO_LINES;
- current_par.bits_per_pixel = 8;
- current_par.palette_size = MAX_VIDC20_PALETTE;
-
- acornfb_get_fix(&fix, con, 0);
-
- acornfb_get_var(&display->var, con, 0);
-
- display->cmap.start = 0;
- display->cmap.len = 0;
- display->cmap.red = NULL;
- display->cmap.green = NULL;
- display->cmap.blue = NULL;
- display->cmap.transp = NULL;
- display->screen_base = fix.smem_start;
- display->visual = fix.visual;
- display->type = fix.type;
- display->type_aux = fix.type_aux;
- display->ypanstep = fix.ypanstep;
- display->ywrapstep = fix.ywrapstep;
- display->line_length = fix.line_length;
- display->can_soft_blank = 0;
- display->inverse = 0;
+ display = &global_disp;
+
+ if (!current_par.allow_modeset && con != -1)
+ return -EINVAL;
+
+ err = acornfb_decode_var(var, con, &visual);
+ if (err)
+ return err;
+
+ switch (var->activate & FB_ACTIVATE_MASK) {
+ case FB_ACTIVATE_TEST:
+ return 0;
+
+ case FB_ACTIVATE_NXTOPEN:
+ case FB_ACTIVATE_NOW:
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (con >= 0) {
+ if (display->var.xres != var->xres)
+ chgvar = 1;
+ if (display->var.yres != var->yres)
+ chgvar = 1;
+ if (display->var.xres_virtual != var->xres_virtual)
+ chgvar = 1;
+ if (display->var.yres_virtual != var->yres_virtual)
+ chgvar = 1;
+ if (memcmp(&display->var.red, &var->red, sizeof(var->red)))
+ chgvar = 1;
+ if (memcmp(&display->var.green, &var->green, sizeof(var->green)))
+ chgvar = 1;
+ if (memcmp(&display->var.blue, &var->blue, sizeof(var->blue)))
+ chgvar = 1;
+ }
+
+ display->var = *var;
+ display->var.activate &= ~FB_ACTIVATE_ALL;
- outl(SCREEN_START, VDMA_START);
- outl(SCREEN_START + fix.smem_len - VDMA_XFERSIZE, VDMA_END);
- outl(SCREEN_START, VDMA_INIT);
+ if (var->activate & FB_ACTIVATE_ALL)
+ global_disp.var = display->var;
+
+ display->screen_base = (char *)current_par.screen_base;
+ display->visual = visual;
+ display->type = FB_TYPE_PACKED_PIXELS;
+ display->type_aux = 0;
+ display->ypanstep = 1;
+ display->ywrapstep = 1;
+ display->line_length =
+ display->next_line = (var->xres * var->bits_per_pixel) / 8;
+ display->can_soft_blank = visual == FB_VISUAL_PSEUDOCOLOR ? 1 : 0;
+ display->inverse = 0;
switch (display->var.bits_per_pixel) {
#ifdef FBCON_HAS_MFB
case 1:
+ current_par.palette_size = 2;
display->dispsw = &fbcon_mfb;
break;
#endif
#ifdef FBCON_HAS_CFB2
case 2:
+ current_par.palette_size = 4;
display->dispsw = &fbcon_cfb2;
break;
#endif
#ifdef FBCON_HAS_CFB4
case 4:
+ current_par.palette_size = 16;
display->dispsw = &fbcon_cfb4;
break;
#endif
#ifdef FBCON_HAS_CFB8
case 8:
+ current_par.palette_size = VIDC_PALETTE_SIZE;
display->dispsw = &fbcon_cfb8;
break;
+#endif
+#ifdef FBCON_HAS_CFB16
+ case 16:
+ current_par.palette_size = VIDC_PALETTE_SIZE;
+ display->dispsw = &fbcon_cfb16;
+ display->dispsw_data = current_par.cmap.cfb16;
+ break;
#endif
default:
display->dispsw = &fbcon_dummy;
break;
}
-}
-
-static int
-acornfb_vidc20_getcolreg(u_int regno, u_int *red, u_int *green, u_int *blue, u_int *trans, struct fb_info *info)
-{
- int t;
- if (regno >= current_par.palette_size)
- return 1;
- t = current_par.palette.vidc20[regno].d.red;
- *red = (t << 8) | t;
- t = current_par.palette.vidc20[regno].d.green;
- *green = (t << 8) | t;
- t = current_par.palette.vidc20[regno].d.blue;
- *blue = (t << 8) | t;
- t = current_par.palette.vidc20[regno].d.ext;
- t |= t << 4;
- *transp = (t << 8) | t;
- return 0;
-}
+ if (chgvar && info && info->changevar)
+ info->changevar(con);
-static int
-acornfb_vidc20_setcolreg(u_int regno, u_int red, u_int green, u_int blue, u_int trans, struct fb_info *info)
-{
- if (regno >= current_par.palette_size)
- return 1;
+ if (con == current_par.currcon) {
+ struct fb_cmap *cmap;
+ unsigned long start, size;
+ int control;
- red >>= 8;
- green >>= 8;
- blue >>= 8;
- current_par.palette.vidc20[regno].p = 0;
- current_par.palette.vidc20[regno].d.red = red;
- current_par.palette.vidc20[regno].d.green = green;
- current_par.palette.vidc20[regno].d.blue = blue;
+#if defined(HAS_MEMC)
+ start = 0;
+ size = current_par.screen_size - VDMA_XFERSIZE;
+ control = 0;
- outl(0x10000000 | regno, VIDC_BASE);
- outl(current_par.palette.vidc20[regno].p, VIDC_BASE);
+ memc_write(VDMA_START, start);
+ memc_write(VDMA_END, size >> 2);
+#elif defined(HAS_IOMD)
- return 0;
-}
+ start = current_par.screen_base_p;
+ size = current_par.screen_end;
-static int
-acornfb_get_cmap(struct fb_cmap *cmap, int kspc, int con,
- struct fb_info *info)
-{
- int err = 0;
+ if (current_par.using_vram) {
+ size -= current_par.vram_half_sam;
+ control = DMA_CR_E | (current_par.vram_half_sam / 256);
+ } else {
+ size -= 16;
+ control = DMA_CR_E | DMA_CR_D | 16;
+ }
- if (con == currcon)
- err = fb_get_cmap(cmap, kspc, acornfb_vidc20_getcolreg, info);
- else if (fb_display[con].cmap.len)
- fb_copy_cmap(&fb_display[con].cmap, cmap, kspc ? 0 : 2);
- else
- fb_copy_cmap(fb_default_cmap(current_par.palette_size),
- cmap, kspc ? 0 : 2);
- return err;
-}
+ outl(start, IOMD_VIDSTART);
+ outl(size, IOMD_VIDEND);
+ outl(control, IOMD_VIDCR);
+#endif
+ acornfb_update_dma(var);
-static int
-acornfb_set_cmap(struct fb_cmap *cmap, int kspc, int con,
- struct fb_info *info)
-{
- int err = 0;
+ if (current_par.allow_modeset)
+ acornfb_set_timing(var);
- if (!fb_display[con].cmap.len)
- err = fb_alloc_cmap(&fb_display[con].cmap,
- current_par.palette_size, 0);
- if (!err) {
- if (con == currcon)
- err = fb_set_cmap(cmap, kspc, acornfb_vidc20_setcolreg,
- info);
+ if (display->cmap.len)
+ cmap = &display->cmap;
else
- fb_copy_cmap(cmap, &fb_display[con].cmap,
- kspc ? 0 : 1);
+ cmap = fb_default_cmap(current_par.palette_size);
+
+ fb_set_cmap(cmap, 1, acornfb_setcolreg, info);
}
- return err;
+ return 0;
}
static int
acornfb_pan_display(struct fb_var_screeninfo *var, int con,
struct fb_info *info)
{
- if (var->xoffset || var->yoffset)
+ u_int y_bottom;
+
+ if (var->xoffset)
+ return -EINVAL;
+
+ y_bottom = var->yoffset;
+
+ if (!(var->vmode & FB_VMODE_YWRAP))
+ y_bottom += var->yres;
+
+ if (y_bottom > fb_display[con].var.yres_virtual)
return -EINVAL;
+
+ acornfb_update_dma(var);
+
+ fb_display[con].var.yoffset = var->yoffset;
+ if (var->vmode & FB_VMODE_YWRAP)
+ fb_display[con].var.vmode |= FB_VMODE_YWRAP;
else
- return 0;
+ fb_display[con].var.vmode &= ~FB_VMODE_YWRAP;
+
+ return 0;
}
static int
acornfb_ioctl
};
-void
-acornfb_setup(char *options, int *ints)
+static int
+acornfb_updatevar(int con, struct fb_info *info)
{
+ if (con == current_par.currcon)
+ acornfb_update_dma(&fb_display[con].var);
+
+ return 0;
}
static int
-acornfb_update_var(int con, struct fb_info *info)
+acornfb_switch(int con, struct fb_info *info)
{
- if (con == currcon) {
- int off = fb_display[con].var.yoffset *
- fb_display[con].var.xres_virtual *
- fb_display[con].var.bits_per_pixel >> 3;
- unsigned long base;
+ struct fb_cmap *cmap;
- base = current_par.screen_base = SCREEN_START + off;
+ if (current_par.currcon >= 0) {
+ cmap = &fb_display[current_par.currcon].cmap;
- outl (SCREEN_START + base, VDMA_INIT);
+ if (cmap->len)
+ fb_get_cmap(cmap, 1, acornfb_getcolreg, info);
}
- return 0;
-}
+ current_par.currcon = con;
+
+ fb_display[con].var.activate = FB_ACTIVATE_NOW;
+
+ acornfb_set_var(&fb_display[con].var, con, info);
-static int
-acornfb_switch(int con, struct fb_info *info)
-{
- currcon = con;
- acornfb_update_var(con, info);
return 0;
}
static void
acornfb_blank(int blank, struct fb_info *info)
{
+ int i;
+
+ if (blank)
+ for (i = 0; i < current_par.palette_size; i++) {
+ union palette p;
+
+ p = acornfb_palette_encode(i, 0, 0, 0, 0);
+
+ acornfb_palette_write(i, p);
+ }
+ else
+ for (i = 0; i < current_par.palette_size; i++)
+ acornfb_palette_write(i, current_par.palette[i]);
+}
+
+/*
+ * Everything after here is initialisation!!!
+ */
+struct modey_params {
+ u_int y_res;
+ u_int u_margin;
+ u_int b_margin;
+ u_int vsync_len;
+ u_int vf;
+};
+
+struct modex_params {
+ u_int x_res;
+ u_int l_margin;
+ u_int r_margin;
+ u_int hsync_len;
+ u_int clock;
+ u_int hf;
+ const struct modey_params *modey;
+};
+
+static const struct modey_params modey_640_15600[] __initdata = {
+ { 250, 38, 21, 3, 50 }, /* 640x 250, 50Hz */
+ { 256, 35, 18, 3, 50 }, /* 640x 256, 50Hz */
+ { 0, 0, 0, 0, 0 }
+};
+
+static const struct modey_params modey_640_26800[] __initdata = {
+ { 512, 18, 1, 3, 50 }, /* 640x 512, 50Hz */
+ { 0, 0, 0, 0, 0 }
+};
+
+static const struct modey_params modey_640_31500[] __initdata = {
+ { 250, 109, 88, 2, 70 }, /* 640x 250, 70Hz */
+ { 256, 106, 85, 2, 70 }, /* 640x 256, 70Hz */
+ { 352, 58, 37, 2, 70 }, /* 640x 352, 70Hz */
+ { 480, 32, 11, 2, 60 }, /* 640x 480, 60Hz */
+ { 0, 0, 0, 0, 0 }
+};
+
+static const struct modey_params modey_800_35200[] __initdata = {
+ { 600, 22, 1, 2, 56 }, /* 800x 600, 56Hz */
+ { 0, 0, 0, 0, 0 }
+};
+
+static const struct modey_params modey_896_21800[] __initdata = {
+ { 352, 9, 0, 3, 60 }, /* 896x 352, 60Hz */
+ { 0, 0, 0, 0, 0 }
+};
+
+/* everything after here is not supported */
+static const struct modey_params modey_1024_uk[] __initdata = {
+ { 768, 0, 0, 0, 0 }, /* 1024x 768 */
+ { 0, 0, 0, 0, 0 }
+};
+
+static const struct modey_params modey_1056_uk[] __initdata = {
+ { 250, 0, 0, 0, 0 }, /* 1056x 250 */
+ { 256, 0, 0, 0, 0 }, /* 1056x 256 */
+ { 0, 0, 0, 0, 0 }
+};
+
+static const struct modey_params modey_1152_uk[] __initdata = {
+ { 896, 0, 0, 0, 0 }, /* 1152x 896 */
+ { 0, 0, 0, 0, 0 }
+};
+
+static const struct modey_params modey_1280_63600[] __initdata = {
+ { 1024, 0, 0, 0, 60 }, /* 1280x1024, 60Hz */
+ { 0, 0, 0, 0, 0 }
+};
+
+static const struct modey_params modey_1600_uk[] __initdata = {
+ { 1280, 0, 0, 0, 0 }, /* 1600x1280 */
+ { 0, 0, 0, 0, 0 }
+};
+
+/*
+ * Horizontal video programming requirements.
+ * This table is searched for the required horizontal
+ * and required frequency, and then the tables above
+ * are then searched for the required vertical
+ * resolution.
+ *
+ * NOTE! we can match multiple entries, so we search
+ * all horizontal entries for which the hfreq is within
+ * the monitor's range.
+ */
+static const struct modex_params modex_params[] __initdata = {
+ { /* X: 640, 15.6kHz */
+ 640, 185, 123, 76, 16000, 15625, modey_640_15600
+ },
+ { /* X: 640, 26.8kHz */
+ 640, 113, 87, 56, 24000, 26800, modey_640_26800
+ },
+ { /* X: 640, 31.5kHz */
+ 640, 48, 16, 96, 25175, 31500, modey_640_31500
+ },
+ { /* X: 800, 35.2kHz */
+ 800, 101, 23, 100, 36000, 35200, modey_800_35200
+ },
+ { /* X: 896, 21.8kHz */
+ 896, 59, 27, 118, 24000, 21800, modey_896_21800
+ },
+ { /* X: 1024 */
+ 1024, 0, 0, 0, 0, 0, modey_1024_uk
+ },
+ { /* X: 1056 */
+ 1056, 0, 0, 0, 0, 0, modey_1056_uk
+ },
+ { /* X: 1152 */
+ 1152, 0, 0, 0, 0, 0, modey_1152_uk
+ },
+ { /* X: 1280, 63.6kHz */
+ 1280, 0, 0, 0, 0, 63600, modey_1280_63600
+ },
+ { /* X: 1600 */
+ 1600, 0, 0, 0, 0, 0, modey_1600_uk
+ },
+ {
+ 0,
+ }
+};
+
+__initfunc(static int
+acornfb_lookup_timing(struct fb_var_screeninfo *var))
+{
+ const struct modex_params *x;
+ const struct modey_params *y;
+
+ /*
+ * We must adjust the resolution parameters
+ * before selecting the timing parameters.
+ */
+ acornfb_pre_adjust_timing(var, -1);
+
+ for (x = modex_params; x->x_res; x++) {
+
+ /*
+ * Is this resolution one we're looking for?
+ */
+ if (x->x_res != var->xres)
+ continue;
+
+ /*
+ * Is the hsync frequency ok for our monitor?
+ */
+ if (x->hf > fb_info.monspecs.hfmax ||
+ x->hf < fb_info.monspecs.hfmin)
+ continue;
+
+ /*
+ * Try to find a vertical resolution
+ */
+ for (y = x->modey; y->y_res; y++) {
+ /*
+ * Is this resolution one we're looking for?
+ */
+ if (y->y_res != var->yres)
+ continue;
+
+ /*
+ * Is the vsync frequency ok for our monitor?
+ */
+ if (y->vf > fb_info.monspecs.vfmax ||
+ y->vf < fb_info.monspecs.vfmin)
+ continue;
+
+ goto found;
+ }
+ }
+
+ var->pixclock = 0;
+
+ return -EINVAL;
+
+found:
+ /*
+ * Why is pixclock in picoseconds?
+ */
+ switch (x->clock) {
+ case 36000: var->pixclock = 27778; break;
+ case 25175: var->pixclock = 39722; break;
+ case 24000: var->pixclock = 41667; break;
+ case 16000: var->pixclock = 62500; break;
+ case 12000: var->pixclock = 83333; break;
+ case 8000: var->pixclock = 125000; break;
+ default: var->pixclock = 0; break;
+ }
+
+#ifdef DEBUG_MODE_SELECTION
+ printk(KERN_DEBUG "Found %dx%d at %d.%3dkHz, %dHz, pix %d\n",
+ x->x_res, y->y_res,
+ x->hf / 1000, x->hf % 1000,
+ y->vf, var->pixclock);
+#endif
+
+ var->left_margin = x->l_margin;
+ var->right_margin = x->r_margin;
+ var->upper_margin = y->u_margin;
+ var->lower_margin = y->b_margin;
+ var->hsync_len = x->hsync_len;
+ var->vsync_len = y->vsync_len;
+ var->sync = 0;
+
+ /*
+ * Now adjust the parameters we found
+ */
+ acornfb_post_adjust_timing(var);
+
+ return 0;
}
-__initfunc(unsigned long
-acornfb_init(unsigned long mem_start))
+__initfunc(static void
+acornfb_init_fbinfo(void))
 {
+	/*
+	 * Reached from both acornfb_setup() and acornfb_init();
+	 * the static guard makes the body run only once.
+	 */
+	static int first = 1;
+
+	if (!first)
+		return;
+	first = 0;
+
 	strcpy(fb_info.modename, "Acorn");
-	fb_info.node = -1;
-	fb_info.fbops = &acornfb_ops;
-	fb_info.disp = &disp;
-	fb_info.monspecs.hfmin = 0;
-	fb_info.monspecs.hfmax = 0;
-	fb_info.monspecs.vfmin = 0;
-	fb_info.monspecs.vfmax = 0;
-	fb_info.monspecs.dpms = 0;
 	strcpy(fb_info.fontname, "Acorn8x8");
-	fb_info.changevar = NULL;
-	fb_info.switch_con = acornfb_switch;
-	fb_info.updatevar = acornfb_update_var;
-	fb_info.blank = acornfb_blank;
-	fb_info.flags = FBINFO_FLAG_DEFAULT;
-
-	acornfb_set_disp(-1);
-	fb_set_cmap(fb_default_cmap(current_par.palette_size),
-	1, acornfb_vidc20_setcolreg, &fb_info);
-	register_framebuffer(&fb_info);
-	return mem_start;
+	fb_info.node = -1;
+	fb_info.fbops = &acornfb_ops;
+	fb_info.disp = &global_disp;
+	fb_info.changevar = NULL;
+	fb_info.switch_con = acornfb_switch;
+	fb_info.updatevar = acornfb_updatevar;
+	fb_info.blank = acornfb_blank;
+	fb_info.flags = FBINFO_FLAG_DEFAULT;
+
+	global_disp.dispsw = &fbcon_dummy;
+
+	/*
+	 * setup initial parameters
+	 */
+	memset(&init_var, 0, sizeof(init_var));
+	init_var.xres = DEFAULT_XRES;
+	init_var.yres = DEFAULT_YRES;
+
+	/* pick the deepest console depth this kernel was built with */
+#if defined(FBCON_HAS_CFB4)
+	init_var.bits_per_pixel = 4;
+#elif defined(FBCON_HAS_CFB8)
+	init_var.bits_per_pixel = 8;
+#elif defined(FBCON_HAS_CFB2)
+	init_var.bits_per_pixel = 2;
+#elif defined(FBCON_HAS_MFB)
+	init_var.bits_per_pixel = 1;
+#else
+#error No suitable framebuffers configured
+#endif
+
+#if defined(HAS_VIDC20)
+	init_var.red.length = 8;
+	init_var.transp.length = 4;
+#elif defined(HAS_VIDC)
+	init_var.red.length = 4;
+	init_var.transp.length = 1;
+#endif
+	init_var.green = init_var.red;
+	init_var.blue = init_var.red;
+	init_var.nonstd = 0;
+	init_var.activate = FB_ACTIVATE_NOW;
+	init_var.height = -1;
+	init_var.width = -1;
+	init_var.vmode = FB_VMODE_NONINTERLACED;
+
+	/* -1 montype means "autodetect later in acornfb_init()" */
+	current_par.dram_size = 0;
+	current_par.montype = -1;
+	current_par.dpms = 0;
+}
+
+/*
+ * setup acornfb options:
+ *
+ * font:fontname
+ * Set fontname
+ *
+ * mon:hmin-hmax:vmin-vmax:dpms:width:height
+ * Set monitor parameters:
+ * hmin = horizontal minimum frequency (Hz)
+ * hmax = horizontal maximum frequency (Hz) (optional)
+ * vmin = vertical minimum frequency (Hz)
+ * vmax = vertical maximum frequency (Hz) (optional)
+ * dpms = DPMS supported? (optional)
+ * width = width of picture in mm. (optional)
+ * height = height of picture in mm. (optional)
+ *
+ * montype:type
+ * Set RISC-OS style monitor type:
+ * 0 (or tv) - TV frequency
+ * 1 (or multi) - Multi frequency
+ * 2 (or hires) - Hi-res monochrome
+ * 3 (or vga) - VGA
+ * 4 (or svga) - SVGA
+ * auto, or option missing
+ * - try hardware detect
+ *
+ * dram:size
+ * Set the amount of DRAM to use for the frame buffer
+ * (even if you have VRAM).
+ * size can optionally be followed by 'M' or 'K' for
+ * MB or KB respectively.
+ */
+__initfunc(static void
+acornfb_parse_font(char *opt))
+{
+	/*
+	 * Record the requested console font name.
+	 * NOTE(review): unbounded strcpy into fb_info.fontname;
+	 * assumes the boot option fits - confirm against the
+	 * fontname array size in struct fb_info.
+	 */
+	strcpy(fb_info.fontname, opt);
+}
+
+/*
+ * Parse "mon:hmin-hmax:vmin-vmax:dpms:width:height".  Each field
+ * after the first is optional; parsing stops at the first field
+ * that is not introduced by ':'.  `opt' is advanced in place by
+ * each simple_strtoul() call.
+ */
+__initfunc(static void
+acornfb_parse_mon(char *opt))
+{
+	fb_info.monspecs.hfmin = simple_strtoul(opt, &opt, 0);
+	if (*opt == '-')
+		fb_info.monspecs.hfmax = simple_strtoul(opt + 1, &opt, 0);
+	else
+		fb_info.monspecs.hfmax = fb_info.monspecs.hfmin;
+
+	if (*opt != ':')
+		return;
+
+	fb_info.monspecs.vfmin = simple_strtoul(opt + 1, &opt, 0);
+	if (*opt == '-')
+		fb_info.monspecs.vfmax = simple_strtoul(opt + 1, &opt, 0);
+	else
+		fb_info.monspecs.vfmax = fb_info.monspecs.vfmin;
+
+	if (*opt != ':')
+		return;
+
+	fb_info.monspecs.dpms = simple_strtoul(opt + 1, &opt, 0);
+
+	if (*opt != ':')
+		return;
+
+	/* physical picture size in mm, stored in the initial var */
+	init_var.width = simple_strtoul(opt + 1, &opt, 0);
+
+	if (*opt != ':')
+		return;
+
+	init_var.height = simple_strtoul(opt + 1, NULL, 0);
+}
+
+/*
+ * Parse "montype:<name|number>[,dpms]".  -2 is used as a local
+ * sentinel for "nothing recognised"; -1 means "autodetect".
+ */
+__initfunc(static void
+acornfb_parse_montype(char *opt))
+{
+	current_par.montype = -2;
+
+	if (strncmp(opt, "tv", 2) == 0) {
+		opt += 2;
+		current_par.montype = 0;
+	} else if (strncmp(opt, "multi", 5) == 0) {
+		opt += 5;
+		current_par.montype = 1;
+	} else if (strncmp(opt, "hires", 5) == 0) {
+		opt += 5;
+		current_par.montype = 2;
+	} else if (strncmp(opt, "vga", 3) == 0) {
+		opt += 3;
+		current_par.montype = 3;
+	} else if (strncmp(opt, "svga", 4) == 0) {
+		opt += 4;
+		current_par.montype = 4;
+	} else if (strncmp(opt, "auto", 4) == 0) {
+		opt += 4;
+		current_par.montype = -1;
+	} else if (isdigit(*opt))
+		current_par.montype = simple_strtoul(opt, &opt, 0);
+
+	/*
+	 * NOTE(review): the bound check uses "> NR_MONTYPES"; if the
+	 * monspecs[] table has exactly NR_MONTYPES entries this lets
+	 * the index NR_MONTYPES through - confirm against the table
+	 * definition (looks like it should be ">=").
+	 */
+	if (current_par.montype == -2 ||
+	    current_par.montype > NR_MONTYPES) {
+		printk(KERN_ERR "acornfb: unknown monitor type: %s\n",
+			opt);
+		current_par.montype = -1;
+	} else
+	if (opt && *opt) {
+		/* `opt' now points past the matched keyword/number */
+		if (strcmp(opt, ",dpms") == 0)
+			current_par.dpms = 1;
+		else
+			printk(KERN_ERR
+			       "acornfb: unknown monitor option: %s\n",
+			       opt);
+	}
+}
+
+/*
+ * Parse "dram:size[M|K]" - the amount of DRAM to dedicate to the
+ * frame buffer.
+ */
+__initfunc(static void
+acornfb_parse_dram(char *opt))
+{
+	unsigned int size;
+
+	size = simple_strtoul(opt, &opt, 0);
+
+	if (opt) {
+		switch (*opt) {
+		case 'M':
+		case 'm':
+			size *= 1024;
+			/* fall through - 'M' multiplies by 1024 twice */
+		case 'K':
+		case 'k':
+			size *= 1024;
+			/* fall through */
+		default:
+			break;
+		}
+	}
+
+	current_par.dram_size = size;
+}
+
+/*
+ * Keyword -> handler table for acornfb_setup(); terminated by a
+ * NULL name.
+ */
+static struct options {
+	char *name;
+	void (*parse)(char *opt);
+} opt_table[] __initdata = {
+	{ "font",	acornfb_parse_font },
+	{ "mon",	acornfb_parse_mon },
+	{ "montype",	acornfb_parse_montype },
+	{ "dram",	acornfb_parse_dram },
+	{ NULL, NULL }
+};
+
+/*
+ * Boot-time option parsing: split the comma-separated option
+ * string and dispatch each "name:value" to its handler from
+ * opt_table.  `ints' is part of the setup interface but unused.
+ * strtok()'s static state is fine here - this runs single
+ * threaded at boot.
+ */
+__initfunc(void
+acornfb_setup(char *options, int *ints))
+{
+	struct options *optp;
+	char *opt;
+
+	if (!options || !*options)
+		return;
+
+	acornfb_init_fbinfo();
+
+	for (opt = strtok(options, ","); opt; opt = strtok(NULL, ",")) {
+		if (!*opt)
+			continue;
+
+		for (optp = opt_table; optp->name; optp++) {
+			int optlen;
+
+			optlen = strlen(optp->name);
+
+			if (strncmp(opt, optp->name, optlen) == 0 &&
+			    opt[optlen] == ':') {
+				optp->parse(opt + optlen + 1);
+				break;
+			}
+		}
+
+		/* fell off the table without matching */
+		if (!optp->name)
+			printk(KERN_ERR "acornfb: unknown parameter: %s\n",
+			       opt);
+	}
+}
+
+/*
+ * Detect type of monitor connected
+ * For now, we just assume SVGA
+ */
+__initfunc(static int
+acornfb_detect_monitortype(void))
+{
+	return 4;	/* 4 == svga, see acornfb_parse_montype() */
+}
+
+/*
+ * Hand the page range [virtual_start, virtual_end) back to the
+ * page allocator.  Used on older Acorn machines to reclaim the
+ * part of the fixed-address screen memory that the frame buffer
+ * does not use.
+ */
+static inline void
+free_unused_pages(unsigned int virtual_start, unsigned int virtual_end)
+{
+	unsigned int addr = PAGE_ALIGN(virtual_start);
+	unsigned int end = PAGE_ALIGN(virtual_end);
+	int kbytes_freed = 0;
+
+	for (; addr < end; addr += PAGE_SIZE) {
+		/*
+		 * Drop the reserved bit, give the page a single
+		 * reference, then release it.
+		 */
+		clear_bit(PG_reserved, &mem_map[MAP_NR(addr)].flags);
+		atomic_set(&mem_map[MAP_NR(addr)].count, 1);
+		free_page(addr);
+
+		kbytes_freed += PAGE_SIZE / 1024;
+	}
+
+	printk("acornfb: freed %dK memory\n", kbytes_freed);
+}
+
+/*
+ * Driver entry point: decide monitor type, size and (on RiscPC)
+ * allocate the frame buffer, program the initial mode and
+ * register with the fbcon layer.
+ */
+__initfunc(void
+acornfb_init(void))
+{
+	unsigned long size;
+	u_int h_sync, v_sync;
+
+	acornfb_init_fbinfo();
+
+	if (current_par.montype == -1)
+		current_par.montype = acornfb_detect_monitortype();
+
+	/*
+	 * NOTE(review): same "> NR_MONTYPES" bound as in
+	 * acornfb_parse_montype() - confirm the monspecs[] size.
+	 */
+	if (current_par.montype < 0 || current_par.montype > NR_MONTYPES)
+		current_par.montype = 4;
+
+	fb_info.monspecs = monspecs[current_par.montype];
+	fb_info.monspecs.dpms = current_par.dpms;
+
+	current_par.currcon	    = -1;
+	current_par.screen_base	    = SCREEN2_BASE;
+	current_par.screen_base_p   = SCREEN_START;
+	current_par.using_vram      = 0;
+
+	/*
+	 * If vram_size is set, we are using VRAM in
+	 * a Risc PC. However, if the user has specified
+	 * an amount of DRAM then use that instead.
+	 */
+	if (vram_size && !current_par.dram_size) {
+		size = vram_size;
+		current_par.vram_half_sam = vram_size / 1024;
+		current_par.using_vram = 1;
+	} else if (current_par.dram_size)
+		size = current_par.dram_size;
+	else
+		size = (init_var.xres * init_var.yres *
+			init_var.bits_per_pixel) / 8;
+
+	size = PAGE_ALIGN(size);
+
+#ifdef CONFIG_ARCH_RPC
+	if (!current_par.using_vram) {
+		/*
+		 * RiscPC needs to allocate the DRAM memory
+		 * for the framebuffer if we are not using
+		 * VRAM. Archimedes/A5000 machines use a
+		 * fixed address for their framebuffers.
+		 */
+		/* screen_base is an unsigned long holding the pointer */
+		current_par.screen_base = (unsigned long)kmalloc(size, GFP_KERNEL);
+		if (current_par.screen_base == 0) {
+			printk(KERN_ERR "acornfb: unable to allocate screen "
+			       "memory\n");
+			return;
+		}
+		current_par.screen_base_p =
+			virt_to_phys(current_par.screen_base);
+	}
+#endif
+#if defined(CONFIG_ARCH_A5K) || defined(CONFIG_ARCH_ARC)
+#define MAX_SIZE	480*1024
+	/*
+	 * Limit maximum screen size.
+	 */
+	if (size > MAX_SIZE)
+		size = MAX_SIZE;
+
+	/*
+	 * Free unused pages
+	 */
+	free_unused_pages(PAGE_OFFSET + size, PAGE_OFFSET + MAX_SIZE);
+#endif
+
+	current_par.screen_size	    = size;
+	current_par.palette_size    = VIDC_PALETTE_SIZE;
+	current_par.allow_modeset   = 1;
+
+	/*
+	 * Lookup the timing for this resolution. If we can't
+	 * find it, then we can't restore it if we change
+	 * the resolution, so we disable this feature.
+	 */
+	if (acornfb_lookup_timing(&init_var))
+		current_par.allow_modeset = 0;
+
+	/*
+	 * Again, if this does not succeed, then we disallow
+	 * changes to the resolution parameters.
+	 */
+	if (acornfb_set_var(&init_var, -1, &fb_info))
+		current_par.allow_modeset = 0;
+
+	/*
+	 * NOTE(review): if the timing lookup and set_var both
+	 * failed, init_var.pixclock may still be zero here, making
+	 * this a division by zero - confirm.
+	 */
+	h_sync = 1953125000 / init_var.pixclock;
+	h_sync = h_sync * 512 / (init_var.xres + init_var.left_margin +
+		 init_var.right_margin + init_var.hsync_len);
+	v_sync = h_sync / (init_var.yres + init_var.upper_margin +
+		 init_var.lower_margin + init_var.vsync_len);
+
+	printk("Acornfb: %ldkB %cRAM, %s, using %dx%d, %d.%03dkHz, %dHz\n",
+		current_par.screen_size / 1024,
+		current_par.using_vram ? 'V' : 'D',
+		VIDC_NAME, init_var.xres, init_var.yres,
+		h_sync / 1000, h_sync % 1000, v_sync);
+
+	register_framebuffer(&fb_info);
+}
--- /dev/null
+/*
+ * linux/drivers/video/cyber2000fb.c
+ *
+ * Integraphics Cyber2000 frame buffer device
+ *
+ * Based on cyberfb.c
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/malloc.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+
+#include <video/fbcon.h>
+#include <video/fbcon-cfb8.h>
+#include <video/fbcon-cfb16.h>
+#include <video/fbcon-cfb24.h>
+
+/*
+ * Some defaults
+ */
+#define DEFAULT_XRES 640
+#define DEFAULT_YRES 480
+#define DEFAULT_BPP 8
+
+/* base of the memory-mapped register aperture; set in cyber2000fb_init() */
+static volatile unsigned char *CyberRegs;
+
+#include "cyber2000fb.h"
+
+static struct display global_disp;	/* display settings when no console is bound */
+static struct fb_info fb_info;
+static struct cyber2000fb_par current_par;
+static struct display_switch *dispsw;	/* software renderer for the current depth */
+/* NOTE(review): "= {}" empty initialiser is a GNU extension */
+static struct fb_var_screeninfo __initdata init_var = {};
+
+#ifdef DEBUG
+/*
+ * Early debug output via printascii().
+ * NOTE(review): vsprintf into a fixed 128-byte buffer is
+ * unbounded - long format results would overflow; debug-only.
+ */
+static void debug_printf(char *fmt, ...)
+{
+	char buffer[128];
+	va_list ap;
+
+	va_start(ap, fmt);
+	vsprintf(buffer, fmt, ap);
+	va_end(ap);
+
+	printascii(buffer);
+}
+#else
+#define debug_printf(x...) do { } while (0)
+#endif
+
+/*
+ * Predefined Video Modes
+ */
+/*
+ * Per-mode register values: 18 CRTC registers, the CRTC overflow
+ * register, and 4 clock registers (see struct res in
+ * cyber2000fb.h).  Values come straight from mode tables; do not
+ * edit by hand.
+ */
+static const struct res cyber2000_res[] = {
+	{
+		640, 480,
+		{
+			0x5f, 0x4f, 0x50, 0x80, 0x52, 0x9d, 0x0b, 0x3e,
+			0x00, 0x40,
+			0xe9, 0x8b, 0xdf, 0x50, 0x00, 0xe6, 0x04, 0xc3
+		},
+		0x00,
+		{ 0xd2, 0xce, 0xdb, 0x54 }
+	},
+
+	{
+		800, 600,
+		{
+			0x7f, 0x63, 0x64, 0x00, 0x66, 0x10, 0x6f, 0xf0,
+			0x00, 0x60,
+			0x5b, 0x8f, 0x57, 0x64, 0x00, 0x59, 0x6e, 0xe3
+		},
+		0x00,
+		{ 0x52, 0x85, 0xdb, 0x54 }
+	},
+
+	{
+		1024, 768,
+		{
+			0x9f, 0x7f, 0x80, 0x80, 0x8b, 0x94, 0x1e, 0xfd,
+			0x00, 0x60,
+			0x03, 0x86, 0xff, 0x80, 0x0f, 0x00, 0x1e, 0xe3
+		},
+		0x00,
+		{ 0xd0, 0x52, 0xdb, 0x54 }
+	},
+#if 0
+	{
+		1152, 886,
+		{
+		},
+		{
+		}
+	},
+#endif
+	{
+		1280, 1024,
+		{
+			0xce, 0x9f, 0xa0, 0x8f, 0xa2, 0x1f, 0x28, 0x52,
+			0x00, 0x40,
+			0x08, 0x8f, 0xff, 0xa0, 0x00, 0x03, 0x27, 0xe3
+		},
+		0x1d,
+		{ 0xb4, 0x4b, 0xdb, 0x54 }
+	},
+
+	{
+		1600, 1200,
+		{
+			0xff, 0xc7, 0xc9, 0x9f, 0xcf, 0xa0, 0xfe, 0x10,
+			0x00, 0x40,
+			0xcf, 0x89, 0xaf, 0xc8, 0x00, 0xbc, 0xf1, 0xe3
+		},
+		0x1f,
+		{ 0xbd, 0x10, 0xdb, 0x54 }
+	}
+};
+
+#define NUM_TOTAL_MODES    arraysize(cyber2000_res)
+
+/* (index, value) pairs for the IGS extended graphics registers */
+static const char igs_regs[] = {
+	0x10, 0x10,		0x12, 0x00,	0x13, 0x00,
+	0x30, 0x21,	0x31, 0x00,	0x32, 0x00,	0x33, 0x01,
+	0x50, 0x00,	0x51, 0x00,	0x52, 0x00,	0x53, 0x00,
+	0x54, 0x00,	0x55, 0x00,	0x56, 0x00,	0x57, 0x01,
+	0x58, 0x00,	0x59, 0x00,	0x5a, 0x00,
+	0x70, 0x0b,	0x71, 0x10,	0x72, 0x45,	0x73, 0x30,
+	0x74, 0x1b,	0x75, 0x1e,	0x76, 0x00,	0x7a, 0xc8
+};
+
+/* CRTC register indices matching struct res.crtc_regs order */
+static const char crtc_idx[] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+	0x08, 0x09,
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17
+};
+
+/*
+ * Program the VGA core and IGS extended registers for the given
+ * mode.  The sequence is register-level and order dependent - do
+ * not reorder the writes.
+ */
+static void cyber2000_init_hw(const struct res *res)
+{
+	int i;
+
+	debug_printf("init vga hw for %dx%d\n", res->xres, res->yres);
+
+	cyber2000_outb(0xef, 0x3c2);
+	cyber2000_crtcw(0x0b, 0x11);
+	cyber2000_attrw(0x00, 0x11);
+
+	/* sequencer setup */
+	cyber2000_seqw(0x01, 0x00);
+	cyber2000_seqw(0x01, 0x01);
+	cyber2000_seqw(0x0f, 0x02);
+	cyber2000_seqw(0x00, 0x03);
+	cyber2000_seqw(0x0e, 0x04);
+	cyber2000_seqw(0x03, 0x00);
+
+	/* per-mode CRTC timing values from the mode table */
+	for (i = 0; i < sizeof(crtc_idx); i++)
+		cyber2000_crtcw(res->crtc_regs[i], crtc_idx[i]);
+
+	for (i = 0x0a; i < 0x10; i++)
+		cyber2000_crtcw(0, i);
+
+	cyber2000_crtcw(0xff, 0x18);
+
+	cyber2000_grphw(0x00, 0x00);
+	cyber2000_grphw(0x00, 0x01);
+	cyber2000_grphw(0x00, 0x02);
+	cyber2000_grphw(0x00, 0x03);
+	cyber2000_grphw(0x00, 0x04);
+	cyber2000_grphw(0x60, 0x05);
+	cyber2000_grphw(0x05, 0x06);
+	cyber2000_grphw(0x0f, 0x07);
+	cyber2000_grphw(0xff, 0x08);
+
+	/* identity attribute palette */
+	for (i = 0; i < 16; i++)
+		cyber2000_attrw(i, i);
+
+	cyber2000_attrw(0x01, 0x10);
+	cyber2000_attrw(0x00, 0x11);
+	cyber2000_attrw(0x0f, 0x12);
+	cyber2000_attrw(0x00, 0x13);
+	cyber2000_attrw(0x00, 0x14);
+
+	/* IGS extended registers: (index, value) pairs */
+	for (i = 0; i < sizeof(igs_regs); i += 2)
+		cyber2000_grphw(igs_regs[i+1], igs_regs[i]);
+
+	cyber2000_grphw(res->crtc_ofl, 0x11);
+
+	/* per-mode clock (PLL) registers */
+	for (i = 0; i < 4; i += 1)
+		cyber2000_grphw(res->clk_regs[i], 0xb0 + i);
+
+	cyber2000_grphw(0x01, 0x90);
+	cyber2000_grphw(0x80, 0xb9);
+	cyber2000_grphw(0x00, 0xb9);
+
+	cyber2000_outb(0x56, 0x3ce);
+	i = cyber2000_inb(0x3cf);
+	cyber2000_outb(i | 4, 0x3cf);
+	cyber2000_outb(0x04, 0x3c6);
+	cyber2000_outb(i, 0x3cf);
+
+	cyber2000_outb(0x20, 0x3c0);
+	cyber2000_outb(0xff, 0x3c6);
+
+	/* clear the whole DAC palette to black */
+	for (i = 0; i < 256; i++) {
+		cyber2000_outb(i, 0x3c8);
+		cyber2000_outb(0, 0x3c9);
+		cyber2000_outb(0, 0x3c9);
+		cyber2000_outb(0, 0x3c9);
+	}
+}
+
+
+static struct fb_ops cyber2000fb_ops;
+
+/* -------------------- Hardware specific routines ------------------------- */
+
+/*
+ * Hardware Cyber2000 Acceleration
+ */
+/*
+ * Spin until the blitter's busy flag (bit 7 of register 0xbf011)
+ * clears.  Give up - resetting the engine - instead of hanging
+ * forever if the hardware never goes idle.
+ */
+static void cyber2000_accel_wait(void)
+{
+	int tries;
+
+	for (tries = 10000; cyber2000_inb(0xbf011) & 0x80; udelay(10)) {
+		if (tries-- == 0) {
+			debug_printf("accel_wait timed out\n");
+			cyber2000_outb(0, 0xbf011);
+			return;
+		}
+	}
+}
+
+/* Pass straight through to the software renderer's setup. */
+static void
+cyber2000_accel_setup(struct display *p)
+{
+	dispsw->setup(p);
+}
+
+/*
+ * Hardware screen-to-screen copy of a character-cell rectangle.
+ * When the destination overlaps the source, copy bottom-up /
+ * right-to-left (direction bits 2 and 1 of the command word) so
+ * data is not overwritten before it is read.
+ */
+static void
+cyber2000_accel_bmove(struct display *p, int sy, int sx, int dy, int dx,
+		      int height, int width)
+{
+	unsigned long src, dst, chwidth = p->var.xres_virtual * fontheight(p);
+	int v = 0x8000;
+
+	if (sx < dx) {
+		sx += width - 1;
+		dx += width - 1;
+		v |= 4;		/* copy right-to-left */
+	}
+
+	if (sy < dy) {
+		sy += height - 1;
+		dy += height - 1;
+		v |= 2;		/* copy bottom-up */
+	}
+
+	/* convert character cells to pixel addresses */
+	sx *= fontwidth(p);
+	dx *= fontwidth(p);
+	src = sx + sy * chwidth;
+	dst = dx + dy * chwidth;
+	width = width * fontwidth(p) - 1;
+	height = height * fontheight(p) - 1;
+
+	cyber2000_accel_wait();
+	cyber2000_outb(0x00, 0xbf011);
+	cyber2000_outb(0x03, 0xbf048);
+	cyber2000_outw(width, 0xbf060);
+
+	/* 24bpp uses 3-byte pixels, so scale the linear addresses */
+	if (p->var.bits_per_pixel != 24) {
+		cyber2000_outl(dst, 0xbf178);
+		cyber2000_outl(src, 0xbf170);
+	} else {
+		cyber2000_outl(dst * 3, 0xbf178);
+		cyber2000_outb(dst, 0xbf078);
+		cyber2000_outl(src * 3, 0xbf170);
+	}
+
+	cyber2000_outw(height, 0xbf062);
+	cyber2000_outw(v, 0xbf07c);
+	cyber2000_outw(0x2800, 0xbf07e);
+}
+
+/*
+ * Hardware solid fill of a character-cell rectangle with the
+ * erase (background) colour.
+ */
+static void
+cyber2000_accel_clear(struct vc_data *conp, struct display *p, int sy, int sx,
+		      int height, int width)
+{
+	unsigned long dst;
+	u32 bgx = attr_bgcol_ec(p, conp);
+
+	dst = sx * fontwidth(p) + sy * p->var.xres_virtual * fontheight(p);
+	width = width * fontwidth(p) - 1;
+	height = height * fontheight(p) - 1;
+
+	cyber2000_accel_wait();
+	cyber2000_outb(0x00, 0xbf011);
+	cyber2000_outb(0x03, 0xbf048);
+	cyber2000_outw(width, 0xbf060);
+	cyber2000_outw(height, 0xbf062);
+
+	switch (p->var.bits_per_pixel) {
+	case 16:
+		/* translate colour index through the 16bpp table ... */
+		bgx = ((u16 *)p->dispsw_data)[bgx];
+		/* fall through - address setup is shared with 8bpp */
+	case 8:
+		cyber2000_outl(dst, 0xbf178);
+		break;
+
+	case 24:
+		/* 3-byte pixels: scale the linear address */
+		cyber2000_outl(dst * 3, 0xbf178);
+		cyber2000_outb(dst, 0xbf078);
+		bgx = ((u32 *)p->dispsw_data)[bgx];
+		break;
+	}
+
+	cyber2000_outl(bgx, 0xbf058);
+	cyber2000_outw(0x8000, 0xbf07c);
+	cyber2000_outw(0x0800, 0xbf07e);
+}
+
+/* Wait for the blitter to go idle, then draw in software. */
+static void
+cyber2000_accel_putc(struct vc_data *conp, struct display *p, int c, int yy, int xx)
+{
+	cyber2000_accel_wait();
+	dispsw->putc(conp, p, c, yy, xx);
+}
+
+/* As above, for a string of characters. */
+static void
+cyber2000_accel_putcs(struct vc_data *conp, struct display *p,
+		      const unsigned short *s, int count, int yy, int xx)
+{
+	cyber2000_accel_wait();
+	dispsw->putcs(conp, p, s, count, yy, xx);
+}
+
+/* As above, for cursor reverse-video. */
+static void
+cyber2000_accel_revc(struct display *p, int xx, int yy)
+{
+	cyber2000_accel_wait();
+	dispsw->revc(p, xx, yy);
+}
+
+/*
+ * NOTE(review): unlike the other wrappers this does not call
+ * cyber2000_accel_wait() before drawing - confirm intentional.
+ */
+static void
+cyber2000_accel_clear_margins(struct vc_data *conp, struct display *p, int bottom_only)
+{
+	dispsw->clear_margins(conp, p, bottom_only);
+}
+
+/* Accelerated console ops; NULLs are the cursor/set-font hooks. */
+static struct display_switch fbcon_cyber_accel = {
+	cyber2000_accel_setup,		/* setup */
+	cyber2000_accel_bmove,		/* bmove */
+	cyber2000_accel_clear,		/* clear */
+	cyber2000_accel_putc,		/* putc */
+	cyber2000_accel_putcs,		/* putcs */
+	cyber2000_accel_revc,		/* revc */
+	NULL,
+	NULL,
+	cyber2000_accel_clear_margins,	/* clear_margins */
+	FONTWIDTH(8)|FONTWIDTH(16)
+};
+
+/*
+ * Palette
+ */
+/*
+ * Read one palette entry.  The stored components are 6-bit (see
+ * cyber2000_setcolreg(), which shifts down by 10); the shift
+ * pattern below replicates the 6 bits up to a full 16-bit value.
+ * Returns non-zero for an invalid register number.
+ */
+static int
+cyber2000_getcolreg(u_int regno, u_int * red, u_int * green, u_int * blue,
+		    u_int * transp, struct fb_info *fb_info)
+{
+	int t;
+
+	if (regno >= 256)
+		return 1;
+
+	t = current_par.palette[regno].red;
+	*red = t << 10 | t << 4 | t >> 2;
+
+	t = current_par.palette[regno].green;
+	*green = t << 10 | t << 4 | t >> 2;
+
+	t = current_par.palette[regno].blue;
+	*blue = t << 10 | t << 4 | t >> 2;
+
+	*transp = 0;
+
+	return 0;
+}
+
+/*
+ * Set a single color register. Return != 0 for invalid regno.
+ * Components arrive as 16-bit values and are stored as the top
+ * 6 bits.  For 16bpp the single hardware DAC serves both the
+ * 5-bit red/blue and 6-bit green fields, hence the split writes
+ * at regno<<2 / regno<<3.
+ */
+static int
+cyber2000_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
+		    u_int transp, struct fb_info *fb_info)
+{
+	if (regno > 255)
+		return 1;
+
+	red >>= 10;
+	green >>= 10;
+	blue >>= 10;
+
+	current_par.palette[regno].red = red;
+	current_par.palette[regno].green = green;
+	current_par.palette[regno].blue = blue;
+
+	switch (fb_display[current_par.currcon].var.bits_per_pixel) {
+	case 8:
+		cyber2000_outb(regno, 0x3c8);
+		cyber2000_outb(red, 0x3c9);
+		cyber2000_outb(green, 0x3c9);
+		cyber2000_outb(blue, 0x3c9);
+		break;
+
+#ifdef FBCON_HAS_CFB16
+	case 16:
+		if (regno < 64) {
+			/* write green */
+			cyber2000_outb(regno << 2, 0x3c8);
+			cyber2000_outb(current_par.palette[regno >> 1].red, 0x3c9);
+			cyber2000_outb(green, 0x3c9);
+			cyber2000_outb(current_par.palette[regno >> 1].blue, 0x3c9);
+		}
+
+		if (regno < 32) {
+			/* write red,blue */
+			cyber2000_outb(regno << 3, 0x3c8);
+			cyber2000_outb(red, 0x3c9);
+			cyber2000_outb(current_par.palette[regno << 1].green, 0x3c9);
+			cyber2000_outb(blue, 0x3c9);
+		}
+
+		/* console lookup table: 5-6-5 with identical fields */
+		if (regno < 16)
+			current_par.c_table.cfb16[regno] = regno | regno << 5 | regno << 11;
+		break;
+#endif
+
+#ifdef FBCON_HAS_CFB24
+	case 24:
+		cyber2000_outb(regno, 0x3c8);
+		cyber2000_outb(red, 0x3c9);
+		cyber2000_outb(green, 0x3c9);
+		cyber2000_outb(blue, 0x3c9);
+
+		/* console lookup table: 8-8-8 with identical fields */
+		if (regno < 16)
+			current_par.c_table.cfb24[regno] = regno | regno << 8 | regno << 16;
+		break;
+#endif
+
+	default:
+		return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * Program mode timing and pitch for `var'.  Returns non-zero for
+ * an unsupported depth.  If the resolution is not in
+ * cyber2000_res[] the timing registers are left alone and only
+ * pitch/depth are set.
+ */
+static int cyber2000fb_set_timing(struct fb_var_screeninfo *var)
+{
+	int width = var->xres_virtual;
+	int scr_pitch, fetchrow;
+	int i;
+	char b, col;
+
+	switch (var->bits_per_pixel) {
+	case 8:	/* PSEUDOCOLOUR, 256 */
+		b = 0;
+		col = 1;
+		scr_pitch = var->xres_virtual / 8;
+		break;
+
+	case 16:/* DIRECTCOLOUR, 64k */
+		b = 1;
+		col = 2;
+		scr_pitch = var->xres_virtual / 8 * 2;
+		break;
+	case 24:/* TRUECOLOUR, 16m */
+		b = 2;
+		col = 4;
+		scr_pitch = var->xres_virtual / 8 * 3;
+		width *= 3;
+		break;
+
+	default:
+		return 1;
+	}
+
+	for (i = 0; i < NUM_TOTAL_MODES; i++)
+		if (var->xres == cyber2000_res[i].xres &&
+		    var->yres == cyber2000_res[i].yres)
+			break;
+
+	if (i < NUM_TOTAL_MODES)
+		cyber2000_init_hw(cyber2000_res + i);
+
+	fetchrow = scr_pitch + 1;
+
+	debug_printf("Setting regs: pitch=%X, fetchrow=%X, col=%X, b=%X\n",
+		     scr_pitch, fetchrow, col, b);
+
+	/* pitch low byte (CRTC 0x13), fetchrow (ext 0x14) */
+	cyber2000_outb(0x13, 0x3d4);
+	cyber2000_outb(scr_pitch, 0x3d5);
+	cyber2000_outb(0x14, 0x3ce);
+	cyber2000_outb(fetchrow, 0x3cf);
+	cyber2000_outb(0x15, 0x3ce);
+	/* FIXME: is this the right way round? */
+	cyber2000_outb(((fetchrow >> 4) & 0xf0) | ((scr_pitch >> 8) & 0x0f), 0x3cf);
+	cyber2000_outb(0x77, 0x3ce);
+	cyber2000_outb(col, 0x3cf);
+
+
+	cyber2000_outb(0x33, 0x3ce);
+	cyber2000_outb(0x1c, 0x3cf);
+
+	cyber2000_outw(width - 1, 0xbf018);
+	cyber2000_outw(width - 1, 0xbf218);
+	cyber2000_outb(b, 0xbf01c);
+
+	return 0;
+}
+
+/*
+ * Program the display start address for panning.  Currently a
+ * stub: the whole body is compiled out, so panning has no effect
+ * (see also the unconditional -EINVAL in pan_display).
+ */
+static inline void
+cyber2000fb_update_start(struct fb_var_screeninfo *var)
+{
+#if 0
+	unsigned int base;
+
+	base = var->yoffset * var->xres_virtual + var->xoffset;
+
+	cyber2000_outb(0x0c, 0x3d4);
+	cyber2000_outb(base, 0x3d5);
+	cyber2000_outb(0x0d, 0x3d4);
+	cyber2000_outb(base >> 8, 0x3d5);
+	/* FIXME: need the upper bits of the start offset */
+/*	cyber2000_outb(0x??, 0x3d4);
+	cyber2000_outb(base >> 16, 0x3d5);*/
+#endif
+}
+
+/*
+ * Open/Release the frame buffer device
+ */
+/* Pin the module while the device is open. */
+static int cyber2000fb_open(struct fb_info *info, int user)
+{
+	MOD_INC_USE_COUNT;
+	return 0;
+}
+
+/* Drop the open reference taken in cyber2000fb_open(). */
+static int cyber2000fb_release(struct fb_info *info, int user)
+{
+	MOD_DEC_USE_COUNT;
+	return 0;
+}
+
+/*
+ * Get the Colormap
+ */
+/*
+ * Return the colormap: from hardware for the visible console,
+ * from the per-console copy otherwise, falling back to the
+ * default map for the console's depth.
+ */
+static int
+cyber2000fb_get_cmap(struct fb_cmap *cmap, int kspc, int con,
+		     struct fb_info *info)
+{
+	int err = 0;
+
+	if (con == current_par.currcon)		/* current console? */
+		err = fb_get_cmap(cmap, kspc, cyber2000_getcolreg, info);
+	else if (fb_display[con].cmap.len)	/* non default colormap? */
+		fb_copy_cmap(&fb_display[con].cmap, cmap, kspc ? 0 : 2);
+	else
+		fb_copy_cmap(fb_default_cmap(1 << fb_display[con].var.bits_per_pixel),
+			     cmap, kspc ? 0 : 2);
+	return err;
+}
+
+
+/*
+ * Set the Colormap
+ */
+/*
+ * Store a colormap, allocating the per-console copy on first use
+ * (32 entries at 16bpp, 256 otherwise).  Only the visible console
+ * is written through to the hardware.
+ */
+static int
+cyber2000fb_set_cmap(struct fb_cmap *cmap, int kspc, int con,
+		     struct fb_info *info)
+{
+	struct display *disp = &fb_display[con];
+	int err = 0;
+
+	if (!disp->cmap.len) {	/* no colormap allocated? */
+		int size;
+
+		if (disp->var.bits_per_pixel == 16)
+			size = 32;
+		else
+			size = 256;
+
+		err = fb_alloc_cmap(&disp->cmap, size, 0);
+	}
+	if (!err) {
+		if (con == current_par.currcon)		/* current console? */
+			err = fb_set_cmap(cmap, kspc, cyber2000_setcolreg,
+					  info);
+		else
+			fb_copy_cmap(cmap, &disp->cmap, kspc ? 0 : 1);
+	}
+
+	return err;
+}
+
+/*
+ * Map the requested depth to an fbcon visual, rejecting depths
+ * this driver (or kernel config) does not support.  `con' is
+ * unused but kept for the decode_var convention.
+ */
+static int
+cyber2000fb_decode_var(struct fb_var_screeninfo *var, int con, int *visual)
+{
+	switch (var->bits_per_pixel) {
+#ifdef FBCON_HAS_CFB8
+	case 8:
+		*visual = FB_VISUAL_PSEUDOCOLOR;
+		break;
+#endif
+#ifdef FBCON_HAS_CFB16
+	case 16:
+		*visual = FB_VISUAL_DIRECTCOLOR;
+		break;
+#endif
+#ifdef FBCON_HAS_CFB24
+	case 24:
+		*visual = FB_VISUAL_TRUECOLOR;
+		break;
+#endif
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Get the Fixed Part of the Display
+ */
+/*
+ * Fill in the fixed display parameters from the current (or
+ * global, for con < 0) display state.
+ */
+static int
+cyber2000fb_get_fix(struct fb_fix_screeninfo *fix, int con,
+		    struct fb_info *fb_info)
+{
+	struct display *display;
+
+	memset(fix, 0, sizeof(struct fb_fix_screeninfo));
+	strcpy(fix->id, "Cyber2000");
+
+	if (con >= 0)
+		display = fb_display + con;
+	else
+		display = &global_disp;
+
+	fix->smem_start	 = (char *)current_par.screen_base_p;
+	fix->smem_len	 = current_par.screen_size;
+	fix->mmio_start	 = (char *)current_par.regs_base_p;
+	fix->mmio_len	 = 0x000c0000;
+	fix->type	 = display->type;
+	fix->type_aux	 = display->type_aux;
+	fix->xpanstep	 = 0;
+	fix->ypanstep	 = display->ypanstep;
+	fix->ywrapstep	 = display->ywrapstep;
+	fix->visual	 = display->visual;
+	fix->line_length = display->line_length;
+	/* accel id not yet in <linux/fb.h> at this point */
+	fix->accel	 = 22; /*FB_ACCEL_IGS_CYBER2000*/
+
+	return 0;
+}
+
+
+/*
+ * Get the User Defined Part of the Display
+ */
+/*
+ * Return the variable display parameters for console `con', or
+ * the global settings when con == -1.
+ */
+static int
+cyber2000fb_get_var(struct fb_var_screeninfo *var, int con,
+		    struct fb_info *fb_info)
+{
+	struct fb_var_screeninfo *src;
+
+	src = con == -1 ? &global_disp.var : &fb_display[con].var;
+	*var = *src;
+
+	return 0;
+}
+
+/*
+ * Set the User Defined Part of the Display
+ */
+/*
+ * Validate and apply a new mode for console `con' (or the global
+ * display when con < 0): decode the depth, record the new var,
+ * choose a low-level drawing switch, and - if this is the visible
+ * console - program the hardware timing and reload the colormap.
+ * NOTE: updates the file-scope `dispsw' even for background
+ * consoles.
+ */
+static int
+cyber2000fb_set_var(struct fb_var_screeninfo *var, int con, struct fb_info *info)
+{
+	struct display *display;
+	int err, chgvar = 0, visual;
+
+	if (con >= 0)
+		display = fb_display + con;
+	else
+		display = &global_disp;
+
+	err = cyber2000fb_decode_var(var, con, &visual);
+	if (err)
+		return err;
+
+	switch (var->activate & FB_ACTIVATE_MASK) {
+	case FB_ACTIVATE_TEST:
+		return 0;
+
+	case FB_ACTIVATE_NXTOPEN:
+	case FB_ACTIVATE_NOW:
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	if (con >= 0) {
+		if (display->var.xres != var->xres)
+			chgvar = 1;
+		if (display->var.yres != var->yres)
+			chgvar = 1;
+		if (display->var.xres_virtual != var->xres_virtual)
+			chgvar = 1;
+		if (display->var.yres_virtual != var->yres_virtual)
+			chgvar = 1;
+		if (display->var.accel_flags != var->accel_flags)
+			chgvar = 1;
+		if (memcmp(&display->var.red, &var->red, sizeof(var->red)))
+			chgvar = 1;
+		if (memcmp(&display->var.green, &var->green, sizeof(var->green)))
+			chgvar = 1;
+		/* was sizeof(var->green): copy/paste slip, compare blue with its own size */
+		if (memcmp(&display->var.blue, &var->blue, sizeof(var->blue)))
+			chgvar = 1;
+	}
+
+	display->var = *var;
+
+	display->screen_base	= (char *)current_par.screen_base;
+	display->visual		= visual;
+	display->type		= FB_TYPE_PACKED_PIXELS;
+	display->type_aux	= 0;
+	display->ypanstep	= 0;
+	display->ywrapstep	= 0;
+	display->line_length	=
+	display->next_line	= (var->xres_virtual * var->bits_per_pixel) / 8;
+	display->can_soft_blank	= 1;
+	display->inverse	= 0;
+
+	/* pick the software renderer matching the new depth */
+	switch (display->var.bits_per_pixel) {
+#ifdef FBCON_HAS_CFB8
+	case 8:
+		dispsw = &fbcon_cfb8;
+		display->dispsw_data = NULL;
+		break;
+#endif
+#ifdef FBCON_HAS_CFB16
+	case 16:
+		dispsw = &fbcon_cfb16;
+		display->dispsw_data = current_par.c_table.cfb16;
+		break;
+#endif
+#ifdef FBCON_HAS_CFB24
+	case 24:
+		dispsw = &fbcon_cfb24;
+		display->dispsw_data = current_par.c_table.cfb24;
+		break;
+#endif
+	default:
+		printk(KERN_WARNING "cyber2000: no support for %dbpp\n",
+		       display->var.bits_per_pixel);
+		dispsw = &fbcon_dummy;
+		break;
+	}
+
+	/* use the accelerated ops only when text acceleration is wanted */
+	if (display->var.accel_flags & FB_ACCELF_TEXT &&
+	    dispsw != &fbcon_dummy)
+		display->dispsw = &fbcon_cyber_accel;
+	else
+		display->dispsw = dispsw;
+
+	if (chgvar && info && info->changevar)
+		info->changevar(con);
+
+	if (con == current_par.currcon) {
+		struct fb_cmap *cmap;
+
+		cyber2000fb_update_start(var);
+		cyber2000fb_set_timing(var);
+
+		if (display->cmap.len)
+			cmap = &display->cmap;
+		else
+			cmap = fb_default_cmap(current_par.palette_size);
+
+		fb_set_cmap(cmap, 1, cyber2000_setcolreg, info);
+	}
+	return 0;
+}
+
+
+/*
+ * Pan or Wrap the Display
+ */
+/*
+ * Pan/wrap the display.  Currently always fails: update_start is
+ * a stub, so the unconditional -EINVAL below deliberately
+ * disables panning and leaves the remaining code unreachable.
+ */
+static int cyber2000fb_pan_display(struct fb_var_screeninfo *var, int con,
+				   struct fb_info *info)
+{
+	u_int y_bottom;
+
+	y_bottom = var->yoffset;
+
+	if (!(var->vmode & FB_VMODE_YWRAP))
+		y_bottom += var->yres;
+
+	if (var->xoffset > (var->xres_virtual - var->xres))
+		return -EINVAL;
+	if (y_bottom > fb_display[con].var.yres_virtual)
+		return -EINVAL;
+/* NOTE(review): unconditional - panning disabled until update_start works */
+return -EINVAL;
+
+	cyber2000fb_update_start(var);
+
+	fb_display[con].var.xoffset = var->xoffset;
+	fb_display[con].var.yoffset = var->yoffset;
+	if (var->vmode & FB_VMODE_YWRAP)
+		fb_display[con].var.vmode |= FB_VMODE_YWRAP;
+	else
+		fb_display[con].var.vmode &= ~FB_VMODE_YWRAP;
+
+	return 0;
+}
+
+
+/* No driver-private ioctls. */
+static int cyber2000fb_ioctl(struct inode *inode, struct file *file,
+			     u_int cmd, u_long arg, int con, struct fb_info *info)
+{
+	return -EINVAL;
+}
+
+
+/*
+ * Update the `var' structure (called by fbcon.c)
+ *
+ * This call looks only at yoffset and the FB_VMODE_YWRAP flag in `var'.
+ * Since it's called by a kernel driver, no range checking is done.
+ */
+static int
+cyber2000fb_updatevar(int con, struct fb_info *info)
+{
+	/* only the visible console touches the hardware */
+	if (con == current_par.currcon)
+		cyber2000fb_update_start(&fb_display[con].var);
+	return 0;
+}
+
+/*
+ * Console switch: save the outgoing console's colormap from the
+ * hardware, then activate the incoming console's mode.
+ */
+static int
+cyber2000fb_switch(int con, struct fb_info *info)
+{
+	struct fb_cmap *cmap;
+
+	if (current_par.currcon >= 0) {
+		cmap = &fb_display[current_par.currcon].cmap;
+
+		if (cmap->len)
+			fb_get_cmap(cmap, 1, cyber2000_getcolreg, info);
+	}
+
+	current_par.currcon = con;
+
+	fb_display[con].var.activate = FB_ACTIVATE_NOW;
+
+	cyber2000fb_set_var(&fb_display[con].var, con, info);
+
+	return 0;
+}
+
+/*
+ * (Un)Blank the display.
+ */
+static void cyber2000fb_blank(int blank, struct fb_info *fb_info)
+{
+	int i;
+
+	/*
+	 * Reprogram all 256 DAC entries: zeros to blank the
+	 * display, the saved palette to restore it.
+	 */
+	for (i = 0; i < 256; i++) {
+		cyber2000_outb(i, 0x3c8);
+		if (blank) {
+			cyber2000_outb(0, 0x3c9);
+			cyber2000_outb(0, 0x3c9);
+			cyber2000_outb(0, 0x3c9);
+		} else {
+			cyber2000_outb(current_par.palette[i].red, 0x3c9);
+			cyber2000_outb(current_par.palette[i].green, 0x3c9);
+			cyber2000_outb(current_par.palette[i].blue, 0x3c9);
+		}
+	}
+}
+
+/* No boot options supported yet. */
+__initfunc(void cyber2000fb_setup(char *options, int *ints))
+{
+}
+
+/* fbdev entry points, in struct fb_ops declaration order */
+static struct fb_ops cyber2000fb_ops =
+{
+	cyber2000fb_open,
+	cyber2000fb_release,
+	cyber2000fb_get_fix,
+	cyber2000fb_get_var,
+	cyber2000fb_set_var,
+	cyber2000fb_get_cmap,
+	cyber2000fb_set_cmap,
+	cyber2000fb_pan_display,
+	cyber2000fb_ioctl
+};
+
+/*
+ * One-time initialisation of fb_info and the default mode
+ * (init_var).  Only the DEFAULT_BPP arm of the switch below ever
+ * runs; the depth re-assignment in each case is redundant.
+ */
+__initfunc(static void
+cyber2000fb_init_fbinfo(void))
+{
+	static int first = 1;
+
+	if (!first)
+		return;
+	first = 0;
+
+	strcpy(fb_info.modename, "Cyber2000");
+	strcpy(fb_info.fontname, "Acorn8x8");
+
+	fb_info.node			= -1;
+	fb_info.fbops			= &cyber2000fb_ops;
+	fb_info.disp			= &global_disp;
+	fb_info.changevar		= NULL;
+	fb_info.switch_con		= cyber2000fb_switch;
+	fb_info.updatevar		= cyber2000fb_updatevar;
+	fb_info.blank			= cyber2000fb_blank;
+	fb_info.flags			= FBINFO_FLAG_DEFAULT;
+
+	/*
+	 * setup initial parameters
+	 */
+	memset(&init_var, 0, sizeof(init_var));
+	init_var.xres_virtual =
+	init_var.xres = DEFAULT_XRES;
+	init_var.yres_virtual =
+	init_var.yres = DEFAULT_YRES;
+	init_var.bits_per_pixel = DEFAULT_BPP;
+
+	init_var.red.msb_right = 0;
+	init_var.green.msb_right = 0;
+	init_var.blue.msb_right = 0;
+
+	/* component layout per depth (8: palette, 16: 5-6-5, 24: 8-8-8) */
+	switch(init_var.bits_per_pixel) {
+	case 8:
+		init_var.bits_per_pixel = 8;
+		init_var.red.offset	= 0;
+		init_var.red.length	= 8;
+		init_var.green.offset	= 0;
+		init_var.green.length	= 8;
+		init_var.blue.offset	= 0;
+		init_var.blue.length	= 8;
+		break;
+
+	case 16:
+		init_var.bits_per_pixel = 16;
+		init_var.red.offset	= 11;
+		init_var.red.length	= 5;
+		init_var.green.offset	= 5;
+		init_var.green.length	= 6;
+		init_var.blue.offset	= 0;
+		init_var.blue.length	= 5;
+		break;
+
+	case 24:
+		init_var.bits_per_pixel = 24;
+		init_var.red.offset	= 16;
+		init_var.red.length	= 8;
+		init_var.green.offset	= 8;
+		init_var.green.length	= 8;
+		init_var.blue.offset	= 0;
+		init_var.blue.length	= 8;
+		break;
+	}
+
+	init_var.nonstd		= 0;
+	init_var.activate	= FB_ACTIVATE_NOW;
+	init_var.height		= -1;
+	init_var.width		= -1;
+	init_var.accel_flags	= FB_ACCELF_TEXT;
+	init_var.sync		= FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT;
+	init_var.vmode		= FB_VMODE_NONINTERLACED;
+}
+
+/*
+ * Initialization
+ */
+__initfunc(void cyber2000fb_init(void))
+{
+	struct pci_dev *dev;
+	u_int h_sync, v_sync;
+
+	dev = pci_find_device(PCI_VENDOR_ID_INTERG, 0x2000, NULL);
+	if (!dev)
+		return;
+
+	/* register aperture sits 8MB into BAR0 */
+	CyberRegs = bus_to_virt(dev->base_address[0]) + 0x00800000;/*FIXME*/
+
+	/* wake the chip up (setup/enable sequence) */
+	cyber2000_outb(0x18, 0x46e8);
+	cyber2000_outb(0x01, 0x102);
+	cyber2000_outb(0x08, 0x46e8);
+
+	cyber2000fb_init_fbinfo();
+
+	current_par.currcon		= -1;
+	current_par.screen_base_p	= 0x80000000 + dev->base_address[0];
+	current_par.screen_base		= (u_int)bus_to_virt(dev->base_address[0]);
+	current_par.screen_size		= 0x00200000;
+	current_par.regs_base_p		= 0x80800000 + dev->base_address[0];
+
+	cyber2000fb_set_var(&init_var, -1, &fb_info);
+
+	/*
+	 * NOTE(review): init_var.pixclock is never assigned in this
+	 * driver; if it is still zero this divides by zero - confirm.
+	 */
+	h_sync = 1953125000 / init_var.pixclock;
+	h_sync = h_sync * 512 / (init_var.xres + init_var.left_margin +
+		 init_var.right_margin + init_var.hsync_len);
+	v_sync = h_sync / (init_var.yres + init_var.upper_margin +
+		 init_var.lower_margin + init_var.vsync_len);
+
+	printk("Cyber2000: %ldkB VRAM, using %dx%d, %d.%03dkHz, %dHz\n",
+		current_par.screen_size >> 10,
+		init_var.xres, init_var.yres,
+		h_sync / 1000, h_sync % 1000, v_sync);
+
+	if (register_framebuffer(&fb_info) < 0)
+		return;
+
+	MOD_INC_USE_COUNT;	/* TODO: This driver cannot be unloaded yet */
+}
+
+
+
+#ifdef MODULE
+/* Module entry: probe and register; always reports success. */
+int init_module(void)
+{
+	cyber2000fb_init();
+	return 0;
+}
+
+void cleanup_module(void)
+{
+	/* Not reached because the usecount will never be
+	   decremented to zero */
+	unregister_framebuffer(&fb_info);
+	/* TODO: clean up ... */
+}
+
+#endif				/* MODULE */
--- /dev/null
+/*
+ * linux/drivers/video/cyber2000fb.h
+ *
+ * Integraphics Cyber2000 frame buffer device
+ */
+
+#define arraysize(x)	(sizeof(x)/sizeof(*(x)))
+
+/*
+ * MMIO accessors for the Cyber2000 register aperture.  Arguments
+ * are parenthesised so expressions expand safely, and the 16/32
+ * bit casts keep `volatile' (CyberRegs is a volatile pointer) so
+ * the compiler cannot elide or reorder the device accesses.
+ */
+#define cyber2000_outb(dat,reg)	(CyberRegs[(reg)] = (dat))
+#define cyber2000_outw(dat,reg)	(*(volatile unsigned short *)&CyberRegs[(reg)] = (dat))
+#define cyber2000_outl(dat,reg)	(*(volatile unsigned long *)&CyberRegs[(reg)] = (dat))
+
+#define cyber2000_inb(reg)	(CyberRegs[(reg)])
+#define cyber2000_inw(reg)	(*(volatile unsigned short *)&CyberRegs[(reg)])
+#define cyber2000_inl(reg)	(*(volatile unsigned long *)&CyberRegs[(reg)])
+
+/* Write a CRTC register: index via 0x3d4, data via 0x3d5. */
+static inline void cyber2000_crtcw(int val, int reg)
+{
+	cyber2000_outb(reg, 0x3d4);
+	cyber2000_outb(val, 0x3d5);
+}
+
+/* Write a graphics-controller register (0x3ce/0x3cf). */
+static inline void cyber2000_grphw(int val, int reg)
+{
+	cyber2000_outb(reg, 0x3ce);
+	cyber2000_outb(val, 0x3cf);
+}
+
+/*
+ * Write an attribute-controller register.  Reading 0x3da first
+ * resets the attribute controller's index/data flip-flop.
+ */
+static inline void cyber2000_attrw(int val, int reg)
+{
+	cyber2000_inb(0x3da);
+	cyber2000_outb(reg, 0x3c0);
+	cyber2000_inb(0x3c1);
+	cyber2000_outb(val, 0x3c0);
+}
+
+/* Write a sequencer register (0x3c4/0x3c5). */
+static inline void cyber2000_seqw(int val, int reg)
+{
+	cyber2000_outb(reg, 0x3c4);
+	cyber2000_outb(val, 0x3c5);
+}
+
+/* Driver state: addresses, sizes, active console and palette. */
+struct cyber2000fb_par {
+	unsigned long	screen_base;	/* virtual address of VRAM */
+	unsigned long	screen_base_p;	/* bus/physical address of VRAM */
+	unsigned long	regs_base;
+	unsigned long	regs_base_p;	/* bus/physical address of registers */
+	unsigned long	screen_end;
+	unsigned long	screen_size;
+	unsigned int	palette_size;
+	signed int	currcon;	/* visible console, -1 = none */
+	/*
+	 * palette
+	 */
+	struct {
+		u8			red;
+		u8			green;
+		u8			blue;
+	} palette[256];
+	/*
+	 * colour mapping table
+	 */
+	union {
+#ifdef FBCON_HAS_CFB16
+		u16			cfb16[16];
+#endif
+#ifdef FBCON_HAS_CFB24
+		u32			cfb24[16];
+#endif
+	} c_table;
+};
+
+/* One predefined video mode: resolution plus raw register values. */
+struct res {
+	int	xres;
+	int	yres;
+	unsigned char crtc_regs[18];	/* indices given by crtc_idx[] */
+	unsigned char crtc_ofl;		/* CRTC overflow (ext reg 0x11) */
+	unsigned char clk_regs[4];	/* clock/PLL regs 0xb0..0xb3 */
+};
* available, usually until fbcon takes console over.
*/
-#include <linux/config.h>
#include <linux/types.h>
#include <linux/kdev_t.h>
#include <linux/tty.h>
* Dummy console driver
*/
-#if defined(CONFIG_ARM)
+#if defined(__arm__)
#define DUMMY_COLUMNS ORIG_VIDEO_COLS
#define DUMMY_ROWS ORIG_VIDEO_LINES
#else
* Frame buffer device initialization and setup routines
*/
-extern unsigned long acornfb_init(void);
+extern void acornfb_init(void);
extern void acornfb_setup(char *options, int *ints);
extern void amifb_init(void);
extern void amifb_setup(char *options, int *ints);
extern void cyberfb_setup(char *options, int *ints);
extern void pm2fb_init(void);
extern void pm2fb_setup(char *options, int *ints);
+extern void cyber2000fb_init(void);
+extern void cyber2000fb_setup(char *options, int *ints);
extern void retz3fb_init(void);
extern void retz3fb_setup(char *options, int *ints);
extern void clgenfb_init(void);
#ifdef CONFIG_FB_CYBER
{ "cyber", cyberfb_init, cyberfb_setup },
#endif
+#ifdef CONFIG_FB_CYBER2000
+ { "cyber2000", cyber2000fb_init, cyber2000fb_setup },
+#endif
#ifdef CONFIG_FB_PM2
{ "pm2fb", pm2fb_init, pm2fb_setup },
#endif
#elif defined(__mips__)
pgprot_val(vma->vm_page_prot) &= ~_CACHE_MASK;
pgprot_val(vma->vm_page_prot) |= _CACHE_UNCACHED;
+#elif defined(__arm__)
+#if defined(CONFIG_CPU_32) && !defined(CONFIG_ARCH_ACORN)
+ /* On Acorn architectures, we want to keep the framebuffer
+ * cached.
+ */
+ pgprot_val(vma->vm_page_prot) &= ~(PTE_CACHEABLE | PTE_BUFFERABLE);
+#endif
#else
#warning What do we have to do here??
#endif
__initfunc(static const char *vgacon_startup(void))
{
const char *display_desc = NULL;
- u16 saved;
+ u16 saved1, saved2;
u16 *p;
if (ORIG_VIDEO_ISVGA == VIDEO_TYPE_VLFB) {
* Are there smarter methods around?
*/
p = (u16 *)vga_vram_base;
- saved = scr_readw(p);
+ saved1 = scr_readw(p);
+ saved2 = scr_readw(p + 1);
scr_writew(0xAA55, p);
- if (scr_readw(p) != 0xAA55) {
- scr_writew(saved, p);
+ scr_writew(0x55AA, p + 1);
+ if (scr_readw(p) != 0xAA55 || scr_readw(p + 1) != 0x55AA) {
+ scr_writew(saved1, p);
+ scr_writew(saved2, p + 1);
goto no_vga;
}
scr_writew(0x55AA, p);
- if (scr_readw(p) != 0x55AA) {
- scr_writew(saved, p);
+ scr_writew(0xAA55, p + 1);
+ if (scr_readw(p) != 0x55AA || scr_readw(p + 1) != 0xAA55) {
+ scr_writew(saved1, p);
+ scr_writew(saved2, p + 1);
goto no_vga;
}
- scr_writew(saved, p);
+ scr_writew(saved1, p);
+ scr_writew(saved2, p + 1);
if (vga_video_type == VIDEO_TYPE_EGAC
|| vga_video_type == VIDEO_TYPE_VGAC
return val;
}
+static unsigned int adfs_filetype (unsigned int load)
+{
+ if ((load & 0xfff00000) != 0xfff00000)
+ return (unsigned int) -1;
+ return (load >> 8) & 0xfff;
+}
+
static unsigned int adfs_time (unsigned int load, unsigned int exec)
{
unsigned int high, low;
- high = ((load << 24) | (exec >> 8)) - 0x336e996a;
+ /* Check for unstamped files. */
+ if ((load & 0xfff00000) != 0xfff00000)
+ return 0;
+
+ high = ((load << 24) | (exec >> 8));
low = exec & 255;
+ /* Files dated pre 1970. */
+ if (high < 0x336e996a)
+ return 0;
+
+ high -= 0x336e996a;
+
+ /* Files dated post 2038 ish. */
+ if (high > 0x31ffffff)
+ return 0x7fffffff;
+
/* 65537 = h256,l1
* (h256 % 100) = 56 h256 / 100 = 2
* 56 << 8 = 14336 2 * 256 = 512
brelse (bhp[i]);
}
+/* convert a disk-based directory entry to a Linux ADFS directory entry */
+static inline void
+adfs_dirent_to_idirent(struct adfs_idir_entry *ide, struct adfs_direntry *de)
+{
+ ide->name_len = adfs_readname(ide->name, de->dirobname, ADFS_NAME_LEN);
+ ide->file_id = adfs_val(de->dirinddiscadd, 3);
+ ide->size = adfs_val(de->dirlen, 4);
+ ide->mode = de->newdiratts;
+ ide->mtime = adfs_time(adfs_val(de->dirload, 4), adfs_val(de->direxec, 4));
+ ide->filetype = adfs_filetype(adfs_val(de->dirload, 4));
+}
+
int adfs_dir_get (struct super_block *sb, struct buffer_head **bhp,
int buffers, int pos, unsigned long parent_object_id,
struct adfs_idir_entry *ide)
if (!de.dirobname[0])
return 0;
- ide->name_len = adfs_readname (ide->name, de.dirobname, ADFS_NAME_LEN);
ide->inode_no = adfs_inode_generate (parent_object_id, pos);
- ide->file_id = adfs_val (de.dirinddiscadd, 3);
- ide->size = adfs_val (de.dirlen, 4);
- ide->mode = de.newdiratts;
- ide->mtime = adfs_time (adfs_val (de.dirload, 4), adfs_val (de.direxec, 4));
- ide->filetype = (adfs_val (de.dirload, 4) >> 8) & 0xfff;
+ adfs_dirent_to_idirent(ide, &de);
return 1;
}
if (!de.dirobname[0])
return 0;
- ide->name_len = adfs_readname (ide->name, de.dirobname, ADFS_NAME_LEN);
- ide->size = adfs_val (de.dirlen, 4);
- ide->mode = de.newdiratts;
- ide->file_id = adfs_val (de.dirinddiscadd, 3);
- ide->mtime = adfs_time (adfs_val (de.dirload, 4), adfs_val (de.direxec, 4));
- ide->filetype = (adfs_val (de.dirload, 4) >> 8) & 0xfff;
+ adfs_dirent_to_idirent(ide, &de);
return 1;
}
};
struct autofs_wait_queue {
- struct wait_queue *queue;
+ wait_queue_head_t queue;
struct autofs_wait_queue *next;
autofs_wqt_t wait_queue_token;
/* We use the following to see what we are waiting for */
return -ENOMEM;
}
wq->wait_queue_token = autofs_next_wait_queue++;
- init_waitqueue(&wq->queue);
+ init_waitqueue_head(&wq->queue);
wq->hash = name->hash;
wq->len = name->len;
wq->status = -EINTR; /* Status return if interrupted */
static struct buffer_head * unused_list = NULL;
static struct buffer_head * reuse_list = NULL;
-static struct wait_queue * buffer_wait = NULL;
+static DECLARE_WAIT_QUEUE_HEAD(buffer_wait);
static int nr_buffers = 0;
static int nr_buffers_type[NR_LIST] = {0,};
void __wait_on_buffer(struct buffer_head * bh)
{
struct task_struct *tsk = current;
- struct wait_queue wait;
+ DECLARE_WAITQUEUE(wait, tsk);
bh->b_count++;
- wait.task = tsk;
add_wait_queue(&bh->b_wait, &wait);
repeat:
tsk->state = TASK_UNINTERRUPTIBLE;
}
memset(bh,0,sizeof(*bh));
+ init_waitqueue_head(&bh->b_wait);
nr_unused_buffer_heads++;
bh->b_next_free = unused_list;
unused_list = bh;
*/
if((bh = kmem_cache_alloc(bh_cachep, SLAB_BUFFER)) != NULL) {
memset(bh, 0, sizeof(*bh));
+ init_waitqueue_head(&bh->b_wait);
nr_buffer_heads++;
return bh;
}
if(!async &&
(bh = kmem_cache_alloc(bh_cachep, SLAB_KERNEL)) != NULL) {
memset(bh, 0, sizeof(*bh));
+ init_waitqueue_head(&bh->b_wait);
nr_buffer_heads++;
return bh;
}
static struct buffer_head * create_buffers(unsigned long page,
unsigned long size, int async)
{
- struct wait_queue wait = { current, NULL };
+ DECLARE_WAITQUEUE(wait, current);
struct buffer_head *bh, *head;
long offset;
* response to dirty buffers. Once this process is activated, we write back
* a limited number of buffers to the disks and then go back to sleep again.
*/
-static struct wait_queue * bdflush_done = NULL;
+static DECLARE_WAIT_QUEUE_HEAD(bdflush_done);
struct task_struct *bdflush_tsk = 0;
void wakeup_bdflush(int wait)
static int dquot_updating[NR_DQHASH];
static struct dqstats dqstats;
-static struct wait_queue *dquot_wait = (struct wait_queue *)NULL,
- *update_wait = (struct wait_queue *)NULL;
+static DECLARE_WAIT_QUEUE_HEAD(dquot_wait);
+static DECLARE_WAIT_QUEUE_HEAD(update_wait);
static inline char is_enabled(struct vfsmount *vfsmnt, short type)
{
unlock_super (sb);
}
-/*
- * This function increments the inode version number
- *
- * This may be used one day by the NFS server
- */
-static void inc_inode_version (struct inode * inode,
- struct ext2_group_desc *gdp,
- int mode)
-{
- inode->u.ext2_i.i_version++;
- mark_inode_dirty(inode);
-
- return;
-}
-
/*
* There are two policies for allocating an inode. If the new inode is
* a directory, then a forward search is made for a block group with both
if (inode->u.ext2_i.i_flags & EXT2_SYNC_FL)
inode->i_flags |= MS_SYNCHRONOUS;
insert_inode_hash(inode);
+ inode->i_generation++;
mark_inode_dirty(inode);
- inc_inode_version (inode, gdp, mode);
unlock_super (sb);
if(DQUOT_ALLOC_INODE(sb, inode)) {
<< 32;
#endif
}
- inode->u.ext2_i.i_version = le32_to_cpu(raw_inode->i_version);
+ inode->i_generation = le32_to_cpu(raw_inode->i_generation);
inode->u.ext2_i.i_block_group = block_group;
inode->u.ext2_i.i_next_alloc_block = 0;
inode->u.ext2_i.i_next_alloc_goal = 0;
raw_inode->i_size_high = cpu_to_le32(inode->i_size >> 32);
#endif
}
- raw_inode->i_version = cpu_to_le32(inode->u.ext2_i.i_version);
+ raw_inode->i_generation = cpu_to_le32(inode->i_generation);
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
raw_inode->i_block[0] = cpu_to_le32(kdev_t_to_nr(inode->i_rdev));
else if (S_ISLNK(inode->i_mode) && !inode->i_blocks)
mark_inode_dirty(inode);
return 0;
case EXT2_IOC_GETVERSION:
- return put_user(inode->u.ext2_i.i_version, (int *) arg);
+ return put_user(inode->i_generation, (int *) arg);
case EXT2_IOC_SETVERSION:
if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
return -EPERM;
if (IS_RDONLY(inode))
return -EROFS;
- if (get_user(inode->u.ext2_i.i_version, (int *) arg))
+ if (get_user(inode->i_generation, (int *) arg))
return -EFAULT;
inode->i_ctime = CURRENT_TIME;
mark_inode_dirty(inode);
PIPE_BASE(*inode) = NULL;
PIPE_START(*inode) = PIPE_LEN(*inode) = 0;
PIPE_RD_OPENERS(*inode) = PIPE_WR_OPENERS(*inode) = 0;
- PIPE_WAIT(*inode) = NULL;
+ init_waitqueue_head(&PIPE_WAIT(*inode));
PIPE_READERS(*inode) = PIPE_WRITERS(*inode) = 0;
}
static void __wait_on_inode(struct inode * inode)
{
- struct wait_queue wait = { current, NULL };
+ DECLARE_WAITQUEUE(wait, current);
add_wait_queue(&inode->i_wait, &wait);
repeat:
static inline void init_once(struct inode * inode)
{
memset(inode, 0, sizeof(*inode));
- init_waitqueue(&inode->i_wait);
+ init_waitqueue_head(&inode->i_wait);
INIT_LIST_HEAD(&inode->i_hash);
INIT_LIST_HEAD(&inode->i_dentry);
sema_init(&inode->i_sem, 1);
*/
mm_segment_t old_fs=get_fs();
inode_fake.i_rdev=dev;
+ init_waitqueue_head(&inode_fake.i_wait);
ms_info.addr_format=CDROM_LBA;
set_fs(KERNEL_DS);
i=get_blkfops(MAJOR(dev))->ioctl(&inode_fake,
*/
struct nlm_wait {
struct nlm_wait * b_next; /* linked list */
- struct wait_queue * b_wait; /* where to wait on */
+ wait_queue_head_t b_wait; /* where to wait on */
struct nlm_host * b_host;
struct file_lock * b_lock; /* local file lock */
unsigned short b_reclaim; /* got to reclaim lock */
block.b_host = host;
block.b_lock = fl;
- block.b_wait = NULL;
+ init_waitqueue_head(&block.b_wait);
block.b_status = NLM_LCK_BLOCKED;
block.b_next = nlm_blocked;
nlm_blocked = █
static struct nlm_host * nlm_hosts[NLM_HOST_NRHASH];
static unsigned long next_gc = 0;
static int nrhosts = 0;
-static struct semaphore nlm_host_sema = MUTEX;
+static DECLARE_MUTEX(nlm_host_sema);
static void nlm_gc_hosts(void);
host->h_proto = proto;
host->h_authflavor = RPC_AUTH_UNIX;
host->h_rpcclnt = NULL;
- host->h_sema = MUTEX;
+ init_MUTEX(&host->h_sema);
host->h_nextrebind = jiffies + NLM_HOST_REBIND;
host->h_expires = jiffies + NLM_HOST_EXPIRE;
host->h_count = 1;
extern struct svc_program nlmsvc_program;
struct nlmsvc_binding * nlmsvc_ops = NULL;
-static struct semaphore nlmsvc_sema = MUTEX;
+static DECLARE_MUTEX(nlmsvc_sema);
static unsigned int nlmsvc_users = 0;
static pid_t nlmsvc_pid = 0;
unsigned long nlmsvc_grace_period = 0;
unsigned long nlmsvc_timeout = 0;
-static struct semaphore lockd_start = MUTEX_LOCKED;
-static struct wait_queue * lockd_exit = NULL;
+static DECLARE_MUTEX_LOCKED(lockd_start);
+static DECLARE_WAIT_QUEUE_HEAD(lockd_exit);
/*
* Currently the following can be set only at insmod time.
init_module(void)
{
/* Init the static variables */
- nlmsvc_sema = MUTEX;
+ init_MUTEX(&nlmsvc_sema);
nlmsvc_users = 0;
nlmsvc_pid = 0;
lockd_exit = NULL;
#define FILE_NRHASH 32
#define FILE_HASH_BITS 5
static struct nlm_file * nlm_files[FILE_NRHASH];
-static struct semaphore nlm_file_sema = MUTEX;
+static DECLARE_MUTEX(nlm_file_sema);
static unsigned int file_hash(dev_t dev, ino_t ino)
{
memset(file, 0, sizeof(*file));
file->f_handle = *fh;
- file->f_sema = MUTEX;
+ init_MUTEX(&file->f_sema);
/* Open the file. Note that this must not sleep for too long, else
* we would lock up lockd:-) So no NFS re-exports, folks.
tfl.fl_flags = FL_POSIX | FL_ACCESS;
tfl.fl_owner = current->files;
tfl.fl_pid = current->pid;
+ init_waitqueue_head(&tfl.fl_wait);
tfl.fl_type = (read_write == FLOCK_VERIFY_WRITE) ? F_WRLCK : F_RDLCK;
tfl.fl_start = offset;
tfl.fl_end = offset + count - 1;
memset(fl, 0, sizeof(*fl));
+ init_waitqueue_head(&fl->fl_wait);
fl->fl_flags = FL_POSIX;
switch (l->l_type) {
{
memset(fl, 0, sizeof(*fl));
+ init_waitqueue_head(&fl->fl_wait);
if (!filp->f_dentry) /* just in case */
return (0);
memset(new, 0, sizeof(*new));
new->fl_owner = fl->fl_owner;
new->fl_pid = fl->fl_pid;
+ init_waitqueue_head(&new->fl_wait);
new->fl_file = fl->fl_file;
new->fl_flags = fl->fl_flags;
new->fl_type = fl->fl_type;
unsigned int size; /* # of entries */
unsigned long age; /* last used */
unsigned long mtime; /* last attr stamp */
- struct wait_queue * wait;
+ wait_queue_head_t wait;
__u32 * entry; /* three __u32's per entry */
};
{
struct dentry *dentry = filp->f_dentry;
struct inode *inode = dentry->d_inode;
- static struct wait_queue *readdir_wait = NULL;
- struct wait_queue **waitp = NULL;
+ static DECLARE_WAIT_QUEUE_HEAD(readdir_wait);
+ wait_queue_head_t *waitp = NULL;
struct nfs_dirent *cache, *free;
unsigned long age, dead;
u32 cookie;
cache->valid = 0;
cache->dev = inode->i_dev;
cache->ino = inode->i_ino;
+ init_waitqueue_head(&cache->wait);
if (!cache->entry) {
result = -ENOMEM;
cache->entry = (__u32 *) get_free_page(GFP_KERNEL);
wreq->wb_file = file;
wreq->wb_pid = current->pid;
wreq->wb_page = page;
+ init_waitqueue_head(&wreq->wb_wait);
wreq->wb_offset = offset;
wreq->wb_bytes = bytes;
wreq->wb_count = 2; /* One for the IO, one for us */
struct dentry *dentry = file->f_dentry;
struct inode *inode = dentry->d_inode;
struct rpc_clnt *clnt = NFS_CLIENT(inode);
- struct wait_queue wait = { current, NULL };
+ DECLARE_WAITQUEUE(wait, current);
sigset_t oldmask;
int retval;
} else {
PIPE_BASE(*inode) = (char *) page;
inode->i_op = &pipe_inode_operations;
- PIPE_WAIT(*inode) = NULL;
+ init_waitqueue_head(&PIPE_WAIT(*inode));
PIPE_START(*inode) = PIPE_LEN(*inode) = 0;
PIPE_RD_OPENERS(*inode) = PIPE_WR_OPENERS(*inode) = 0;
PIPE_READERS(*inode) = PIPE_WRITERS(*inode) = 1;
}
} while (count++ < 16);
}
-#elif defined (CONFIG_ARM)
+#elif defined(__arm__)
{
unsigned long fp, lr;
unsigned long stack_page;
# define KSTK_EIP(tsk) \
(*(unsigned long *)(PT_REG(pc) + PAGE_SIZE + (unsigned long)(tsk)))
# define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->tss.usp)
-#elif defined(CONFIG_ARM)
+#elif defined(__arm__)
# define KSTK_EIP(tsk) (((unsigned long *)(4096+(unsigned long)(tsk)))[1022])
# define KSTK_ESP(tsk) (((unsigned long *)(4096+(unsigned long)(tsk)))[1020])
#elif defined(__mc68000__)
#include <asm/io.h>
extern unsigned long log_size;
-extern struct wait_queue * log_wait;
+extern wait_queue_head_t log_wait;
extern int do_syslog(int type, char * bug, int count);
}
}
-void __pollwait(struct file * filp, struct wait_queue ** wait_address, poll_table *p)
+void __pollwait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p)
{
for (;;) {
if (p->nr < __MAX_POLL_TABLE_ENTRIES) {
entry->filp = filp;
filp->f_count++;
entry->wait_address = wait_address;
- entry->wait.task = current;
- entry->wait.next = NULL;
+ init_waitqueue_entry(&entry->wait, current);
add_wait_queue(wait_address,&entry->wait);
p->nr++;
return;
* unmounting a filesystem and re-mounting it (or something
* else).
*/
-static struct semaphore mount_sem = MUTEX;
+static DECLARE_MUTEX(mount_sem);
extern void wait_for_keypress(void);
extern struct file_operations * get_blkfops(unsigned int major);
void __wait_on_super(struct super_block * sb)
{
- struct wait_queue wait = { current, NULL };
+ DECLARE_WAITQUEUE(wait, current);
add_wait_queue(&sb->s_wait, &wait);
repeat:
memset(s, 0, sizeof(struct super_block));
INIT_LIST_HEAD(&s->s_dirty);
list_add (&s->s_list, super_blocks.prev);
+ init_waitqueue_head(&s->s_wait);
}
return s;
}
* 26-Jan-1999 PJB Don't use IACK on CATS
* 16-Mar-1999 RMK Added autodetect of ISA PICs
*/
-#include <linux/config.h>
#include <asm/hardware.h>
#include <asm/dec21285.h>
#include <asm/irq.h>
#ifndef __ASM_ARCH_MMU_H
#define __ASM_ARCH_MMU_H
-#include <linux/config.h>
-
#if defined(CONFIG_HOST_FOOTBRIDGE)
/*
* Dynamic IO functions - let the compiler
* optimize the expressions
*/
-#define DECLARE_DYN_OUT(fnsuffix,instr) \
-extern __inline__ void __out##fnsuffix (unsigned int value, unsigned int port) \
-{ \
- unsigned long temp; \
- __asm__ __volatile__( \
- "tst %2, #0x80000000\n\t" \
- "mov %0, %4\n\t" \
- "addeq %0, %0, %3\n\t" \
- "str" ##instr## " %1, [%0, %2, lsl #2] @ out"###fnsuffix \
- : "=&r" (temp) \
- : "r" (value), "r" (port), "Ir" (PCIO_BASE - IO_BASE), "Ir" (IO_BASE) \
- : "cc"); \
+extern __inline__ void __outb (unsigned int value, unsigned int port)
+{
+ unsigned long temp;
+ __asm__ __volatile__(
+ "tst %2, #0x80000000\n\t"
+ "mov %0, %4\n\t"
+ "addeq %0, %0, %3\n\t"
+ "strb %1, [%0, %2, lsl #2] @ outb"
+ : "=&r" (temp)
+ : "r" (value), "r" (port), "Ir" (PCIO_BASE - IO_BASE), "Ir" (IO_BASE)
+ : "cc");
+}
+
+extern __inline__ void __outw (unsigned int value, unsigned int port)
+{
+ unsigned long temp;
+ __asm__ __volatile__(
+ "tst %2, #0x80000000\n\t"
+ "mov %0, %4\n\t"
+ "addeq %0, %0, %3\n\t"
+ "str %1, [%0, %2, lsl #2] @ outw"
+ : "=&r" (temp)
+ : "r" (value|value<<16), "r" (port), "Ir" (PCIO_BASE - IO_BASE), "Ir" (IO_BASE)
+ : "cc");
+}
+
+extern __inline__ void __outl (unsigned int value, unsigned int port)
+{
+ unsigned long temp;
+ __asm__ __volatile__(
+ "tst %2, #0x80000000\n\t"
+ "mov %0, %4\n\t"
+ "addeq %0, %0, %3\n\t"
+ "str %1, [%0, %2, lsl #2] @ outl"
+ : "=&r" (temp)
+ : "r" (value), "r" (port), "Ir" (PCIO_BASE - IO_BASE), "Ir" (IO_BASE)
+ : "cc");
}
#define DECLARE_DYN_IN(sz,fnsuffix,instr) \
}
#define DECLARE_IO(sz,fnsuffix,instr) \
- DECLARE_DYN_OUT(fnsuffix,instr) \
DECLARE_DYN_IN(sz,fnsuffix,instr)
DECLARE_IO(char,b,"b")
DECLARE_IO(long,l,"")
#undef DECLARE_IO
-#undef DECLARE_DYN_OUT
#undef DECLARE_DYN_IN
/*
#ifdef __KERNEL__
-/* forward-decare task_struct */
+/* forward-declare task_struct */
struct task_struct;
/*
#define __ASM_ARM_SEMAPHORE_H
#include <linux/linkage.h>
-#include <asm/system.h>
#include <asm/atomic.h>
struct semaphore {
#include <asm/system.h>
#include <asm/atomic.h>
#include <asm/spinlock.h>
+#include <linux/wait.h>
struct semaphore {
atomic_t count;
int waking;
- struct wait_queue * wait;
+ wait_queue_head_t wait;
+#if WAITQUEUE_DEBUG
+ int __magic;
+#endif
};
-#define MUTEX ((struct semaphore) { ATOMIC_INIT(1), 0, NULL })
-#define MUTEX_LOCKED ((struct semaphore) { ATOMIC_INIT(0), 0, NULL })
+#if WAITQUEUE_DEBUG
+# define __SEM_DEBUG_INIT(name) \
+ , (int)&(name).__magic
+#else
+# define __SEM_DEBUG_INIT(name)
+#endif
+
+#define __SEMAPHORE_INITIALIZER(name,count) \
+{ ATOMIC_INIT(count), 0, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
+ __SEM_DEBUG_INIT(name) }
+
+#define __MUTEX_INITIALIZER(name) \
+ __SEMAPHORE_INITIALIZER(name,1)
+
+#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
+ struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
+
+#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
+#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
+
+extern inline void sema_init (struct semaphore *sem, int val)
+{
+/*
+ * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
+ *
+ * i'd rather use the more flexible initialization above, but sadly
+ * GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well.
+ */
+ atomic_set(&sem->count, val);
+ sem->waking = 0;
+ init_waitqueue_head(&sem->wait);
+#if WAITQUEUE_DEBUG
+ sem->__magic = (int)&sem->__magic;
+#endif
+}
+
+static inline void init_MUTEX (struct semaphore *sem)
+{
+ sema_init(sem, 1);
+}
+
+static inline void init_MUTEX_LOCKED (struct semaphore *sem)
+{
+ sema_init(sem, 0);
+}
asmlinkage void __down_failed(void /* special register calling convention */);
asmlinkage int __down_failed_interruptible(void /* params in registers */);
extern spinlock_t semaphore_wake_lock;
-#define sema_init(sem, val) atomic_set(&((sem)->count), (val))
-
/*
* This is ugly, but we want the default case to fall through.
* "down_failed" is a special asm handler that calls the C
*/
extern inline void down(struct semaphore * sem)
{
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
+
__asm__ __volatile__(
"# atomic down operation\n\t"
#ifdef __SMP__
{
int result;
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
+
__asm__ __volatile__(
"# atomic interruptible down operation\n\t"
#ifdef __SMP__
{
int result;
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
+
__asm__ __volatile__(
"# atomic interruptible down operation\n\t"
#ifdef __SMP__
*/
extern inline void up(struct semaphore * sem)
{
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
__asm__ __volatile__(
"# atomic up operation\n\t"
#ifdef __SMP__
extern struct sec_size * blk_sec[MAX_BLKDEV];
extern struct blk_dev_struct blk_dev[MAX_BLKDEV];
-extern struct wait_queue * wait_for_request;
+extern wait_queue_head_t wait_for_request;
extern void resetup_one_dev(struct gendisk *dev, int drive);
extern void unplug_device(void * data);
extern void make_request(int major,int rw, struct buffer_head * bh);
} masix1;
} osd1; /* OS dependent 1 */
__u32 i_block[EXT2_N_BLOCKS];/* Pointers to blocks */
- __u32 i_version; /* File version (for NFS) */
+ __u32 i_generation; /* File version (for NFS) */
__u32 i_file_acl; /* File ACL */
__u32 i_dir_acl; /* Directory ACL */
__u32 i_faddr; /* Fragment address */
__u32 i_file_acl;
__u32 i_dir_acl;
__u32 i_dtime;
- __u32 i_version;
+ __u32 not_used_1; /* FIX: not used/ 2.2 placeholder */
__u32 i_block_group;
__u32 i_next_alloc_block;
__u32 i_next_alloc_goal;
unsigned int b_list; /* List that this buffer appears */
unsigned long b_flushtime; /* Time when this (dirty) buffer
* should be written */
- struct wait_queue * b_wait;
+ wait_queue_head_t b_wait;
struct buffer_head ** b_pprev; /* doubly linked list of hash-queue */
struct buffer_head * b_prev_free; /* doubly linked list of buffers */
struct buffer_head * b_reqnext; /* request queue */
struct semaphore i_atomic_write;
struct inode_operations *i_op;
struct super_block *i_sb;
- struct wait_queue *i_wait;
+ wait_queue_head_t i_wait;
struct file_lock *i_flock;
struct vm_area_struct *i_mmap;
struct page *i_pages;
struct file_lock *fl_prevblock;
fl_owner_t fl_owner;
unsigned int fl_pid;
- struct wait_queue *fl_wait;
+ wait_queue_head_t fl_wait;
struct file *fl_file;
unsigned char fl_flags;
unsigned char fl_type;
unsigned long s_magic;
unsigned long s_time;
struct dentry *s_root;
- struct wait_queue *s_wait;
+ wait_queue_head_t s_wait;
struct inode *s_ibasket;
short int s_ibasket_count;
/*
* hfs_wait_queue
*/
-typedef struct wait_queue *hfs_wait_queue;
+typedef wait_queue_head_t hfs_wait_queue;
extern inline void hfs_init_waitqueue(hfs_wait_queue *queue) {
- init_waitqueue(queue);
+ init_waitqueue_head(queue);
}
extern inline void hfs_sleep_on(hfs_wait_queue *queue) {
#ifndef __LINUX_KEYBOARD_H
#define __LINUX_KEYBOARD_H
+#include <linux/wait.h>
+
#define KG_SHIFT 0
#define KG_CTRL 2
#define KG_ALT 3
extern const int max_vals[];
extern unsigned short *key_maps[MAX_NR_KEYMAPS];
extern unsigned short plain_map[NR_KEYS];
-extern struct wait_queue * keypress_wait;
+extern wait_queue_head_t keypress_wait;
extern unsigned char keyboard_type;
#endif
unsigned short h_reclaiming : 1,
h_inuse : 1,
h_monitored : 1;
- struct wait_queue * h_gracewait; /* wait while reclaiming */
+ wait_queue_head_t h_gracewait; /* wait while reclaiming */
u32 h_state; /* pseudo-state counter */
u32 h_nsmstate; /* true remote NSM state */
unsigned int h_count; /* reference count */
struct md_thread {
void (*run) (void *data);
void *data;
- struct wait_queue *wqueue;
+ wait_queue_head_t wqueue;
unsigned long flags;
struct semaphore *sem;
struct task_struct *tsk;
struct page *next_hash;
atomic_t count;
unsigned long flags; /* atomic flags, some possibly updated asynchronously */
- struct wait_queue *wait;
+ wait_queue_head_t wait;
struct page **pprev_hash;
struct buffer_head * buffers;
} mem_map_t;
__kernel_time_t msg_stime; /* last msgsnd time */
__kernel_time_t msg_rtime; /* last msgrcv time */
__kernel_time_t msg_ctime; /* last change time */
- struct wait_queue *wwait;
- struct wait_queue *rwait;
+ wait_queue_head_t wwait;
+ wait_queue_head_t rwait;
unsigned short msg_cbytes; /* current number of bytes on queue */
unsigned short msg_qnum; /* number of messages in queue */
unsigned short msg_qbytes; /* max number of bytes on queue */
#define _LINUX_NET_H
#include <linux/socket.h>
+#include <linux/wait.h>
struct poll_table_struct;
struct fasync_struct *fasync_list; /* Asynchronous wake up list */
struct file *file; /* File back pointer for gc */
struct sock *sk;
- struct wait_queue *wait;
+ wait_queue_head_t wait;
short type;
unsigned char passcred;
struct rpc_task wb_task; /* RPC task */
struct file * wb_file; /* dentry referenced */
struct page * wb_page; /* page to be written */
- struct wait_queue * wb_wait; /* wait for completion */
+ wait_queue_head_t wb_wait; /* wait for completion */
unsigned int wb_offset; /* offset within page */
unsigned int wb_bytes; /* dirty range */
unsigned int wb_count; /* user count */
__u32 fb_dev; /* our device */
__u32 fb_xdev;
__u32 fb_xino;
+ __u32 fb_generation;
};
#define NFS_FH_PADDING (NFS_FHSIZE - sizeof(struct nfs_fhbase))
#define fh_dev fh_base.fb_dev
#define fh_xdev fh_base.fb_xdev
#define fh_xino fh_base.fb_xino
+#define fh_generation fh_base.fb_generation
#ifdef __KERNEL__
struct nfsiod_req {
struct nfsiod_req * rq_next;
struct nfsiod_req * rq_prev;
- struct wait_queue * rq_wait;
+ wait_queue_head_t rq_wait;
struct rpc_ioreq rq_rpcreq;
nfsiod_callback_t rq_callback;
struct nfs_server * rq_server;
#define _LINUX_PIPE_FS_I_H
struct pipe_inode_info {
- struct wait_queue * wait;
+ wait_queue_head_t wait;
char * base;
unsigned int start;
unsigned int lock;
struct poll_table_entry {
struct file * filp;
- struct wait_queue wait;
- struct wait_queue ** wait_address;
+ wait_queue_t wait;
+ wait_queue_head_t * wait_address;
};
typedef struct poll_table_struct {
#define __MAX_POLL_TABLE_ENTRIES ((PAGE_SIZE - sizeof (poll_table)) / sizeof (struct poll_table_entry))
-extern void __pollwait(struct file * filp, struct wait_queue ** wait_address, poll_table *p);
+extern void __pollwait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p);
-extern inline void poll_wait(struct file * filp, struct wait_queue ** wait_address, poll_table *p)
+extern inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p)
{
if (p && wait_address)
__pollwait(filp, wait_address, p);
#ifdef __KERNEL__
extern atomic_t rtnl_rlockct;
-extern struct wait_queue *rtnl_wait;
+extern wait_queue_head_t rtnl_wait;
extern __inline__ int rtattr_strcmp(struct rtattr *rta, char *str)
{
#define TASK_ZOMBIE 4
#define TASK_STOPPED 8
#define TASK_SWAPPING 16
+/*
+ * 'exclusive' tasks are the ones that expect 'wake-one' behavior
+ * on __wake_up(). They are special because __wake_up() removes
+ * them from the waitqueue immediately, this way we have O(1) addition,
+ * scheduling and removal from waitqueues, no matter how long they are.
+ */
+#define TASK_EXCLUSIVE 32
/*
* Scheduling policies
void * segments;
};
-#define INIT_MM { \
+#define INIT_MM(name) { \
&init_mmap, NULL, NULL, \
swapper_pg_dir, \
ATOMIC_INIT(1), 1, \
- MUTEX, \
+ __MUTEX_INITIALIZER(name.mmap_sem), \
0, \
0, 0, 0, 0, \
0, 0, 0, \
/* Pointer to task[] array linkage. */
struct task_struct **tarray_ptr;
- struct wait_queue *wait_chldexit; /* for wait4() */
+ wait_queue_head_t wait_chldexit; /* for wait4() */
struct semaphore *vfork_sem; /* for vfork() */
unsigned long policy, rt_priority;
unsigned long it_real_value, it_prof_value, it_virt_value;
* INIT_TASK is used to set up the first task table, touch at
* your own risk!. Base=0, limit=0x1fffff (=2MB)
*/
-#define INIT_TASK \
+#define INIT_TASK(name) \
/* state etc */ { 0,0,0,KERNEL_DS,&default_exec_domain,0, \
/* counter */ DEF_PRIORITY,DEF_PRIORITY,0, \
/* SMP */ 0,0,0,-1, \
/* proc links*/ &init_task,&init_task,NULL,NULL,NULL, \
/* pidhash */ NULL, NULL, \
/* tarray */ &task[0], \
-/* chld wait */ NULL, NULL, \
+/* chld wait */ __WAIT_QUEUE_HEAD_INITIALIZER(name.wait_chldexit), NULL, \
/* timeout */ SCHED_OTHER,0,0,0,0,0,0,0, \
/* timer */ { NULL, NULL, 0, 0, it_real_fn }, \
/* utime */ {0,0,0,0},0, \
#define CURRENT_TIME (xtime.tv_sec)
-extern void FASTCALL(__wake_up(struct wait_queue ** p, unsigned int mode));
-extern void FASTCALL(sleep_on(struct wait_queue ** p));
-extern long FASTCALL(sleep_on_timeout(struct wait_queue ** p,
+extern void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode));
+extern void FASTCALL(sleep_on(wait_queue_head_t *q));
+extern long FASTCALL(sleep_on_timeout(wait_queue_head_t *q,
signed long timeout));
-extern void FASTCALL(interruptible_sleep_on(struct wait_queue ** p));
-extern long FASTCALL(interruptible_sleep_on_timeout(struct wait_queue ** p,
+extern void FASTCALL(interruptible_sleep_on(wait_queue_head_t *q));
+extern long FASTCALL(interruptible_sleep_on_timeout(wait_queue_head_t *q,
signed long timeout));
extern void FASTCALL(wake_up_process(struct task_struct * tsk));
extern int do_execve(char *, char **, char **, struct pt_regs *);
extern int do_fork(unsigned long, unsigned long, struct pt_regs *);
-/*
- * The wait-queues are circular lists, and you have to be *very* sure
- * to keep them correct. Use only these two functions to add/remove
- * entries in the queues.
- */
-extern inline void __add_wait_queue(struct wait_queue ** p, struct wait_queue * wait)
-{
- wait->next = *p ? : WAIT_QUEUE_HEAD(p);
- *p = wait;
-}
-
-extern rwlock_t waitqueue_lock;
-
-extern inline void add_wait_queue(struct wait_queue ** p, struct wait_queue * wait)
+extern inline void add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
{
unsigned long flags;
- write_lock_irqsave(&waitqueue_lock, flags);
- __add_wait_queue(p, wait);
- write_unlock_irqrestore(&waitqueue_lock, flags);
+ wq_write_lock_irqsave(&q->lock, flags);
+ __add_wait_queue(q, wait);
+ wq_write_unlock_irqrestore(&q->lock, flags);
}
-extern inline void __remove_wait_queue(struct wait_queue ** p, struct wait_queue * wait)
+extern inline void add_wait_queue_exclusive(wait_queue_head_t *q,
+ wait_queue_t * wait)
{
- struct wait_queue * next = wait->next;
- struct wait_queue * head = next;
- struct wait_queue * tmp;
+ unsigned long flags;
- while ((tmp = head->next) != wait) {
- head = tmp;
- }
- head->next = next;
+ wq_write_lock_irqsave(&q->lock, flags);
+ __add_wait_queue_tail(q, wait);
+ wq_write_unlock_irqrestore(&q->lock, flags);
}
-extern inline void remove_wait_queue(struct wait_queue ** p, struct wait_queue * wait)
+extern inline void remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
{
unsigned long flags;
- write_lock_irqsave(&waitqueue_lock, flags);
- __remove_wait_queue(p, wait);
- write_unlock_irqrestore(&waitqueue_lock, flags);
+ wq_write_lock_irqsave(&q->lock, flags);
+ __remove_wait_queue(q, wait);
+ wq_write_unlock_irqrestore(&q->lock, flags);
}
#define __wait_event(wq, condition) \
do { \
- struct wait_queue __wait; \
+ wait_queue_t __wait; \
+ init_waitqueue_entry(&__wait, current); \
\
- __wait.task = current; \
add_wait_queue(&wq, &__wait); \
for (;;) { \
current->state = TASK_UNINTERRUPTIBLE; \
#define __wait_event_interruptible(wq, condition, ret) \
do { \
- struct wait_queue __wait; \
+ wait_queue_t __wait; \
+ init_waitqueue_entry(&__wait, current); \
\
- __wait.task = current; \
add_wait_queue(&wq, &__wait); \
for (;;) { \
current->state = TASK_INTERRUPTIBLE; \
struct sem_queue {
struct sem_queue * next; /* next entry in the queue */
struct sem_queue ** prev; /* previous entry in the queue, *(q->prev) == q */
- struct wait_queue * sleeper; /* sleeping process */
+ wait_queue_head_t sleeper; /* sleeping process */
struct sem_undo * undo; /* undo structure */
int pid; /* process id of requesting process */
int status; /* completion status of operation */
int xmit_tail;
int xmit_cnt;
struct tq_struct tqueue;
- struct wait_queue *open_wait;
- struct wait_queue *close_wait;
- struct wait_queue *delta_msr_wait;
+ wait_queue_head_t open_wait;
+ wait_queue_head_t close_wait;
+ wait_queue_head_t delta_msr_wait;
struct async_struct *next_port; /* For the linked list */
struct async_struct *prev_port;
};
#include <linux/timer.h>
#include <linux/tqueue.h>
#include <linux/sunrpc/types.h>
+#include <linux/wait.h>
/*
* Define this if you want to test the fast scheduler for async calls.
* you have a pathological interest in kernel oopses.
*/
struct timer_list tk_timer; /* kernel timer */
- struct wait_queue * tk_wait; /* sync: sleep on this q */
+ wait_queue_head_t tk_wait; /* sync: sleep on this q */
unsigned long tk_timeout; /* timeout for rpc_sleep() */
unsigned short tk_flags; /* misc flags */
#ifdef RPC_DEBUG
struct svc_client * rq_client; /* RPC peer info */
struct svc_cacherep * rq_cacherep; /* cache info */
- struct wait_queue * rq_wait; /* synchronozation */
+ wait_queue_head_t rq_wait; /* synchronization */
};
/*
struct tty_flip_buffer flip;
int max_flip_cnt;
int alt_speed; /* For magic substitution of 38400 bps */
- struct wait_queue *write_wait;
- struct wait_queue *read_wait;
+ wait_queue_head_t write_wait;
+ wait_queue_head_t read_wait;
struct tq_struct tq_hangup;
void *disc_data;
void *driver_data;
struct vt_mode vt_mode;
int vt_pid;
int vt_newvt;
- struct wait_queue *paste_wait;
+ wait_queue_head_t paste_wait;
} *vt_cons[MAX_NR_CONSOLES];
void (*kd_mksound)(unsigned int hz, unsigned int ticks);
#ifdef __KERNEL__
#include <asm/page.h>
+#include <asm/spinlock.h>
+#include <linux/list.h>
+#include <linux/stddef.h>
-struct wait_queue {
+/*
+ * Temporary debugging help until all code is converted to the new
+ * waitqueue usage.
+ */
+#define WAITQUEUE_DEBUG 1
+
+#if WAITQUEUE_DEBUG
+extern int printk(const char *fmt, ...);
+#define WQ_BUG() do { \
+ printk("wq bug, forcing oops.\n"); \
+ *(int*)0 = 0; \
+} while (0)
+
+#define CHECK_MAGIC(x) if (x != (int)&(x)) \
+ { printk("bad magic %08x (should be %08x), ", x, (int)&(x)); WQ_BUG(); }
+
+#define CHECK_MAGIC_WQHEAD(x) do { \
+ if (x->__magic != (int)&(x->__magic)) { \
+ printk("bad magic %08x (should be %08x, creator %08x), ", \
+ x->__magic, (int)&(x->__magic), x->__creator); \
+ WQ_BUG(); \
+ } \
+} while (0)
+#endif
+
+struct __wait_queue {
+ unsigned int compiler_warning;
struct task_struct * task;
- struct wait_queue * next;
+ struct list_head task_list;
+#if WAITQUEUE_DEBUG
+ int __magic;
+ int __waker;
+#endif
+};
+typedef struct __wait_queue wait_queue_t;
+
+/*
+ * 'dual' spinlock architecture. Can be switched between spinlock_t and
+ * rwlock_t locks via changing this define. Since waitqueues are quite
+ * decoupled in the new architecture, lightweight 'simple' spinlocks give
+ * us slightly better latencies and smaller waitqueue structure size.
+ */
+#define USE_RW_WAIT_QUEUE_SPINLOCK 0
+
+#if USE_RW_WAIT_QUEUE_SPINLOCK
+# define wq_lock_t rwlock_t
+# define WAITQUEUE_RW_LOCK_UNLOCKED RW_LOCK_UNLOCKED
+
+# define wq_read_lock read_lock
+# define wq_read_lock_irqsave read_lock_irqsave
+# define wq_read_unlock_irqrestore read_unlock_irqrestore
+# define wq_read_unlock read_unlock
+# define wq_write_lock_irq write_lock_irq
+# define wq_write_lock_irqsave write_lock_irqsave
+# define wq_write_unlock_irqrestore write_unlock_irqrestore
+# define wq_write_unlock write_unlock
+#else
+# define wq_lock_t spinlock_t
+# define WAITQUEUE_RW_LOCK_UNLOCKED SPIN_LOCK_UNLOCKED
+
+# define wq_read_lock spin_lock
+# define wq_read_lock_irqsave spin_lock_irqsave
+# define wq_read_unlock spin_unlock
+# define wq_read_unlock_irqrestore spin_unlock_irqrestore
+# define wq_write_lock_irq spin_lock_irq
+# define wq_write_lock_irqsave spin_lock_irqsave
+# define wq_write_unlock_irqrestore spin_unlock_irqrestore
+# define wq_write_unlock spin_unlock
+#endif
+
+struct __wait_queue_head {
+ wq_lock_t lock;
+ struct list_head task_list;
+#if WAITQUEUE_DEBUG
+ int __magic;
+ int __creator;
+#endif
};
+typedef struct __wait_queue_head wait_queue_head_t;
+
+#if WAITQUEUE_DEBUG
+# define __WAITQUEUE_DEBUG_INIT(name) \
+ , (int)&(name).__magic, 0
+# define __WAITQUEUE_HEAD_DEBUG_INIT(name) \
+ , (int)&(name).__magic, (int)&(name).__magic
+#else
+# define __WAITQUEUE_DEBUG_INIT(name)
+# define __WAITQUEUE_HEAD_DEBUG_INIT(name)
+#endif
+
+#define __WAITQUEUE_INITIALIZER(name,task) \
+ { 0x1234567, task, { NULL, NULL } __WAITQUEUE_DEBUG_INIT(name)}
+#define DECLARE_WAITQUEUE(name,task) \
+ wait_queue_t name = __WAITQUEUE_INITIALIZER(name,task)
+
+#define __WAIT_QUEUE_HEAD_INITIALIZER(name) \
+{ WAITQUEUE_RW_LOCK_UNLOCKED, { &(name).task_list, &(name).task_list } \
+ __WAITQUEUE_HEAD_DEBUG_INIT(name)}
-#define WAIT_QUEUE_HEAD(x) ((struct wait_queue *)((x)-1))
+#define DECLARE_WAIT_QUEUE_HEAD(name) \
+ wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
-static inline void init_waitqueue(struct wait_queue **q)
+static inline void init_waitqueue_head(wait_queue_head_t *q)
{
- *q = WAIT_QUEUE_HEAD(q);
+#if WAITQUEUE_DEBUG
+ __label__ __x;
+ if (!q)
+ WQ_BUG();
+#endif
+ q->lock = WAITQUEUE_RW_LOCK_UNLOCKED;
+ INIT_LIST_HEAD(&q->task_list);
+#if WAITQUEUE_DEBUG
+ q->__magic = (int)&q->__magic;
+ __x: q->__creator = (int)&&__x;
+#endif
}
-static inline int waitqueue_active(struct wait_queue **q)
+static inline void init_waitqueue_entry(wait_queue_t *q,
+ struct task_struct *p)
{
- struct wait_queue *head = *q;
- return head && head != WAIT_QUEUE_HEAD(q);
+#if WAITQUEUE_DEBUG
+ if (!q || !p)
+ WQ_BUG();
+#endif
+ q->task = p;
+#if WAITQUEUE_DEBUG
+ q->__magic = (int)&q->__magic;
+#endif
+}
+
+static inline int waitqueue_active(wait_queue_head_t *q)
+{
+#if WAITQUEUE_DEBUG
+ if (!q)
+ WQ_BUG();
+ CHECK_MAGIC_WQHEAD(q);
+#endif
+
+ return !list_empty(&q->task_list);
+}
+
+extern inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
+{
+#if WAITQUEUE_DEBUG
+ if (!head || !new)
+ WQ_BUG();
+ CHECK_MAGIC_WQHEAD(head);
+ CHECK_MAGIC(new->__magic);
+ if (!head->task_list.next || !head->task_list.prev)
+ WQ_BUG();
+#endif
+ list_add(&new->task_list, &head->task_list);
+}
+
+/*
+ * Used for wake-one threads:
+ */
+extern inline void __add_wait_queue_tail(wait_queue_head_t *head,
+ wait_queue_t *new)
+{
+#if WAITQUEUE_DEBUG
+ if (!head || !new)
+ WQ_BUG();
+ CHECK_MAGIC_WQHEAD(head);
+ CHECK_MAGIC(new->__magic);
+ if (!head->task_list.next || !head->task_list.prev)
+ WQ_BUG();
+#endif
+ list_add(&new->task_list, head->task_list.prev);
+}
+
+extern inline void __remove_wait_queue(wait_queue_head_t *head,
+ wait_queue_t *old)
+{
+#if WAITQUEUE_DEBUG
+ if (!old)
+ WQ_BUG();
+ CHECK_MAGIC(old->__magic);
+#endif
+ list_del(&old->task_list);
}
#endif /* __KERNEL__ */
atomic_t sock_readers; /* User count */
int rcvbuf; /* Size of receive buffer in bytes */
- struct wait_queue **sleep; /* Sock wait queue */
+ wait_queue_head_t *sleep; /* Sock wait queue */
struct dst_entry *dst_cache; /* Destination cache */
atomic_t rmem_alloc; /* Receive queue bytes committed */
struct sk_buff_head receive_queue; /* Incoming packets */
static unsigned short msg_seq = 0;
static int used_queues = 0;
static int max_msqid = 0;
-static struct wait_queue *msg_lock = NULL;
+static DECLARE_WAIT_QUEUE_HEAD(msg_lock);
void __init msg_init (void)
{
for (id = 0; id < MSGMNI; id++)
msgque[id] = (struct msqid_ds *) IPC_UNUSED;
msgbytes = msghdrs = msg_seq = max_msqid = used_queues = 0;
- msg_lock = NULL;
+ init_waitqueue_head(&msg_lock);
return;
}
ipcp->gid = ipcp->cgid = current->egid;
msq->msg_perm.seq = msg_seq;
msq->msg_first = msq->msg_last = NULL;
- msq->rwait = msq->wwait = NULL;
+ init_waitqueue_head(&msq->wwait);
+ init_waitqueue_head(&msq->rwait);
msq->msg_cbytes = msq->msg_qnum = 0;
msq->msg_lspid = msq->msg_lrpid = 0;
msq->msg_stime = msq->msg_rtime = 0;
static struct semid_ds *semary[SEMMNI];
static int used_sems = 0, used_semids = 0;
-static struct wait_queue *sem_lock = NULL;
+static DECLARE_WAIT_QUEUE_HEAD(sem_lock);
static int max_semid = 0;
static unsigned short sem_seq = 0;
{
int i;
- sem_lock = NULL;
+ init_waitqueue_head(&sem_lock);
used_sems = used_semids = max_semid = sem_seq = 0;
for (i = 0; i < SEMMNI; i++)
semary[i] = (struct semid_ds *) IPC_UNUSED;
for (;;) {
queue.status = -EINTR;
- queue.sleeper = NULL;
+ init_waitqueue_head(&queue.sleeper);
interruptible_sleep_on(&queue.sleeper);
/*
static int shm_rss = 0; /* number of shared memory pages that are in memory */
static int shm_swp = 0; /* number of shared memory pages that are in swap */
static int max_shmid = 0; /* every used id is <= max_shmid */
-static struct wait_queue *shm_lock = NULL; /* calling findkey() may need to wait */
+static DECLARE_WAIT_QUEUE_HEAD(shm_lock); /* calling findkey() may need to wait */
static struct shmid_kernel *shm_segs[SHMMNI];
static unsigned short shm_seq = 0; /* incremented, for recognizing stale ids */
for (id = 0; id < SHMMNI; id++)
shm_segs[id] = (struct shmid_kernel *) IPC_UNUSED;
shm_tot = shm_rss = shm_seq = max_shmid = used_segs = 0;
- shm_lock = NULL;
+ init_waitqueue_head(&shm_lock);
return;
}
asmlinkage int sys_wait4(pid_t pid,unsigned int * stat_addr, int options, struct rusage * ru)
{
int flag, retval;
- struct wait_queue wait = { current, NULL };
+ DECLARE_WAITQUEUE(wait, current);
struct task_struct *p;
if (options & ~(WNOHANG|WUNTRACED|__WCLONE))
atomic_set(&mm->count, 1);
mm->map_count = 0;
mm->def_flags = 0;
- mm->mmap_sem = MUTEX_LOCKED;
+ init_MUTEX_LOCKED(&mm->mmap_sem);
/*
* Leave mm->pgd set to the parent's pgd
* so that pgd_offset() is always valid.
int nr;
int retval = -ENOMEM;
struct task_struct *p;
- struct semaphore sem = MUTEX_LOCKED;
+ DECLARE_MUTEX_LOCKED(sem);
current->vfork_sem = &sem;
p->p_pptr = p->p_opptr = current;
p->p_cptr = NULL;
- init_waitqueue(&p->wait_chldexit);
+ init_waitqueue_head(&p->wait_chldexit);
p->vfork_sem = NULL;
p->sigpending = 0;
#ifdef __SMP__
/* Various random spinlocks we want to export */
EXPORT_SYMBOL(tqueue_lock);
-EXPORT_SYMBOL(waitqueue_lock);
#endif
/* autoirq from drivers/net/auto_irq.c */
#define DEFAULT_CONSOLE_LOGLEVEL 7 /* anything MORE serious than KERN_DEBUG */
unsigned long log_size = 0;
-struct wait_queue * log_wait = NULL;
+DECLARE_WAIT_QUEUE_HEAD(log_wait);
/* Keep together for sysctl support */
int console_loglevel = DEFAULT_CONSOLE_LOGLEVEL;
return;
}
-rwlock_t waitqueue_lock = RW_LOCK_UNLOCKED;
-
-/*
- * wake_up doesn't wake up stopped processes - they have to be awakened
- * with signals or similar.
- *
- * Note that we only need a read lock for the wait queue (and thus do not
- * have to protect against interrupts), as the actual removal from the
- * queue is handled by the process itself.
- */
-void __wake_up(struct wait_queue **q, unsigned int mode)
+void __wake_up(wait_queue_head_t *q, unsigned int mode)
{
+ struct list_head *tmp, *head;
struct task_struct *p;
- struct wait_queue *head, *next;
+ unsigned long flags;
if (!q)
goto out;
- /*
- * this is safe to be done before the check because it
- * means no deference, just pointer operations.
- */
- head = WAIT_QUEUE_HEAD(q);
- read_lock(&waitqueue_lock);
- next = *q;
- if (!next)
- goto out_unlock;
+ wq_write_lock_irqsave(&q->lock, flags);
+
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC_WQHEAD(q);
+#endif
- while (next != head) {
- p = next->task;
- next = next->next;
+ head = &q->task_list;
+#if WAITQUEUE_DEBUG
+ if (!head->next || !head->prev)
+ WQ_BUG();
+#endif
+ tmp = head->next;
+ while (tmp != head) {
+ wait_queue_t *curr = list_entry(tmp, wait_queue_t, task_list);
+
+ tmp = tmp->next;
+
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(curr->__magic);
+#endif
+ p = curr->task;
if (p->state & mode) {
- /*
- * We can drop the read-lock early if this
- * is the only/last process.
- */
- if (next == head) {
- read_unlock(&waitqueue_lock);
+ if (p->state & TASK_EXCLUSIVE) {
+ __remove_wait_queue(q, curr);
+ wq_write_unlock_irqrestore(&q->lock, flags);
+
+ curr->task_list.next = NULL;
+ curr->__waker = 0;
wake_up_process(p);
goto out;
}
+#if WAITQUEUE_DEBUG
+ curr->__waker = (int)__builtin_return_address(0);
+#endif
wake_up_process(p);
}
+ if (p->state & TASK_EXCLUSIVE)
+ break;
}
-out_unlock:
- read_unlock(&waitqueue_lock);
+ wq_write_unlock_irqrestore(&q->lock, flags);
out:
return;
}
#define DOWN_VAR \
struct task_struct *tsk = current; \
- struct wait_queue wait = { tsk, NULL };
+ wait_queue_t wait; \
+ init_waitqueue_entry(&wait, tsk);
#define DOWN_HEAD(task_state) \
\
int __down_interruptible(struct semaphore * sem)
{
- DOWN_VAR
int ret = 0;
+ DOWN_VAR
DOWN_HEAD(TASK_INTERRUPTIBLE)
ret = waking_non_zero_interruptible(sem, tsk);
#define SLEEP_ON_VAR \
unsigned long flags; \
- struct wait_queue wait;
+ wait_queue_t wait; \
+ init_waitqueue_entry(&wait, current);
#define SLEEP_ON_HEAD \
- wait.task = current; \
- write_lock_irqsave(&waitqueue_lock,flags); \
- __add_wait_queue(p, &wait); \
- write_unlock(&waitqueue_lock);
+ wq_write_lock_irqsave(&q->lock,flags); \
+ __add_wait_queue(q, &wait); \
+ wq_write_unlock(&q->lock);
#define SLEEP_ON_TAIL \
- write_lock_irq(&waitqueue_lock); \
- __remove_wait_queue(p, &wait); \
- write_unlock_irqrestore(&waitqueue_lock,flags);
+ wq_write_lock_irq(&q->lock); \
+ __remove_wait_queue(q, &wait); \
+ wq_write_unlock_irqrestore(&q->lock,flags);
-void interruptible_sleep_on(struct wait_queue **p)
+void interruptible_sleep_on(wait_queue_head_t *q)
{
SLEEP_ON_VAR
SLEEP_ON_TAIL
}
-long interruptible_sleep_on_timeout(struct wait_queue **p, long timeout)
+long interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
{
SLEEP_ON_VAR
return timeout;
}
-void sleep_on(struct wait_queue **p)
+void sleep_on(wait_queue_head_t *q)
{
SLEEP_ON_VAR
SLEEP_ON_TAIL
}
-long sleep_on_timeout(struct wait_queue **p, long timeout)
+long sleep_on_timeout(wait_queue_head_t *q, long timeout)
{
SLEEP_ON_VAR
read_lock(&tasklist_lock);
for_each_task(p) {
if ((p->state == TASK_RUNNING ||
- p->state == TASK_UNINTERRUPTIBLE ||
- p->state == TASK_SWAPPING))
+ (p->state & TASK_UNINTERRUPTIBLE) ||
+ (p->state & TASK_SWAPPING)))
nr += FIXED_1;
}
read_unlock(&tasklist_lock);
out:
spin_unlock_irqrestore(&t->sigmask_lock, flags);
- if (t->state == TASK_INTERRUPTIBLE && signal_pending(t))
+ if ((t->state & TASK_INTERRUPTIBLE) && signal_pending(t))
wake_up_process(t);
out_nolock:
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
+#include <linux/init.h>
#include <asm/uaccess.h>
#include <asm/io.h>
* rather than a semaphore. Anybody want to implement
* one?
*/
-struct semaphore uts_sem = MUTEX;
+DECLARE_MUTEX(uts_sem);
asmlinkage int sys_newuname(struct new_utsname * name)
{
};
static struct pio_request *pio_first = NULL, **pio_last = &pio_first;
static kmem_cache_t *pio_request_cache;
-static struct wait_queue *pio_wait = NULL;
+static DECLARE_WAIT_QUEUE_HEAD(pio_wait);
static inline void
make_pio_request(struct file *, unsigned long, unsigned long);
void __wait_on_page(struct page *page)
{
struct task_struct *tsk = current;
- struct wait_queue wait;
+ DECLARE_WAITQUEUE(wait, tsk);
- wait.task = tsk;
add_wait_queue(&page->wait, &wait);
repeat:
tsk->state = TASK_UNINTERRUPTIBLE;
int kpiod(void * unused)
{
struct task_struct *tsk = current;
- struct wait_queue wait = { tsk, };
+ DECLARE_WAITQUEUE(wait, tsk);
struct inode * inode;
struct dentry * dentry;
struct pio_request * p;
tsk->pgrp = 1;
strcpy(tsk->comm, "kpiod");
sigfillset(&tsk->blocked);
- init_waitqueue(&pio_wait);
/*
* Mark this task as a memory allocator - we don't want to get caught
* up in the regular mm freeing frenzy if we have to allocate memory
--p;
atomic_set(&p->count, 0);
p->flags = (1 << PG_DMA) | (1 << PG_reserved);
+ init_waitqueue_head(&p->wait);
} while (p > mem_map);
for (i = 0 ; i < NR_MEM_LISTS ; i++) {
#include <asm/pgtable.h>
-static struct wait_queue * lock_queue = NULL;
+static DECLARE_WAIT_QUEUE_HEAD(lock_queue);
/*
* Reads or writes a swap page.
#undef kmem_slab_offset
#undef kmem_slab_diff
- cache_chain_sem = MUTEX;
+ init_MUTEX(&cache_chain_sem);
size = cache_cache.c_offset + sizeof(kmem_bufctl_t);
size += (L1_CACHE_BYTES-1);
static inline void wait_for_packet(struct sock * sk)
{
- struct wait_queue wait = { current, NULL };
+ DECLARE_WAITQUEUE(wait, current);
add_wait_queue(sk->sleep, &wait);
current->state = TASK_INTERRUPTIBLE;
#include <net/pkt_sched.h>
atomic_t rtnl_rlockct;
-struct wait_queue *rtnl_wait;
+DECLARE_WAIT_QUEUE_HEAD(rtnl_wait);
void rtnl_lock()
*/
static void sock_wait_for_wmem(struct sock * sk)
{
- struct wait_queue wait = { current, NULL };
+ DECLARE_WAITQUEUE(wait, current);
sk->socket->flags &= ~SO_NOSPACE;
add_wait_queue(sk->sleep, &wait);
sk->type = sock->type;
sk->sleep = &sock->wait;
sock->sk = sk;
- }
+ } else
+ sk->sleep = NULL;
sk->state_change = sock_def_wakeup;
sk->data_ready = sock_def_readable;
static void inet_wait_for_connect(struct sock *sk)
{
- struct wait_queue wait = { current, NULL };
+ DECLARE_WAITQUEUE(wait, current);
add_wait_queue(sk->sleep, &wait);
current->state = TASK_INTERRUPTIBLE;
icmp_inode.i_sock = 1;
icmp_inode.i_uid = 0;
icmp_inode.i_gid = 0;
+ init_waitqueue_head(&icmp_inode.i_wait);
+ init_waitqueue_head(&icmp_inode.u.socket_i.wait);
icmp_socket->inode = &icmp_inode;
icmp_socket->state = SS_UNCONNECTED;
struct quake_priv_data {
/* Have we seen a client connect message */
- char cl_connect;
+ signed char cl_connect;
};
static int
static int wait_for_tcp_connect(struct sock * sk, int flags)
{
struct task_struct *tsk = current;
- struct wait_queue wait = { tsk, NULL };
+ DECLARE_WAITQUEUE(wait, tsk);
while((1 << sk->state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
if(sk->err)
{
release_sock(sk);
if (!tcp_memory_free(sk)) {
- struct wait_queue wait = { current, NULL };
+ DECLARE_WAITQUEUE(wait, current);
sk->socket->flags &= ~SO_NOSPACE;
add_wait_queue(sk->sleep, &wait);
int len, int nonblock, int flags, int *addr_len)
{
struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
- struct wait_queue wait = { current, NULL };
+ DECLARE_WAITQUEUE(wait, current);
int copied = 0;
u32 peek_seq;
volatile u32 *seq; /* So gcc doesn't overoptimise */
if (timeout) {
struct task_struct *tsk = current;
- struct wait_queue wait = { tsk, NULL };
+ DECLARE_WAITQUEUE(wait, current);
add_wait_queue(sk->sleep, &wait);
release_sock(sk);
static struct open_request * wait_for_connect(struct sock * sk,
struct open_request **pprev)
{
- struct wait_queue wait = { current, NULL };
+ DECLARE_WAITQUEUE(wait, current);
struct open_request *req;
- add_wait_queue(sk->sleep, &wait);
+ /*
+ * True wake-one mechanism for incoming connections: only
+ * one process gets woken up, not the 'whole herd'.
+ * Since we do not 'race & poll' for established sockets
+ * anymore, the common case will execute the loop only once.
+ */
for (;;) {
- current->state = TASK_INTERRUPTIBLE;
+ add_wait_queue_exclusive(sk->sleep, &wait);
+ current->state = TASK_EXCLUSIVE | TASK_INTERRUPTIBLE;
release_sock(sk);
schedule();
lock_sock(sk);
break;
}
current->state = TASK_RUNNING;
- remove_wait_queue(sk->sleep, &wait);
+#if WAITQUEUE_DEBUG
+ /*
+ * hm, gotta do something about 'mixed mode' waitqueues. Eg.
+ * if we get a signal above then we are not removed from the
+ * waitqueue... Maybe wake_up_process() could leave the
+ * TASK_EXCLUSIVE flag intact if it was a true wake-one?
+ */
+ if (wait.task_list.next) {
+ printk("<%08x>", wait.__waker);
+ remove_wait_queue(sk->sleep, &wait);
+ }
+#endif
return req;
}
tcp_inode.i_sock = 1;
tcp_inode.i_uid = 0;
tcp_inode.i_gid = 0;
+ init_waitqueue_head(&tcp_inode.i_wait);
+ init_waitqueue_head(&tcp_inode.u.socket_i.wait);
tcp_socket->inode = &tcp_inode;
tcp_socket->state = SS_UNCONNECTED;
inode->i_gid = current->fsgid;
sock->inode = inode;
- init_waitqueue(&sock->wait);
+ init_waitqueue_head(&sock->wait);
sock->fasync_list = NULL;
sock->state = SS_UNCONNECTED;
sock->flags = 0;
# define RPCDBG_FACILITY RPCDBG_CALL
#endif
-static struct wait_queue * destroy_wait = NULL;
+static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);
static void call_bind(struct rpc_task *task);
/*
* rpciod-related stuff
*/
-static struct wait_queue * rpciod_idle = NULL;
-static struct wait_queue * rpciod_killer = NULL;
-static struct semaphore rpciod_sema = MUTEX;
+static DECLARE_WAIT_QUEUE_HEAD(rpciod_idle);
+static DECLARE_WAIT_QUEUE_HEAD(rpciod_killer);
+static DECLARE_MUTEX(rpciod_sema);
static unsigned int rpciod_users = 0;
static pid_t rpciod_pid = 0;
static int rpc_inhibit = 0;
task->tk_client = clnt;
task->tk_flags = RPC_TASK_RUNNING | flags;
task->tk_exit = callback;
+ init_waitqueue_head(&task->tk_wait);
if (current->uid != current->fsuid || current->gid != current->fsgid)
task->tk_flags |= RPC_TASK_SETUID;
rpc_inhibit--;
}
-static struct semaphore rpciod_running = MUTEX_LOCKED;
+static DECLARE_MUTEX_LOCKED(rpciod_running);
/*
* This is the rpciod kernel thread
static int
rpciod(void *ptr)
{
- struct wait_queue **assassin = (struct wait_queue **) ptr;
+ wait_queue_head_t *assassin = (wait_queue_head_t*) ptr;
unsigned long oldflags;
int rounds = 0;
goto out;
memset(rqstp, 0, sizeof(*rqstp));
+ init_waitqueue_head(&rqstp->rq_wait);
+
if (!(rqstp->rq_argp = (u32 *) kmalloc(serv->sv_xdrsize, GFP_KERNEL))
|| !(rqstp->rq_resp = (u32 *) kmalloc(serv->sv_xdrsize, GFP_KERNEL))
|| !svc_init_buffer(&rqstp->rq_defbuf, serv->sv_bufsz))
{
struct svc_sock *svsk;
int len;
- struct wait_queue wait = { current, NULL };
+ DECLARE_WAITQUEUE(wait, current);
dprintk("svc: server %p waiting for data (to = %ld)\n",
rqstp, timeout);
unix_socket *unix_socket_table[UNIX_HASH_SIZE+1];
static atomic_t unix_nr_socks = ATOMIC_INIT(0);
-static struct wait_queue * unix_ack_wqueue = NULL;
-static struct wait_queue * unix_dgram_wqueue = NULL;
+static DECLARE_WAIT_QUEUE_HEAD(unix_ack_wqueue);
+static DECLARE_WAIT_QUEUE_HEAD(unix_dgram_wqueue);
#define unix_sockets_unbound (unix_socket_table[UNIX_HASH_SIZE])
sk->destruct = unix_destruct_addr;
sk->protinfo.af_unix.family=PF_UNIX;
sk->protinfo.af_unix.dentry=NULL;
- sk->protinfo.af_unix.readsem=MUTEX; /* single task reading lock */
+ init_MUTEX(&sk->protinfo.af_unix.readsem);/* single task reading lock */
sk->protinfo.af_unix.list=&unix_sockets_unbound;
unix_insert_socket(sk);