S: Australia
N: Hugh Blemings
-E: hugh@linuxcare.com
-W: http://www.linuxcare.com.au/hugh/
+E: hugh@misc.nu
+W: http://misc.nu/hugh
D: Author and maintainer of the Keyspan USB to Serial drivers
S: Po Box 234
S: Belconnen ACT 2616
S: USA
N: Kai Petzke
-E: wpp@marie.physik.tu-berlin.de
-W: http://physik.tu-berlin.de/~wpp
-P: 1024/B42868C1 D9 59 B9 98 BB 93 05 38 2E 3E 31 79 C3 65 5D E1
+E: petzke@teltarif.de
+W: http://www.teltarif.de/
+P: 1024/B42868C1 D9 59 B9 98 BB 93 05 38 2E 3E 31 79 C3 65 5D E1
D: Driver for Laser Magnetic Storage CD-ROM
D: Some kernel bug fixes
D: Port of the database Postgres
-D: "Unix fuer Jedermann" a German introduction to linux (see my web page)
-S: M"ullerstr. 69
-S: 13349 Berlin
+D: Book: "Linux verstehen und anwenden" (Hanser-Verlag)
+S: Triftstraße 55
+S: 13353 Berlin
S: Germany
N: Ken Pizzini
VERSION = 2
PATCHLEVEL = 2
SUBLEVEL = 19
-EXTRAVERSION = pre13
+EXTRAVERSION = pre15
ARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ -e s/arm.*/arm/ -e s/sa110/arm/)
chnchk = 1;
} /* endif */
- if ( (dp->ii.irb.scsw.stctl == SCSW_STCTL_STATUS_PEND)
+ if( dp->ii.irb.scsw.ectl==0)
+ {
+ issense=0;
+ }
+ else if ( (dp->ii.irb.scsw.stctl == SCSW_STCTL_STATUS_PEND)
&& (dp->ii.irb.scsw.eswf == 0 ))
{
issense = 0;
#ifdef CONFIG_SPARCAUDIO
extern int sparcaudio_init(void);
#endif
-#ifdef CONFIG_ISDN
-int isdn_init(void);
-#endif
#ifdef CONFIG_PHONE
extern int telephony_init(void);
#endif
#if CONFIG_QIC02_TAPE
qic02_tape_init();
#endif
-#if CONFIG_ISDN
- isdn_init();
-#endif
#ifdef CONFIG_FTAPE
ftape_init();
#endif
if [ "$CONFIG_ISDN_AUDIO" != "n" ]; then
bool ' Support AT-Fax Class 1 and 2 commands' CONFIG_ISDN_TTY_FAX
fi
-if [ "$CONFIG_X25" != "n" ]; then
+
+# CONFIG_X25 is defined only when CONFIG_EXPERIMENTAL=y
+if [ "$CONFIG_EXPERIMENTAL" = "y" -a "$CONFIG_X25" != "n" ]; then
bool ' X.25 PLP on top of ISDN' CONFIG_ISDN_X25
fi
This takes less than 10usec and will easily finish before the next
action. */
outl(PortReset, ioaddr + SCBPort);
+ inl(ioaddr + SCBPort);
/* Honor PortReset timing. */
udelay(10);
#endif /* kernel_bloat */
outl(PortReset, ioaddr + SCBPort);
+ inl(ioaddr + SCBPort);
/* Honor PortReset timing. */
udelay(10);
/* Set the segment registers to '0'. */
wait_for_cmd_done(ioaddr + SCBCmd);
outl(0, ioaddr + SCBPointer);
+ /* impose a delay to avoid a bug */
+ inl(ioaddr + SCBPointer);
+ udelay(10);
outb(RxAddrLoad, ioaddr + SCBCmd);
wait_for_cmd_done(ioaddr + SCBCmd);
outb(CUCmdBase, ioaddr + SCBCmd);
* 01/17/01 fixed PL020234MVE problem accessing DASD 68-127, 133-191,...
* 01/23/01 fixed sleep_on_request to terminate when signal_pending
* 01/25/01 added code for error recovery with PAC'0x1D' = long busy
+ * 02/08/01 fixed PL020237RMI
+ * 02/08/01 fixed PL020265TDI
*/
#include <linux/module.h>
delta = cqr->expires - cqr->startclk;
printk (KERN_ERR PRINTK_HEADER
" devno 0x%04X on subchannel %d = /dev/%s (%d:%d)"
- " I/O operation outstanding longer than 0x%08x%08x usecs on req %p\n",
+ " I/O operation outstanding longer than 0x%08lx%08lx usecs on req %p\n",
devno, irq, device->name, major_from_devindex (devindex), devindex << DASD_PARTN_BITS, (long)(delta >> 44), (long)(delta >> 12), cqr);
cqr->expires += delta;
#if 0
while ((!rc ) &&
((req = device->discipline->format_device (device, &temp)) != NULL ) ) {
- if ( rc=sleep_on_req(req) ) {
+ if ( (rc=sleep_on_req(req)) != 0 ) {
printk (KERN_WARNING PRINTK_HEADER
" devno 0x%04X on subchannel %d = /dev/%s (%d:%d)"
" Formatting failed with rc = %d\n",
if (major_info->blksize_size[minor + i] < 1024 )
major_info->blksize_size[minor + i] = 1024;
- major_info->max_sectors[minor + i] = 200 << device->sizes.s2b_shift; /* FIXME !!! */
+ major_info->max_sectors[minor + i] = device->discipline->max_blocks << device->sizes.s2b_shift;
}
atomic_compare_and_swap_debug (&device->level,
DASD_DEVICE_LEVEL_ANALYSIS_PREPARED,
dasd_unregister_major(major_info);
}
}
- emergency_failed:
dasd_cleanup_emergency_req();
- failed:
printk (KERN_INFO PRINTK_HEADER "initialization not performed due to errors\n");
out:
printk (KERN_INFO PRINTK_HEADER "initialization finished\n");
dasd_discipline_t dasd_diag_discipline = {
name : "DIAG",
ebcname : "DIAG",
+ max_blocks: PAGE_SIZE/sizeof(diag_bio_t),
check_characteristics: dasd_diag_check_characteristics,
do_analysis: dasd_diag_do_analysis,
fill_geometry: dasd_diag_fill_geometry,
dasd_discipline_t dasd_eckd_discipline = {
name : "ECKD",
ebcname : "ECKD",
+ max_blocks: 255,
id_check: dasd_eckd_id_check,
check_characteristics: dasd_eckd_check_characteristics,
init_analysis: dasd_eckd_init_analysis,
dasd_discipline_t dasd_fba_discipline = {
name : "FBA ",
ebcname : "FBA ",
+ max_blocks: PAGE_SIZE/sizeof(ccw1_t),
id_check: dasd_fba_id_check,
check_characteristics: dasd_fba_check_characteristics,
do_analysis: dasd_fba_do_analysis,
done(SCpnt); return 0;});
if(*cmd == REQUEST_SENSE){
-#ifndef DEBUG
+#if 0
+ /* scsi_request_sense() provides a buffer of size 256,
+ so there is no reason to expect equality */
if (bufflen != sizeof(SCpnt->sense_buffer)) {
printk("Wrong buffer length supplied for request sense (%d)\n",bufflen);
};
if(*cmd == REQUEST_SENSE)
{
+#if 0
if (bufflen != sizeof(SCpnt->sense_buffer))
{
printk("Wrong buffer length supplied for request sense (%d)\n",
bufflen);
}
+#endif
SCpnt->result = 0;
done(SCpnt);
return 0;
#define U32 unsigned long
#endif
+#ifndef VIRT_TO_BUS
+#define VIRT_TO_BUS(x) (unsigned long)virt_to_bus((void *) x)
+#endif
+
+#ifndef VIRT_TO_BUS
+#define VIRT_TO_BUS(x) (unsigned int)virt_to_bus((void *) x)
+#endif
+
#ifndef NULL
#define NULL 0 /* zero */
#endif
/* Interrupt polling pipe */
struct urb *urb;
- char buffer[USB_MAXCHILDREN / 8];
+ char buffer[(USB_MAXCHILDREN + 1 + 7) / 8]; /* add 1 bit for hub status change */
+ /* and add 7 bits to round up to byte boundary */
/* List of hubs */
struct list_head hub_list;
typedef struct dasd_discipline_t {
char ebcname[8]; /* a name used for tagging and printks */
char name[8]; /* a name used for tagging and printks */
+ int max_blocks; /* how many blocks are allowed to be chained */
struct dasd_discipline_t *next; /* used for list of disciplines */
#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)
+#define BUG() do { \
+ printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
+ __asm__ __volatile__(".word 0x0000"); \
+} while (0)
+
extern __inline__ int get_order(unsigned long size)
{
int order;
#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
#define MAP_NR(addr) (__pa(addr) >> PAGE_SHIFT)
-
#endif /* __KERNEL__ */
#endif /* _S390_PAGE_H */
extern signed long FASTCALL(schedule_timeout(signed long timeout));
asmlinkage void schedule(void);
+extern int schedule_task(struct tq_struct *task);
+extern void flush_scheduled_tasks(void);
+extern int start_context_thread(void);
+extern int current_is_keventd(void);
+
/*
* The default fd array needs to be at least BITS_PER_LONG,
* as this is the granularity returned by copy_fdset().
#define wake_up(x) __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE | TASK_EXCLUSIVE)
#define wake_up_interruptible(x) __wake_up((x),TASK_INTERRUPTIBLE | TASK_EXCLUSIVE)
+#define __set_task_state(tsk, state_value) do { (tsk)->state = state_value; } while (0)
+#ifdef __SMP__
+#define set_task_state(tsk, state_value) do { __set_task_state(tsk, state_value); mb(); } while (0)
+#else
+#define set_task_state(tsk, state_value) __set_task_state(tsk, state_value)
+#endif
+
#define __set_current_state(state_value) do { current->state = state_value; } while (0)
#ifdef __SMP__
#define set_current_state(state_value) do { __set_current_state(state_value); mb(); } while (0)
typedef struct tq_struct * task_queue;
#define DECLARE_TASK_QUEUE(q) task_queue q = NULL
+#define TQ_ACTIVE(q) ((q) != NULL)
extern task_queue tq_timer, tq_immediate, tq_scheduler, tq_disk;
extern spinlock_t tqueue_lock;
/*
- * queue_task
+ * Queue a task on a tq. Return non-zero if it was successfully
+ * added.
*/
-extern __inline__ void queue_task(struct tq_struct *bh_pointer,
+extern __inline__ int queue_task(struct tq_struct *bh_pointer,
task_queue *bh_list)
{
+ int ret = 0;
if (!test_and_set_bit(0,&bh_pointer->sync)) {
unsigned long flags;
spin_lock_irqsave(&tqueue_lock, flags);
bh_pointer->next = *bh_list;
*bh_list = bh_pointer;
spin_unlock_irqrestore(&tqueue_lock, flags);
+ ret = 1;
}
+ return ret;
}
+
/*
* Call all "bottom halfs" on a given list.
*/
struct list_head inodes;
};
-#define USB_MAXCHILDREN (8) /* This is arbitrary */
+#define USB_MAXCHILDREN (16) /* This is arbitrary */
struct usb_device {
int devnum; /* Device number on USB bus */
#define WNOHANG 0x00000001
#define WUNTRACED 0x00000002
-#define __WCLONE 0x80000000
+#define __WALL 0x40000000 /* Wait on all children, regardless of type */
+#define __WCLONE 0x80000000 /* Wait only on non-SIGCHLD children */
#ifdef __KERNEL__
else \
{ \
X##_f0 = (X##_f1 >> ((N) - _FP_W_TYPE_SIZE) | \
- (((X##_f1 << (sz - (N))) | X##_f0) != 0)); \
+ (((X##_f1 << (2*_FP_W_TYPE_SIZE - (N))) | X##_f0) != 0)); \
X##_f1 = 0; \
} \
} while (0)
else mount_initrd =0;
#endif
+ /* Start the event daemon thread */
+ start_context_thread();
+
/* Set up devices .. */
device_setup();
module.o exit.o itimer.o info.o time.o softirq.o resource.o \
sysctl.o acct.o capability.o
-OX_OBJS += signal.o
+OX_OBJS += context.o signal.o
ifeq ($(CONFIG_KMOD),y)
O_OBJS += kmod.o
--- /dev/null
+/*
+ * linux/kernel/context.c
+ *
+ * Mechanism for running arbitrary tasks in process context
+ *
+ * dwmw2@redhat.com: Genesis
+ *
+ * andrewm@uow.edu.au: 2.4.0-test12
+ * - Child reaping
+ * - Support for tasks which re-add themselves
+ * - flush_scheduled_tasks.
+ */
+
+#define __KERNEL_SYSCALLS__
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/unistd.h>
+#include <linux/signal.h>
+
+static DECLARE_TASK_QUEUE(tq_context);
+static DECLARE_WAIT_QUEUE_HEAD(context_task_wq);
+static DECLARE_WAIT_QUEUE_HEAD(context_task_done);
+static int keventd_running;
+static struct task_struct *keventd_task;
+
+/*
+ * Return non-zero iff the keventd thread has started; when it has not,
+ * log an error identifying the caller (who).  Used as a guard by the
+ * public entry points below.
+ */
+static int need_keventd(const char *who)
+{
+	if (keventd_running == 0)
+		printk(KERN_ERR "%s(): keventd has not started\n", who);
+	return keventd_running;
+}
+
+/*
+ * Return non-zero iff the calling task is the keventd thread itself.
+ * Returns 0 (and logs an error via need_keventd()) if keventd has not
+ * started yet.
+ */
+int current_is_keventd(void)
+{
+	int ret = 0;
+	if (need_keventd(__FUNCTION__))
+		ret = (current == keventd_task);
+	return ret;
+}
+
+/**
+ * schedule_task - schedule a function for subsequent execution in process context.
+ * @task: pointer to a &tq_struct which defines the function to be scheduled.
+ *
+ * May be called from interrupt context. The scheduled function is run at some
+ * time in the near future by the keventd kernel thread. If it can sleep, it
+ * should be designed to do so for the minimum possible time, as it will be
+ * stalling all other scheduled tasks.
+ *
+ * schedule_task() returns non-zero if the task was successfully scheduled.
+ * If @task is already residing on a task queue then schedule_task() fails
+ * to schedule your task and returns zero.
+ */
+int schedule_task(struct tq_struct *task)
+{
+	int ret;
+	/* Warn (but still queue) if keventd is not up yet */
+	need_keventd(__FUNCTION__);
+	ret = queue_task(task, &tq_context);
+	/* Kick keventd so it notices the new work */
+	wake_up(&context_task_wq);
+	return ret;
+}
+
+/*
+ * The keventd thread body: loops forever running tq_context entries in
+ * process context, reaping any child processes it spawned, and waking
+ * flush_scheduled_tasks() waiters after each pass.  Never returns.
+ */
+static int context_thread(void *dummy)
+{
+	struct task_struct *curtask = current;
+	DECLARE_WAITQUEUE(wait, curtask);
+	struct k_sigaction sa;
+
+	/* Detach from the parent's resources and name ourselves */
+	daemonize();
+	strcpy(curtask->comm, "keventd");
+	keventd_running = 1;
+	keventd_task = curtask;
+
+	/* Block every signal except SIGCHLD, which we use for reaping */
+	spin_lock_irq(&curtask->sigmask_lock);
+	siginitsetinv(&curtask->blocked, sigmask(SIGCHLD));
+	recalc_sigpending(curtask);
+	spin_unlock_irq(&curtask->sigmask_lock);
+
+	/* Install a handler so SIGCLD is delivered */
+	sa.sa.sa_handler = SIG_IGN;
+	sa.sa.sa_flags = 0;
+	siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
+	do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);
+
+	/*
+	 * If one of the functions on a task queue re-adds itself
+	 * to the task queue we call schedule() in state TASK_RUNNING
+	 */
+	for (;;) {
+		/* Sleep unless work arrived between runs (lost-wakeup safe:
+		 * we set the state before checking the queue) */
+		set_task_state(curtask, TASK_INTERRUPTIBLE);
+		add_wait_queue(&context_task_wq, &wait);
+		if (TQ_ACTIVE(tq_context))
+			set_task_state(curtask, TASK_RUNNING);
+		schedule();
+		remove_wait_queue(&context_task_wq, &wait);
+		run_task_queue(&tq_context);
+		/* Let flush_scheduled_tasks() waiters proceed */
+		wake_up(&context_task_done);
+		if (signal_pending(curtask)) {
+			/* Reap all dead children (clone or not), non-blocking */
+			while (waitpid(-1, (unsigned int *)0, __WALL|WNOHANG) > 0)
+				;
+			flush_signals(curtask);
+			recalc_sigpending(curtask);
+		}
+	}
+}
+
+/**
+ * flush_scheduled_tasks - ensure that any scheduled tasks have run to completion.
+ *
+ * Forces execution of the schedule_task() queue and blocks until its completion.
+ *
+ * If a kernel subsystem uses schedule_task() and wishes to flush any pending
+ * tasks, it should use this function. This is typically used in driver shutdown
+ * handlers.
+ *
+ * The caller should hold no spinlocks and should hold no semaphores which could
+ * cause the scheduled tasks to block.
+ */
+/* Sentinel work item: queued only so keventd wakes us after a full pass */
+static struct tq_struct dummy_task;
+
+void flush_scheduled_tasks(void)
+{
+	int count;
+	DECLARE_WAITQUEUE(wait, current);
+
+	/*
+	 * Do it twice. It's possible, albeit highly unlikely, that
+	 * the caller queued a task immediately before calling us,
+	 * and that the eventd thread was already past the run_task_queue()
+	 * but not yet into wake_up(), so it woke us up before completing
+	 * the caller's queued task or our new dummy task.
+	 */
+	add_wait_queue(&context_task_done, &wait);
+	for (count = 0; count < 2; count++) {
+		/* State set before queueing, so the wakeup cannot be lost */
+		set_current_state(TASK_UNINTERRUPTIBLE);
+
+		/* Queue a dummy task to make sure we get kicked */
+		schedule_task(&dummy_task);
+
+		/* Wait for it to complete */
+		schedule();
+	}
+	remove_wait_queue(&context_task_done, &wait);
+}
+
+/*
+ * Spawn the keventd thread (sharing fs/files context with the caller).
+ * Called once during boot, from init/main.c.  Always returns 0.
+ */
+int start_context_thread(void)
+{
+	kernel_thread(context_thread, NULL, CLONE_FS | CLONE_FILES);
+	return 0;
+}
+
+EXPORT_SYMBOL(schedule_task);
+EXPORT_SYMBOL(flush_scheduled_tasks);
+
struct wait_queue wait = { current, NULL };
struct task_struct *p;
- if (options & ~(WNOHANG|WUNTRACED|__WCLONE))
+ if (options & ~(WNOHANG|WUNTRACED|__WCLONE|__WALL))
return -EINVAL;
add_wait_queue(¤t->wait_chldexit,&wait);
if (p->pgrp != -pid)
continue;
}
- /* wait for cloned processes iff the __WCLONE flag is set */
- if ((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0))
+ /* Wait for all children (clone and not) if __WALL is set;
+ * otherwise, wait for clone children *only* if __WCLONE is
+ * set; otherwise, wait for non-clone children *only*. (Note:
+ * A "clone" child here is one that reports to its parent
+ * using a signal other than SIGCHLD.) */
+ if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0))
+ && !(options & __WALL))
continue;
flag = 1;
switch (p->state) {
*
* Implementation of the Transmission Control Protocol(TCP).
*
- * Version: $Id: tcp_input.c,v 1.164.2.19 2001/02/02 01:50:39 davem Exp $
+ * Version: $Id: tcp_input.c,v 1.164.2.20 2001/02/23 20:20:22 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
* of data (and SYN, FIN, of course) is checked separately.
* See tcp_data_queue(), for example.
*
- * Also, controls (RST is main one) are accepted using RCV.WUP instead
+ * Also, controls (RST is main one) are accepted using last_ack_sent instead
* of RCV.NXT. Peer still did not advance his SND.UNA when we
- * delayed ACK, so that hisSND.UNA<=ourRCV.WUP.
+ * delayed ACK, so that hisSND.UNA<=last_ack_sent.
* (borrowed from freebsd)
*/
static inline int tcp_sequence(struct tcp_opt *tp, u32 seq, u32 end_seq)
{
- return !before(end_seq, tp->rcv_wup) &&
+ return !before(end_seq, tp->last_ack_sent) &&
!after(seq, tp->rcv_nxt + tcp_receive_window(tp));
}
SOCK_DEBUG(sk, "retransmit received: seq %X\n", TCP_SKB_CB(skb)->seq);
tcp_enter_quickack_mode(tp);
out_of_window:
+ tp->delayed_acks++;
kfree_skb(skb);
return;
}
if (after(tp->copied_seq, ptr))
return;
+ if (before(ptr, tp->rcv_nxt))
+ return;
+
/* Do we already have a newer (or duplicate) urgent pointer? */
if (tp->urg_data && !after(ptr, tp->urg_seq))
return;
* as data, nor can we alter copied_seq until this data arrives
* or we break the sematics of SIOCATMARK (and thus sockatmark())
*/
- if (tp->urg_seq == tp->copied_seq)
- tp->copied_seq++; /* Move the copied sequence on correctly */
+ if (tp->urg_seq == tp->copied_seq && tp->urg_data &&
+ !sk->urginline &&
+ tp->copied_seq != tp->rcv_nxt)
+ tp->copied_seq++;
tp->urg_data = URG_NOTYET;
tp->urg_seq = ptr;
/* Do we wait for any urgent data? - normally not... */
if (tp->urg_data == URG_NOTYET) {
- u32 ptr = tp->urg_seq - ntohl(th->seq) + (th->doff*4);
+ u32 ptr = tp->urg_seq - ntohl(th->seq) + (th->doff*4) - th->syn;
/* Is the urgent pointer pointing into this packet? */
if (ptr < len) {