VERSION = 1
PATCHLEVEL = 3
-SUBLEVEL = 29
+SUBLEVEL = 30
ARCH = i386
EB66+ CONFIG_ALPHA_EB66P \
EB64 CONFIG_ALPHA_EB64 \
EB64+ CONFIG_ALPHA_EB64P" Cabriolet
-if [ "$CONFIG_ALPHA_NONAME" = "y" ]; then
+if [ "$CONFIG_ALPHA_NONAME" = "y" -o "$CONFIG_ALPHA_EB66" = "y" \
+ -o "$CONFIG_ALPHA_EB66P" = "y" ]
+then
define_bool CONFIG_PCI y
define_bool CONFIG_ALPHA_LCA y
fi
-if [ "$CONFIG_ALPHA_CABRIOLET" = "y" -o "$CONFIG_ALPHA_EB66" = "y" \
+if [ "$CONFIG_ALPHA_CABRIOLET" = "y" \
-o "$CONFIG_ALPHA_EB64" = "y" -o "$CONFIG_ALPHA_EB64P" = "y" ]
then
define_bool CONFIG_PCI y
$(CC) -D__ASSEMBLY__ -traditional -c $< -o $*.o
OBJS = entry.o traps.o process.o osf_sys.o irq.o signal.o setup.o \
- bios32.o ptrace.o apecs.o lca.o
+ bios32.o ptrace.o time.o apecs.o lca.o
all: kernel.o head.o
noname_fixup();
#elif defined(CONFIG_ALPHA_CABRIOLET)
cabriolet_fixup();
+#elif defined(CONFIG_ALPHA_EB66P)
+ eb66p_fixup();
#elif defined(CONFIG_ALPHA_EB66)
eb66_and_eb64p_fixup();
#elif defined(CONFIG_ALPHA_EB64P)
kstat.interrupts[irq]++;
action = irq_action + irq;
+#ifdef CONFIG_RANDOM
+ if (action->flags & SA_SAMPLE_RANDOM)
+ add_interrupt_randomness(irq);
+#endif
/* quick interrupts get executed with no extra overhead */
if (action->flags & SA_INTERRUPT) {
action->handler(irq, regs);
--- /dev/null
+/*
+ * linux/arch/alpha/kernel/time.c
+ *
+ * Copyright (C) 1991, 1992, 1995 Linus Torvalds
+ *
+ * This file contains the PC-specific time handling details:
+ * reading the RTC at bootup, etc..
+ * 1994-07-02 Alan Modra
+ * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
+ * 1995-03-26 Markus Kuhn
+ * fixed 500 ms bug at call to set_rtc_mmss, fixed DS12887
+ * precision CMOS clock update
+ */
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+
+#include <asm/segment.h>
+#include <asm/io.h>
+
+#include <linux/mc146818rtc.h>
+#include <linux/timex.h>
+
+#define TIMER_IRQ 0
+
+static int set_rtc_mmss(unsigned long);
+
+/*
+ * timer_interrupt() needs to keep up the real-time clock,
+ * as well as call the "do_timer()" routine every clocktick
+ */
+static void timer_interrupt(int irq, struct pt_regs * regs)
+{
+ /* last time the cmos clock got updated */
+ static long last_rtc_update=0;
+
+ do_timer(regs);
+
+ /*
+ * If we have an externally synchronized Linux clock, then update
+ * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
+ * called as close as possible to 500 ms before the new second starts.
+ */
+ if (time_state != TIME_BAD && xtime.tv_sec > last_rtc_update + 660 &&
+ xtime.tv_usec > 500000 - (tick >> 1) &&
+ xtime.tv_usec < 500000 + (tick >> 1))
+ if (set_rtc_mmss(xtime.tv_sec) == 0)
+ last_rtc_update = xtime.tv_sec;
+ else
+ last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
+}
+
+/* Converts Gregorian date to seconds since 1970-01-01 00:00:00.
+ * Assumes input in normal date format, i.e. 1980-12-31 23:59:59
+ * => year=1980, mon=12, day=31, hour=23, min=59, sec=59.
+ *
+ * [For the Julian calendar (which was used in Russia before 1917,
+ * Britain & colonies before 1752, anywhere else before 1582,
+ * and is still in use by some communities) leave out the
+ * -year/100+year/400 terms, and add 10.]
+ *
+ * This algorithm was first published by Gauss (I think).
+ *
+ * WARNING: this function will overflow on 2106-02-07 06:28:16 on
+ * machines where long is 32-bit! (However, as time_t is signed, we
+ * will already get problems at other places on 2038-01-19 03:14:08)
+ */
+static inline unsigned long mktime(unsigned int year, unsigned int mon,
+ unsigned int day, unsigned int hour,
+ unsigned int min, unsigned int sec)
+{
+ if (0 >= (int) (mon -= 2)) { /* 1..12 -> 11,12,1..10 */
+ mon += 12; /* Puts Feb last since it has leap day */
+ year -= 1;
+ }
+ return (((
+ (unsigned long)(year/4 - year/100 + year/400 + 367*mon/12 + day) +
+ year*365 - 719499
+ )*24 + hour /* now have hours */
+ )*60 + min /* now have minutes */
+ )*60 + sec; /* finally seconds */
+}
+
+void time_init(void)
+{
+ unsigned int year, mon, day, hour, min, sec;
+ int i;
+
+ /* The Linux interpretation of the CMOS clock register contents:
+ * When the Update-In-Progress (UIP) flag goes from 1 to 0, the
+ * RTC registers show the second which has precisely just started.
+ * Let's hope other operating systems interpret the RTC the same way.
+ */
+ /* read RTC exactly on falling edge of update flag */
+ for (i = 0 ; i < 1000000 ; i++) /* may take up to 1 second... */
+ if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)
+ break;
+ for (i = 0 ; i < 1000000 ; i++) /* must try at least 2.228 ms */
+ if (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP))
+ break;
+ do { /* Isn't this overkill ? UIP above should guarantee consistency */
+ sec = CMOS_READ(RTC_SECONDS);
+ min = CMOS_READ(RTC_MINUTES);
+ hour = CMOS_READ(RTC_HOURS);
+ day = CMOS_READ(RTC_DAY_OF_MONTH);
+ mon = CMOS_READ(RTC_MONTH);
+ year = CMOS_READ(RTC_YEAR);
+ } while (sec != CMOS_READ(RTC_SECONDS));
+ if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
+ {
+ BCD_TO_BIN(sec);
+ BCD_TO_BIN(min);
+ BCD_TO_BIN(hour);
+ BCD_TO_BIN(day);
+ BCD_TO_BIN(mon);
+ BCD_TO_BIN(year);
+ }
+#ifdef ALPHA_PRE_V1_2_SRM_CONSOLE
+ /*
+ * The meaning of life, the universe, and everything. Plus
+ * this makes the year come out right on SRM consoles earlier
+ * than v1.2.
+ */
+ year -= 42;
+#endif
+ if ((year += 1900) < 1970)
+ year += 100;
+ xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
+ xtime.tv_usec = 0;
+ if (request_irq(TIMER_IRQ, timer_interrupt, 0, "timer") != 0)
+ panic("Could not allocate timer IRQ!");
+}
+
+/*
+ * We could get better timer accuracy by using the alpha
+ * time counters or something. Now this is limited to
+ * the HZ clock frequency.
+ */
+void do_gettimeofday(struct timeval *tv)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ *tv = xtime;
+ restore_flags(flags);
+}
+
+void do_settimeofday(struct timeval *tv)
+{
+ cli();
+ xtime = *tv;
+ time_state = TIME_BAD;
+ time_maxerror = 0x70000000;
+ time_esterror = 0x70000000;
+ sti();
+}
+
+
+/*
+ * In order to set the CMOS clock precisely, set_rtc_mmss has to be
+ * called 500 ms after the second nowtime has started, because when
+ * nowtime is written into the registers of the CMOS clock, it will
+ * jump to the next second precisely 500 ms later. Check the Motorola
+ * MC146818A or Dallas DS12887 data sheet for details.
+ */
+static int set_rtc_mmss(unsigned long nowtime)
+{
+ int retval = 0;
+ int real_seconds, real_minutes, cmos_minutes;
+ unsigned char save_control, save_freq_select;
+
+ save_control = CMOS_READ(RTC_CONTROL); /* tell the clock it's being set */
+ CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
+
+ save_freq_select = CMOS_READ(RTC_FREQ_SELECT); /* stop and reset prescaler */
+ CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
+
+ cmos_minutes = CMOS_READ(RTC_MINUTES);
+ if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
+ BCD_TO_BIN(cmos_minutes);
+
+ /*
+ * since we're only adjusting minutes and seconds,
+ * don't interfere with hour overflow. This avoids
+ * messing with unknown time zones but requires your
+ * RTC not to be off by more than 15 minutes
+ */
+ real_seconds = nowtime % 60;
+ real_minutes = nowtime / 60;
+ if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1)
+ real_minutes += 30; /* correct for half hour time zone */
+ real_minutes %= 60;
+
+ if (abs(real_minutes - cmos_minutes) < 30) {
+ if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
+ BIN_TO_BCD(real_seconds);
+ BIN_TO_BCD(real_minutes);
+ }
+ CMOS_WRITE(real_seconds,RTC_SECONDS);
+ CMOS_WRITE(real_minutes,RTC_MINUTES);
+ } else
+ retval = -1;
+
+ /* The following flags have to be released exactly in this order,
+ * otherwise the DS12887 (popular MC146818A clone with integrated
+ * battery and quartz) will not reset the oscillator and will not
+ * update precisely 500 ms later. You won't find this mentioned in
+ * the Dallas Semiconductor data sheets, but who believes data
+ * sheets anyway ... -- Markus Kuhn
+ */
+ CMOS_WRITE(save_control, RTC_CONTROL);
+ CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
+
+ return retval;
+}
#include <linux/tty.h>
#include <asm/unaligned.h>
+#include <asm/gentrap.h>
void die_if_kernel(char * str, struct pt_regs * regs, long err)
{
send_sig(SIGTRAP, current, 1);
break;
- case 1: /* bugcheck */
case 2: /* gentrap */
+ /*
+ * The translation from the gentrap error code into a
+ * siginfo structure (see /usr/include/sys/siginfo.h)
+ * is missing as Linux does not presently support the
+ * siginfo argument that is normally passed to a
+ * signal handler.
+ */
+ switch ((long) regs.r16) {
+ case GEN_INTOVF: case GEN_INTDIV: case GEN_FLTOVF:
+ case GEN_FLTDIV: case GEN_FLTUND: case GEN_FLTINV:
+ case GEN_FLTINE:
+ send_sig(SIGFPE, current, 1);
+ break;
+
+ case GEN_DECOVF:
+ case GEN_DECDIV:
+ case GEN_DECINV:
+ case GEN_ROPRAND:
+ case GEN_ASSERTERR:
+ case GEN_NULPTRERR:
+ case GEN_STKOVF:
+ case GEN_STRLENERR:
+ case GEN_SUBSTRERR:
+ case GEN_RANGERR:
+ case GEN_SUBRNG:
+ case GEN_SUBRNG1:
+ case GEN_SUBRNG2:
+ case GEN_SUBRNG3:
+ case GEN_SUBRNG4:
+ case GEN_SUBRNG5:
+ case GEN_SUBRNG6:
+ case GEN_SUBRNG7:
+ send_sig(SIGILL, current, 1);
+ break;
+ }
+ break;
+
+ case 1: /* bugcheck */
case 3: /* FEN fault */
case 4: /* opDEC */
send_sig(SIGILL, current, 1);
dep_tristate 'UltraStor SCSI support' CONFIG_SCSI_ULTRASTOR n $CONFIG_SCSI
dep_tristate '7000FASST SCSI support' CONFIG_SCSI_7000FASST n $CONFIG_SCSI
dep_tristate 'EATA ISA/EISA (DPT PM2011/021/012/022/122/322) support' CONFIG_SCSI_EATA n $CONFIG_SCSI
+dep_tristate 'NCR53c406a SCSI support' CONFIG_SCSI_NCR53C406A n $CONFIG_SCSI
#dep_tristate 'SCSI debugging host adapter' CONFIG_SCSI_DEBUG n $CONFIG_SCSI
fi
all: kernel.o head.o
O_TARGET := kernel.o
O_OBJS := process.o signal.o entry.o traps.o irq.o vm86.o bios32.o \
- ptrace.o ioport.o ldt.o setup.o sys_i386.o
+ ptrace.o ioport.o ldt.o setup.o time.o sys_i386.o
#head.o: head.s
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
+#include <linux/random.h>
#include <asm/system.h>
#include <asm/io.h>
struct irqaction * action = irq + irq_action;
kstat.interrupts[irq]++;
+#ifdef CONFIG_RANDOM
+ if (action->flags & SA_SAMPLE_RANDOM)
+ add_interrupt_randomness(irq);
+#endif
action->handler(irq, regs);
}
struct irqaction * action = irq + irq_action;
kstat.interrupts[irq]++;
+#ifdef CONFIG_RANDOM
+ if (action->flags & SA_SAMPLE_RANDOM)
+ add_interrupt_randomness(irq);
+#endif
action->handler(irq, NULL);
}
--- /dev/null
+/*
+ * linux/arch/i386/kernel/time.c
+ *
+ * Copyright (C) 1991, 1992, 1995 Linus Torvalds
+ *
+ * This file contains the PC-specific time handling details:
+ * reading the RTC at bootup, etc..
+ * 1994-07-02 Alan Modra
+ * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
+ * 1995-03-26 Markus Kuhn
+ * fixed 500 ms bug at call to set_rtc_mmss, fixed DS12887
+ * precision CMOS clock update
+ */
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+
+#include <asm/segment.h>
+#include <asm/io.h>
+
+#include <linux/mc146818rtc.h>
+#include <linux/timex.h>
+
+#define TIMER_IRQ 0
+
+static int set_rtc_mmss(unsigned long);
+
+/*
+ * timer_interrupt() needs to keep up the real-time clock,
+ * as well as call the "do_timer()" routine every clocktick
+ */
+static void timer_interrupt(int irq, struct pt_regs * regs)
+{
+ /* last time the cmos clock got updated */
+ static long last_rtc_update=0;
+
+ do_timer(regs);
+
+ /*
+ * If we have an externally synchronized Linux clock, then update
+ * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
+ * called as close as possible to 500 ms before the new second starts.
+ */
+ if (time_state != TIME_BAD && xtime.tv_sec > last_rtc_update + 660 &&
+ xtime.tv_usec > 500000 - (tick >> 1) &&
+ xtime.tv_usec < 500000 + (tick >> 1))
+ if (set_rtc_mmss(xtime.tv_sec) == 0)
+ last_rtc_update = xtime.tv_sec;
+ else
+ last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
+}
+
+/* Converts Gregorian date to seconds since 1970-01-01 00:00:00.
+ * Assumes input in normal date format, i.e. 1980-12-31 23:59:59
+ * => year=1980, mon=12, day=31, hour=23, min=59, sec=59.
+ *
+ * [For the Julian calendar (which was used in Russia before 1917,
+ * Britain & colonies before 1752, anywhere else before 1582,
+ * and is still in use by some communities) leave out the
+ * -year/100+year/400 terms, and add 10.]
+ *
+ * This algorithm was first published by Gauss (I think).
+ *
+ * WARNING: this function will overflow on 2106-02-07 06:28:16 on
+ * machines where long is 32-bit! (However, as time_t is signed, we
+ * will already get problems at other places on 2038-01-19 03:14:08)
+ */
+static inline unsigned long mktime(unsigned int year, unsigned int mon,
+ unsigned int day, unsigned int hour,
+ unsigned int min, unsigned int sec)
+{
+ if (0 >= (int) (mon -= 2)) { /* 1..12 -> 11,12,1..10 */
+ mon += 12; /* Puts Feb last since it has leap day */
+ year -= 1;
+ }
+ return (((
+ (unsigned long)(year/4 - year/100 + year/400 + 367*mon/12 + day) +
+ year*365 - 719499
+ )*24 + hour /* now have hours */
+ )*60 + min /* now have minutes */
+ )*60 + sec; /* finally seconds */
+}
+
+void time_init(void)
+{
+ unsigned int year, mon, day, hour, min, sec;
+ int i;
+
+ /* The Linux interpretation of the CMOS clock register contents:
+ * When the Update-In-Progress (UIP) flag goes from 1 to 0, the
+ * RTC registers show the second which has precisely just started.
+ * Let's hope other operating systems interpret the RTC the same way.
+ */
+ /* read RTC exactly on falling edge of update flag */
+ for (i = 0 ; i < 1000000 ; i++) /* may take up to 1 second... */
+ if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)
+ break;
+ for (i = 0 ; i < 1000000 ; i++) /* must try at least 2.228 ms */
+ if (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP))
+ break;
+ do { /* Isn't this overkill ? UIP above should guarantee consistency */
+ sec = CMOS_READ(RTC_SECONDS);
+ min = CMOS_READ(RTC_MINUTES);
+ hour = CMOS_READ(RTC_HOURS);
+ day = CMOS_READ(RTC_DAY_OF_MONTH);
+ mon = CMOS_READ(RTC_MONTH);
+ year = CMOS_READ(RTC_YEAR);
+ } while (sec != CMOS_READ(RTC_SECONDS));
+ if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
+ {
+ BCD_TO_BIN(sec);
+ BCD_TO_BIN(min);
+ BCD_TO_BIN(hour);
+ BCD_TO_BIN(day);
+ BCD_TO_BIN(mon);
+ BCD_TO_BIN(year);
+ }
+ if ((year += 1900) < 1970)
+ year += 100;
+ xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
+ xtime.tv_usec = 0;
+ if (request_irq(TIMER_IRQ, timer_interrupt, 0, "timer") != 0)
+ panic("Could not allocate timer IRQ!");
+}
+
+/* This function must be called with interrupts disabled
+ * It was inspired by Steve McCanne's microtime-i386 for BSD. -- jrs
+ *
+ * However, the pc-audio speaker driver changes the divisor so that
+ * it gets interrupted rather more often - it loads 64 into the
+ * counter rather than 11932! This has an adverse impact on
+ * do_gettimeoffset() -- it stops working! What is also not
+ * good is that the interval that our timer function gets called
+ * is no longer 10.0002 ms, but 9.9767 ms. To get around this
+ * would require using a different timing source. Maybe someone
+ * could use the RTC - I know that this can interrupt at frequencies
+ * ranging from 8192Hz to 2Hz. If I had the energy, I'd somehow fix
+ * it so that at startup, the timer code in sched.c would select
+ * using either the RTC or the 8253 timer. The decision would be
+ * based on whether there was any other device around that needed
+ * to trample on the 8253. I'd set up the RTC to interrupt at 1024 Hz,
+ * and then do some jiggery to have a version of do_timer that
+ * advanced the clock by 1/1024 s. Every time that reached over 1/100
+ * of a second, then do all the old code. If the time was kept correct
+ * then do_gettimeoffset could just return 0 - there is no low order
+ * divider that can be accessed.
+ *
+ * Ideally, you would be able to use the RTC for the speaker driver,
+ * but it appears that the speaker driver really needs interrupt more
+ * often than every 120 us or so.
+ *
+ * Anyway, this needs more thought.... pjsg (1993-08-28)
+ *
+ * If you are really that interested, you should be reading
+ * comp.protocols.time.ntp!
+ */
+
+#define TICK_SIZE tick
+
+static inline unsigned long do_gettimeoffset(void)
+{
+ int count;
+ unsigned long offset = 0;
+
+ /* timer count may underflow right here */
+ outb_p(0x00, 0x43); /* latch the count ASAP */
+ count = inb_p(0x40); /* read the latched count */
+ count |= inb(0x40) << 8;
+ /* we know probability of underflow is always MUCH less than 1% */
+ if (count > (LATCH - LATCH/100)) {
+ /* check for pending timer interrupt */
+ outb_p(0x0a, 0x20);
+ if (inb(0x20) & 1)
+ offset = TICK_SIZE;
+ }
+ count = ((LATCH-1) - count) * TICK_SIZE;
+ count = (count + LATCH/2) / LATCH;
+ return offset + count;
+}
+
+/*
+ * This version of gettimeofday has near microsecond resolution.
+ */
+void do_gettimeofday(struct timeval *tv)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ *tv = xtime;
+ tv->tv_usec += do_gettimeoffset();
+ if (tv->tv_usec >= 1000000) {
+ tv->tv_usec -= 1000000;
+ tv->tv_sec++;
+ }
+ restore_flags(flags);
+}
+
+void do_settimeofday(struct timeval *tv)
+{
+ cli();
+ /* This is revolting. We need to set the xtime.tv_usec
+ * correctly. However, the value in this location is
+ * the value at the last tick.
+ * Discover what correction gettimeofday
+ * would have done, and then undo it!
+ */
+ tv->tv_usec -= do_gettimeoffset();
+
+ if (tv->tv_usec < 0) {
+ tv->tv_usec += 1000000;
+ tv->tv_sec--;
+ }
+
+ xtime = *tv;
+ time_state = TIME_BAD;
+ time_maxerror = 0x70000000;
+ time_esterror = 0x70000000;
+ sti();
+}
+
+
+/*
+ * In order to set the CMOS clock precisely, set_rtc_mmss has to be
+ * called 500 ms after the second nowtime has started, because when
+ * nowtime is written into the registers of the CMOS clock, it will
+ * jump to the next second precisely 500 ms later. Check the Motorola
+ * MC146818A or Dallas DS12887 data sheet for details.
+ */
+static int set_rtc_mmss(unsigned long nowtime)
+{
+ int retval = 0;
+ int real_seconds, real_minutes, cmos_minutes;
+ unsigned char save_control, save_freq_select;
+
+ save_control = CMOS_READ(RTC_CONTROL); /* tell the clock it's being set */
+ CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
+
+ save_freq_select = CMOS_READ(RTC_FREQ_SELECT); /* stop and reset prescaler */
+ CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
+
+ cmos_minutes = CMOS_READ(RTC_MINUTES);
+ if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
+ BCD_TO_BIN(cmos_minutes);
+
+ /*
+ * since we're only adjusting minutes and seconds,
+ * don't interfere with hour overflow. This avoids
+ * messing with unknown time zones but requires your
+ * RTC not to be off by more than 15 minutes
+ */
+ real_seconds = nowtime % 60;
+ real_minutes = nowtime / 60;
+ if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1)
+ real_minutes += 30; /* correct for half hour time zone */
+ real_minutes %= 60;
+
+ if (abs(real_minutes - cmos_minutes) < 30) {
+ if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
+ BIN_TO_BCD(real_seconds);
+ BIN_TO_BCD(real_minutes);
+ }
+ CMOS_WRITE(real_seconds,RTC_SECONDS);
+ CMOS_WRITE(real_minutes,RTC_MINUTES);
+ } else
+ retval = -1;
+
+ /* The following flags have to be released exactly in this order,
+ * otherwise the DS12887 (popular MC146818A clone with integrated
+ * battery and quartz) will not reset the oscillator and will not
+ * update precisely 500 ms later. You won't find this mentioned in
+ * the Dallas Semiconductor data sheets, but who believes data
+ * sheets anyway ... -- Markus Kuhn
+ */
+ CMOS_WRITE(save_control, RTC_CONTROL);
+ CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
+
+ return retval;
+}
head_shift = (F_SECT_PER_TRACK + 5) / 6;
/* a ``cylinder'' is two tracks plus a little stepping time */
- track_shift = 2 * head_shift + 1;
+ track_shift = 2 * head_shift + 3;
/* position of logical sector 1 on this track */
n = (track_shift * format_req.track + head_shift * format_req.head )
}
set_dor(0, ~0, 8); /* avoid immediate interrupt */
- if (request_irq(FLOPPY_IRQ, floppy_interrupt, SA_INTERRUPT, "floppy")) {
+ if (request_irq(FLOPPY_IRQ, floppy_interrupt,
+ SA_INTERRUPT|SA_SAMPLE_RANDOM, "floppy")) {
DPRINT1("Unable to grab IRQ%d for the floppy driver\n",
FLOPPY_IRQ);
return -1;
*/
save_flags(flags);
cli();
- if (request_irq(hwif->irq, ide_intr, SA_INTERRUPT, hwif->name)) {
+ if (request_irq(hwif->irq, ide_intr,
+ SA_INTERRUPT|SA_SAMPLE_RANDOM, hwif->name)) {
restore_flags(flags);
printk(" -- FAILED!");
return 1;
L_TARGET := char.a
M_OBJS :=
L_OBJS := tty_io.o n_tty.o console.o keyboard.o serial.o \
- tty_ioctl.o pty.o vt.o mem.o vc_screen.o \
+ tty_ioctl.o pty.o vt.o mem.o vc_screen.o random.o \
defkeymap.o consolemap.o vesa_blank.o selection.o
ifdef CONFIG_CYCLADES
else
ch512 = 0; /* Default font is always 256 */
+#ifdef BROKEN_GRAPHICS_PROGRAMS
+ /*
+ * All fonts are loaded in slot 0 (0:1 for 512 ch)
+ */
+
+ if (!arg)
+ return -EINVAL; /* Return to default font not supported */
+
+ video_font_is_default = 0;
+ font_select = ch512 ? 0x04 : 0x00;
+#else
/*
* The default font is kept in slot 0 and is never touched.
* A custom font is loaded in slot 2 (256 ch) or 2:3 (512 ch)
if ( !video_font_is_default )
charmap += 4*cmapsz;
+#endif
cli();
outb_p( 0x00, seq_port_reg ); /* First, the sequencer */
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/ioport.h>
+#include <linux/random.h>
#include <asm/bitops.h>
prev_scancode = 0;
goto end_kbd_intr;
}
+#ifdef CONFIG_RANDOM
+ add_keyboard_randomness(scancode);
+#endif
tty = ttytab[fg_console];
kbd = kbd_table + fg_console;
#include <linux/malloc.h>
#include <linux/mman.h>
#include <linux/mm.h>
+#include <linux/random.h>
#include <asm/segment.h>
#include <asm/io.h>
#define mmap_kmem mmap_mem
#define zero_lseek null_lseek
#define write_zero write_null
+#define write_random write_null
static struct file_operations ram_fops = {
memory_lseek,
NULL /* no special release code */
};
+#ifdef CONFIG_RANDOM
+static struct file_operations random_fops = {
+ memory_lseek,
+ read_random,
+ write_random,
+ NULL, /* full_readdir */
+ NULL, /* full_select */
+ NULL, /* full_ioctl */
+ NULL, /* full_mmap */
+ NULL, /* no special open code */
+ NULL /* no special release code */
+};
+
+static struct file_operations urandom_fops = {
+ memory_lseek,
+ read_random_unlimited,
+ write_random,
+ NULL, /* full_readdir */
+ NULL, /* full_select */
+ NULL, /* full_ioctl */
+ NULL, /* full_mmap */
+ NULL, /* no special open code */
+ NULL /* no special release code */
+};
+#endif
+
static int memory_open(struct inode * inode, struct file * filp)
{
switch (MINOR(inode->i_rdev)) {
case 7:
filp->f_op = &full_fops;
break;
+#ifdef CONFIG_RANDOM
+ case 8:
+ filp->f_op = &random_fops;
+ break;
+ case 9:
+ filp->f_op = &urandom_fops;
+ break;
+#endif
default:
return -ENODEV;
}
{
if (register_chrdev(MEM_MAJOR,"mem",&memory_fops))
printk("unable to get major %d for memory devs\n", MEM_MAJOR);
+#ifdef CONFIG_RANDOM
+ rand_initialize();
+#endif
mem_start = tty_init(mem_start);
#ifdef CONFIG_PRINTER
mem_start = lp_init(mem_start);
--- /dev/null
+/*
+ * random.c -- A strong random number generator
+ *
+ * Version 0.92, last modified 21-Sep-95
+ *
+ * Copyright Theodore Ts'o, 1994, 1995. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, and the entire permission notice in its entirety,
+ * including the disclaimer of warranties.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * ALTERNATIVELY, this product may be distributed under the terms of
+ * the GNU Public License, in which case the provisions of the GPL are
+ * required INSTEAD OF the above restrictions. (This clause is
+ * necessary due to a potential bad interaction between the GPL and
+ * the restrictions contained in a BSD-style copyright.)
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * (now, with legal B.S. out of the way.....)
+ *
+ * This routine gathers environmental noise from device drivers, etc.,
+ * and returns good random numbers, suitable for cryptographic use.
+ * Besides the obvious cryptographic uses, these numbers are also good
+ * for seeding TCP sequence numbers, and other places where it is
+ * desirable to have numbers which are not only random, but hard to
+ * predict by an attacker.
+ *
+ * Theory of operation
+ * ===================
+ *
+ * Computers are very predictable devices. Hence it is extremely hard
+ * to produce truly random numbers on a computer --- as opposed to
+ * pseudo-random numbers, which can easily be generated by using an
+ * algorithm.  Unfortunately, it is very easy for attackers to guess
+ * the sequence of pseudo-random number generators, and for some
+ * applications this is not acceptable. So instead, we must try to
+ * gather "environmental noise" from the computer's environment, which
+ * must be hard for outside attackers to observe, and use that to
+ * generate random numbers. In a Unix environment, this is best done
+ * from inside the kernel.
+ *
+ * Sources of randomness from the environment include inter-keyboard
+ * timings, inter-interrupt timings from some interrupts, and other
+ * events which are both (a) non-deterministic and (b) hard for an
+ * outside observer to measure. Randomness from these sources are
+ * added to an "entropy pool", which is periodically mixed using the
+ * MD5 compression function in CBC mode. As random bytes are mixed
+ * into the entropy pool, the routines keep an *estimate* of how many
+ * bits of randomness have been stored into the random number
+ * generator's internal state.
+ *
+ * When random bytes are desired, they are obtained by taking the MD5
+ * hash of a counter plus the contents of the "entropy pool". The
+ * reason for the MD5 hash is so that we can avoid exposing the
+ * internal state of random number generator. Although the MD5 hash
+ * does protect the pool, as each random byte which is generated from
+ * the pool reveals some information which was derived from the
+ * internal state, and thus increasing the amount of information an
+ * outside attacker has available to try to make some guesses about
+ * the random number generator's internal state. For this reason,
+ * the routine decreases its internal estimate of how many bits of
+ * "true randomness" are contained in the entropy pool as it outputs
+ * random numbers.
+ *
+ * If this estimate goes to zero, the routine can still generate random
+ * numbers; however it may now be possible for an attacker to analyze
+ * the output of the random number generator, and the MD5 algorithm,
+ * and thus have some success in guessing the output of the routine.
+ * Phil Karn (who devised this mechanism of using MD5 plus a counter
+ * to extract random numbers from an entropy pool) calls this
+ * "practical randomness", since in the worst case this is equivalent
+ * to hashing MD5 with a counter and an undisclosed secret. If MD5 is
+ * a strong cryptographic hash, this should be fairly resistant to attack.
+ *
+ * Exported interfaces ---- output
+ * ===============================
+ *
+ * There are three exported interfaces; the first is one designed to
+ * be used from within the kernel:
+ *
+ * void get_random_bytes(void *buf, int nbytes);
+ *
+ * This interface will return the requested number of random bytes,
+ * and place it in the requested buffer.
+ *
+ * The two other interfaces are two character devices /dev/random and
+ * /dev/urandom. /dev/random is suitable for use when very high
+ * quality randomness is desired (for example, for key generation.),
+ * as it will only return a maximum of the number of bits of
+ * randomness (as estimated by the random number generator) contained
+ * in the entropy pool.
+ *
+ * The /dev/urandom device does not have this limit, and will return
+ * as many bytes as are requested. As more and more random bytes are
+ * requested without giving time for the entropy pool to recharge,
+ * this will result in lower quality random numbers. For many
+ * applications, however, this is acceptable.
+ *
+ * Exported interfaces ---- input
+ * ==============================
+ *
+ * The two current exported interfaces for gathering environmental
+ * noise from the devices are:
+ *
+ * void add_keyboard_randomness(unsigned char scancode);
+ * void add_interrupt_randomness(int irq);
+ *
+ * The first function uses the inter-keypress timing, as well as the
+ * scancode as random inputs into the "entropy pool".
+ *
+ * The second function uses the inter-interrupt timing as random
+ * inputs to the entropy pool. Note that not all interrupts are good
+ * sources of randomness!  For example, the timer interrupt is not a
+ * good choice, because the periodicity of the interrupts is too
+ * regular, and hence predictable to an attacker.  Disk interrupts are
+ * a better measure, since the timing of the disk interrupts are more
+ * unpredictable. The routines try to estimate how many bits of
+ * randomness a particular interrupt channel offers, by keeping track
+ * of the first and second order deltas in the interrupt timings.
+ *
+ * Acknowledgements:
+ * =================
+ *
+ * Ideas for constructing this random number generator were derived
+ * from the Pretty Good Privacy's random number generator, and from
+ * private discussions with Phil Karn. This design has been further
+ * modified by myself, so any flaws are solely my responsibility, and
+ * should not be attributed to the authors of PGP or to Phil.
+ *
+ * The code for MD5 transform was taken from Colin Plumb's
+ * implementation, which has been placed in the public domain. The
+ * MD5 cryptographic checksum was devised by Ronald Rivest, and is
+ * documented in RFC 1321, "The MD5 Message Digest Algorithm".
+ *
+ * Further background information on this topic may be obtained from
+ * RFC 1750, "Randomness Recommendations for Security", by Donald
+ * Eastlake, Steve Crocker, and Jeff Schiller.
+ */
+
+#ifdef linux
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/string.h>
+#include <linux/random.h>
+
+#include <asm/segment.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#endif
+
+#ifdef CONFIG_RANDOM
+
+#define RANDPOOL 512
+
+/* State of the entropy pool */
+struct random_bucket {
+	int add_ptr;		/* next pool byte to XOR new input into */
+	int entropy_count;	/* estimated bits of entropy in the pool */
+	int length;		/* pool size in bytes (RANDPOOL) */
+	int bit_length;		/* pool size in bits (length * 8) */
+	int delay_mix:1;	/* a full-pool mix is pending (see mix_bucket) */
+	__u8 *pool;
+};
+
+/* Per-source timing history used to estimate entropy of an event */
+struct timer_rand_state {
+	unsigned long last_time;	/* jiffies at the previous event */
+	int last_delta;			/* previous first-order delta */
+	int nbits;	/* NOTE(review): not referenced in the visible code */
+};
+
+static struct random_bucket random_state;
+static __u32 rand_pool_key[16];
+static __u8 random_pool[RANDPOOL];
+static __u32 random_counter[16];
+static struct timer_rand_state keyboard_timer_state;
+static struct timer_rand_state irq_timer_state[NR_IRQS];
+
+#ifndef MIN
+#define MIN(a,b) (((a) < (b)) ? (a) : (b))
+#endif
+
+/*
+ * Reset a bucket to its empty state: rewind the add pointer, derive
+ * the pool size in bits from its size in bytes, and discard both the
+ * entropy estimate and any pending delayed mix.
+ * NOTE(review): the parameter shadows the file-scope "random_state".
+ */
+static void flush_random(struct random_bucket *random_state)
+{
+	random_state->add_ptr = 0;
+	random_state->bit_length = random_state->length * 8;
+	random_state->entropy_count = 0;
+	random_state->delay_mix = 0;
+}
+
+/*
+ * One-time setup (called from chr_dev_init): point the bucket at the
+ * static 512-byte pool and flush it to its empty state.
+ */
+void rand_initialize(void)
+{
+	random_state.length = RANDPOOL;
+	random_state.pool = random_pool;
+	flush_random(&random_state);
+}
+
+/*
+ * MD5 transform algorithm, taken from code written by Colin Plumb,
+ * and put into the public domain
+ */
+
+/* The four core functions - F1 is optimized somewhat */
+
+/* #define F1(x, y, z) (x & y | ~x & z) */
+#define F1(x, y, z) (z ^ (x & (y ^ z)))
+#define F2(x, y, z) F1(z, x, y)
+#define F3(x, y, z) (x ^ y ^ z)
+#define F4(x, y, z) (y ^ (x | ~z))
+
+/* This is the central step in the MD5 algorithm. */
+#define MD5STEP(f, w, x, y, z, data, s) \
+ ( w += f(x, y, z) + data, w = w<<s | w>>(32-s), w += x )
+
+/*
+ * The core of the MD5 algorithm, this alters an existing MD5 hash to
+ * reflect the addition of 16 longwords of new data. MD5Update blocks
+ * the data and converts bytes into longwords for this routine.
+ */
+static void MD5Transform(__u32 buf[4],
+			 __u32 const in[16])
+{
+	__u32 a, b, c, d;
+
+	a = buf[0];
+	b = buf[1];
+	c = buf[2];
+	d = buf[3];
+
+	/* Round 1 (F1), shifts 7/12/17/22 */
+	MD5STEP(F1, a, b, c, d, in[ 0]+0xd76aa478, 7);
+	MD5STEP(F1, d, a, b, c, in[ 1]+0xe8c7b756, 12);
+	MD5STEP(F1, c, d, a, b, in[ 2]+0x242070db, 17);
+	MD5STEP(F1, b, c, d, a, in[ 3]+0xc1bdceee, 22);
+	MD5STEP(F1, a, b, c, d, in[ 4]+0xf57c0faf, 7);
+	MD5STEP(F1, d, a, b, c, in[ 5]+0x4787c62a, 12);
+	MD5STEP(F1, c, d, a, b, in[ 6]+0xa8304613, 17);
+	MD5STEP(F1, b, c, d, a, in[ 7]+0xfd469501, 22);
+	MD5STEP(F1, a, b, c, d, in[ 8]+0x698098d8, 7);
+	MD5STEP(F1, d, a, b, c, in[ 9]+0x8b44f7af, 12);
+	MD5STEP(F1, c, d, a, b, in[10]+0xffff5bb1, 17);
+	MD5STEP(F1, b, c, d, a, in[11]+0x895cd7be, 22);
+	MD5STEP(F1, a, b, c, d, in[12]+0x6b901122, 7);
+	MD5STEP(F1, d, a, b, c, in[13]+0xfd987193, 12);
+	MD5STEP(F1, c, d, a, b, in[14]+0xa679438e, 17);
+	MD5STEP(F1, b, c, d, a, in[15]+0x49b40821, 22);
+
+	/* Round 2 (F2), shifts 5/9/14/20 */
+	MD5STEP(F2, a, b, c, d, in[ 1]+0xf61e2562, 5);
+	MD5STEP(F2, d, a, b, c, in[ 6]+0xc040b340, 9);
+	MD5STEP(F2, c, d, a, b, in[11]+0x265e5a51, 14);
+	MD5STEP(F2, b, c, d, a, in[ 0]+0xe9b6c7aa, 20);
+	MD5STEP(F2, a, b, c, d, in[ 5]+0xd62f105d, 5);
+	MD5STEP(F2, d, a, b, c, in[10]+0x02441453, 9);
+	MD5STEP(F2, c, d, a, b, in[15]+0xd8a1e681, 14);
+	MD5STEP(F2, b, c, d, a, in[ 4]+0xe7d3fbc8, 20);
+	MD5STEP(F2, a, b, c, d, in[ 9]+0x21e1cde6, 5);
+	MD5STEP(F2, d, a, b, c, in[14]+0xc33707d6, 9);
+	MD5STEP(F2, c, d, a, b, in[ 3]+0xf4d50d87, 14);
+	MD5STEP(F2, b, c, d, a, in[ 8]+0x455a14ed, 20);
+	MD5STEP(F2, a, b, c, d, in[13]+0xa9e3e905, 5);
+	MD5STEP(F2, d, a, b, c, in[ 2]+0xfcefa3f8, 9);
+	MD5STEP(F2, c, d, a, b, in[ 7]+0x676f02d9, 14);
+	MD5STEP(F2, b, c, d, a, in[12]+0x8d2a4c8a, 20);
+
+	/* Round 3 (F3), shifts 4/11/16/23 */
+	MD5STEP(F3, a, b, c, d, in[ 5]+0xfffa3942, 4);
+	MD5STEP(F3, d, a, b, c, in[ 8]+0x8771f681, 11);
+	MD5STEP(F3, c, d, a, b, in[11]+0x6d9d6122, 16);
+	MD5STEP(F3, b, c, d, a, in[14]+0xfde5380c, 23);
+	MD5STEP(F3, a, b, c, d, in[ 1]+0xa4beea44, 4);
+	MD5STEP(F3, d, a, b, c, in[ 4]+0x4bdecfa9, 11);
+	MD5STEP(F3, c, d, a, b, in[ 7]+0xf6bb4b60, 16);
+	MD5STEP(F3, b, c, d, a, in[10]+0xbebfbc70, 23);
+	MD5STEP(F3, a, b, c, d, in[13]+0x289b7ec6, 4);
+	MD5STEP(F3, d, a, b, c, in[ 0]+0xeaa127fa, 11);
+	MD5STEP(F3, c, d, a, b, in[ 3]+0xd4ef3085, 16);
+	MD5STEP(F3, b, c, d, a, in[ 6]+0x04881d05, 23);
+	MD5STEP(F3, a, b, c, d, in[ 9]+0xd9d4d039, 4);
+	MD5STEP(F3, d, a, b, c, in[12]+0xe6db99e5, 11);
+	MD5STEP(F3, c, d, a, b, in[15]+0x1fa27cf8, 16);
+	MD5STEP(F3, b, c, d, a, in[ 2]+0xc4ac5665, 23);
+
+	/* Round 4 (F4), shifts 6/10/15/21 */
+	MD5STEP(F4, a, b, c, d, in[ 0]+0xf4292244, 6);
+	MD5STEP(F4, d, a, b, c, in[ 7]+0x432aff97, 10);
+	MD5STEP(F4, c, d, a, b, in[14]+0xab9423a7, 15);
+	MD5STEP(F4, b, c, d, a, in[ 5]+0xfc93a039, 21);
+	MD5STEP(F4, a, b, c, d, in[12]+0x655b59c3, 6);
+	MD5STEP(F4, d, a, b, c, in[ 3]+0x8f0ccc92, 10);
+	MD5STEP(F4, c, d, a, b, in[10]+0xffeff47d, 15);
+	MD5STEP(F4, b, c, d, a, in[ 1]+0x85845dd1, 21);
+	MD5STEP(F4, a, b, c, d, in[ 8]+0x6fa87e4f, 6);
+	MD5STEP(F4, d, a, b, c, in[15]+0xfe2ce6e0, 10);
+	MD5STEP(F4, c, d, a, b, in[ 6]+0xa3014314, 15);
+	MD5STEP(F4, b, c, d, a, in[13]+0x4e0811a1, 21);
+	MD5STEP(F4, a, b, c, d, in[ 4]+0xf7537e82, 6);
+	MD5STEP(F4, d, a, b, c, in[11]+0xbd3af235, 10);
+	MD5STEP(F4, c, d, a, b, in[ 2]+0x2ad7d2bb, 15);
+	MD5STEP(F4, b, c, d, a, in[ 9]+0xeb86d391, 21);
+
+	/* Fold this block back into the running state */
+	buf[0] += a;
+	buf[1] += b;
+	buf[2] += c;
+	buf[3] += d;
+}
+
+#undef F1
+#undef F2
+#undef F3
+#undef F4
+#undef MD5STEP
+
+/*
+ * This function should take a struct random_bucket * as its argument,
+ * but that would make tqueue unhappy, so it takes a void * instead.
+ */
+/*
+ * Stir the whole pool, CBC-style: each 16-byte block is XORed with the
+ * MD5 transform of rand_pool_key chained through the previous block,
+ * seeded from the pool's final 16 bytes.
+ */
+static void mix_bucket(void *v)
+{
+	struct random_bucket *r = (struct random_bucket *) v;
+	int i, num_passes;
+	__u32 *p;
+	__u32 iv[4];
+
+	r->delay_mix = 0;
+
+	/* Start IV from last block of the random pool */
+	memcpy(iv, r->pool + r->length - sizeof(iv), sizeof(iv));
+
+	/* Each pass consumes 16 bytes (four __u32s) of the pool */
+	num_passes = r->length / 16;
+	for (i = 0, p = (__u32 *) r->pool; i < num_passes; i++) {
+		MD5Transform(iv, rand_pool_key);
+		iv[0] = (*p++ ^= iv[0]);
+		iv[1] = (*p++ ^= iv[1]);
+		iv[2] = (*p++ ^= iv[2]);
+		iv[3] = (*p++ ^= iv[3]);
+	}
+	/* Key the next mix with the freshly stirred start of the pool */
+	memcpy(rand_pool_key, r->pool, sizeof(rand_pool_key));
+
+	/* Wipe iv from memory */
+	memset(iv, 0, sizeof(iv));
+
+	r->add_ptr = 0;
+}
+
+/*
+ * This function adds a byte into the entropy "pool". It does not
+ * update the entropy estimate. The caller must do this if appropriate.
+ */
+static inline void add_entropy_byte(struct random_bucket *r,
+				    const __u8 ch,
+				    int delay)
+{
+	/* A mix was postponed by an earlier delayed caller; do it now */
+	if (!delay && r->delay_mix)
+		mix_bucket(r);
+	r->pool[r->add_ptr++] ^= ch;
+	if (r->add_ptr >= r->length) {
+		if (delay) {
+			/* Mixing is too expensive right now (delayed
+			 * context); defer it and keep overwriting. */
+			r->delay_mix = 1;
+			r->add_ptr = 0;
+		} else
+			mix_bucket(r);
+	}
+}
+
+/*
+ * This function adds some number of bytes into the entropy pool and
+ * updates the entropy count as appropriate.
+ */
+void add_entropy(struct random_bucket *r, const __u8 *ptr,
+		 int length, int entropy_level, int delay)
+{
+	while (length-- > 0)
+		add_entropy_byte(r, *ptr++, delay);
+
+	/* Credit the caller's estimate, capped at the pool size in bits */
+	r->entropy_count += entropy_level;
+	if (r->entropy_count > r->length*8)
+		r->entropy_count = r->length * 8;
+}
+
+/*
+ * This function adds entropy to the entropy "pool" by using timing
+ * delays. It uses the timer_rand_state structure to make an estimate
+ * of how many bits of entropy this call has added to the pool.
+ */
+static void add_timer_randomness(struct random_bucket *r,
+				 struct timer_rand_state *state, int delay)
+{
+	int delta, delta2;
+	int nbits;
+
+	/*
+	 * Calculate number of bits of randomness we probably
+	 * added.  We take into account the first and second order
+	 * delta's in order to make our estimate.
+	 */
+	delta = jiffies - state->last_time;
+	delta2 = delta - state->last_delta;
+	state->last_time = jiffies;
+	state->last_delta = delta;
+	if (delta < 0) delta = -delta;
+	if (delta2 < 0) delta2 = -delta2;
+	/* nbits = bit width of half the smaller absolute delta */
+	delta = MIN(delta, delta2) >> 1;
+	for (nbits = 0; delta; nbits++)
+		delta >>= 1;
+
+	add_entropy(r, (__u8 *) &jiffies, sizeof(jiffies),
+		    nbits, delay);
+
+#if defined (__i386__)
+	/*
+	 * On a 386, read the high resolution timer.  We assume that
+	 * this gives us 2 bits of randomness.  XXX This needs
+	 * investigation.
+	 */
+	outb_p(0x00, 0x43);	/* latch the count ASAP */
+	add_entropy_byte(r, inb_p(0x40), 1);
+	add_entropy_byte(r, inb(0x40), 1);
+	r->entropy_count += 2;
+	if (r->entropy_count > r->bit_length)
+		r->entropy_count = r->bit_length;
+#endif
+}
+
+/*
+ * Keyboard-interrupt hook: mixes in the keystroke timing and the
+ * scancode itself, crediting a flat 6 extra bits for the scancode,
+ * capped at the pool size.
+ */
+void add_keyboard_randomness(unsigned char scancode)
+{
+	struct random_bucket *r = &random_state;
+
+	add_timer_randomness(r, &keyboard_timer_state, 0);
+	add_entropy_byte(r, scancode, 0);
+	r->entropy_count += 6;
+	if (r->entropy_count > r->bit_length)
+		r->entropy_count = r->bit_length;
+}
+
+/*
+ * IRQ-dispatch hook (handlers flagged SA_SAMPLE_RANDOM).  Uses
+ * delay=1 so a full pool mix is deferred out of interrupt context.
+ */
+void add_interrupt_randomness(int irq)
+{
+	/* ignore out-of-range irq numbers: no timer state for them */
+	struct random_bucket *r = &random_state;
+
+	if (irq >= NR_IRQS)
+		return;
+
+	add_timer_randomness(r, &irq_timer_state[irq], 1);
+}
+
+/*
+ * This function extracts randomness from the "entropy pool", and
+ * returns it in a buffer. This function computes how many remaining
+ * bits of entropy are left in the pool, but it does not restrict the
+ * number of bytes that are actually obtained.
+ */
+static inline int extract_entropy(struct random_bucket *r, char * buf,
+				  int nbytes, int to_user)
+{
+	int length, ret, passes, i;
+	__u32 tmp[4];
+	u8 *cp;
+
+	/* Stir in the current time so back-to-back reads differ */
+	add_entropy(r, (u8 *) &jiffies, sizeof(jiffies), 0, 0);
+
+	if (r->entropy_count > r->bit_length)
+		r->entropy_count = r->bit_length;
+	/* Cap a single extraction at 32K */
+	if (nbytes > 32768)
+		nbytes = 32768;
+	ret = nbytes;
+	/* Debit the estimate for what is about to be revealed */
+	r->entropy_count -= ret * 8;
+	if (r->entropy_count < 0)
+		r->entropy_count = 0;
+	passes = r->length / 64;
+	while (nbytes) {
+		length = MIN(nbytes, 16);
+		/* Increment the multi-word counter, propagating carry */
+		for (i=0; i < 16; i++) {
+			if (++random_counter[i] != 0)
+				break;
+		}
+		/* Hash counter + entire pool into one 16-byte output */
+		tmp[0] = 0x67452301;
+		tmp[1] = 0xefcdab89;
+		tmp[2] = 0x98badcfe;
+		tmp[3] = 0x10325476;
+		MD5Transform(tmp, random_counter);
+		for (i = 0, cp = r->pool; i < passes; i++, cp+=64)
+			MD5Transform(tmp, (__u32 *) cp);
+		if (to_user)
+			memcpy_tofs(buf, tmp, length);
+		else
+			memcpy(buf, tmp, length);
+		nbytes -= length;
+		buf += length;
+	}
+	return ret;
+}
+
+/*
+ * This function is the exported kernel interface. It returns some
+ * number of good random numbers, suitable for seeding TCP sequence
+ * numbers, etc.
+ */
+void get_random_bytes(void *buf, int nbytes)
+{
+	/* to_user=0: plain memcpy; never limited by the entropy estimate */
+	extract_entropy(&random_state, (char *) buf, nbytes, 0);
+}
+
+#ifdef linux
+/*
+ * /dev/random read(): return at most as many bytes as the entropy
+ * estimate allows.  Does not block; returns 0 (EOF) when the estimate
+ * is under 8 bits.
+ */
+int read_random(struct inode * inode,struct file * file,char * buf,int nbytes)
+{
+	if ((nbytes * 8) > random_state.entropy_count)
+		nbytes = random_state.entropy_count / 8;
+
+	return extract_entropy(&random_state, buf, nbytes, 1);
+}
+
+/* /dev/urandom read(): no entropy-estimate limit on the request */
+int read_random_unlimited(struct inode * inode,struct file * file,
+			  char * buf,int nbytes)
+{
+	return extract_entropy(&random_state, buf, nbytes, 1);
+}
+#endif
+
+#endif /* CONFIG_RANDOM */
if (vt_cons[fg_console]->vc_mode != KD_TEXT)
return -EINVAL;
+#ifdef BROKEN_GRAPHICS_PROGRAMS
+ /* With BROKEN_GRAPHICS_PROGRAMS defined, the default
+ font is not saved. */
+ return -ENOSYS;
+#else
+
i = con_set_font(NULL, 0); /* Set font to default */
if (i) return i;
con_set_default_unimap();
return 0;
+#endif
}
case GIO_FONTX:
#include <linux/vt.h>
+/*
+ * Presently, a lot of graphics programs do not restore the contents of
+ * the higher font pages. Defining this flag will avoid use of them, but
+ * will lose support for PIO_FONTRESET. Note that many font operations are
+ * not likely to work with these programs anyway; they need to be
+ * fixed. The linux/Documentation directory includes a code snippet
+ * to save and restore the text font.
+ */
+#define BROKEN_GRAPHICS_PROGRAMS 1
+
extern struct vt_struct {
int vc_num; /* The console number */
unsigned char vc_mode; /* KD_TEXT, ... */
#ifdef TUNNEL_DEBUG
printk("tunnel: calling ip_forward()\n");
#endif
- if(ip_forward(skb2, dev, 0, iph->daddr, 0))
+ if(ip_forward(skb2, dev, 0, iph->daddr))
kfree_skb(skb2, FREE_WRITE);
DEVICE( AL, AL_M1461, "M1461"),
DEVICE( AL, AL_M4803, "M4803"),
DEVICE( IMS, IMS_8849, "8849"),
- DEVICE( REALTEK, REALTEK_8300, "ENW-8300C"),
+ DEVICE( REALTEK, REALTEK_8029, "8029"),
DEVICE( VIA, VIA_82C505, "VT 82C505"),
DEVICE( VIA, VIA_82C561, "VT 82C561"),
DEVICE( VIA, VIA_82C576, "VT 82C576 3V"),
hostdata->state = STATE_HALTED;
/*
* NCR53c700 and NCR53c700-66 change the current SCSI
- * process, hostdata->current, in the Linux driver so
- * cmd = hostdata->current.
+ * process, hostdata->current_cmd, in the Linux driver so
+ * cmd = hostdata->current_cmd.
*
* With other chips, we must look through the commands
* executing and find the command structure which
*/
if (hostdata->options & OPTION_700) {
- cmd = (struct NCR53c7x0_cmd *) hostdata->current;
+ cmd = (struct NCR53c7x0_cmd *) hostdata->current_cmd;
} else {
dsa = bus_to_virt(NCR53c7x0_read32(DSA_REG));
for (cmd = (struct NCR53c7x0_cmd *)
volatile struct NCR53c7x0_cmd *running_list;
/* commands running, maintained
by Linux driver */
- volatile struct NCR53c7x0_cmd *current; /* currently connected
+ volatile struct NCR53c7x0_cmd *current_cmd; /* currently connected
nexus, ONLY valid for
NCR53c700/NCR53c700-66
*/
endif
endif
+ifeq ($(CONFIG_SCSI_NCR53C406A),y)
+L_OBJS += NCR53c406a.o
+else
+ ifeq ($(CONFIG_SCSI_NCR53C406A),m)
+ M_OBJS += NCR53c406a.o
+ endif
+endif
+
include $(TOPDIR)/Rules.make
ifeq ($(CONFIG_SCSI),m)
--- /dev/null
+/*
+ * NCR53c406.c
+ * Low-level SCSI driver for NCR53c406a chip.
+ * Copyright (C) 1994, 1995 Normunds Saumanis (normunds@tech.swh.lv)
+ *
+ * LILO command line usage: ncr53c406a=<PORTBASE>[,<IRQ>[,<FASTPIO>]]
+ * Specify IRQ = 0 for non-interrupt driven mode.
+ * FASTPIO = 1 for fast pio mode, 0 for slow mode.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ */
+
+#define NCR53C406A_DEBUG 0
+#define VERBOSE_NCR53C406A_DEBUG 0
+
+/* Set this to 1 for PIO mode (recommended) or to 0 for DMA mode */
+#define USE_PIO 1
+
+#define USE_BIOS 0
+/* #define BIOS_ADDR 0xD8000 */ /* define this if autoprobe fails */
+/* #define PORT_BASE 0x330 */ /* define this if autoprobe fails */
+/* #define IRQ_LEV 0 */ /* define this if autoprobe fails */
+#define DMA_CHAN 5 /* this is ignored if DMA is disabled */
+
+/* Set this to 0 if you encounter kernel lockups while transferring
+ * data in PIO mode */
+#define USE_FAST_PIO 1
+
+/* ============= End of user configurable parameters ============= */
+
+#ifdef MODULE
+#include <linux/config.h>
+#include <linux/module.h>
+#endif
+
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/bitops.h>
+#include <asm/irq.h>
+
+#include "../block/blk.h"
+#include "scsi.h"
+#include "hosts.h"
+#include "sd.h"
+
+#include "NCR53c406a.h"
+
+/* ============================================================= */
+
+#define WATCHDOG 5000000
+
+#define SYNC_MODE 0 	/* Synchronous transfer mode */
+
+#if DEBUG
+#undef NCR53C406A_DEBUG
+#define NCR53C406A_DEBUG 1
+#endif
+
+#if USE_PIO
+#define USE_DMA 0
+#else
+#define USE_DMA 1
+#endif
+
+/* Default configuration */
+#define C1_IMG 0x07 /* ID=7 */
+#define C2_IMG 0x48 /* FE SCSI2 */
+#if USE_DMA
+#define C3_IMG 0x21 /* CDB TE */
+#else
+#define C3_IMG 0x20 /* CDB */
+#endif
+#define C4_IMG 0x04 /* ANE */
+#define C5_IMG 0xb6 /* AA PI SIE POL */
+
+#define REG0 (outb(C4_IMG, CONFIG4))
+#define REG1 (outb(C5_IMG, CONFIG5))
+
+#if NCR53C406A_DEBUG
+#define DEB(x) x
+#else
+#define DEB(x)
+#endif
+
+#if VERBOSE_NCR53C406A_DEBUG
+#define VDEB(x) x
+#else
+#define VDEB(x)
+#endif
+
+#define LOAD_DMA_COUNT(count) \
+ outb(count & 0xff, TC_LSB); \
+ outb((count >> 8) & 0xff, TC_MSB); \
+ outb((count >> 16) & 0xff, TC_HIGH);
+
+/* Chip commands */
+#define DMA_OP 0x80
+
+#define SCSI_NOP 0x00
+#define FLUSH_FIFO 0x01
+#define CHIP_RESET 0x02
+#define SCSI_RESET 0x03
+#define RESELECT 0x40
+#define SELECT_NO_ATN 0x41
+#define SELECT_ATN 0x42
+#define SELECT_ATN_STOP 0x43
+#define ENABLE_SEL 0x44
+#define DISABLE_SEL 0x45
+#define SELECT_ATN3 0x46
+#define RESELECT3 0x47
+#define TRANSFER_INFO 0x10
+#define INIT_CMD_COMPLETE 0x11
+#define MSG_ACCEPT 0x12
+#define TRANSFER_PAD 0x18
+#define SET_ATN 0x1a
+#define RESET_ATN 0x1b
+#define SEND_MSG 0x20
+#define SEND_STATUS 0x21
+#define SEND_DATA 0x22
+#define DISCONN_SEQ 0x23
+#define TERMINATE_SEQ 0x24
+#define TARG_CMD_COMPLETE 0x25
+#define DISCONN 0x27
+#define RECV_MSG 0x28
+#define RECV_CMD 0x29
+#define RECV_DATA 0x2a
+#define RECV_CMD_SEQ 0x2b
+#define TARGET_ABORT_DMA 0x04
+
+/*----------------------------------------------------------------*/
+/* the following will set the monitor border color (useful to find
+ where something crashed or gets stuck at */
+/* 1 = blue
+ 2 = green
+ 3 = cyan
+ 4 = red
+ 5 = magenta
+ 6 = yellow
+ 7 = white
+*/
+
+#if NCR53C406A_DEBUG
+#define rtrc(i) {inb(0x3da);outb(0x31,0x3c0);outb((i),0x3c0);}
+#else
+#define rtrc(i) {}
+#endif
+/*----------------------------------------------------------------*/
+
+/* Transfer phases the driver distinguishes (idle + the six SCSI
+ * information-transfer phases) */
+enum Phase {
+	idle,
+	data_out,
+	data_in,
+	command_ph,
+	status_ph,
+	message_out,
+	message_in
+};
+
+/* Static function prototypes */
+static void NCR53c406a_intr(int, struct pt_regs *);
+static void internal_done(Scsi_Cmnd *);
+static void wait_intr(void);
+static void chip_init(void);
+static void calc_port_addr(void);
+#ifndef IRQ_LEV
+static int irq_probe(void);
+#endif
+
+/* ================================================================= */
+
+#if USE_BIOS
+static void *bios_base = (void *)0;
+#endif
+
+#if PORT_BASE
+static int port_base = PORT_BASE;
+#else
+static int port_base = 0;
+#endif
+
+#if IRQ_LEV
+static int irq_level = IRQ_LEV;
+#else
+static int irq_level = -1; /* 0 is 'no irq', so use -1 for 'uninitialized'*/
+#endif
+
+#if USE_DMA
+static int dma_chan = 0;
+#endif
+
+#if USE_PIO
+static int fast_pio = USE_FAST_PIO;
+#endif
+
+static Scsi_Cmnd *current_SC = NULL;
+static volatile int internal_done_flag = 0;
+static volatile int internal_done_errcode = 0;
+static char info_msg[256];
+
+/* ================================================================= */
+
+/* possible BIOS locations */
+#if USE_BIOS
+static void *addresses[] = {
+	(void *)0xd8000,
+	(void *)0xc8000
+};
+/* Divide by the element size, not sizeof(unsigned): the array holds
+ * void * and the two are not guaranteed to match. */
+#define ADDRESS_COUNT (sizeof( addresses ) / sizeof( addresses[0] ))
+#endif /* USE_BIOS */
+
+/* possible i/o port addresses */
+static unsigned short ports[] = { 0x230, 0x330 };
+#define PORT_COUNT (sizeof( ports ) / sizeof( unsigned short ))
+
+/* possible interrupt channels */
+static unsigned short intrs[] = { 10, 11, 12, 15 };
+#define INTR_COUNT (sizeof( intrs ) / sizeof( unsigned short ))
+
+/* signatures for NCR 53c406a based controllers */
+#if USE_BIOS
+struct signature {
+	char *signature;	/* text to match in the BIOS ROM */
+	int  sig_offset;	/* byte offset of the match window */
+	int  sig_length;	/* number of bytes to compare */
+} signatures[] = {
+	/*          1         2         3         4         5         6 */
+	/* 123456789012345678901234567890123456789012345678901234567890 */
+	{ "Copyright (C) Acculogic, Inc.\r\n2.8M Diskette Extension Bios ver 4.04.03 03/01/1993", 61, 82 },
+};
+#define SIGNATURE_COUNT (sizeof( signatures ) / sizeof( struct signature ))
+#endif /* USE_BIOS */
+
+/* ============================================================ */
+
+/* Control Register Set 0 */
+static int TC_LSB; /* transfer counter lsb */
+static int TC_MSB; /* transfer counter msb */
+static int SCSI_FIFO; /* scsi fifo register */
+static int CMD_REG; /* command register */
+static int STAT_REG; /* status register */
+static int DEST_ID; /* selection/reselection bus id */
+static int INT_REG; /* interrupt status register */
+static int SRTIMOUT; /* select/reselect timeout reg */
+static int SEQ_REG; /* sequence step register */
+static int SYNCPRD; /* synchronous transfer period */
+static int FIFO_FLAGS; /* indicates # of bytes in fifo */
+static int SYNCOFF; /* synchronous offset register */
+static int CONFIG1; /* configuration register */
+static int CLKCONV; /* clock conversion reg */
+/*static int TESTREG;*/ /* test mode register */
+static int CONFIG2; /* Configuration 2 Register */
+static int CONFIG3; /* Configuration 3 Register */
+static int CONFIG4; /* Configuration 4 Register */
+static int TC_HIGH; /* Transfer Counter High */
+/*static int FIFO_BOTTOM;*/ /* Reserve FIFO byte register */
+
+/* Control Register Set 1 */
+/*static int JUMPER_SENSE;*/ /* Jumper sense port reg (r/w) */
+/*static int SRAM_PTR;*/ /* SRAM address pointer reg (r/w) */
+/*static int SRAM_DATA;*/ /* SRAM data register (r/w) */
+static int PIO_FIFO; /* PIO FIFO registers (r/w) */
+/*static int PIO_FIFO1;*/ /* */
+/*static int PIO_FIFO2;*/ /* */
+/*static int PIO_FIFO3;*/ /* */
+static int PIO_STATUS; /* PIO status (r/w) */
+/*static int ATA_CMD;*/ /* ATA command/status reg (r/w) */
+/*static int ATA_ERR;*/ /* ATA features/error register (r/w)*/
+static int PIO_FLAG; /* PIO flag interrupt enable (r/w) */
+static int CONFIG5; /* Configuration 5 register (r/w) */
+/*static int SIGNATURE;*/ /* Signature Register (r) */
+/*static int CONFIG6;*/ /* Configuration 6 register (r) */
+
+/* ============================================================== */
+
+#if USE_DMA
+/*
+ * Program the ISA DMA controller for one transfer and return how many
+ * bytes were actually set up (clipped to the channel's maximum and to
+ * the end of the current 64K/128K physical page).
+ */
+static __inline__ int
+NCR53c406a_dma_setup (unsigned char *ptr,
+		      unsigned int count,
+		      unsigned char mode) {
+	unsigned limit;
+	unsigned long flags = 0;
+
+	VDEB(printk("dma: before count=%d   ", count));
+	if (dma_chan <=3) {
+		/* 8-bit channel: 64K max, must not cross a 64K boundary */
+		if (count > 65536)
+			count = 65536;
+		limit = 65536 - (((unsigned) ptr) & 0xFFFF);
+	} else {
+		/* 16-bit channel: 128K max, 128K boundary */
+		if (count > (65536<<1))
+			count = (65536<<1);
+		limit = (65536<<1) - (((unsigned) ptr) & 0x1FFFF);
+	}
+
+	if (count > limit) count = limit;
+
+	VDEB(printk("after count=%d\n", count));
+	if ((count & 1) || (((unsigned) ptr) & 1))
+		panic ("NCR53c406a: attempted unaligned DMA transfer\n");
+
+	/* Program the controller with interrupts off */
+	save_flags(flags);
+	cli();
+	disable_dma(dma_chan);
+	clear_dma_ff(dma_chan);
+	set_dma_addr(dma_chan, (long) ptr);
+	set_dma_count(dma_chan, count);
+	set_dma_mode(dma_chan, mode);
+	enable_dma(dma_chan);
+	restore_flags(flags);
+
+	return count;
+}
+
+/* Memory -> device transfer */
+static __inline__ int
+NCR53c406a_dma_write(unsigned char *src, unsigned int count) {
+	return NCR53c406a_dma_setup (src, count, DMA_MODE_WRITE);
+}
+
+/* Device -> memory transfer ("src" is really the destination buffer) */
+static __inline__ int
+NCR53c406a_dma_read(unsigned char *src, unsigned int count) {
+	return NCR53c406a_dma_setup (src, count, DMA_MODE_READ);
+}
+
+/* Bytes left in the current DMA transfer, read with interrupts off */
+static __inline__ int
+NCR53c406a_dma_residual (void) {
+	register int tmp;
+	unsigned long flags = 0;
+	save_flags(flags);
+	cli();
+	clear_dma_ff(dma_chan);
+	tmp = get_dma_residue(dma_chan);
+	restore_flags(flags);
+
+	return tmp;
+}
+#endif /* USE_DMA */
+
+#if USE_PIO
+/*
+ * Drain the chip's PIO FIFO into "request" until reqlen bytes have
+ * been read, or the chip flags an error (status bit 7) or an interrupt
+ * with an empty FIFO (bit 6).  Always returns 0.
+ */
+static __inline__ int NCR53c406a_pio_read(unsigned char *request,
+					  unsigned int reqlen)
+{
+	int i;
+	int len;	/* current scsi fifo size */
+	unsigned long flags = 0;
+
+	REG1;
+	while (reqlen) {
+		i = inb(PIO_STATUS);
+/*		VDEB(printk("pio_status=%x\n", i)); */
+		if (i & 0x80)
+			return 0;
+
+		/* apparent FIFO fill level, decoded from status bits 1-4 */
+		switch( i & 0x1e ) {
+		default:
+		case 0x10:
+			len=0; break;
+		case 0x0:
+			len=1; break;
+		case 0x8:
+			len=42; break;
+		case 0xc:
+			len=84; break;
+		case 0xe:
+			len=128; break;
+		}
+
+		if ((i & 0x40) && len == 0) { /* fifo empty and interrupt occurred */
+			return 0;
+		}
+
+		if (len) {
+			if( len > reqlen )
+				len = reqlen;
+
+			save_flags(flags);
+			cli();
+			if( fast_pio && len > 3 ) {
+				/* 32-bit string read for the bulk */
+				insl(PIO_FIFO,request,len>>2);
+				request += len & 0xfc;
+				reqlen -= len & 0xfc;
+			}
+			else {
+				while(len--) {
+					*request++ = inb(PIO_FIFO);
+					reqlen--;
+				}
+			}
+			restore_flags(flags);
+		}
+	}
+	return 0;
+}
+
+/*
+ * Feed "request" into the chip's PIO FIFO until reqlen bytes are
+ * written, or the chip flags an error (status bit 7) or a pending
+ * interrupt (bit 6).  Always returns 0.
+ */
+static __inline__ int NCR53c406a_pio_write(unsigned char *request,
+					   unsigned int reqlen)
+{
+	int i = 0;
+	int len;	/* current scsi fifo size */
+	unsigned long flags = 0;
+
+	REG1;
+	while (reqlen && !(i&0x40)) {
+		i = inb(PIO_STATUS);
+/*		VDEB(printk("pio_status=%x\n", i)); */
+		if (i & 0x80)	/* error */
+			return 0;
+
+		/* apparent FIFO space free, decoded from status bits 1-4
+		 * (inverse of the fill levels used by the read path) */
+		switch( i & 0x1e ) {
+		case 0x10:
+			len=128; break;
+		case 0x0:
+			len=84; break;
+		case 0x8:
+			len=42; break;
+		case 0xc:
+			len=1; break;
+		default:
+		case 0xe:
+			len=0; break;
+		}
+
+		if (len) {
+			if( len > reqlen )
+				len = reqlen;
+
+			save_flags(flags);
+			cli();
+			if( fast_pio && len > 3 ) {
+				/* 32-bit string write for the bulk */
+				outsl(PIO_FIFO,request,len>>2);
+				request += len & 0xfc;
+				reqlen -= len & 0xfc;
+			}
+			else {
+				while(len--) {
+					outb(*request++, PIO_FIFO);
+					reqlen--;
+				}
+			}
+			restore_flags(flags);
+		}
+	}
+	return 0;
+}
+#endif /* USE_PIO */
+
+int
+NCR53c406a_detect(Scsi_Host_Template * tpnt){
+ struct Scsi_Host *shpnt;
+#ifndef PORT_BASE
+ int i;
+#endif
+
+#if USE_BIOS
+ int ii, jj;
+ bios_base = 0;
+ /* look for a valid signature */
+ for( ii=0; ii < ADDRESS_COUNT && !bios_base; ii++)
+ for( jj=0; (jj < SIGNATURE_COUNT) && !bios_base; jj++)
+ if(!memcmp((void *) addresses[ii]+signatures[jj].sig_offset,
+ (void *) signatures[jj].signature,
+ (int) signatures[jj].sig_length))
+ bios_base=addresses[ii];
+
+ if(!bios_base){
+ printk("NCR53c406a: BIOS signature not found\n");
+ return 0;
+ }
+
+ DEB(printk("NCR53c406a BIOS found at %X\n", (unsigned int) bios_base););
+#endif USE_BIOS
+
+#ifdef PORT_BASE
+ if (check_region(port_base, 0x10)) /* ports already snatched */
+ port_base = 0;
+
+#else /* autodetect */
+ if (port_base) { /* LILO override */
+ if (check_region(port_base, 0x10))
+ port_base = 0;
+ }
+ else {
+ for(i=0; i<PORT_COUNT && !port_base; i++){
+ if(check_region(ports[i], 0x10)){
+ DEB(printk("NCR53c406a: port %x in use\n", ports[i]));
+ }
+ else {
+ VDEB(printk("NCR53c406a: port %x available\n", ports[i]));
+ outb(C5_IMG, ports[i] + 0x0d); /* reg set 1 */
+ if( (inb(ports[i] + 0x0e) ^ inb(ports[i] + 0x0e)) == 7
+ && (inb(ports[i] + 0x0e) ^ inb(ports[i] + 0x0e)) == 7
+ && (inb(ports[i] + 0x0e) & 0xf8) == 0x58 ) {
+ VDEB(printk("NCR53c406a: Sig register valid\n"));
+ VDEB(printk("port_base=%x\n", port_base));
+ port_base = ports[i];
+ }
+ }
+ }
+ }
+#endif PORT_BASE
+
+ if(!port_base){ /* no ports found */
+ printk("NCR53c406a: no available ports found\n");
+ return 0;
+ }
+
+ DEB(printk("NCR53c406a detected\n"));
+
+ calc_port_addr();
+ chip_init();
+
+#ifndef IRQ_LEV
+ if (irq_level < 0) { /* LILO override if >= 0*/
+ irq_level=irq_probe();
+ if (irq_level < 0) { /* Trouble */
+ printk("NCR53c406a: IRQ problem, irq_level=%d, giving up\n", irq_level);
+ return 0;
+ }
+ }
+#endif
+
+ DEB(printk("NCR53c406a: using port_base %x\n", port_base));
+ request_region(port_base, 0x10, "NCR53c406a");
+
+ if(irq_level > 0) {
+ if(request_irq(irq_level, NCR53c406a_intr, 0, "NCR53c406a")){
+ printk("NCR53c406a: unable to allocate IRQ %d\n", irq_level);
+ return 0;
+ }
+ tpnt->can_queue = 1;
+ DEB(printk("NCR53c406a: allocated IRQ %d\n", irq_level));
+ }
+ else if (irq_level == 0) {
+ tpnt->can_queue = 0;
+ DEB(printk("NCR53c406a: No interrupts detected\n"));
+#if USE_DMA
+ printk("NCR53c406a: No interrupts found and DMA mode defined. Giving up.\n");
+ return 0;
+#endif USE_DMA
+ }
+ else {
+ DEB(printk("NCR53c406a: Shouldn't get here!\n"));
+ return 0;
+ }
+
+#if USE_DMA
+ dma_chan = DMA_CHAN;
+ if(request_dma(dma_chan, "NCR53c406a") != 0){
+ printk("NCR53c406a: unable to allocate DMA channel %d\n", dma_chan);
+ return 0;
+ }
+
+ DEB(printk("Allocated DMA channel %d\n", dma_chan));
+#endif USE_DMA
+
+ tpnt->present = 1;
+
+ shpnt = scsi_register(tpnt, 0);
+ shpnt->irq = irq_level;
+ shpnt->io_port = port_base;
+ shpnt->n_io_port = 0x10;
+#if USE_DMA
+ shpnt->dma = dma_chan;
+#endif
+
+#if USE_DMA
+ sprintf(info_msg, "NCR53c406a at 0x%x, IRQ %d, DMA channel %d.", port_base, irq_level, dma_chan);
+#else
+ sprintf(info_msg, "NCR53c406a at 0x%x, IRQ %d, %s PIO mode.", port_base, irq_level, fast_pio ? "fast" : "slow");
+#endif
+
+ return (tpnt->present);
+}
+
+/* called from init/main.c */
+void NCR53c406a_setup(char *str, int *ints)
+{
+ static size_t setup_idx = 0;
+ size_t i;
+
+ DEB(printk("NCR53c406a: Setup called\n"););
+
+ if (setup_idx >= PORT_COUNT - 1) {
+ printk("NCR53c406a: Setup called too many times. Bad LILO params?\n");
+ return;
+ }
+ if (ints[0] < 1 || ints[0] > 3) {
+ printk("NCR53c406a: Malformed command line\n");
+ printk("NCR53c406a: Usage: ncr53c406a=<PORTBASE>[,<IRQ>[,<FASTPIO>]]\n");
+ return;
+ }
+ for (i = 0; i < PORT_COUNT && !port_base; i++)
+ if (ports[i] == ints[1]) {
+ port_base = ints[1];
+ DEB(printk("NCR53c406a: Specified port_base 0x%X\n", port_base);)
+ }
+ if (!port_base) {
+ printk("NCR53c406a: Invalid PORTBASE 0x%X specified\n", ints[1]);
+ return;
+ }
+
+ if (ints[0] > 1) {
+ if (ints[2] == 0) {
+ irq_level = 0;
+ DEB(printk("NCR53c406a: Specified irq %d\n", irq_level);)
+ }
+ else
+ for (i = 0; i < INTR_COUNT && irq_level < 0; i++)
+ if (intrs[i] == ints[2]) {
+ irq_level = ints[2];
+ DEB(printk("NCR53c406a: Specified irq %d\n", port_base);)
+ }
+ if (irq_level < 0)
+ printk("NCR53c406a: Invalid IRQ %d specified\n", ints[2]);
+ }
+
+ if (ints[0] > 2)
+ fast_pio = ints[3];
+
+ DEB(printk("NCR53c406a: port_base=0x%X, irq=%d, fast_pio=%d\n", port_base, irq_level, fast_pio);)
+}
+
+const char*
+NCR53c406a_info(struct Scsi_Host *SChost){
+ DEB(printk("NCR53c406a_info called\n"));
+ return (info_msg);
+}
+
+static void internal_done(Scsi_Cmnd *SCpnt) {
+ internal_done_errcode = SCpnt->result;
+ ++internal_done_flag;
+}
+
+
+static void wait_intr() {
+ int i = jiffies + WATCHDOG;
+
+ while(i>jiffies && !(inb(STAT_REG)&0xe0)) /* wait for a pseudo-interrupt */
+ barrier();
+
+ if (i <= jiffies) { /* Timed out */
+ rtrc(0);
+ current_SC->result = DID_TIME_OUT << 16;
+ current_SC->SCp.phase = idle;
+ current_SC->scsi_done(current_SC);
+ return;
+ }
+
+ NCR53c406a_intr(0, NULL);
+}
+
+int NCR53c406a_command(Scsi_Cmnd *SCpnt){
+ DEB(printk("NCR53c406a_command called\n"));
+ NCR53c406a_queue(SCpnt, internal_done);
+ if(irq_level)
+ while (!internal_done_flag);
+ else /* interrupts not supported */
+ while (!internal_done_flag)
+ wait_intr();
+
+ internal_done_flag = 0;
+ return internal_done_errcode;
+}
+
+
+int
+NCR53c406a_queue(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *)){
+ int i;
+ unsigned long flags = 0;
+
+ VDEB(printk("NCR53c406a_queue called\n"));
+ DEB(printk("cmd=%02x, cmd_len=%02x, target=%02x, lun=%02x, bufflen=%d\n",
+ SCpnt->cmnd[0],
+ SCpnt->cmd_len,
+ SCpnt->target,
+ SCpnt->lun,
+ SCpnt->request_bufflen));
+
+#if 0
+ VDEB(for(i=0; i<SCpnt->cmd_len; i++)
+ printk("cmd[%d]=%02x ", i, SCpnt->cmnd[i]));
+ VDEB(printk("\n"));
+#endif
+
+ current_SC = SCpnt;
+ current_SC->scsi_done = done;
+ current_SC->SCp.phase = command_ph;
+ current_SC->SCp.Status = 0;
+ current_SC->SCp.Message = 0;
+
+ save_flags(flags);
+ cli();
+ REG0;
+ outb(SCpnt->target, DEST_ID); /* set destination */
+ outb(FLUSH_FIFO, CMD_REG); /* reset the fifos */
+
+ for(i=0; i<SCpnt->cmd_len; i++){
+ outb(SCpnt->cmnd[i], SCSI_FIFO);
+ }
+ outb(SELECT_NO_ATN, CMD_REG);
+ restore_flags(flags);
+
+ rtrc(1);
+ return 0;
+}
+
+int
+NCR53c406a_abort(Scsi_Cmnd *SCpnt){
+ DEB(printk("NCR53c406a_abort called\n"));
+ return SCSI_ABORT_SNOOZE; /* Don't know how to abort */
+}
+
+int
+NCR53c406a_reset(Scsi_Cmnd *SCpnt){
+ DEB(printk("NCR53c406a_reset called\n"));
+ outb(C4_IMG, CONFIG4); /* Select reg set 0 */
+ outb(CHIP_RESET, CMD_REG);
+ outb(SCSI_NOP, CMD_REG); /* required after reset */
+ outb(SCSI_RESET, CMD_REG);
+ chip_init();
+
+ rtrc(2);
+ if (irq_level)
+ return SCSI_RESET_PENDING; /* should get an interrupt */
+ else
+ return SCSI_RESET_WAKEUP; /* won't get any interrupts */
+}
+
+int
+NCR53c406a_biosparm(Scsi_Disk *disk, kdev_t dev, int* info_array){
+ int size;
+
+ DEB(printk("NCR53c406a_biosparm called\n"));
+
+ size = disk->capacity;
+ info_array[0] = 64; /* heads */
+ info_array[1] = 32; /* sectors */
+ info_array[2] = size>>11; /* cylinders */
+ if (info_array[2] > 1024) { /* big disk */
+ info_array[0] = 255;
+ info_array[1] = 63;
+ info_array[2] = size / (255*63);
+ }
+ return 0;
+}
+
+static void
+NCR53c406a_intr(int unused, struct pt_regs *regs){
+ DEB(unsigned char fifo_size;)
+ DEB(unsigned char seq_reg;)
+ unsigned char status, int_reg;
+ unsigned long flags = 0;
+#if USE_PIO
+ unsigned char pio_status;
+ struct scatterlist *sglist;
+ unsigned int sgcount;
+#endif
+
+ VDEB(printk("NCR53c406a_intr called\n"));
+
+ save_flags(flags);
+ cli();
+#if USE_PIO
+ REG1;
+ pio_status = inb(PIO_STATUS);
+#endif
+ REG0;
+ status = inb(STAT_REG);
+ DEB(seq_reg = inb(SEQ_REG));
+ int_reg = inb(INT_REG);
+ DEB(fifo_size = inb(FIFO_FLAGS) & 0x1f);
+ restore_flags(flags);
+
+#if NCR53C406A_DEBUG
+ printk("status=%02x, seq_reg=%02x, int_reg=%02x, fifo_size=%02x",
+ status, seq_reg, int_reg, fifo_size);
+#if (USE_DMA)
+ printk("\n");
+#else
+ printk(", pio=%02x\n", pio_status);
+#endif USE_DMA
+#endif NCR53C406A_DEBUG
+
+ if(int_reg & 0x80){ /* SCSI reset intr */
+ rtrc(3);
+ DEB(printk("NCR53c406a: reset intr received\n"));
+ current_SC->SCp.phase = idle;
+ current_SC->result = DID_RESET << 16;
+ current_SC->scsi_done(current_SC);
+ return;
+ }
+
+#if USE_PIO
+ if(pio_status & 0x80) {
+ printk("NCR53C406A: Warning: PIO error!\n");
+ current_SC->SCp.phase = idle;
+ current_SC->result = DID_ERROR << 16;
+ current_SC->scsi_done(current_SC);
+ return;
+ }
+#endif USE_PIO
+
+ if(status & 0x20) { /* Parity error */
+ printk("NCR53c406a: Warning: parity error!\n");
+ current_SC->SCp.phase = idle;
+ current_SC->result = DID_PARITY << 16;
+ current_SC->scsi_done(current_SC);
+ return;
+ }
+
+ if(status & 0x40) { /* Gross error */
+ printk("NCR53c406a: Warning: gross error!\n");
+ current_SC->SCp.phase = idle;
+ current_SC->result = DID_ERROR << 16;
+ current_SC->scsi_done(current_SC);
+ return;
+ }
+
+ if(int_reg & 0x20){ /* Disconnect */
+ DEB(printk("NCR53c406a: disconnect intr received\n"));
+ if(current_SC->SCp.phase != message_in){ /* Unexpected disconnect */
+ current_SC->result = DID_NO_CONNECT << 16;
+ }
+ else{ /* Command complete, return status and message */
+ current_SC->result = (current_SC->SCp.Status & 0xff)
+ | ((current_SC->SCp.Message & 0xff) << 8) | (DID_OK << 16);
+ }
+
+ rtrc(0);
+ current_SC->SCp.phase = idle;
+ current_SC->scsi_done( current_SC );
+ return;
+ }
+
+ switch(status & 0x07){ /* scsi phase */
+ case 0x00: /* DATA-OUT */
+ if(int_reg & 0x10){ /* Target requesting info transfer */
+ rtrc(5);
+ current_SC->SCp.phase = data_out;
+ VDEB(printk("NCR53c406a: Data-Out phase\n"));
+ outb(FLUSH_FIFO, CMD_REG);
+ LOAD_DMA_COUNT(current_SC->request_bufflen); /* Max transfer size */
+#if USE_DMA /* No s/g support for DMA */
+ NCR53c406a_dma_write(current_SC->request_buffer,
+ current_SC->request_bufflen);
+#endif USE_DMA
+ outb(TRANSFER_INFO | DMA_OP, CMD_REG);
+#if USE_PIO
+ if (!current_SC->use_sg) /* Don't use scatter-gather */
+ NCR53c406a_pio_write(current_SC->request_buffer,
+ current_SC->request_bufflen);
+ else { /* use scatter-gather */
+ sgcount = current_SC->use_sg;
+ sglist = current_SC->request_buffer;
+ while( sgcount-- ) {
+ NCR53c406a_pio_write(sglist->address, sglist->length);
+ sglist++;
+ }
+ }
+ REG0;
+#endif USE_PIO
+ }
+ break;
+
+ case 0x01: /* DATA-IN */
+ if(int_reg & 0x10){ /* Target requesting info transfer */
+ rtrc(6);
+ current_SC->SCp.phase = data_in;
+ VDEB(printk("NCR53c406a: Data-In phase\n"));
+ outb(FLUSH_FIFO, CMD_REG);
+ LOAD_DMA_COUNT(current_SC->request_bufflen); /* Max transfer size */
+#if USE_DMA /* No s/g support for DMA */
+ NCR53c406a_dma_read(current_SC->request_buffer,
+ current_SC->request_bufflen);
+#endif USE_DMA
+ outb(TRANSFER_INFO | DMA_OP, CMD_REG);
+#if USE_PIO
+ if (!current_SC->use_sg) /* Don't use scatter-gather */
+ NCR53c406a_pio_read(current_SC->request_buffer,
+ current_SC->request_bufflen);
+ else { /* Use scatter-gather */
+ sgcount = current_SC->use_sg;
+ sglist = current_SC->request_buffer;
+ while( sgcount-- ) {
+ NCR53c406a_pio_read(sglist->address, sglist->length);
+ sglist++;
+ }
+ }
+ REG0;
+#endif USE_PIO
+ }
+ break;
+
+ case 0x02: /* COMMAND */
+ current_SC->SCp.phase = command_ph;
+ printk("NCR53c406a: Warning: Unknown interupt occured in command phase!\n");
+ break;
+
+ case 0x03: /* STATUS */
+ rtrc(7);
+ current_SC->SCp.phase = status_ph;
+ VDEB(printk("NCR53c406a: Status phase\n"));
+#if 0
+#if VERBOSE_NCR53C406A_DEBUG
+ printk("request_buffer=");
+ for(i=0; i<current_SC->request_bufflen && i<256; i++)
+ printk("%02x ", *((unsigned char*)current_SC->request_buffer + i));
+ printk("\n");
+#if USE_DMA
+ printk("dma residue = %d\n", NCR53c406a_dma_residual());
+#endif USE_DMA
+#endif VERBOSE_NCR53C406A_DEBUG
+#endif
+
+ outb(FLUSH_FIFO, CMD_REG);
+ outb(INIT_CMD_COMPLETE, CMD_REG);
+ break;
+
+ case 0x04: /* Reserved */
+ case 0x05: /* Reserved */
+ printk("NCR53c406a: WARNING: Reserved phase!!!\n");
+ break;
+
+ case 0x06: /* MESSAGE-OUT */
+ DEB(printk("NCR53c406a: Message-Out phase\n"));
+ current_SC->SCp.phase = message_out;
+ outb(SET_ATN, CMD_REG); /* Reject the message */
+ outb(MSG_ACCEPT, CMD_REG);
+ break;
+
+ case 0x07: /* MESSAGE-IN */
+ rtrc(4);
+ VDEB(printk("NCR53c406a: Message-In phase\n"));
+ current_SC->SCp.phase = message_in;
+
+ current_SC->SCp.Status = inb(SCSI_FIFO);
+ current_SC->SCp.Message = inb(SCSI_FIFO);
+
+ VDEB(printk("SCSI FIFO size=%d\n", inb(FIFO_FLAGS) & 0x1f));
+ DEB(printk("Status = %02x Message = %02x\n",
+ current_SC->SCp.Status, current_SC->SCp.Message));
+
+ if(current_SC->SCp.Message == SAVE_POINTERS ||
+ current_SC->SCp.Message == DISCONNECT) {
+ outb(SET_ATN, CMD_REG); /* Reject message */
+ DEB(printk("Discarding SAVE_POINTERS message\n"));
+ }
+ outb(MSG_ACCEPT, CMD_REG);
+ break;
+ }
+}
+
+#ifndef IRQ_LEV
+static int irq_probe()
+{
+ int irqs, irq;
+ int i;
+
+ inb(INT_REG); /* clear the interrupt register */
+ sti();
+ irqs = probe_irq_on();
+
+ /* Invalid command will cause an interrupt */
+ REG0;
+ outb(0xff, CMD_REG);
+
+ /* Wait for the interrupt to occur */
+ i = jiffies + WATCHDOG;
+ while(i > jiffies && !(inb(STAT_REG) & 0x80))
+ barrier();
+ if (i <= jiffies) { /* Timed out, must be hardware trouble */
+ probe_irq_off(irqs);
+ return -1;
+ }
+
+ irq = probe_irq_off(irqs);
+
+ /* Kick the chip */
+ outb(CHIP_RESET, CMD_REG);
+ outb(SCSI_NOP, CMD_REG);
+ chip_init();
+
+ return irq;
+}
+#endif IRQ_LEV
+
+static void chip_init()
+{
+ REG1;
+#if USE_DMA
+ outb(0x00, PIO_STATUS);
+#else /* USE_PIO */
+ outb(0x01, PIO_STATUS);
+#endif
+ outb(0x00, PIO_FLAG);
+
+ outb(C4_IMG, CONFIG4); /* REG0; */
+ outb(C3_IMG, CONFIG3);
+ outb(C2_IMG, CONFIG2);
+ outb(C1_IMG, CONFIG1);
+
+ outb(0x05, CLKCONV); /* clock conversion factor */
+ outb(0x9C, SRTIMOUT); /* Selection timeout */
+ outb(0x05, SYNCPRD); /* Synchronous transfer period */
+ outb(SYNC_MODE, SYNCOFF); /* synchronous mode */
+}
+
+void calc_port_addr()
+{
+/* Control Register Set 0 */
+TC_LSB = (port_base+0x00);
+TC_MSB = (port_base+0x01);
+SCSI_FIFO = (port_base+0x02);
+CMD_REG = (port_base+0x03);
+STAT_REG = (port_base+0x04);
+DEST_ID = (port_base+0x04);
+INT_REG = (port_base+0x05);
+SRTIMOUT = (port_base+0x05);
+SEQ_REG = (port_base+0x06);
+SYNCPRD = (port_base+0x06);
+FIFO_FLAGS = (port_base+0x07);
+SYNCOFF = (port_base+0x07);
+CONFIG1 = (port_base+0x08);
+CLKCONV = (port_base+0x09);
+/* TESTREG = (port_base+0x0A); */
+CONFIG2 = (port_base+0x0B);
+CONFIG3 = (port_base+0x0C);
+CONFIG4 = (port_base+0x0D);
+TC_HIGH = (port_base+0x0E);
+/* FIFO_BOTTOM = (port_base+0x0F); */
+
+/* Control Register Set 1 */
+/* JUMPER_SENSE = (port_base+0x00);*/
+/* SRAM_PTR = (port_base+0x01);*/
+/* SRAM_DATA = (port_base+0x02);*/
+PIO_FIFO = (port_base+0x04);
+/* PIO_FIFO1 = (port_base+0x05);*/
+/* PIO_FIFO2 = (port_base+0x06);*/
+/* PIO_FIFO3 = (port_base+0x07);*/
+PIO_STATUS = (port_base+0x08);
+/* ATA_CMD = (port_base+0x09);*/
+/* ATA_ERR = (port_base+0x0A);*/
+PIO_FLAG = (port_base+0x0B);
+CONFIG5 = (port_base+0x0D);
+/* SIGNATURE = (port_base+0x0E);*/
+/* CONFIG6 = (port_base+0x0F);*/
+}
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but this will be later */
+Scsi_Host_Template driver_template = NCR53c406a;
+
+#include "scsi_module.c"
+#endif
--- /dev/null
+#ifndef _NCR53C406A_H
+#define _NCR53C406A_H
+
+/*
+ * NCR53c406a.h
+ *
+ * Copyright (C) 1994 Normunds Saumanis (normunds@rx.tech.swh.lv)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ */
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+/* NOTE: scatter-gather support only works in PIO mode.
+ * Use SG_NONE if DMA mode is enabled!
+ */
+#define NCR53c406a { \
+ NULL /* next */, \
+ NULL /* usage count */, \
+ NULL /* proc_dir */, \
+ NULL /* proc_info */, \
+ "NCR53c406a" /* name */, \
+ NCR53c406a_detect /* detect */, \
+ NULL /* release */, \
+ NCR53c406a_info /* info */, \
+ NCR53c406a_command /* command */, \
+ NCR53c406a_queue /* queuecommand */, \
+ NCR53c406a_abort /* abort */, \
+ NCR53c406a_reset /* reset */, \
+ NULL /* slave_attach */, \
+ NCR53c406a_biosparm /* biosparm */, \
+ 1 /* can_queue */, \
+ 7 /* SCSI ID of the chip */, \
+ 32 /*SG_ALL*/ /*SG_NONE*/, \
+ 1 /* commands per lun */, \
+ 0 /* number of boards in system */, \
+ 1 /* unchecked_isa_dma */, \
+ ENABLE_CLUSTERING \
+}
+
+int NCR53c406a_detect(Scsi_Host_Template *);
+const char* NCR53c406a_info(struct Scsi_Host *);
+
+int NCR53c406a_command(Scsi_Cmnd *);
+int NCR53c406a_queue(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+int NCR53c406a_abort(Scsi_Cmnd *);
+int NCR53c406a_reset(Scsi_Cmnd *);
+int NCR53c406a_biosparm(Disk *, kdev_t, int []);
+
+#endif /* _NCR53C406A_H */
+
This file contains brief information about the SCSI tape driver.
-Last modified: Sun Sep 10 13:19:02 1995 by root@kai.makisara.fi
+Last modified: Sun Sep 24 23:40:06 1995 by root@kai.makisara.fi
BASICS
BUFFERING
-The driver uses a buffer allocated at system initialization. The size
-of the buffer is selectable at compile and/or boot time. The buffer is
-used to store the data being transferred to/from the SCSI adapter. The
-following buffering options are selectable at compile time and/or at run
-time (via ioctl):
+The driver uses tape buffers allocated either at system initialization
+or at run-time when needed. One buffer is used for each open tape
+device. The size of the buffers is selectable at compile and/or boot
+time. The buffers are used to store the data being transferred to/from
+the SCSI adapter. The following buffering options are selectable at
+compile time and/or at run time (via ioctl):
-Buffering of data to be written across write calls for fixed block
-mode (define ST_BUFFER_WRITES). This should be disabled if reliable
-detection of end of media (EOM) for fixed block mode is desired.
+Buffering of data across write calls in fixed block mode (define
+ST_BUFFER_WRITES). This should be disabled if reliable detection of
+end of medium (EOM) for fixed block mode is desired.
Asynchronous writing. Writing the buffer contents to the tape is
started and the write call returns immediately. The status is checked
a filemark to truncate a read request or that don't like backspacing.
The buffer size is defined (in 1024 byte units) by ST_BUFFER_BLOCKS or
-at boot time. The maximum number of buffers allocated is defined by
+at boot time. If this size is not enough, the driver tries to allocate
+a large enough temporary buffer that is released when the device is
+closed.
+
+The maximum number of buffers allocated at initialization is defined by
ST_MAX_BUFFERS. One buffer is allocated for each drive detected when
the driver is initialized up to the maximum. The minimum number of
allocated buffers is ST_EXTRA_DEVS (in hosts.h). This ensures some
functionality also for the drives found after tape driver
initialization (a SCSI adapter driver is loaded as a module). The
-default for ST_EXTRA_DEVS is two. This is enough to enable tape copies.
+default for ST_EXTRA_DEVS is two. The driver tries to allocate new
+buffers at run-time if necessary.
+
+Allocation of the buffers can be postponed to run-time if
+(ST_RUNTIME_BUFFERS). The advantage is that memory is not wasted for
+buffers not being used. The disadvantage is that there may not be
+memory available at the time when a buffer is needed for the first
+time (once a buffer is allocated, it is not released).
-The threshold for triggering asynchronous write is defined by
-ST_WRITE_THRESHOLD.
+The threshold for triggering asynchronous write in fixed block mode
+is defined by ST_WRITE_THRESHOLD.
BOOT TIME CONFIGURATION
The recovered write errors are considered fatal if ST_RECOVERED_WRITE_FATAL
is defined.
+The maximum number of tape devices is determined by the define
+ST_MAX_TAPES. If more tapes are detected at driver initialization, the
+maximum is adjusted accordingly.
+
Immediate return from tape positioning SCSI commands can be enabled by
defining ST_NOWAIT.
**************************************************************************/
+#ifdef PCMCIA
+#define MODULE
+#endif
+
#ifdef MODULE
#include <linux/config.h>
#include <linux/module.h>
#endif
+#ifdef PCMCIA
+#undef MODULE
+#endif
+
#include <linux/sched.h>
#include <asm/io.h>
#include "../block/blk.h"
#include "eata.h"
#endif
+#ifdef CONFIG_SCSI_NCR53C406A
+#include "NCR53c406a.h"
+#endif
+
#ifdef CONFIG_SCSI_DEBUG
#include "scsi_debug.h"
#endif
#ifdef CONFIG_SCSI_GENERIC_NCR5380
GENERIC_NCR5380,
#endif
+#ifdef CONFIG_SCSI_NCR53C406A /* 53C406A should come before QLOGIC */
+ NCR53c406a,
+#endif
#ifdef CONFIG_SCSI_QLOGIC
QLOGIC,
#endif
struct SHT * next;
/* Used with loadable modules so that we know when it is safe to unload */
- int * usage_count;
+ long * usage_count;
/* The pointer to the /proc/scsi directory entry */
struct proc_dir_entry *proc_dir;
struct Scsi_Device_Template * next;
const char * name;
const char * tag;
- int * usage_count; /* Used for loadable modules */
+ long * usage_count; /* Used for loadable modules */
unsigned char scsi_type;
unsigned char major;
unsigned char nr_dev; /* Number currently attached */
SDpnt->manufacturer = SCSI_MAN_TOSHIBA;
else if (!strncmp(scsi_result+8,"SONY",4))
SDpnt->manufacturer = SCSI_MAN_SONY;
+ else if (!strncmp(scsi_result+8, "PIONEER", 7))
+ SDpnt->manufacturer = SCSI_MAN_PIONEER;
else
SDpnt->manufacturer = SCSI_MAN_UNKNOWN;
void * scsi_init_malloc(unsigned int size, int priority)
{
unsigned long retval;
+ int order, a_size;
/* Use the statically allocated memory instead of kmalloc (DB) */
#if defined(USE_STATIC_SCSI_MEMORY)
* For buffers used by the DMA pool, we assume page aligned
* structures.
*/
- if(size == PAGE_SIZE)
- retval = (unsigned long) __get_dma_pages(priority & GFP_LEVEL_MASK, 0);
- else
- retval = (unsigned long) kmalloc(size, priority);
+ if ((size % PAGE_SIZE) == 0) {
+ for (order = 0, a_size = PAGE_SIZE;
+ a_size < size; order++, a_size <<= 1)
+ ;
+ retval =
+ (unsigned long) __get_dma_pages(priority & GFP_LEVEL_MASK,
+ order);
+ }
+ else
+ retval = (unsigned long) kmalloc(size, priority);
+
} else {
/*
* Keep all memory aligned on 16-byte boundaries. Some host
void scsi_init_free(char * ptr, unsigned int size)
{
+ int order, a_size;
+
/* We need to compare addresses to see whether this was kmalloc'd or not */
if((unsigned long) ptr >= scsi_init_memory_start ||
* page aligned data. Besides, it is wasteful to allocate
* page sized chunks with kmalloc.
*/
- if(size == PAGE_SIZE)
- free_pages((unsigned long)ptr, 0);
- else
- kfree(ptr);
+ if ((size % PAGE_SIZE) == 0) {
+ for (order = 0, a_size = PAGE_SIZE;
+ a_size < size; order++, a_size <<= 1)
+ ;
+ free_pages((unsigned long)ptr, order);
+ }
+ else
+ kfree(ptr);
} else {
/* Use the same alignment as scsi_init_malloc() */
size = (size + 15) & ~15;
#define SCSI_MAN_TOSHIBA 2
#define SCSI_MAN_NEC_OLDCDR 3
#define SCSI_MAN_SONY 4
+#define SCSI_MAN_PIONEER 5
/*
* As the scsi do command functions are intelligent, and may need to
* 19950704 operator@melchior.frmug.fr.net (Thomas Quinot)
*
* - SONY: Same as Nec.
+ *
+ * - PIONEER: works with SONY code
*/
static void sr_photocd(struct inode *inode)
break;
case SCSI_MAN_SONY: /* Thomas QUINOT <thomas@melchior.frmug.fr.net> */
+ case SCSI_MAN_PIONEER:
#ifdef DEBUG
- printk("sr_photocd: use SONY code\n");
+ printk("sr_photocd: use SONY/PIONEER code\n");
#endif
memset(buf,0,40);
*((unsigned long*)buf) = 0x0; /* we send nothing... */
Copyright 1992, 1993, 1994, 1995 Kai Makisara
email Kai.Makisara@metla.fi
- Last modified: Tue Sep 19 21:51:41 1995 by root@kai.makisara.fi
+ Last modified: Mon Sep 25 19:52:16 1995 by root@kai.makisara.fi
Some small formal changes - aeb, 950809
*/
#ifdef MODULE
#include <linux/ioctl.h>
#include <linux/fcntl.h>
#include <asm/segment.h>
+#include <asm/dma.h>
#include <asm/system.h>
/* The driver prints some debugging information on the console if DEBUG
static int st_write_threshold = ST_WRITE_THRESHOLD;
static int st_max_buffers = ST_MAX_BUFFERS;
-#define MAX_SCSI_TAPES 8
Scsi_Tape * scsi_tapes = NULL;
static ST_buffer *new_tape_buffer(int);
+static int enlarge_buffer(ST_buffer *, int);
+static void normalize_buffer(ST_buffer *);
static int st_init(void);
static int st_attach(Scsi_Device *);
int dev = TAPE_NR(SCpnt->request.rq_dev);
int result = SCpnt->result;
unsigned char * sense = SCpnt->sense_buffer, scode;
+#if DEBUG
const char *stp;
+#endif
if (!result /* && SCpnt->sense_buffer[0] == 0 */ )
return 0;
) {
scsi_tapes[dev].recover_count++;
scsi_tapes[dev].mt_status->mt_erreg += (1 << MT_ST_SOFTERR_SHIFT);
- if (SCpnt->data_cmnd[0] == READ_6)
- stp = "read";
- else if (SCpnt->data_cmnd[0] == WRITE_6)
- stp = "write";
- else
- stp = "ioctl";
- printk("st%d: Recovered %s error (%d).\n", dev, stp,
- scsi_tapes[dev].recover_count);
+#if DEBUG
+ if (debugging) { /* This is compiled always on purpose */
+ if (SCpnt->data_cmnd[0] == READ_6)
+ stp = "read";
+ else if (SCpnt->data_cmnd[0] == WRITE_6)
+ stp = "write";
+ else
+ stp = "ioctl";
+ printk("st%d: Recovered %s error (%d).\n", dev, stp,
+ scsi_tapes[dev].recover_count);
+ }
+#endif
return 0;
}
return (-EIO);
}
else
(STp->buffer)->last_result = SCpnt->result;
- (STp->buffer)->last_result_fatal = st_chk_result(SCpnt);
- if ((STp->buffer)->writing)
+ if ((STp->buffer)->writing) {
+ /* Process errors before releasing request */
+ (STp->buffer)->last_result_fatal = st_chk_result(SCpnt);
SCpnt->request.rq_status = RQ_INACTIVE;
+ }
else
SCpnt->request.rq_status = RQ_SCSI_DONE;
if (!(STp->buffer)->writing || STp->write_pending)
}
+/* Do the scsi command */
+ static Scsi_Cmnd *
+st_do_scsi(Scsi_Cmnd *SCpnt, Scsi_Tape *STp, unsigned char *cmd, int bytes,
+ int timeout, int retries)
+{
+ unsigned int flags;
+
+ if (SCpnt == NULL)
+ if ((SCpnt = allocate_device(NULL, STp->device, 1)) == NULL) {
+ printk("st%d: Can't get SCSI request.\n", TAPE_NR(STp->devt));
+ return NULL;
+ }
+
+ cmd[1] |= (SCpnt->lun << 5) & 0xe0;
+ SCpnt->request.rq_status = RQ_SCSI_BUSY;
+ SCpnt->request.rq_dev = STp->devt;
+
+ scsi_do_cmd(SCpnt, (void *)cmd, (STp->buffer)->b_data, bytes,
+ st_sleep_done, timeout, retries);
+
+ /* this must be done with interrupts off */
+ save_flags (flags);
+ cli();
+ if (SCpnt->request.rq_status != RQ_SCSI_DONE)
+ sleep_on( &(STp->waiting) );
+ restore_flags(flags);
+
+ (STp->buffer)->last_result_fatal = st_chk_result(SCpnt);
+
+ return SCpnt;
+}
+
+
/* Handle the write-behind checking */
static void
-write_behind_check(kdev_t devt)
+write_behind_check(Scsi_Tape *STp)
{
- int dev = TAPE_NR(devt);
- Scsi_Tape * STp;
ST_buffer * STbuffer;
unsigned long flags;
- STp = &(scsi_tapes[dev]);
STbuffer = STp->buffer;
save_flags(flags);
/* Back over EOF if it has been inadvertently crossed (ioctl not used because
it messes up the block number). */
static int
-back_over_eof(kdev_t devt)
+back_over_eof(Scsi_Tape *STp)
{
- int dev = TAPE_NR(devt);
Scsi_Cmnd *SCpnt;
- Scsi_Tape *STp = &(scsi_tapes[dev]);
unsigned char cmd[10];
- unsigned int flags;
- SCpnt = allocate_device(NULL, STp->device, 1);
- SCpnt->request.rq_dev = devt;
cmd[0] = SPACE;
- cmd[1] = ((SCpnt->lun << 5) & 0xe0) | 0x01; /* Space FileMarks */
+ cmd[1] = 0x01; /* Space FileMarks */
cmd[2] = cmd[3] = cmd[4] = 0xff; /* -1 filemarks */
cmd[5] = 0;
- SCpnt->request.rq_status = RQ_SCSI_BUSY;
- scsi_do_cmd(SCpnt,
- (void *) cmd, (void *) (STp->buffer)->b_data, 0,
- st_sleep_done, ST_TIMEOUT, MAX_RETRIES);
+ SCpnt = st_do_scsi(NULL, STp, cmd, 0, ST_TIMEOUT, MAX_RETRIES);
+ if (!SCpnt)
+ return (-EBUSY);
- /* need to do the check with interrupts off. -RAB */
- save_flags(flags);
- cli();
- if (SCpnt->request.rq_status != RQ_SCSI_DONE)
- sleep_on( &(STp->waiting) );
- restore_flags(flags);
-
SCpnt->request.rq_status = RQ_INACTIVE;
if ((STp->buffer)->last_result != 0) {
- printk("st%d: Backing over filemark failed.\n", dev);
+ printk("st%d: Backing over filemark failed.\n", TAPE_NR(STp->devt));
if ((STp->mt_status)->mt_fileno >= 0)
(STp->mt_status)->mt_fileno += 1;
(STp->mt_status)->mt_blkno = 0;
/* Flush the write buffer (never need to write if variable blocksize). */
static int
-flush_write_buffer(kdev_t devt)
+flush_write_buffer(Scsi_Tape *STp)
{
- int dev = TAPE_NR(devt);
int offset, transfer, blks;
int result;
- unsigned int flags;
unsigned char cmd[10];
Scsi_Cmnd *SCpnt;
- Scsi_Tape *STp = &(scsi_tapes[dev]);
if ((STp->buffer)->writing) {
- write_behind_check(devt);
+ write_behind_check(STp);
if ((STp->buffer)->last_result_fatal) {
#if DEBUG
if (debugging)
- printk("st%d: Async write error (flush) %x.\n", dev,
+ printk("st%d: Async write error (flush) %x.\n", TAPE_NR(STp->devt),
(STp->buffer)->last_result);
#endif
if ((STp->buffer)->last_result == INT_MAX)
result = 0;
if (STp->dirty == 1) {
- SCpnt = allocate_device(NULL, STp->device, 1);
offset = (STp->buffer)->buffer_bytes;
transfer = ((offset + STp->block_size - 1) /
STp->block_size) * STp->block_size;
#if DEBUG
if (debugging)
- printk("st%d: Flushing %d bytes.\n", dev, transfer);
+ printk("st%d: Flushing %d bytes.\n", TAPE_NR(STp->devt), transfer);
#endif
memset((STp->buffer)->b_data + offset, 0, transfer - offset);
memset(cmd, 0, 10);
cmd[0] = WRITE_6;
- cmd[1] = ((SCpnt->lun << 5) & 0xe0) | 1;
+ cmd[1] = 1;
blks = transfer / STp->block_size;
cmd[2] = blks >> 16;
cmd[3] = blks >> 8;
cmd[4] = blks;
- SCpnt->request.rq_status = RQ_SCSI_BUSY;
- SCpnt->request.rq_dev = devt;
- scsi_do_cmd (SCpnt,
- (void *) cmd, (STp->buffer)->b_data, transfer,
- st_sleep_done, ST_TIMEOUT, MAX_WRITE_RETRIES);
-
- /* this must be done with interrupts off */
- save_flags (flags);
- cli();
- if (SCpnt->request.rq_status != RQ_SCSI_DONE)
- sleep_on( &(STp->waiting) );
- restore_flags(flags);
-
+
+ SCpnt = st_do_scsi(NULL, STp, cmd, transfer, ST_TIMEOUT, MAX_WRITE_RETRIES);
+ if (!SCpnt)
+ return (-EBUSY);
+
if ((STp->buffer)->last_result_fatal != 0) {
- printk("st%d: Error on flush.\n", dev);
+ printk("st%d: Error on flush.\n", TAPE_NR(STp->devt));
if ((SCpnt->sense_buffer[0] & 0x70) == 0x70 &&
(SCpnt->sense_buffer[2] & 0x40) &&
(SCpnt->sense_buffer[2] & 0x0f) != VOLUME_OVERFLOW) {
int backspace, result;
Scsi_Tape * STp;
ST_buffer * STbuffer;
- kdev_t devt = inode->i_rdev;
- int dev = TAPE_NR(devt);
+ int dev = TAPE_NR(inode->i_rdev);
STp = &(scsi_tapes[dev]);
STbuffer = STp->buffer;
return 0;
if (STp->rw == ST_WRITING) /* Writing */
- return flush_write_buffer(devt);
+ return flush_write_buffer(STp);
if (STp->block_size == 0)
return 0;
result = 0;
if (!seek_next) {
if ((STp->eof == ST_FM) && !STp->eof_hit) {
- result = back_over_eof(devt); /* Back over the EOF hit */
+ result = back_over_eof(STp); /* Back over the EOF hit */
if (!result) {
STp->eof = ST_NOEOF;
STp->eof_hit = 0;
scsi_tape_open(struct inode * inode, struct file * filp)
{
unsigned short flags;
- unsigned int processor_flags;
int i;
unsigned char cmd[10];
Scsi_Cmnd * SCpnt;
Scsi_Tape * STp;
- kdev_t devt = inode->i_rdev;
- int dev;
+ int dev = TAPE_NR(inode->i_rdev);
- dev = TAPE_NR(devt);
if (dev >= st_template.dev_max || !scsi_tapes[dev].device)
return (-ENXIO);
STp = &(scsi_tapes[dev]);
STp->nbr_waits = STp->nbr_finished = 0;
#endif
- SCpnt = allocate_device(NULL, STp->device, 1);
- SCpnt->request.rq_dev = devt;
- if (!SCpnt) {
- printk("st%d: Tape request not allocated", dev);
- return (-EBUSY);
- }
-
memset ((void *) &cmd[0], 0, 10);
cmd[0] = TEST_UNIT_READY;
- cmd[1] = (SCpnt->lun << 5) & 0xe0;
- SCpnt->request.rq_status = RQ_SCSI_BUSY;
- scsi_do_cmd(SCpnt,
- (void *) cmd, (void *) (STp->buffer)->b_data,
- 0, st_sleep_done, ST_LONG_TIMEOUT,
- MAX_READY_RETRIES);
-
- /* this must be done with interrupts off */
- save_flags (processor_flags);
- cli();
- if (SCpnt->request.rq_status != RQ_SCSI_DONE)
- sleep_on( &(STp->waiting) );
- restore_flags(processor_flags);
+
+ SCpnt = st_do_scsi(NULL, STp, cmd, 0, ST_LONG_TIMEOUT, MAX_READY_RETRIES);
+ if (!SCpnt)
+ return (-EBUSY);
if ((SCpnt->sense_buffer[0] & 0x70) == 0x70 &&
(SCpnt->sense_buffer[2] & 0x0f) == UNIT_ATTENTION) { /* New media? */
(STp->mt_status)->mt_fileno = 0 ;
memset ((void *) &cmd[0], 0, 10);
cmd[0] = TEST_UNIT_READY;
- cmd[1] = (SCpnt->lun << 5) & 0xe0;
- SCpnt->request.rq_status = RQ_SCSI_BUSY;
- scsi_do_cmd(SCpnt,
- (void *) cmd, (void *) (STp->buffer)->b_data,
- 0, st_sleep_done, ST_LONG_TIMEOUT,
- MAX_READY_RETRIES);
-
- /* this must be done with interrupts off */
- save_flags (processor_flags);
- cli();
- if (SCpnt->request.rq_status != RQ_SCSI_DONE)
- sleep_on( &(STp->waiting) );
- restore_flags(processor_flags);
+
+ SCpnt = st_do_scsi(SCpnt, STp, cmd, 0, ST_LONG_TIMEOUT, MAX_READY_RETRIES);
(STp->mt_status)->mt_fileno = STp->drv_block = 0;
STp->eof = ST_NOEOF;
+ (STp->device)->was_reset = 0;
}
if ((STp->buffer)->last_result_fatal != 0) {
memset ((void *) &cmd[0], 0, 10);
cmd[0] = READ_BLOCK_LIMITS;
- cmd[1] = (SCpnt->lun << 5) & 0xe0;
- SCpnt->request.rq_status = RQ_SCSI_BUSY;
- scsi_do_cmd(SCpnt,
- (void *) cmd, (void *) (STp->buffer)->b_data,
- 6, st_sleep_done, ST_TIMEOUT, MAX_READY_RETRIES);
-
- /* this must be done with interrupts off */
- save_flags (processor_flags);
- cli();
- if (SCpnt->request.rq_status != RQ_SCSI_DONE)
- sleep_on( &(STp->waiting) );
- restore_flags(processor_flags);
+
+ SCpnt = st_do_scsi(SCpnt, STp, cmd, 6, ST_TIMEOUT, MAX_READY_RETRIES);
if (!SCpnt->result && !SCpnt->sense_buffer[0]) {
STp->max_block = ((STp->buffer)->b_data[1] << 16) |
memset ((void *) &cmd[0], 0, 10);
cmd[0] = MODE_SENSE;
- cmd[1] = (SCpnt->lun << 5) & 0xe0;
cmd[4] = 12;
- SCpnt->request.rq_status = RQ_SCSI_BUSY;
- scsi_do_cmd(SCpnt,
- (void *) cmd, (void *) (STp->buffer)->b_data,
- 12, st_sleep_done, ST_TIMEOUT, MAX_READY_RETRIES);
-
- /* this must be done with interrupts off */
- save_flags (processor_flags);
- cli();
- if (SCpnt->request.rq_status != RQ_SCSI_DONE)
- sleep_on( &(STp->waiting) );
- restore_flags(processor_flags);
+
+ SCpnt = st_do_scsi(SCpnt, STp, cmd, 12, ST_TIMEOUT, MAX_READY_RETRIES);
if ((STp->buffer)->last_result_fatal != 0) {
#if DEBUG
#endif
}
- if (STp->block_size > st_buffer_size) {
+ if (STp->block_size > (STp->buffer)->buffer_size &&
+ !enlarge_buffer(STp->buffer, STp->block_size)) {
printk("st%d: Blocksize %d too large for buffer.\n", dev,
STp->block_size);
(STp->buffer)->in_use = 0;
}
SCpnt->request.rq_status = RQ_INACTIVE; /* Mark as not busy */
- if (STp->block_size > 0) {
+ if (STp->block_size > 0)
(STp->buffer)->buffer_blocks = st_buffer_size / STp->block_size;
- (STp->buffer)->buffer_size =
- (STp->buffer)->buffer_blocks * STp->block_size;
- }
- else {
+ else
(STp->buffer)->buffer_blocks = 1;
- (STp->buffer)->buffer_size = st_buffer_size;
- }
(STp->buffer)->buffer_bytes = (STp->buffer)->read_pointer = 0;
#if DEBUG
static unsigned char cmd[10];
Scsi_Cmnd * SCpnt;
Scsi_Tape * STp;
- unsigned int flags;
kdev_t devt = inode->i_rdev;
int dev;
STp = &(scsi_tapes[dev]);
- if ( STp->rw == ST_WRITING) {
+ if ( STp->rw == ST_WRITING && !(STp->device)->was_reset) {
- result = flush_write_buffer(devt);
+ result = flush_write_buffer(STp);
#if DEBUG
if (debugging) {
#endif
if (result == 0 || result == (-ENOSPC)) {
- SCpnt = allocate_device(NULL, STp->device, 1);
- SCpnt->request.rq_dev = devt;
memset(cmd, 0, 10);
cmd[0] = WRITE_FILEMARKS;
- cmd[1] = (SCpnt->lun << 5) & 0xe0;
cmd[4] = 1 + STp->two_fm;
- SCpnt->request.rq_status = RQ_SCSI_BUSY;
- scsi_do_cmd( SCpnt,
- (void *) cmd, (void *) (STp->buffer)->b_data,
- 0, st_sleep_done, ST_TIMEOUT, MAX_WRITE_RETRIES);
-
- /* this must be done with interrupts off */
- save_flags (flags);
- cli();
- if (SCpnt->request.rq_status != RQ_SCSI_DONE)
- sleep_on( &(STp->waiting) );
- restore_flags(flags);
-
- if ((STp->buffer)->last_result_fatal != 0) {
- SCpnt->request.rq_status = RQ_INACTIVE; /* Mark as not busy */
+
+ SCpnt = st_do_scsi(NULL, STp, cmd, 0, ST_TIMEOUT, MAX_WRITE_RETRIES);
+ if (!SCpnt)
+ return;
+
+ if ((STp->buffer)->last_result_fatal != 0)
printk("st%d: Error on write filemark.\n", dev);
- }
else {
- SCpnt->request.rq_status = RQ_INACTIVE; /* Mark as not busy */
if ((STp->mt_status)->mt_fileno >= 0)
(STp->mt_status)->mt_fileno++ ;
STp->drv_block = 0;
if (STp->two_fm)
- back_over_eof(devt);
+ back_over_eof(STp);
}
-
+ SCpnt->request.rq_status = RQ_INACTIVE; /* Mark as not busy */
}
#if DEBUG
flush_buffer(inode, filp, 0);
#else
if ((STp->eof == ST_FM) && !STp->eof_hit)
- back_over_eof(devt);
+ back_over_eof(STp);
#endif
}
if (STp->door_locked == ST_LOCKED_AUTO)
st_int_ioctl(inode, filp, MTUNLOCK, 0);
- if (STp->buffer != NULL)
+ if (STp->buffer != NULL) {
+ normalize_buffer(STp->buffer);
(STp->buffer)->in_use = 0;
+ }
STp->in_use = 0;
if (scsi_tapes[dev].device->host->hostt->usage_count)
int doing_write = 0;
static unsigned char cmd[10];
const char *b_point;
- Scsi_Cmnd * SCpnt;
+ Scsi_Cmnd * SCpnt = NULL;
Scsi_Tape * STp;
- unsigned int flags;
- kdev_t devt = inode->i_rdev;
- int dev;
+ int dev = TAPE_NR(inode->i_rdev);
- dev = TAPE_NR(devt);
STp = &(scsi_tapes[dev]);
if (STp->ready != ST_READY)
return (-EIO);
if (STp->write_prot)
return (-EACCES);
- if (STp->block_size == 0 && count > st_buffer_size)
+ if (STp->block_size == 0 &&
+ count > (STp->buffer)->buffer_size &&
+ !enlarge_buffer(STp->buffer, count))
return (-EOVERFLOW);
if (STp->do_auto_lock && STp->door_locked == ST_UNLOCKED &&
STp->moves_after_eof++;
if ((STp->buffer)->writing) {
- write_behind_check(devt);
+ write_behind_check(STp);
if ((STp->buffer)->last_result_fatal) {
#if DEBUG
if (debugging)
write_threshold = 1;
}
else
- write_threshold = (STp->buffer)->buffer_size;
+ write_threshold = (STp->buffer)->buffer_blocks * STp->block_size;
if (!STp->do_async_writes)
write_threshold--;
- SCpnt = allocate_device(NULL, STp->device, 1);
- SCpnt->request.rq_dev = devt;
-
total = count;
memset(cmd, 0, 10);
cmd[0] = WRITE_6;
- cmd[1] = ((SCpnt->lun << 5) & 0xe0) | (STp->block_size != 0);
+ cmd[1] = (STp->block_size != 0);
STp->rw = ST_WRITING;
if (STp->block_size == 0)
do_count = count;
else {
- do_count = (STp->buffer)->buffer_size - (STp->buffer)->buffer_bytes;
+ do_count = (STp->buffer)->buffer_blocks * STp->block_size -
+ (STp->buffer)->buffer_bytes;
if (do_count > count)
do_count = count;
}
cmd[2] = blks >> 16;
cmd[3] = blks >> 8;
cmd[4] = blks;
- SCpnt->request.rq_status = RQ_SCSI_BUSY;
- scsi_do_cmd (SCpnt,
- (void *) cmd, (STp->buffer)->b_data, transfer,
- st_sleep_done, ST_TIMEOUT, MAX_WRITE_RETRIES);
- /* this must be done with interrupts off */
- save_flags (flags);
- cli();
- if (SCpnt->request.rq_status != RQ_SCSI_DONE)
- sleep_on( &(STp->waiting) );
- restore_flags(flags);
+ SCpnt = st_do_scsi(SCpnt, STp, cmd, transfer, ST_TIMEOUT, MAX_WRITE_RETRIES);
+ if (!SCpnt)
+ return (-EBUSY);
if ((STp->buffer)->last_result_fatal != 0) {
#if DEBUG
((STp->buffer)->buffer_bytes >= STp->write_threshold ||
STp->block_size == 0) ) {
/* Schedule an asynchronous write */
+ if (!SCpnt) {
+ SCpnt = allocate_device(NULL, STp->device, 1);
+ if (!SCpnt)
+ return (-EBUSY);
+ }
if (STp->block_size == 0)
(STp->buffer)->writing = (STp->buffer)->buffer_bytes;
else
cmd[3] = blks >> 8;
cmd[4] = blks;
SCpnt->request.rq_status = RQ_SCSI_BUSY;
+ SCpnt->request.rq_dev = STp->devt;
STp->write_pending = 1;
scsi_do_cmd (SCpnt,
(void *) cmd, (STp->buffer)->b_data,
int total;
int transfer, blks, bytes;
static unsigned char cmd[10];
- Scsi_Cmnd * SCpnt;
+ Scsi_Cmnd * SCpnt = NULL;
Scsi_Tape * STp;
- unsigned int flags;
- kdev_t devt = inode->i_rdev;
- int dev;
+ int dev = TAPE_NR(inode->i_rdev);
- dev = TAPE_NR(devt);
STp = &(scsi_tapes[dev]);
if (STp->ready != ST_READY)
return (-EIO);
}
#endif
- if (STp->block_size == 0 && count > st_buffer_size)
+ if (STp->block_size == 0 &&
+ count > (STp->buffer)->buffer_size &&
+ !enlarge_buffer(STp->buffer, count))
return (-EOVERFLOW);
if (!(STp->do_read_ahead) && STp->block_size != 0 &&
STp->rw = ST_READING;
- SCpnt = allocate_device(NULL, STp->device, 1);
- SCpnt->request.rq_dev = devt;
-
for (total = 0; total < count; ) {
if ((STp->buffer)->buffer_bytes == 0 &&
memset(cmd, 0, 10);
cmd[0] = READ_6;
- cmd[1] = ((SCpnt->lun << 5) & 0xe0) | (STp->block_size != 0);
+ cmd[1] = (STp->block_size != 0);
if (STp->block_size == 0)
blks = bytes = count;
else {
}
else {
bytes = count;
- if (bytes > st_buffer_size)
- bytes = st_buffer_size;
+ if (bytes > (STp->buffer)->buffer_size)
+ bytes = (STp->buffer)->buffer_size;
blks = bytes / STp->block_size;
bytes = blks * STp->block_size;
}
cmd[3] = blks >> 8;
cmd[4] = blks;
- SCpnt->request.rq_status = RQ_SCSI_BUSY;
- scsi_do_cmd (SCpnt,
- (void *) cmd, (STp->buffer)->b_data,
- bytes, st_sleep_done, ST_TIMEOUT, MAX_RETRIES);
-
- /* this must be done with interrupts off */
- save_flags (flags);
- cli();
- if (SCpnt->request.rq_status != RQ_SCSI_DONE)
- sleep_on( &(STp->waiting) );
- restore_flags(flags);
+ SCpnt = st_do_scsi(SCpnt, STp, cmd, bytes, ST_TIMEOUT, MAX_RETRIES);
+ if (!SCpnt)
+ return (-EBUSY);
(STp->buffer)->read_pointer = 0;
STp->eof_hit = 0;
{
int value;
Scsi_Tape *STp;
- kdev_t devt = inode->i_rdev;
- int dev;
+ int dev = TAPE_NR(inode->i_rdev);
- dev = TAPE_NR(devt);
STp = &(scsi_tapes[dev]);
if ((options & MT_ST_OPTIONS) == MT_ST_BOOLEANS) {
STp->do_buffer_writes = (options & MT_ST_BUFFER_WRITES) != 0;
Scsi_Cmnd * SCpnt;
Scsi_Tape * STp;
int fileno, blkno, at_sm, undone, datalen;
- unsigned int flags;
- kdev_t devt = inode->i_rdev;
- int dev = TAPE_NR(devt);
+ int dev = TAPE_NR(inode->i_rdev);
STp = &(scsi_tapes[dev]);
if (STp->ready != ST_READY && cmd_in != MTLOAD)
return (-ENOSYS);
}
- SCpnt = allocate_device(NULL, STp->device, 1);
- SCpnt->request.rq_dev = devt;
- cmd[1] |= (SCpnt->lun << 5) & 0xe0;
- SCpnt->request.rq_status = RQ_SCSI_BUSY;
- scsi_do_cmd(SCpnt,
- (void *) cmd, (void *) (STp->buffer)->b_data, datalen,
- st_sleep_done, timeout, MAX_RETRIES);
-
- /* this must be done with interrupts off */
- save_flags (flags);
- cli();
- if (SCpnt->request.rq_status != RQ_SCSI_DONE)
- sleep_on( &(STp->waiting) );
- restore_flags(flags);
+ SCpnt = st_do_scsi(NULL, STp, cmd, datalen, timeout, MAX_RETRIES);
+ if (!SCpnt)
+ return (-EBUSY);
ioctl_result = (STp->buffer)->last_result_fatal;
ioctl_result = st_int_ioctl(inode, file, MTBSF, 1);
else if (cmd_in == MTSETBLK) {
STp->block_size = arg;
- if (arg != 0) {
+ if (arg != 0)
(STp->buffer)->buffer_blocks =
- st_buffer_size / STp->block_size;
- (STp->buffer)->buffer_size =
- (STp->buffer)->buffer_blocks * STp->block_size;
- }
- else {
- (STp->buffer)->buffer_blocks = 1;
- (STp->buffer)->buffer_size = st_buffer_size;
- }
- (STp->buffer)->buffer_bytes =
- (STp->buffer)->read_pointer = 0;
+ (STp->buffer)->buffer_size / STp->block_size;
+ (STp->buffer)->buffer_bytes = (STp->buffer)->read_pointer = 0;
}
else if (cmd_in == MTSETDRVBUFFER)
STp->drv_buffer = (arg & 7);
unsigned char scmd[10];
Scsi_Cmnd *SCpnt;
Scsi_Tape *STp;
- unsigned int flags;
- kdev_t devt = inode->i_rdev;
- int dev = TAPE_NR(devt);
+ int dev = TAPE_NR(inode->i_rdev);
STp = &(scsi_tapes[dev]);
#if DEBUG
mtc.mt_op != MTEOM)
return (-EIO);
STp->device->was_reset = 0;
- if (STp->door_locked != ST_UNLOCKED) {
+ if (STp->door_locked != ST_UNLOCKED &&
+ STp->door_locked != ST_LOCK_FAILS) {
if (st_int_ioctl(inode, file, MTLOCK, 0)) {
printk("st%d: Could not relock door after bus reset.\n", dev);
STp->door_locked = ST_UNLOCKED;
if (i)
return i;
- SCpnt = allocate_device(NULL, STp->device, 1);
- SCpnt->request.rq_dev = devt;
-
memset (scmd, 0, 10);
if ((STp->device)->scsi_level < SCSI_2) {
scmd[0] = QFA_REQUEST_BLOCK;
scmd[0] = READ_POSITION;
scmd[1] = 1;
}
- SCpnt->request.rq_status = RQ_SCSI_BUSY;
- scmd[1] |= (SCpnt->lun << 5) & 0xe0;
- scsi_do_cmd(SCpnt,
- (void *) scmd, (void *) (STp->buffer)->b_data,
- 20, st_sleep_done, ST_TIMEOUT, MAX_READY_RETRIES);
-
- /* this must be done with interrupts off */
- save_flags (flags);
- cli();
- if (SCpnt->request.rq_status != RQ_SCSI_DONE)
- sleep_on( &(STp->waiting) );
- restore_flags(flags);
+ SCpnt = st_do_scsi(NULL, STp, scmd, 20, ST_TIMEOUT, MAX_READY_RETRIES);
+ if (!SCpnt)
+ return (-EBUSY);
if ((STp->buffer)->last_result_fatal != 0) {
mt_pos.mt_blkno = (-1);
static ST_buffer *
new_tape_buffer( int from_initialization )
{
- int priority;
+ int priority, a_size;
ST_buffer *tb;
if (st_nbr_buffers >= st_template.dev_max)
return NULL; /* Should never happen */
- if (from_initialization)
- priority = GFP_ATOMIC;
- else
- priority = GFP_KERNEL;
+ if (from_initialization) {
+ priority = GFP_ATOMIC | GFP_DMA;
+ a_size = st_buffer_size;
+ }
+ else {
+ priority = GFP_KERNEL | GFP_DMA;
+ for (a_size = PAGE_SIZE; a_size < st_buffer_size; a_size <<= 1)
+ ; /* Make sure we allocate efficiently */
+ }
tb = (ST_buffer *)scsi_init_malloc(sizeof(ST_buffer), priority);
if (tb) {
- tb->b_data = (unsigned char *)scsi_init_malloc(st_buffer_size,
- priority | GFP_DMA);
+ tb->b_data = (unsigned char *)scsi_init_malloc(a_size, priority);
if (!tb->b_data) {
scsi_init_free((char *)tb, sizeof(ST_buffer));
tb = NULL;
}
#if DEBUG
- printk("st: Allocated tape buffer %d.\n", st_nbr_buffers);
+ if (debugging)
+ printk("st: Allocated tape buffer %d (%d bytes).\n", st_nbr_buffers,
+ a_size);
#endif
tb->in_use = 0;
+ tb->buffer_size = a_size;
tb->writing = 0;
+ tb->orig_b_data = NULL;
st_buffers[st_nbr_buffers++] = tb;
return tb;
}
+/* Try to allocate a temporary enlarged tape buffer */
+ static int
+enlarge_buffer(ST_buffer *STbuffer, int new_size)
+{
+ int a_size;
+ unsigned char *tbd;
+
+ normalize_buffer(STbuffer);
+
+ for (a_size = PAGE_SIZE; a_size < new_size; a_size <<= 1)
+ ; /* Make sure that we allocate efficiently */
+
+ tbd = (unsigned char *)scsi_init_malloc(a_size, GFP_DMA | GFP_KERNEL);
+ if (!tbd)
+ return FALSE;
+
+#if DEBUG
+ if (debugging)
+ printk("st: Buffer enlarged to %d bytes.\n", a_size);
+#endif
+
+ STbuffer->orig_b_data = STbuffer->b_data;
+ STbuffer->orig_size = STbuffer->buffer_size;
+ STbuffer->b_data = tbd;
+ STbuffer->buffer_size = a_size;
+ return TRUE;
+}
+
+
+/* Release the extra buffer */
+ static void
+normalize_buffer(ST_buffer *STbuffer)
+{
+ if (STbuffer->orig_b_data == NULL)
+ return;
+
+ scsi_init_free(STbuffer->b_data, STbuffer->buffer_size);
+ STbuffer->b_data = STbuffer->orig_b_data;
+ STbuffer->orig_b_data = NULL;
+ STbuffer->buffer_size = STbuffer->orig_size;
+
+#if DEBUG
+ if (debugging)
+ printk("st: Buffer normalized to %d bytes.\n", STbuffer->buffer_size);
+#endif
+}
+
+
/* Set the boot options. Syntax: st=xxx,yyy
where xxx is buffer size in 512 byte blocks and yyy is write threshold
in 512 byte blocks. */
else
scsi_tapes[i].mt_status->mt_type = MT_ISSCSI2;
+ tpnt->devt = MKDEV(SCSI_TAPE_MAJOR, i);
tpnt->dirty = 0;
tpnt->rw = ST_IDLE;
tpnt->eof = ST_NOEOF;
if (scsi_tapes) return 0;
st_template.dev_max = st_template.dev_noticed + ST_EXTRA_DEVS;
- if (st_template.dev_max < MAX_SCSI_TAPES)
- st_template.dev_max = MAX_SCSI_TAPES;
+ if (st_template.dev_max < ST_MAX_TAPES)
+ st_template.dev_max = ST_MAX_TAPES;
scsi_tapes =
(Scsi_Tape *) scsi_init_malloc(st_template.dev_max * sizeof(Scsi_Tape),
GFP_ATOMIC);
if (st_buffers != NULL) {
for (i=0; i < st_nbr_buffers; i++)
if (st_buffers[i] != NULL) {
- scsi_init_free((char *) st_buffers[i]->b_data, st_buffer_size);
+ scsi_init_free((char *) st_buffers[i]->b_data,
+ st_buffers[i]->buffer_size);
scsi_init_free((char *) st_buffers[i], sizeof(ST_buffer));
}
-
+
scsi_init_free((char *) st_buffers,
st_template.dev_max * sizeof(ST_buffer *));
}
int last_result;
int last_result_fatal;
unsigned char *b_data;
+ int orig_size;
+ unsigned char *orig_b_data;
} ST_buffer;
typedef struct {
+ kdev_t devt;
unsigned capacity;
struct wait_queue * waiting;
Scsi_Device* device;
Copyright 1995 Kai Makisara.
- Last modified: Mon Sep 18 21:00:49 1995 by root@kai.makisara.fi
+ Last modified: Sun Sep 24 11:46:15 1995 by root@kai.makisara.fi
*/
#ifndef _ST_OPTIONS_H
case the driver tries to allocate a new tape buffer when none is free. */
#define ST_RUNTIME_BUFFERS 0
+/* The minimum limit for the number of SCSI tape devices is determined by
+ ST_MAX_TAPES. If the number of tape devices and the "slack" defined by
+ ST_EXTRA_DEVS exceeds ST_MAX_TAPES, the larger number is used. */
+#define ST_MAX_TAPES 4
+
/* The driver does not wait for some operations to finish before returning
to the user program if ST_NOWAIT is non-zero. This helps if the SCSI
adapter does not support multiple outstanding commands. However, the user
--- /dev/null
+#ifndef _ASMAXP_GENTRAP_H
+#define _ASMAXP_GENTRAP_H
+
+/*
+ * Definitions for gentrap causes. They are generated by user-level
+ * programs and therefore should be compatible with the corresponding
+ * OSF/1 definitions.
+ */
+#define GEN_INTOVF -1 /* integer overflow */
+#define GEN_INTDIV -2 /* integer division by zero */
+#define GEN_FLTOVF -3 /* fp overflow */
+#define GEN_FLTDIV -4 /* fp division by zero */
+#define GEN_FLTUND -5 /* fp underflow */
+#define GEN_FLTINV -6 /* invalid fp operand */
+#define GEN_FLTINE -7 /* inexact fp operand */
+#define GEN_DECOVF -8 /* decimal overflow (for COBOL??) */
+#define GEN_DECDIV -9 /* decimal division by zero */
+#define GEN_DECINV -10 /* invalid decimal operand */
+#define GEN_ROPRAND -11 /* reserved operand */
+#define GEN_ASSERTERR -12 /* assertion error */
+#define GEN_NULPTRERR -13 /* null pointer error */
+#define GEN_STKOVF -14 /* stack overflow */
+#define GEN_STRLENERR -15 /* string length error */
+#define GEN_SUBSTRERR -16 /* substring error */
+#define GEN_RANGERR -17 /* range error */
+#define GEN_SUBRNG -18
+#define GEN_SUBRNG1 -19
+#define GEN_SUBRNG2 -20
+#define GEN_SUBRNG3 -21 /* these report range errors for */
+#define GEN_SUBRNG4 -22 /* subscripting (indexing) at levels 0..7 */
+#define GEN_SUBRNG5 -23
+#define GEN_SUBRNG6 -24
+#define GEN_SUBRNG7 -25
+
+/* the remaining codes (-26..-1023) are reserved. */
+
+#endif /* _ASMAXP_GENTRAP_H */
#define SA_NOMASK 0x00000008
#define SA_ONESHOT 0x00000010
+#ifdef __KERNEL__
+/*
+ * These values of sa_flags are used only by the kernel as part of the
+ * irq handling routines.
+ *
+ * SA_INTERRUPT is also used by the irq handling routines.
+ */
+#define SA_PROBE SA_ONESHOT
+#define SA_SAMPLE_RANDOM SA_RESTART
+#endif
+
+
#define SIG_BLOCK 1 /* for blocking signals */
#define SIG_UNBLOCK 2 /* for unblocking signals */
#define SIG_SETMASK 3 /* for setting the signal mask */
#define SA_NOMASK 0x40000000
#define SA_ONESHOT 0x80000000
+#ifdef __KERNEL__
+/*
+ * These values of sa_flags are used only by the kernel as part of the
+ * irq handling routines.
+ *
+ * SA_INTERRUPT is also used by the irq handling routines.
+ */
+#define SA_PROBE SA_ONESHOT
+#define SA_SAMPLE_RANDOM SA_RESTART
+#endif
+
+
#define SIG_BLOCK 0 /* for blocking signals */
#define SIG_UNBLOCK 1 /* for unblocking signals */
#define SIG_SETMASK 2 /* for setting the signal mask */
#ifndef ASSEMBLY
#include <asm/i82489.h>
+#include <linux/tasks.h>
+#include <linux/ptrace.h>
/*
* Support definitions for SMP machines following the intel multiprocessing
};
-extern struct cpuinfo_x86 cpu_data[32];
+extern struct cpuinfo_x86 cpu_data[NR_PROCS];
/*
* Private routines/data
extern void smp_scan_config(unsigned long, unsigned long);
extern unsigned long smp_alloc_memory(unsigned long mem_base);
extern unsigned char *apic_reg;
-extern unsigned char *kernel_stacks[32];
+extern unsigned char *kernel_stacks[NR_PROCS];
extern unsigned char boot_cpu_id;
extern unsigned long cpu_present_map;
extern void smp_invalidate(void);
#define PF_AX25 AF_AX25
#define AX25_MTU 256
-#define AX25_MAX_DIGIS 8
+#define AX25_MAX_DIGIS 6
typedef struct
{
ax25_address port_addr;
ax25_address dest_addr;
unsigned char digi_count;
- ax25_address digi_addr[AX25_MAX_DIGIS - 2];
+ ax25_address digi_addr[AX25_MAX_DIGIS];
};
#define AX25_WINDOW 1
#define SIOCAX25ADDUID (SIOCPROTOPRIVATE+1)
#define SIOCAX25DELUID (SIOCPROTOPRIVATE+2)
#define SIOCAX25NOUID (SIOCPROTOPRIVATE+3)
-#define SIOCAX25DIGCTL (SIOCPROTOPRIVATE+4)
#define SIOCAX25GETPARMS (SIOCPROTOPRIVATE+5)
#define SIOCAX25SETPARMS (SIOCPROTOPRIVATE+6)
#define AX25_NOUID_DEFAULT 0
#define AX25_NOUID_BLOCK 1
+#define AX25_DIGI_INBAND 0x01 /* Allow digipeating within port **/
+#define AX25_DIGI_XBAND 0x02 /* Allow digipeating across ports **/
+
#define AX25_VALUES_IPDEFMODE 0 /* 'D'=DG 'V'=VC */
#define AX25_VALUES_AXDEFMODE 1 /* 8=Normal 128=Extended Seq Nos */
#define AX25_VALUES_NETROM 2 /* Allow NET/ROM - 0=No 1=Yes */
#define AX25_VALUES_T2 9 /* Default T2 timeout value */
#define AX25_VALUES_T3 10 /* Default T3 timeout value */
#define AX25_VALUES_N2 11 /* Default N2 value */
+#define AX25_VALUES_DIGI 12 /* Digipeat mode */
#define AX25_MAX_VALUES 20
struct ax25_parms_struct
struct timestamp {
__u8 len;
__u8 ptr;
- union {
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u8 flags:4,
overflow:4;
#else
#error "Please fix <asm/byteorder.h>"
#endif
- __u8 full_char;
- } x;
__u32 data[9];
};
unsigned long route[MAX_ROUTE];
};
+#define IPOPT_OPTVAL 0
+#define IPOPT_OLEN 1
+#define IPOPT_OFFSET 2
+#define IPOPT_MINOFF 4
+#define MAX_IPOPTLEN 40
+#define IPOPT_NOP IPOPT_NOOP
+#define IPOPT_EOL IPOPT_END
+#define IPOPT_TS IPOPT_TIMESTAMP
+
+#define IPOPT_TS_TSONLY 0 /* timestamps only */
+#define IPOPT_TS_TSANDADDR 1 /* timestamps and addresses */
+#define IPOPT_TS_PRESPEC 2 /* specified modules only */
struct options {
- struct route record_route;
- struct route loose_route;
- struct route strict_route;
- struct timestamp tstamp;
- unsigned short security;
- unsigned short compartment;
- unsigned short handling;
- unsigned short stream;
- unsigned tcc;
+ __u32 faddr; /* Saved first hop address */
+ unsigned char optlen;
+ unsigned char srr;
+ unsigned char rr;
+ unsigned char ts;
+ unsigned char is_setbyuser:1, /* Set by setsockopt? */
+ is_data:1, /* Options in __data, rather than skb */
+ is_strictroute:1, /* Strict source route */
+ srr_is_hit:1, /* Packet destination addr was our one */
+ is_changed:1, /* IP checksum no longer valid */
+ rr_needaddr:1, /* Need to record addr of outgoing dev */
+ ts_needtime:1, /* Need to record timestamp */
+ ts_needaddr:1; /* Need to record addr of outgoing dev */
+ unsigned char __pad1;
+ unsigned char __pad2;
+ unsigned char __pad3;
+ unsigned char __data[0];
};
-
struct iphdr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u8 ihl:4,
#define PCI_DEVICE_ID_IMS_8849 0x8849
#define PCI_VENDOR_ID_REALTEK 0x10ec
-#define PCI_DEVICE_ID_REALTEK_8300 0x8029
+#define PCI_DEVICE_ID_REALTEK_8029 0x8029
#define PCI_VENDOR_ID_VIA 0x1106
#define PCI_DEVICE_ID_VIA_82C505 0x0505
--- /dev/null
+/*
+ * include/linux/random.h
+ *
+ * Include file for the random number generator.
+ */
+
+/*
+ * We should always include the random number generator, since it's
+ * relatively small, and it's most useful when application developers
+ * can assume that all Linux systems have it. (Ideally, it would be
+ * best if we could assume that all Unix systems had it, but oh
+ * well....)
+ *
+ * Also, many kernel routines will have a use for good random numbers,
+ * for example, for truly random TCP sequence numbers, which prevent
+ * certain forms of TCP spoofing attacks.
+ */
+#define CONFIG_RANDOM
+
+/* Exported functions */
+
+#ifdef CONFIG_RANDOM
+void rand_initialize(void);
+
+void add_keyboard_randomness(unsigned char scancode);
+void add_interrupt_randomness(int irq);
+
+void get_random_bytes(void *buf, int nbytes);
+int read_random(struct inode * inode, struct file * file,
+ char * buf, int nbytes);
+int read_random_unlimited(struct inode * inode, struct file * file,
+ char * buf, int nbytes);
+#else
+#define add_keyboard_randomness(x)
+#define add_interrupt_randomness(x)
+#endif
extern unsigned long itimer_next;
extern struct timeval xtime;
extern int need_resched;
+extern void do_timer(struct pt_regs *);
extern unsigned long * prof_buffer;
extern unsigned long prof_len;
unsigned long daddr; /* IP target address */
unsigned long raddr; /* IP next hop address */
unsigned long csum; /* Checksum */
+ unsigned char proto_priv[16]; /* Protocol private data */
volatile char acked, /* Are we acked ? */
used, /* Are we in use ? */
free, /* How to free this buffer */
#define IPTOS_RELIABILITY 0x04
#define IP_TTL 2
#define IP_HDRINCL 3
-#ifdef V1_3_WILL_DO_THIS_FUNKY_STUFF
#define IP_OPTIONS 4
-#endif
#define IP_MULTICAST_IF 32
#define IP_MULTICAST_TTL 33
#ifdef __KERNEL__
void do_gettimeofday(struct timeval *tv);
+void do_settimeofday(struct timeval *tv);
#endif
#define FD_SETSIZE __FD_SETSIZE
#define _AX25_H
#include <linux/ax25.h>
+#define PR_SLOWHZ 10 /* Run timing at 1/10 second - gives us better resolution for 56kbit links */
+
+#define AX25_T1CLAMPLO (1 * PR_SLOWHZ) /* If defined, clamp at 1 second **/
+#define AX25_T1CLAMPHI (30 * PR_SLOWHZ) /* If defined, clamp at 30 seconds **/
+
+#define AX25_BROKEN_NETMAC
+
#define AX25_BPQ_HEADER_LEN 16
#define AX25_KISS_HEADER_LEN 1
-#define AX25_MAX_HEADER_LEN 56
#define AX25_HEADER_LEN 17
#define AX25_ADDR_LEN 7
+#define AX25_DIGI_HEADER_LEN (AX25_MAX_DIGIS * AX25_ADDR_LEN)
+#define AX25_MAX_HEADER_LEN (AX25_HEADER_LEN + AX25_DIGI_HEADER_LEN)
#define AX25_P_IP 0xCC
#define AX25_P_ARP 0xCD
#define AX25_STATE_3 3
#define AX25_STATE_4 4
-#define PR_SLOWHZ 10 /* Run timing at 1/10 second - gives us better resolution for 56kbit links */
#define MODULUS 8 /* Standard AX.25 modulus */
#define EMODULUS 128 /* Extended AX.25 modulus */
#define AX25_DEF_T2 3
#define AX25_DEF_T3 300
#define AX25_DEF_N2 10
+#define AX25_DEF_DIGI (AX25_DIGI_INBAND|AX25_DIGI_XBAND)
typedef struct ax25_uid_assoc {
struct ax25_uid_assoc *next;
} ax25_uid_assoc;
typedef struct {
- ax25_address calls[6];
- unsigned char repeated[6];
+ ax25_address calls[AX25_MAX_DIGIS];
+ unsigned char repeated[AX25_MAX_DIGIS];
unsigned char ndigi;
char lastrepeat;
} ax25_digi;
/* ax25_subr.c */
extern void ax25_clear_queues(ax25_cb *);
extern void ax25_frames_acked(ax25_cb *, unsigned short);
+extern void ax25_requeue_frames(ax25_cb *);
extern int ax25_validate_nr(ax25_cb *, unsigned short);
extern int ax25_decode(ax25_cb *, struct sk_buff *, int *, int *, int *);
extern void ax25_send_control(ax25_cb *, int, int, int);
/*extern unsigned short ip_compute_csum(unsigned char * buff, int len);*/
extern int ip_rcv(struct sk_buff *skb, struct device *dev,
struct packet_type *pt);
-extern int ip_forward(struct sk_buff *skb, struct device *dev, int is_frag, unsigned long target_addr, int target_strict);
+extern int ip_forward(struct sk_buff *skb, struct device *dev,
+ int is_frag, __u32 target_addr);
+extern int ip_options_echo(struct options * dopt, struct options * sopt,
+ __u32 daddr, __u32 saddr,
+ struct sk_buff * skb);
+extern int ip_options_compile(struct options * opt, struct sk_buff * skb);
extern void ip_send_check(struct iphdr *ip);
extern int ip_id_count;
extern void ip_queue_xmit(struct sock *sk,
const void *frag,
unsigned short int length,
__u32 daddr,
+ __u32 saddr,
+ struct options * opt,
int flags,
int type);
#define _NETROM_H
#include <linux/netrom.h>
+#define NR_T1CLAMPLO (1 * PR_SLOWHZ) /* If defined, clamp at 1 second **/
+#define NR_T1CLAMPHI (300 * PR_SLOWHZ) /* If defined, clamp at 300 seconds **/
+
#define NR_NETWORK_LEN 15
#define NR_TRANSPORT_LEN 5
#define NR_STATE_2 2
#define NR_STATE_3 3
-#define NR_DEFAULT_T1 (120 * PR_SLOWHZ) /* Outstanding frames - 10 seconds */
-#define NR_DEFAULT_T2 (5 * PR_SLOWHZ) /* Response delay - 3 seconds */
+#define NR_DEFAULT_T1 (120 * PR_SLOWHZ) /* Outstanding frames - 120 seconds */
+#define NR_DEFAULT_T2 (5 * PR_SLOWHZ) /* Response delay - 5 seconds */
#define NR_DEFAULT_N2 3 /* Number of Retries */
#define NR_DEFAULT_T4 (180 * PR_SLOWHZ) /* Transport Busy Delay */
#define NR_DEFAULT_WINDOW 4 /* Default Window Size */
extern struct nr_parms_struct nr_default;
extern int nr_rx_frame(struct sk_buff *, struct device *);
extern void nr_destroy_socket(struct sock *);
-/*extern int nr_get_info(char *, char **, off_t, int, int);*/
/* nr_dev.c */
extern int nr_rx_ip(struct sk_buff *, struct device *);
extern void nr_send_nak_frame(struct sock *);
extern void nr_kick(struct sock *);
extern void nr_transmit_buffer(struct sock *, struct sk_buff *);
-extern void nr_nr_error_recovery(struct sock *);
extern void nr_establish_data_link(struct sock *);
extern void nr_enquiry_response(struct sock *);
extern void nr_check_iframes_acked(struct sock *, unsigned short);
extern void aic7xxx_setup(char *str, int *ints);
extern void buslogic_setup(char *str, int *ints);
extern void fdomain_setup(char *str, int *ints);
+extern void NCR53c406a_setup(char *str, int *ints);
extern void scsi_luns_setup(char *str, int *ints);
extern void sound_setup(char *str, int *ints);
#ifdef CONFIG_CDU31A
#ifdef CONFIG_SCSI_BUSLOGIC
{ "buslogic=", buslogic_setup},
#endif
+#ifdef CONFIG_SCSI_NCR53C406A
+ { "ncr53c406a=", NCR53c406a_setup},
+#endif
#ifdef CONFIG_SCSI_FUTURE_DOMAIN
{ "fdomain=", fdomain_setup},
#endif
trap_init();
init_IRQ();
sched_init();
+ time_init();
parse_options(command_line);
init_modules();
#ifdef CONFIG_PROFILE
memory_start = name_cache_init(memory_start,memory_end);
mem_init(memory_start,memory_end);
buffer_init();
- time_init();
sock_init();
#ifdef CONFIG_SYSVIPC
ipc_init();
goto bad_fork;
new_stack = get_free_page(GFP_KERNEL);
if (!new_stack)
- goto bad_fork_free;
+ goto bad_fork_free_p;
error = -EAGAIN;
nr = find_empty_process();
if (nr < 0)
- goto bad_fork_free;
+ goto bad_fork_free_stack;
*p = *current;
task[nr] = NULL;
REMOVE_LINKS(p);
nr_tasks--;
-bad_fork_free:
+bad_fork_free_stack:
free_page(new_stack);
+bad_fork_free_p:
kfree(p);
bad_fork:
return error;
X(free_irq),
X(enable_irq),
X(disable_irq),
+ X(probe_irq_on),
+ X(probe_irq_off),
X(bh_active),
X(bh_mask),
X(bh_base),
#include <asm/segment.h>
#include <asm/pgtable.h>
-#define TIMER_IRQ 0
-
#include <linux/timex.h>
/*
extern void mem_use(void);
-extern int timer_interrupt(void);
-
static unsigned long init_kernel_stack[1024] = { STACK_MAGIC, };
unsigned long init_user_stack[1024] = { STACK_MAGIC, };
static struct vm_area_struct init_mmap = INIT_MMAP;
run_task_queue(&tq_immediate);
}
-/*
- * The int argument is really a (struct pt_regs *), in case the
- * interrupt wants to know from where it was called. The timer
- * irq uses this to decide if it should update the user or system
- * times.
- */
-static void do_timer(int irq, struct pt_regs * regs)
+void do_timer(struct pt_regs * regs)
{
unsigned long mask;
struct timer_struct *tp;
- /* last time the cmos clock got updated */
- static long last_rtc_update=0;
- extern int set_rtc_mmss(unsigned long);
-
long ltemp, psecs;
/* Advance the phase, once it gets to one microsecond, then
second_overflow();
}
- /* If we have an externally synchronized Linux clock, then update
- * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
- * called as close as possible to 500 ms before the new second starts.
- */
- if (time_state != TIME_BAD && xtime.tv_sec > last_rtc_update + 660 &&
- xtime.tv_usec > 500000 - (tick >> 1) &&
- xtime.tv_usec < 500000 + (tick >> 1))
- if (set_rtc_mmss(xtime.tv_sec) == 0)
- last_rtc_update = xtime.tv_sec;
- else
- last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
-
jiffies++;
calc_load();
if (user_mode(regs)) {
bh_base[TIMER_BH].routine = timer_bh;
bh_base[TQUEUE_BH].routine = tqueue_bh;
bh_base[IMMEDIATE_BH].routine = immediate_bh;
- if (request_irq(TIMER_IRQ, do_timer, 0, "timer") != 0)
- panic("Could not allocate timer IRQ!");
enable_bh(TIMER_BH);
enable_bh(TQUEUE_BH);
enable_bh(IMMEDIATE_BH);
* Created file with time related functions from sched.c and adjtimex()
* 1993-10-08 Torsten Duwe
* adjtime interface update and CMOS clock write code
- * 1994-07-02 Alan Modra
- * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
- * 1995-03-26 Markus Kuhn
- * fixed 500 ms bug at call to set_rtc_mmss, fixed DS12887
- * precision CMOS clock update
* 1995-08-13 Torsten Duwe
* kernel PLL updated to 1994-12-13 specs (rfc-1489)
*/
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
-
-#include <asm/segment.h>
-#include <asm/io.h>
-
-#include <linux/mc146818rtc.h>
#include <linux/timex.h>
-/* Converts Gregorian date to seconds since 1970-01-01 00:00:00.
- * Assumes input in normal date format, i.e. 1980-12-31 23:59:59
- * => year=1980, mon=12, day=31, hour=23, min=59, sec=59.
- *
- * [For the Julian calendar (which was used in Russia before 1917,
- * Britain & colonies before 1752, anywhere else before 1582,
- * and is still in use by some communities) leave out the
- * -year/100+year/400 terms, and add 10.]
- *
- * This algorithm was first published by Gauss (I think).
- *
- * WARNING: this function will overflow on 2106-02-07 06:28:16 on
- * machines were long is 32-bit! (However, as time_t is signed, we
- * will already get problems at other places on 2038-01-19 03:14:08)
- */
-static inline unsigned long mktime(unsigned int year, unsigned int mon,
- unsigned int day, unsigned int hour,
- unsigned int min, unsigned int sec)
-{
- if (0 >= (int) (mon -= 2)) { /* 1..12 -> 11,12,1..10 */
- mon += 12; /* Puts Feb last since it has leap day */
- year -= 1;
- }
- return (((
- (unsigned long)(year/4 - year/100 + year/400 + 367*mon/12 + day) +
- year*365 - 719499
- )*24 + hour /* now have hours */
- )*60 + min /* now have minutes */
- )*60 + sec; /* finally seconds */
-}
-
-void time_init(void)
-{
- unsigned int year, mon, day, hour, min, sec;
- int i;
-
- /* The Linux interpretation of the CMOS clock register contents:
- * When the Update-In-Progress (UIP) flag goes from 1 to 0, the
- * RTC registers show the second which has precisely just started.
- * Let's hope other operating systems interpret the RTC the same way.
- */
- /* read RTC exactly on falling edge of update flag */
- for (i = 0 ; i < 1000000 ; i++) /* may take up to 1 second... */
- if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)
- break;
- for (i = 0 ; i < 1000000 ; i++) /* must try at least 2.228 ms */
- if (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP))
- break;
- do { /* Isn't this overkill ? UIP above should guarantee consistency */
- sec = CMOS_READ(RTC_SECONDS);
- min = CMOS_READ(RTC_MINUTES);
- hour = CMOS_READ(RTC_HOURS);
- day = CMOS_READ(RTC_DAY_OF_MONTH);
- mon = CMOS_READ(RTC_MONTH);
- year = CMOS_READ(RTC_YEAR);
- } while (sec != CMOS_READ(RTC_SECONDS));
- if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
- {
- BCD_TO_BIN(sec);
- BCD_TO_BIN(min);
- BCD_TO_BIN(hour);
- BCD_TO_BIN(day);
- BCD_TO_BIN(mon);
- BCD_TO_BIN(year);
- }
-#ifdef ALPHA_PRE_V1_2_SRM_CONSOLE
- /*
- * The meaning of life, the universe, and everything. Plus
- * this makes the year come out right on SRM consoles earlier
- * than v1.2.
- */
- year -= 42;
-#endif
- if ((year += 1900) < 1970)
- year += 100;
- xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
- xtime.tv_usec = 0;
-}
-
/*
* The timezone where the local system is located. Used as a default by some
* programs who obtain this value by using gettimeofday.
asmlinkage int sys_time(int * tloc)
{
- int i, error;
+ int i;
i = CURRENT_TIME;
if (tloc) {
- error = verify_area(VERIFY_WRITE, tloc, sizeof(*tloc));
+ int error = verify_area(VERIFY_WRITE, tloc, sizeof(*tloc));
if (error)
return error;
put_user(i,tloc);
return 0;
}
-/* This function must be called with interrupts disabled
- * It was inspired by Steve McCanne's microtime-i386 for BSD. -- jrs
- *
- * However, the pc-audio speaker driver changes the divisor so that
- * it gets interrupted rather more often - it loads 64 into the
- * counter rather than 11932! This has an adverse impact on
- * do_gettimeoffset() -- it stops working! What is also not
- * good is that the interval that our timer function gets called
- * is no longer 10.0002 ms, but 9.9767 ms. To get around this
- * would require using a different timing source. Maybe someone
- * could use the RTC - I know that this can interrupt at frequencies
- * ranging from 8192Hz to 2Hz. If I had the energy, I'd somehow fix
- * it so that at startup, the timer code in sched.c would select
- * using either the RTC or the 8253 timer. The decision would be
- * based on whether there was any other device around that needed
- * to trample on the 8253. I'd set up the RTC to interrupt at 1024 Hz,
- * and then do some jiggery to have a version of do_timer that
- * advanced the clock by 1/1024 s. Every time that reached over 1/100
- * of a second, then do all the old code. If the time was kept correct
- * then do_gettimeoffset could just return 0 - there is no low order
- * divider that can be accessed.
- *
- * Ideally, you would be able to use the RTC for the speaker driver,
- * but it appears that the speaker driver really needs interrupt more
- * often than every 120 us or so.
- *
- * Anyway, this needs more thought.... pjsg (1993-08-28)
- *
- * If you are really that interested, you should be reading
- * comp.protocols.time.ntp!
- */
-
-#define TICK_SIZE tick
-
-static inline unsigned long do_gettimeoffset(void)
-{
- int count;
- unsigned long offset = 0;
-
- /* timer count may underflow right here */
- outb_p(0x00, 0x43); /* latch the count ASAP */
- count = inb_p(0x40); /* read the latched count */
- count |= inb(0x40) << 8;
- /* we know probability of underflow is always MUCH less than 1% */
- if (count > (LATCH - LATCH/100)) {
- /* check for pending timer interrupt */
- outb_p(0x0a, 0x20);
- if (inb(0x20) & 1)
- offset = TICK_SIZE;
- }
- count = ((LATCH-1) - count) * TICK_SIZE;
- count = (count + LATCH/2) / LATCH;
- return offset + count;
-}
-
-/*
- * This version of gettimeofday has near microsecond resolution.
- */
-void do_gettimeofday(struct timeval *tv)
-{
- unsigned long flags;
-
- save_flags(flags);
- cli();
- *tv = xtime;
-#if defined (__i386__) || defined (__mips__)
- tv->tv_usec += do_gettimeoffset();
- if (tv->tv_usec >= 1000000) {
- tv->tv_usec -= 1000000;
- tv->tv_sec++;
- }
-#endif /* !defined (__i386__) && !defined (__mips__) */
- restore_flags(flags);
-}
-
asmlinkage int sys_gettimeofday(struct timeval *tv, struct timezone *tz)
{
int error;
warp_clock();
}
}
- if (tv) {
- cli();
- /* This is revolting. We need to set the xtime.tv_usec
- * correctly. However, the value in this location is
- * is value at the last tick.
- * Discover what correction gettimeofday
- * would have done, and then undo it!
- */
- new_tv.tv_usec -= do_gettimeoffset();
-
- if (new_tv.tv_usec < 0) {
- new_tv.tv_usec += 1000000;
- new_tv.tv_sec--;
- }
-
- xtime = new_tv;
- time_state = TIME_BAD;
- time_maxerror = 0x70000000;
- time_esterror = 0x70000000;
- sti();
- }
+ if (tv)
+ do_settimeofday(&new_tv);
return 0;
}
memcpy_tofs(txc_p, &txc, sizeof(struct timex));
return time_state;
}
-
-/*
- * In order to set the CMOS clock precisely, set_rtc_mmss has to be
- * called 500 ms after the second nowtime has started, because when
- * nowtime is written into the registers of the CMOS clock, it will
- * jump to the next second precisely 500 ms later. Check the Motorola
- * MC146818A or Dallas DS12887 data sheet for details.
- */
-int set_rtc_mmss(unsigned long nowtime)
-{
- int retval = 0;
- int real_seconds, real_minutes, cmos_minutes;
- unsigned char save_control, save_freq_select;
-
- save_control = CMOS_READ(RTC_CONTROL); /* tell the clock it's being set */
- CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
-
- save_freq_select = CMOS_READ(RTC_FREQ_SELECT); /* stop and reset prescaler */
- CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
-
- cmos_minutes = CMOS_READ(RTC_MINUTES);
- if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
- BCD_TO_BIN(cmos_minutes);
-
- /* since we're only adjusting minutes and seconds,
- * don't interfere with hour overflow. This avoids
- * messing with unknown time zones but requires your
- * RTC not to be off by more than 15 minutes
- */
- real_seconds = nowtime % 60;
- real_minutes = nowtime / 60;
- if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1)
- real_minutes += 30; /* correct for half hour time zone */
- real_minutes %= 60;
-
- if (abs(real_minutes - cmos_minutes) < 30)
- {
- if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
- {
- BIN_TO_BCD(real_seconds);
- BIN_TO_BCD(real_minutes);
- }
- CMOS_WRITE(real_seconds,RTC_SECONDS);
- CMOS_WRITE(real_minutes,RTC_MINUTES);
- }
- else
- retval = -1;
-
- /* The following flags have to be released exactly in this order,
- * otherwise the DS12887 (popular MC146818A clone with integrated
- * battery and quartz) will not reset the oscillator and will not
- * update precisely 500 ms later. You won't find this mentioned in
- * the Dallas Semiconductor data sheets, but who believes data
- * sheets anyway ... -- Markus Kuhn
- */
- CMOS_WRITE(save_control, RTC_CONTROL);
- CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
-
- return retval;
-}
o Appletalk TIOCINQ/TIOCOUTQ bug fix [IN]
o Rewrote the IFF_UP/IFF_DOWN handling code [IN]
+-------->>>>> 1.3.29 <<<<<-------
+
+o Major AX.25/NetROM fixes [John Naylor] [IN]
+o Error in ip_mr ioctls fixed [Michael Chastain] [IN]
+o TCP cache zap bugs hopefully fixed [IN]
+o Length checks in udp/raw sending [Craig Metz] [IN]
+
---------- Things I thought Linus had for a while and not merged ----------------
-o Paul Gortmakers 8390 Copy and checksum [PLEASE ADD 8)]
+o Paul Gortmaker's 8390 Copy and checksum [Pending]
---------- Things pending from other people to chase -------------
-o Tom May's insw_and_checksum()
---------- Things pending for me to merge --------------
o /dev/skip /dev/ipah etc - Kernel/Usermode communications module (me)
o AF_UNIX garbage collect code
o Faster closedown option for heavy use sites (me)
+o Tom May's insw_and_checksum()
+
-o NEW NET TOOLS..... - wanted one net tools maintainer....
--------------- Things That Need Doing Before 1.4 ------------------
* Steven(GW7RRM) Added digi-peating control ioctl.
* Added extended AX.25 support.
* Added AX.25 frame segmentation.
+ * Darryl(G7LED) Changed connect(), recvfrom(), sendto() sockaddr/addrlen to
+ * fall inline with bind() and new policy.
+ * Moved digipeating ctl to new ax25_dev structs.
+ * Fixed ax25_release(), set TCP_CLOSE, wakeup app
+ * context, THEN make the sock dead.
*
* To do:
* Restructure the ax25_rcv code to be cleaner/faster and
* copy only when needed.
 * Consider better arbitrary protocol support.
- * Fix non-blocking connect failure.
- * Settable protocol id for SEQPACKET sockets
*/
#include <linux/config.h>
#include <net/ip.h>
#include <net/arp.h>
-static int ax25_digi_on = 1;
-
-#define CONFIG_AX25_XDIGI /* Cross port (band) digi stuff */
/**********************************************************************************************************************\
* *
*s++ = n + '0';
*s++ = '\0';
- return(buf);
+ if (*buf == '\0' || *buf == '-')
+ return "*";
+
+ return buf;
}
struct sock *sk;
ax25_cb *ax25;
- if ((sk = (struct sock *)kmalloc(sizeof(*sk), GFP_ATOMIC)) == NULL)
- return -ENOMEM;
-
- if ((ax25 = ax25_create_cb()) == NULL) {
- kfree_s(sk, sizeof(*sk));
- return -ENOMEM;
- }
-
- sk->type = sock->type;
-
switch (sock->type) {
case SOCK_DGRAM:
- case SOCK_SEQPACKET:
- if (protocol == 0)
+ if (protocol == 0 || protocol == AF_AX25)
protocol = AX25_P_TEXT;
break;
+ case SOCK_SEQPACKET:
+ switch (protocol) {
+ case 0:
+ case AF_AX25: /* For CLX */
+ protocol = AX25_P_TEXT;
+ break;
+ case AX25_P_SEGMENT:
+#ifdef CONFIG_INET
+ case AX25_P_ARP:
+ case AX25_P_IP:
+#endif
+#ifdef CONFIG_NETROM
+ case AX25_P_NETROM:
+#endif
+ return -ESOCKTNOSUPPORT;
+ default:
+ break;
+ }
+ break;
case SOCK_RAW:
break;
default:
- kfree_s((void *)sk, sizeof(*sk));
- kfree_s((void *)ax25, sizeof(*ax25));
return -ESOCKTNOSUPPORT;
}
+ if ((sk = (struct sock *)kmalloc(sizeof(*sk), GFP_ATOMIC)) == NULL)
+ return -ENOMEM;
+
+ if ((ax25 = ax25_create_cb()) == NULL) {
+ kfree_s(sk, sizeof(*sk));
+ return -ENOMEM;
+ }
+
skb_queue_head_init(&sk->receive_queue);
skb_queue_head_init(&sk->write_queue);
skb_queue_head_init(&sk->back_log);
sk->socket = sock;
+ sk->type = sock->type;
sk->protocol = protocol;
sk->dead = 0;
sk->next = NULL;
sk->rmem_alloc = 0;
sk->inuse = 0;
sk->debug = 0;
+ sk->destroy = 0;
sk->prot = NULL; /* So we use default free mechanisms */
sk->err = 0;
sk->localroute = 0;
sk->rmem_alloc = 0;
sk->inuse = 0;
sk->ack_backlog = 0;
+ sk->destroy = 0;
sk->prot = NULL; /* So we use default free mechanisms */
sk->err = 0;
sk->localroute = 0;
if (sk->type == SOCK_SEQPACKET) {
switch (sk->ax25->state) {
case AX25_STATE_0:
- sk->dead = 1;
+ sk->state = TCP_CLOSE;
sk->state_change(sk);
+ sk->dead = 1;
ax25_destroy_socket(sk->ax25);
break;
case AX25_STATE_1:
ax25_send_control(sk->ax25, DISC, POLLON, C_COMMAND);
sk->ax25->state = AX25_STATE_0;
- sk->dead = 1;
+ sk->state = TCP_CLOSE;
sk->state_change(sk);
+ sk->dead = 1;
ax25_destroy_socket(sk->ax25);
break;
case AX25_STATE_2:
ax25_send_control(sk->ax25, DM, POLLON, C_RESPONSE);
sk->ax25->state = AX25_STATE_0;
- sk->dead = 1;
+ sk->state = TCP_CLOSE;
sk->state_change(sk);
+ sk->dead = 1;
ax25_destroy_socket(sk->ax25);
break;
sk->ax25->t3timer = 0;
sk->ax25->t1timer = sk->ax25->t1 = ax25_calculate_t1(sk->ax25);
sk->ax25->state = AX25_STATE_2;
+ sk->state = TCP_CLOSE;
sk->state_change(sk);
- sk->dead = 1;
+ sk->dead = 1;
+ sk->destroy = 1;
break;
default:
break;
}
} else {
- sk->dead = 1;
+ sk->state = TCP_CLOSE;
sk->state_change(sk);
+ sk->dead = 1;
ax25_destroy_socket(sk->ax25);
}
sock->data = NULL;
+ sk->socket = NULL; /* Not used, but we should do this. **/
return 0;
}
sk->state = TCP_CLOSE;
sock->state = SS_UNCONNECTED;
- if (addr_len > sizeof(*addr)) {
+ if (addr_len != sizeof(struct sockaddr_ax25) && addr_len != sizeof(struct full_sockaddr_ax25))
+ return -EINVAL;
+ if (addr_len == sizeof(struct full_sockaddr_ax25) && addr->sax25_ndigis != 0) {
int ct = 0;
- int ndigi = addr_len - sizeof(*addr);
- ax25_address *ap = (ax25_address *)(((char *)addr) + sizeof(*addr));
-
- /* Size is an exact number of digipeaters ? */
- if (ndigi % sizeof(ax25_address))
- return -EINVAL;
-
- ndigi /= sizeof(ax25_address);
+ struct full_sockaddr_ax25 *fsa = (struct full_sockaddr_ax25 *)addr;
/* Valid number of digipeaters ? */
- if (ndigi < 1 || ndigi > 6)
+ if (addr->sax25_ndigis < 1 || addr->sax25_ndigis > AX25_MAX_DIGIS)
return -EINVAL;
if (sk->ax25->digipeat == NULL) {
return -ENOMEM;
}
- sk->ax25->digipeat->ndigi = ndigi;
+ sk->ax25->digipeat->ndigi = addr->sax25_ndigis;
- while (ct < ndigi) {
+ while (ct < addr->sax25_ndigis) {
sk->ax25->digipeat->repeated[ct] = 0;
- memcpy(&sk->ax25->digipeat->calls[ct], &ap[ct], sizeof(ax25_address));
+ memcpy(&sk->ax25->digipeat->calls[ct], &fsa->fsa_digipeater[ct], sizeof(ax25_address));
ct++;
}
sk->ax25->digipeat->lastrepeat = 0;
- addr_len -= ndigi * sizeof(ax25_address);
}
- if (addr_len != sizeof(struct sockaddr_ax25))
- return -EINVAL;
-
if (sk->zapped) { /* Must bind first - autobinding in this may or may not work */
if ((err = ax25_rt_autobind(sk->ax25, &addr->sax25_call)) < 0)
return err;
if (sk->type == SOCK_SEQPACKET && ax25_find_cb(&sk->ax25->source_addr, &addr->sax25_call, sk->ax25->device) != NULL)
return -EBUSY; /* Already such a connection */
-
+
memcpy(&sk->ax25->dest_addr, &addr->sax25_call, sizeof(ax25_address));
/* First the easy one */
*/
if (dp.lastrepeat + 1 < dp.ndigi) { /* Not yet digipeated completely */
if (ax25cmp(&dp.calls[dp.lastrepeat + 1], dev_addr) == 0) {
+ struct device *dev_out = dev;
+
/* We are the digipeater. Mark ourselves as repeated
and throw the packet back out of the same device */
dp.lastrepeat++;
dp.repeated[(int)dp.lastrepeat] = 1;
-#ifdef CONFIG_AX25_XDIGI
- while (dp.lastrepeat + 1 < dp.ndigi) {
- struct device *dev_scan;
- if ((dev_scan = ax25rtr_get_dev(&dp.calls[dp.lastrepeat + 1])) == NULL)
- break;
- dp.lastrepeat++;
- dp.repeated[(int)dp.lastrepeat] = 1;
- dev = dev_scan;
+
+ if (ax25_dev_get_value(dev, AX25_VALUES_DIGI) & AX25_DIGI_XBAND) {
+ while (dp.lastrepeat + 1 < dp.ndigi) {
+ struct device *dev_scan;
+ if ((dev_scan = ax25rtr_get_dev(&dp.calls[dp.lastrepeat + 1])) == NULL)
+ break;
+ dp.lastrepeat++;
+ dp.repeated[(int)dp.lastrepeat] = 1;
+ dev_out = dev_scan;
+ }
+ if (dev != dev_out && (ax25_dev_get_value(dev_out, AX25_VALUES_DIGI) & AX25_DIGI_XBAND) == 0)
+ kfree_skb(skb, FREE_READ);
}
-#endif
+
+ if (dev == dev_out && (ax25_dev_get_value(dev, AX25_VALUES_DIGI) & AX25_DIGI_INBAND) == 0)
+ kfree_skb(skb, FREE_READ);
+
build_ax25_addr(skb->data, &src, &dest, &dp, type, MODULUS);
skb->arp = 1;
- if (ax25_digi_on) {
- ax25_queue_xmit(skb, dev, SOPRI_NORMAL);
- } else {
- kfree_skb(skb, FREE_READ);
- }
+ ax25_queue_xmit(skb, dev_out, SOPRI_NORMAL);
} else {
kfree_skb(skb, FREE_READ);
}
{
struct sock *sk = (struct sock *)sock->data;
struct sockaddr_ax25 *usax = (struct sockaddr_ax25 *)msg->msg_name;
- unsigned char *uaddr = (unsigned char *)msg->msg_name;
int err;
struct sockaddr_ax25 sax;
struct sk_buff *skb;
return -ENETUNREACH;
if (usax) {
- int ndigi = addr_len - sizeof(sax);
- if (addr_len < sizeof(sax))
+ if (addr_len != sizeof(struct sockaddr_ax25) && addr_len != sizeof(struct full_sockaddr_ax25))
return -EINVAL;
-
- /* Trailing digipeaters on address ?? */
- if (addr_len > sizeof(sax)) {
- int ct = 0;
-
- ax25_address *ap = (ax25_address *)(((char *)uaddr) + sizeof(sax));
- /* Size is an exact number of digipeaters ? */
- if (ndigi % sizeof(ax25_address))
- return -EINVAL;
- ndigi /= sizeof(ax25_address);
+ if (usax->sax25_family != AF_AX25)
+ return -EINVAL;
+ if (addr_len == sizeof(struct full_sockaddr_ax25) && usax->sax25_ndigis != 0) {
+ int ct = 0;
+ struct full_sockaddr_ax25 *fsa = (struct full_sockaddr_ax25 *)usax;
/* Valid number of digipeaters ? */
- if (ndigi < 1 || ndigi > 6)
+ if (usax->sax25_ndigis < 1 || usax->sax25_ndigis > AX25_MAX_DIGIS)
return -EINVAL;
- /* Copy data into digipeat structure */
- while (ct < ndigi) {
+ dtmp.ndigi = usax->sax25_ndigis;
+
+ while (ct < usax->sax25_ndigis) {
dtmp.repeated[ct] = 0;
- memcpy(&dtmp.calls[ct], &ap[ct], sizeof(ax25_address));
+ memcpy(&dtmp.calls[ct], &fsa->fsa_digipeater[ct], sizeof(ax25_address));
ct++;
}
dtmp.lastrepeat = 0;
- dtmp.ndigi = ndigi;
- addr_len -= ndigi * sizeof(ax25_address);
}
memcpy(&sax, usax, sizeof(sax));
if (sk->type == SOCK_SEQPACKET && memcmp(&sk->ax25->dest_addr, &sax.sax25_call, sizeof(ax25_address)) != 0)
return -EISCONN;
- if (sax.sax25_family != AF_AX25)
- return -EINVAL;
- if (ndigi != 0)
- dp = &dtmp;
- else
+ if (usax->sax25_ndigis == 0)
dp = NULL;
+ else
+ dp = &dtmp;
} else {
if (sk->state != TCP_ESTABLISHED)
return -ENOTCONN;
{
struct iovec iov;
struct msghdr msg;
- iov.iov_base=(void *)ubuf;
- iov.iov_len=size;
- msg.msg_name=(void *)sa;
- msg.msg_namelen=addr_len;
- msg.msg_accrights=NULL;
- msg.msg_iov=&iov;
- msg.msg_iovlen=1;
- return ax25_sendmsg(sock,&msg,size,noblock,flags);
-}
+ iov.iov_base = (void *)ubuf;
+ iov.iov_len = size;
+
+ msg.msg_name = (void *)sa;
+ msg.msg_namelen = addr_len;
+ msg.msg_accrights = NULL;
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+
+ return ax25_sendmsg(sock, &msg, size, noblock, flags);
+}
static int ax25_send(struct socket *sock, const void *ubuf, int size, int noblock, unsigned flags)
{
{
struct sock *sk = (struct sock *)sock->data;
struct sockaddr_ax25 *sax = (struct sockaddr_ax25 *)msg->msg_name;
- char *addrptr = (char *)msg->msg_name;
int copied, length;
struct sk_buff *skb;
int er;
skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (sax) {
- struct sockaddr_ax25 addr;
ax25_digi digi;
ax25_address dest;
- unsigned char *dp = skb->data;
- int ct = 0;
-
- ax25_parse_addr(dp, skb->len, NULL, &dest, &digi, NULL);
- addr.sax25_family = AF_AX25;
- memcpy(&addr.sax25_call, &dest, sizeof(ax25_address));
- memcpy(sax,&addr, sizeof(*sax));
- addrptr += sizeof(*sax);
-
- while (ct < digi.ndigi) {
- memcpy(addrptr, &digi. calls[ct], AX25_ADDR_LEN);
- addrptr += AX25_ADDR_LEN;
- ct++;
+
+ if (addr_len == (int *)0)
+ return -EINVAL;
+ if (*addr_len != sizeof(struct sockaddr_ax25) && *addr_len != sizeof(struct full_sockaddr_ax25))
+ return -EINVAL;
+
+ ax25_parse_addr(skb->data, skb->len, NULL, &dest, &digi, NULL);
+
+ sax->sax25_family = AF_AX25;
+ /* We set this correctly, even though we may not let the
+ application know the digi calls further down (because it
+ did NOT ask to know them). This could get political... **/
+ sax->sax25_ndigis = digi.ndigi;
+ memcpy(&sax->sax25_call, &dest, sizeof(ax25_address));
+
+ *addr_len = sizeof(struct sockaddr_ax25);
+
+ if (*addr_len == sizeof(struct full_sockaddr_ax25) && sax->sax25_ndigis != 0) {
+ int ct = 0;
+ struct full_sockaddr_ax25 *fsa = (struct full_sockaddr_ax25 *)sax;
+
+ while (ct < digi.ndigi) {
+ memcpy(&fsa->fsa_digipeater[ct], &digi.calls[ct], sizeof(ax25_address));
+ ct++;
+ }
+
+ *addr_len = sizeof(struct full_sockaddr_ax25);
}
- if (addr_len)
- *addr_len = sizeof(*sax) + AX25_ADDR_LEN * digi.ndigi;
}
skb_free_datagram(skb);
{
struct iovec iov;
struct msghdr msg;
- iov.iov_base=ubuf;
- iov.iov_len=size;
- msg.msg_name=(void *)sa;
- msg.msg_namelen=0;
+
+ iov.iov_base = ubuf;
+ iov.iov_len = size;
+
+ msg.msg_name = (void *)sa;
+ msg.msg_namelen = 0;
if (addr_len)
msg.msg_namelen = *addr_len;
- msg.msg_accrights=NULL;
- msg.msg_iov=&iov;
- msg.msg_iovlen=1;
- return ax25_recvmsg(sock,&msg,size,noblock,flags,addr_len);
+ msg.msg_accrights = NULL;
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+
+ return ax25_recvmsg(sock, &msg, size, noblock, flags, addr_len);
}
static int ax25_recv(struct socket *sock, void *ubuf, int size , int noblock,
ax25_uid_policy = amount;
return 0;
- case SIOCAX25DIGCTL:
- if ((err = verify_area(VERIFY_READ, (void *)arg, sizeof(int))) != 0)
- return err;
- if (!suser())
- return -EPERM;
- amount = get_fs_long((void *)arg);
- ax25_digi_on = amount ? 1 : 0;
- return 0;
-
case SIOCAX25GETPARMS:
case SIOCAX25SETPARMS:
return ax25_dev_ioctl(cmd, (void *)arg);
static int ax25_rx_iframe(ax25_cb *ax25, struct sk_buff *skb)
{
int queued = 0;
+ unsigned char pid = *skb->data;
- switch (*skb->data) {
+ switch (pid) {
#ifdef CONFIG_NETROM
case AX25_P_NETROM:
if (ax25_dev_get_value(ax25->device, AX25_VALUES_NETROM)) {
queued = 1;
break;
#endif
- case AX25_P_TEXT:
- if (ax25->sk != NULL && ax25_dev_get_value(ax25->device, AX25_VALUES_TEXT)) {
- if (sock_queue_rcv_skb(ax25->sk, skb) == 0) {
- queued = 1;
- } else {
- ax25->condition |= OWN_RX_BUSY_CONDITION;
- }
- }
- break;
-
case AX25_P_SEGMENT:
skb_pull(skb, 1); /* Remove PID */
queued = ax25_rx_fragment(ax25, skb);
break;
default:
+ if (ax25->sk != NULL && ax25_dev_get_value(ax25->device, AX25_VALUES_TEXT) && ax25->sk->protocol == pid) {
+ if (sock_queue_rcv_skb(ax25->sk, skb) == 0) {
+ queued = 1;
+ } else {
+ ax25->condition |= OWN_RX_BUSY_CONDITION;
+ }
+ }
break;
}
}
break;
- case UA:
- ax25_establish_data_link(ax25);
- ax25->state = AX25_STATE_1;
- break;
-
case DM:
ax25_clear_queues(ax25);
ax25->t3timer = 0;
ax25_calculate_rtt(ax25);
ax25->t1timer = 0;
ax25->t3timer = ax25->t3;
+ ax25_requeue_frames(ax25);
} else {
ax25_nr_error_recovery(ax25);
ax25->state = AX25_STATE_1;
break;
case I:
+#ifndef AX25_BROKEN_NETMAC
if (type != C_COMMAND)
break;
+#endif
if (!ax25_validate_nr(ax25, nr)) {
ax25_nr_error_recovery(ax25);
ax25->state = AX25_STATE_1;
}
break;
- case UA:
- ax25_establish_data_link(ax25);
- ax25->state = AX25_STATE_1;
- break;
-
case DM:
ax25_clear_queues(ax25);
ax25->t3timer = 0;
ax25->t3timer = ax25->t3;
ax25->n2count = 0;
ax25->state = AX25_STATE_3;
+ } else {
+ ax25_requeue_frames(ax25);
}
} else {
ax25_nr_error_recovery(ax25);
ax25->t3timer = ax25->t3;
ax25->n2count = 0;
ax25->state = AX25_STATE_3;
+ } else {
+ ax25_requeue_frames(ax25);
}
} else {
ax25_nr_error_recovery(ax25);
ax25_enquiry_response(ax25);
if (ax25_validate_nr(ax25, nr)) {
ax25_frames_acked(ax25, nr);
+ if(ax25->vs != ax25->va) {
+ ax25_requeue_frames(ax25);
+ }
} else {
ax25_nr_error_recovery(ax25);
ax25->state = AX25_STATE_1;
break;
case I:
+#ifndef AX25_BROKEN_NETMAC
if (type != C_COMMAND)
break;
+#endif
if (!ax25_validate_nr(ax25, nr)) {
ax25_nr_error_recovery(ax25);
ax25->state = AX25_STATE_1;
int ax25_process_rx_frame(ax25_cb *ax25, struct sk_buff *skb, int type)
{
int queued = 0, frametype, ns, nr, pf;
+
+ if (ax25->sk != NULL && ax25->state == AX25_STATE_0 && ax25->sk->dead)
+ return queued;
if (ax25->state != AX25_STATE_1 && ax25->state != AX25_STATE_2 &&
ax25->state != AX25_STATE_3 && ax25->state != AX25_STATE_4) {
mtu = ax25->device->mtu;
- if (skb->len > mtu) {
+ if ((skb->len - 1) > mtu) {
mtu -= 2; /* Allow for fragment control info */
fragno = skb->len / mtu;
* the window is full. Send a poll on the final I frame if
* the window is filled.
*/
- do {
- /*
- * Dequeue the frame and copy it.
- */
- skb = skb_dequeue(&ax25->write_queue);
+ /*
+ * Dequeue the frame and copy it.
+ */
+ skb = skb_dequeue(&ax25->write_queue);
+
+ do {
if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
skb_queue_head(&ax25->write_queue, skb);
return;
#ifdef notdef
} while (!last);
#else
- } while (!last && skb_peek(&ax25->write_queue) != NULL);
+ } while (!last && (skb = skb_dequeue(&ax25->write_queue)) != NULL);
#endif
ax25->condition &= ~ACK_PENDING_CONDITION;
memcpy_fromfs(&route, arg, sizeof(route));
if ((dev = ax25rtr_get_dev(&route.port_addr)) == NULL)
return -EINVAL;
- if (route.digi_count > 6)
+ if (route.digi_count > AX25_MAX_DIGIS)
return -EINVAL;
for (ax25_rt = ax25_route; ax25_rt != NULL; ax25_rt = ax25_rt->next) {
if (ax25cmp(&ax25_rt->callsign, &route.dest_addr) == 0 && ax25_rt->dev == dev) {
len += sprintf(buffer + len, " dg");
break;
default:
- len += sprintf(buffer + len, " ");
+ len += sprintf(buffer + len, " *");
break;
}
ax25_dev->values[AX25_VALUES_CONMODE] = AX25_DEF_CONMODE;
ax25_dev->values[AX25_VALUES_WINDOW] = AX25_DEF_WINDOW;
ax25_dev->values[AX25_VALUES_EWINDOW] = AX25_DEF_EWINDOW;
- ax25_dev->values[AX25_VALUES_T1] = (AX25_DEF_T1 * PR_SLOWHZ) / 2;
+ ax25_dev->values[AX25_VALUES_T1] = AX25_DEF_T1 * PR_SLOWHZ;
ax25_dev->values[AX25_VALUES_T2] = AX25_DEF_T2 * PR_SLOWHZ;
ax25_dev->values[AX25_VALUES_T3] = AX25_DEF_T3 * PR_SLOWHZ;
ax25_dev->values[AX25_VALUES_N2] = AX25_DEF_N2;
+ ax25_dev->values[AX25_VALUES_DIGI] = AX25_DEF_DIGI;
save_flags(flags);
cli();
if (ax25_parms.values[AX25_VALUES_N2] < 1 ||
ax25_parms.values[AX25_VALUES_N2] > 31)
return -EINVAL;
+ if ((ax25_parms.values[AX25_VALUES_DIGI] &
+ ~(AX25_DIGI_INBAND | AX25_DIGI_XBAND)) != 0)
+ return -EINVAL;
memcpy(ax25_dev->values, ax25_parms.values, AX25_MAX_VALUES * sizeof(short));
- ax25_dev->values[AX25_VALUES_T1] *= (PR_SLOWHZ / 2);
+ ax25_dev->values[AX25_VALUES_T1] *= PR_SLOWHZ;
+ ax25_dev->values[AX25_VALUES_T1] /= 2;
ax25_dev->values[AX25_VALUES_T2] *= PR_SLOWHZ;
ax25_dev->values[AX25_VALUES_T3] *= PR_SLOWHZ;
break;
if ((ax25_dev = ax25_dev_get_dev(dev)) == NULL)
return -EINVAL;
memcpy(ax25_parms.values, ax25_dev->values, AX25_MAX_VALUES * sizeof(short));
- ax25_parms.values[AX25_VALUES_T1] /= (PR_SLOWHZ * 2);
+ ax25_parms.values[AX25_VALUES_T1] *= 2;
+ ax25_parms.values[AX25_VALUES_T1] /= PR_SLOWHZ;
ax25_parms.values[AX25_VALUES_T2] /= PR_SLOWHZ;
ax25_parms.values[AX25_VALUES_T3] /= PR_SLOWHZ;
memcpy_tofs(arg, &ax25_parms, sizeof(ax25_parms));
* old BSD code.
* AX.25 030 Jonathan(G4KLX) Added support for extended AX.25.
* Added fragmentation support.
+ * Darryl(G7LED) Added function ax25_requeue_frames() to split
+ * it up from ax25_frames_acked().
*/
#include <linux/config.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
-/* #define NO_BACKOFF */
-
/*
* This routine purges all the queues of frames.
*/
*/
void ax25_frames_acked(ax25_cb *ax25, unsigned short nr)
{
- struct sk_buff *skb, *skb_prev = NULL;
+ struct sk_buff *skb;
/*
* Remove all the ack-ed frames from the ack queue.
ax25->va = (ax25->va + 1) % ax25->modulus;
}
}
+}
+
+/* Maybe this should be your ax25_invoke_retransmission(), which appears
+ * to be used but not to do anything. ax25_invoke_retransmission() used to
+ * be in AX 0.29, but has now gone in 0.30.
+ */
+void ax25_requeue_frames(ax25_cb *ax25)
+{
+ struct sk_buff *skb, *skb_prev = NULL;
/*
* Requeue all the un-ack-ed frames on the output queue to be picked
*/
unsigned short ax25_calculate_t1(ax25_cb *ax25)
{
-#ifndef NO_BACKOFF
int n, t = 2;
if (ax25->backoff) {
}
return t * ax25->rtt;
-#else
- return 2 * ax25->rtt;
-#endif
}
/*
if (ax25->t1timer > 0 && ax25->n2count == 0)
ax25->rtt = (9 * ax25->rtt + ax25->t1 - ax25->t1timer) / 10;
- /* Don't go below one second */
- if (ax25->rtt < 1 * PR_SLOWHZ)
- ax25->rtt = 1 * PR_SLOWHZ;
+#ifdef AX25_T1CLAMPLO
+ /* Don't go below one tenth of a second */
+ if (ax25->rtt < (AX25_T1CLAMPLO))
+ ax25->rtt = (AX25_T1CLAMPLO);
+#else /* Failsafe - some people might have sub 1/10th RTTs :-) **/
+ if (ax25->rtt == 0)
+ ax25->rtt = PR_SLOWHZ;
+#endif
+#ifdef AX25_T1CLAMPHI
+ /* OR above clamped seconds **/
+ if (ax25->rtt > (AX25_T1CLAMPHI))
+ ax25->rtt = (AX25_T1CLAMPHI);
+#endif
}
/*
while (!(buf[-1] & LAPB_E))
{
- if (d >= 6) return NULL; /* Max of 6 digis */
+ if (d >= AX25_MAX_DIGIS) return NULL; /* Max of 6 digis */
if (len < 7) return NULL; /* Short packet */
if (digi != NULL) {
*/
void ax25_set_timer(ax25_cb *ax25)
{
- unsigned long flags;
-
+ unsigned long flags;
+
save_flags(flags);
cli();
del_timer(&ax25->timer);
case AX25_STATE_0:
/* Magic here: If we listen() and a new link dies before it
is accepted() it isnt 'dead' so doesnt get removed. */
- if ((ax25->sk != NULL && ax25->sk->dead) || ax25->sk == NULL) {
+ if (ax25->sk == NULL || ax25->sk->destroy || (ax25->sk->state == TCP_LISTEN && ax25->sk->dead)) {
del_timer(&ax25->timer);
ax25_destroy_socket(ax25);
return;
skb->stamp.tv_sec=0; /* No idea about time */
skb->localroute = 0;
skb->ip_summed = 0;
+ memset(skb->proto_priv, 0, sizeof(skb->proto_priv));
save_flags(flags);
cli();
net_skbcount++;
n->daddr=skb->daddr;
n->raddr=skb->raddr;
n->acked=skb->acked;
+ memcpy(n->proto_priv, skb->proto_priv, sizeof(skb->proto_priv));
n->used=skb->used;
n->free=1;
n->arp=skb->arp;
return NULL;
}
+#if 1
if( tmp <= sk->wmem_alloc)
+#else
+ /* ANK: Line above seems either incorrect
+ * or useless. sk->wmem_alloc has a tiny chance to change
+ * between tmp = sk->w... and cli(),
+ * but it might(?) change earlier. In real life
+ * it does not (I never seen the message).
+ * In any case I'd delete this check at all, or
+ * change it to:
+ */
+ if (sk->wmem_alloc + size >= sk->sndbuf)
+#endif
{
+ if (sk->wmem_alloc <= 0)
+ printk("sock.c: Look where I am %ld<%ld\n", tmp, sk->wmem_alloc);
sk->socket->flags &= ~SO_NOSPACE;
interruptible_sleep_on(sk->sleep);
if (current->signal & ~current->blocked)
{
sk->blog = 1;
if (sk->prot->rcv)
- sk->prot->rcv(skb, skb->dev, sk->opt,
+ sk->prot->rcv(skb, skb->dev, (struct options*)skb->proto_priv,
skb->saddr, skb->len, skb->daddr, 1,
/* Only used for/by raw sockets. */
(struct inet_protocol *)sk->pair);
O_TARGET := ipv4.o
IPV4_OBJS := utils.o route.o proc.o timer.o protocol.o packet.o \
arp.o ip.o raw.o icmp.o tcp.o udp.o devinet.o af_inet.o \
- igmp.o ip_fw.o ipip.o ipmr.o
+ igmp.o ip_fw.o ipmr.o
+
+MOD_LIST_NAME := IPV4_MODULES
+M_OBJS :=
ifeq ($(CONFIG_INET_RARP),y)
-IPV4_OBJS := $(IPV4_OBJS) rarp.o
-else
- ifeq ($(CONFIG_INET_RARP),m)
- M_OBJS := rarp.o
- MOD_LIST_NAME := IPV4_MODULES
- endif
+IPV4_OBJS += rarp.o
+elifeq ($(CONFIG_INET_RARP),m)
+M_OBJS += rarp.o
+endif
+
+ifeq ($(CONFIG_NET_IPIP),y)
+IPV4_OBJS := $(IPV4_OBJS) ipip.o
endif
+
+
+
ifdef CONFIG_INET
O_OBJS := $(IPV4_OBJS)
endif
if (sk->dead && sk->rmem_alloc == 0 && sk->wmem_alloc == 0)
{
+ if(sk->opt)
+ kfree(sk->opt);
+ /*
+ * This one is pure paranoia. I'll take it out
+ * later once I know the bug is buried.
+ */
+ tcp_cache_zap();
kfree_s((void *)sk,sizeof(*sk));
}
else
/* This will destroy it. */
sock->data = NULL;
+ /*
+ * Nasty here. release_sock can cause more frames
+ * to be played through the socket. That can
+ * reinitialise the tcp cache after tcp_close();
+ */
release_sock(sk);
+ tcp_cache_zap(); /* Kill the cache again. */
sk->socket = NULL;
return(0);
}
if (sk2->state != TCP_ESTABLISHED && sk2->err > 0)
{
err = inet_error(sk2);
- sk2->dead=1; /* ANK */
+ sk2->dead=1;
destroy_sock(sk2);
newsock->data = NULL;
return err;
int data_len;
struct icmphdr icmph;
unsigned long csum;
+ struct options replyopts;
+ unsigned char optbuf[40];
};
/*
/*
* Driving logic for building and sending ICMP messages.
*/
-
+
static void icmp_build_xmit(struct icmp_bxm *icmp_param, __u32 saddr, __u32 daddr)
{
struct sock *sk=icmp_socket.data;
- sk->saddr=saddr;
icmp_param->icmph.checksum=0;
icmp_out_count(icmp_param->icmph.type);
ip_build_xmit(sk, icmp_glue_bits, icmp_param,
icmp_param->data_len+sizeof(struct icmphdr),
- daddr, 0, IPPROTO_ICMP);
+ daddr, saddr, &icmp_param->replyopts, 0, IPPROTO_ICMP);
}
icmp_param.icmph.type=type;
icmp_param.icmph.code=code;
- icmp_param.icmph.type=type;
- icmp_param.icmph.un.gateway=0;
+ icmp_param.icmph.un.gateway = info;
icmp_param.data_ptr=iph;
icmp_param.data_len=(iph->ihl<<2)+8; /* RFC says return header + 8 bytes */
/*
* Set it to build.
*/
-
- icmp_build_xmit(&icmp_param, saddr, iph->saddr);
+
+ if (ip_options_echo(&icmp_param.replyopts, NULL, saddr, iph->saddr, skb_in) == 0)
+ icmp_build_xmit(&icmp_param, saddr, iph->saddr);
}
icmp_param.icmph.type=ICMP_ECHOREPLY;
icmp_param.data_ptr=(icmph+1);
icmp_param.data_len=len;
- icmp_build_xmit(&icmp_param, daddr, saddr);
+ if (ip_options_echo(&icmp_param.replyopts, NULL, daddr, saddr, skb)==0)
+ icmp_build_xmit(&icmp_param, daddr, saddr);
kfree_skb(skb, FREE_READ);
}
* Fill in the current time as ms since midnight UT:
*/
- times[1] = htonl((xtime.tv_sec % 86400) * 1000 + xtime.tv_usec / 1000);
+ {
+ struct timeval tv;
+ do_gettimeofday(&tv);
+ times[1] = htonl((tv.tv_sec % 86400) * 1000 + tv.tv_usec / 1000);
+ }
times[2] = times[1];
memcpy((void *)×[0], icmph+1, 4); /* Incoming stamp */
icmp_param.icmph=*icmph;
icmp_param.icmph.code=0;
icmp_param.data_ptr=×
icmp_param.data_len=12;
- icmp_build_xmit(&icmp_param, daddr,saddr);
+ if (ip_options_echo(&icmp_param.replyopts, NULL, daddr, saddr, skb)==0)
+ icmp_build_xmit(&icmp_param, daddr, saddr);
kfree_skb(skb,FREE_READ);
}
icmp_param.icmph.un.echo.sequence = icmph->un.echo.sequence;
icmp_param.data_ptr=&dev->pa_mask;
icmp_param.data_len=4;
- icmp_build_xmit(&icmp_param, daddr, saddr);
+ if (ip_options_echo(&icmp_param.replyopts, NULL, daddr, saddr, skb)==0)
+ icmp_build_xmit(&icmp_param, daddr, saddr);
#endif
kfree_skb(skb, FREE_READ);
}
panic("Failed to create the ICMP control socket.\n");
sk=icmp_socket.data;
sk->allocation=GFP_ATOMIC;
+ sk->num = 256; /* Don't receive any data */
}
struct ip_mib ip_statistics={2,64,}; /* Forwarding=No, Default TTL=64 */
#endif
+/*
+ * Write options to IP header, record destination address to
+ * source route option, address of outgoing interface
+ * (we should already know it, so that this function is allowed be
+ * called only after routing decision) and timestamp,
+ * if we originate this datagram.
+ */
+
+static void ip_options_build(struct sk_buff * skb, struct options * opt,
+ __u32 daddr, __u32 saddr,
+ int is_frag) {
+ unsigned char * iph = (unsigned char*)skb->ip_hdr;
+
+ memcpy(skb->proto_priv, opt, sizeof(struct options));
+ memcpy(iph+sizeof(struct iphdr), opt->__data, opt->optlen);
+ opt = (struct options*)skb->proto_priv;
+ opt->is_data = 0;
+
+ if (opt->srr)
+ memcpy(iph+opt->srr+iph[opt->srr+1]-4, &daddr, 4);
+
+ if (!is_frag) {
+ if (opt->rr_needaddr)
+ memcpy(iph+opt->rr+iph[opt->rr+2]-5, &saddr, 4);
+ if (opt->ts_needaddr)
+ memcpy(iph+opt->ts+iph[opt->ts+2]-9, &saddr, 4);
+ if (opt->ts_needtime) {
+ struct timeval tv;
+ __u32 midtime;
+ do_gettimeofday(&tv);
+ midtime = htonl((tv.tv_sec % 86400) * 1000 + tv.tv_usec / 1000);
+ memcpy(iph+opt->ts+iph[opt->ts+2]-5, &midtime, 4);
+ }
+ return;
+ }
+ if (opt->rr) {
+ memset(iph+opt->rr, IPOPT_NOP, iph[opt->rr+1]);
+ opt->rr = 0;
+ opt->rr_needaddr = 0;
+ }
+ if (opt->ts) {
+ memset(iph+opt->ts, IPOPT_NOP, iph[opt->ts+1]);
+ opt->ts = 0;
+ opt->ts_needaddr = opt->ts_needtime = 0;
+ }
+}
+
+int ip_options_echo(struct options * dopt, struct options * sopt,
+ __u32 daddr, __u32 saddr,
+ struct sk_buff * skb) {
+ unsigned char *sptr, *dptr;
+ int soffset, doffset;
+ int optlen;
+
+ memset(dopt, 0, sizeof(struct options));
+
+ dopt->is_data = 1;
+
+ if (!sopt)
+ sopt = (struct options*)skb->proto_priv;
+
+ if (sopt->optlen == 0) {
+ dopt->optlen = 0;
+ return 0;
+ }
+
+ sptr = (sopt->is_data ? sopt->__data - sizeof(struct iphdr) :
+ (unsigned char *)skb->ip_hdr);
+ dptr = dopt->__data;
+
+ if (sopt->rr) {
+ optlen = sptr[sopt->rr+1];
+ soffset = sptr[sopt->rr+2];
+ dopt->rr = dopt->optlen + sizeof(struct iphdr);
+ memcpy(dptr, sptr+sopt->rr, optlen);
+ if (sopt->rr_needaddr && soffset <= optlen) {
+ if (soffset + 3 > optlen)
+ return -EINVAL;
+ dptr[2] = soffset + 4;
+ dopt->rr_needaddr = 1;
+ }
+ dptr += optlen;
+ dopt->optlen += optlen;
+ }
+ if (sopt->ts) {
+ optlen = sptr[sopt->ts+1];
+ soffset = sptr[sopt->ts+2];
+ dopt->ts = dopt->optlen + sizeof(struct iphdr);
+ memcpy(dptr, sptr+sopt->ts, optlen);
+ if (soffset <= optlen) {
+ if (dopt->ts_needaddr) {
+ if (soffset + 3 > optlen)
+ return -EINVAL;
+ dopt->ts_needaddr = 1;
+ soffset += 4;
+ }
+ if (dopt->ts_needtime) {
+ if (soffset + 3 > optlen)
+ return -EINVAL;
+ dopt->ts_needtime = 1;
+ soffset += 4;
+ }
+ if (((struct timestamp*)(dptr+1))->flags == IPOPT_TS_PRESPEC) {
+ __u32 addr;
+ memcpy(&addr, sptr+soffset-9, 4);
+ if (ip_chk_addr(addr) == 0) {
+ dopt->ts_needtime = 0;
+ dopt->ts_needaddr = 0;
+ soffset -= 8;
+ }
+ }
+ dptr[2] = soffset;
+ }
+ dptr += optlen;
+ dopt->optlen += optlen;
+ }
+ if (sopt->srr) {
+ unsigned char * start = sptr+sopt->srr;
+ __u32 faddr;
+
+ optlen = start[1];
+ soffset = start[2];
+ doffset = 0;
+ if (soffset > optlen)
+ soffset = optlen + 1;
+ soffset -= 4;
+ if (soffset > 3) {
+ memcpy(&faddr, &start[soffset-1], 4);
+ for (soffset-=4, doffset=4; soffset > 3; soffset-=4, doffset+=4)
+ memcpy(&dptr[doffset-1], &start[soffset-1], 4);
+ /*
+ * RFC1812 requires to fix illegal source routes.
+ */
+ if (memcmp(&saddr, &start[soffset+3], 4) == 0)
+ doffset -= 4;
+ }
+ if (doffset > 3) {
+ memcpy(&start[doffset-1], &daddr, 4);
+ dopt->faddr = faddr;
+ dptr[0] = start[0];
+ dptr[1] = doffset+3;
+ dptr[2] = 4;
+ dptr += doffset+3;
+ dopt->srr = dopt->optlen + sizeof(struct iphdr);
+ dopt->optlen += doffset+3;
+ dopt->is_strictroute = sopt->is_strictroute;
+ }
+ }
+ while (dopt->optlen & 3) {
+ *dptr++ = IPOPT_END;
+ dopt->optlen++;
+ }
+ return 0;
+}
+
+static void ip_options_fragment(struct sk_buff * skb) {
+ unsigned char * optptr = (unsigned char*)skb->ip_hdr;
+ struct options * opt = (struct options*)skb->proto_priv;
+ int l = opt->optlen;
+ int optlen;
+
+ while (l > 0) {
+ switch (*optptr) {
+ case IPOPT_END:
+ return;
+ case IPOPT_NOOP:
+ l--;
+ optptr++;
+ continue;
+ }
+ optlen = optptr[1];
+ if (l<2 || optlen>l)
+ return;
+ if (!(*optptr & 0x80))
+ memset(optptr, IPOPT_NOOP, optlen);
+ l -= optlen;
+ optptr += optlen;
+ }
+ opt->ts = 0;
+ opt->rr = 0;
+ opt->rr_needaddr = 0;
+ opt->ts_needaddr = 0;
+ opt->ts_needtime = 0;
+ return;
+}
+
+/*
+ * Verify options and fill pointers in struct options.
+ * Caller should clear *opt, and set opt->data.
+ * If opt == NULL, then skb->data should point to IP header.
+ */
+
+int ip_options_compile(struct options * opt, struct sk_buff * skb)
+{
+ int l;
+ unsigned char * iph;
+ unsigned char * optptr;
+ int optlen;
+ unsigned char * pp_ptr = NULL;
+
+ if (!opt) {
+ opt = (struct options*)skb->proto_priv;
+ memset(opt, 0, sizeof(struct options));
+ iph = (unsigned char*)skb->ip_hdr;
+ opt->optlen = ((struct iphdr *)iph)->ihl*4 - sizeof(struct iphdr);
+ optptr = iph + sizeof(struct iphdr);
+ opt->is_data = 0;
+ } else {
+ optptr = opt->is_data ? opt->__data : (unsigned char*)&skb->ip_hdr[1];
+ iph = optptr - sizeof(struct iphdr);
+ }
+
+ for (l = opt->optlen; l > 0; ) {
+ switch (*optptr) {
+ case IPOPT_END:
+ for (optptr++, l--; l>0; l--) {
+ if (*optptr != IPOPT_END) {
+ *optptr = IPOPT_END;
+ opt->is_changed = 1;
+ }
+ }
+ goto eol;
+ case IPOPT_NOOP:
+ l--;
+ optptr++;
+ continue;
+ }
+ optlen = optptr[1];
+ if (l<2 || optlen>l) {
+ pp_ptr = optptr;
+ break;
+ }
+ switch (*optptr) {
+ case IPOPT_SSRR:
+ case IPOPT_LSRR:
+ if (optlen < 3) {
+ pp_ptr = optptr + 1;
+ break;
+ }
+ if (optptr[2] < 4) {
+ pp_ptr = optptr + 2;
+ break;
+ }
+ /* NB: cf RFC-1812 5.2.4.1 */
+ if (opt->srr) {
+ pp_ptr = optptr;
+ break;
+ }
+ if (!skb) {
+ if (optptr[2] != 4 || optlen < 7 || ((optlen-3) & 3)) {
+ pp_ptr = optptr + 1;
+ break;
+ }
+ memcpy(&opt->faddr, &optptr[3], 4);
+ if (optlen > 7)
+ memmove(&optptr[3], &optptr[7], optlen-7);
+ }
+ opt->is_strictroute = (optptr[0] == IPOPT_SSRR);
+ opt->srr = optptr - iph;
+ break;
+ case IPOPT_RR:
+ if (opt->rr) {
+ pp_ptr = optptr;
+ break;
+ }
+ if (optlen < 3) {
+ pp_ptr = optptr + 1;
+ break;
+ }
+ if (optptr[2] < 4) {
+ pp_ptr = optptr + 2;
+ break;
+ }
+ if (optptr[2] <= optlen) {
+ if (optptr[2]+3 > optlen) {
+ pp_ptr = optptr + 2;
+ break;
+ }
+ if (skb) {
+ memcpy(&optptr[optptr[2]-1], &skb->dev->pa_addr, 4);
+ opt->is_changed = 1;
+ }
+ optptr[2] += 4;
+ opt->rr_needaddr = 1;
+ }
+ opt->rr = optptr - iph;
+ break;
+ case IPOPT_TIMESTAMP:
+ if (opt->ts) {
+ pp_ptr = optptr;
+ break;
+ }
+ if (optlen < 4) {
+ pp_ptr = optptr + 1;
+ break;
+ }
+ if (optptr[2] < 5) {
+ pp_ptr = optptr + 2;
+ break;
+ }
+ if (optptr[2] <= optlen) {
+ struct timestamp * ts = (struct timestamp*)(optptr+1);
+ __u32 * timeptr = NULL;
+ if (ts->ptr+3 > ts->len) {
+ pp_ptr = optptr + 2;
+ break;
+ }
+ switch (ts->flags) {
+ case IPOPT_TS_TSONLY:
+ opt->ts = optptr - iph;
+ if (skb) {
+ timeptr = (__u32*)&optptr[ts->ptr-1];
+ opt->is_changed = 1;
+ }
+ ts->ptr += 4;
+ break;
+ case IPOPT_TS_TSANDADDR:
+ if (ts->ptr+7 > ts->len) {
+ pp_ptr = optptr + 2;
+ break;
+ }
+ opt->ts = optptr - iph;
+ if (skb) {
+ memcpy(&optptr[ts->ptr-1], &skb->dev->pa_addr, 4);
+ timeptr = (__u32*)&optptr[ts->ptr+3];
+ }
+ opt->ts_needaddr = 1;
+ opt->ts_needtime = 1;
+ ts->ptr += 8;
+ break;
+ case IPOPT_TS_PRESPEC:
+ if (ts->ptr+7 > ts->len) {
+ pp_ptr = optptr + 2;
+ break;
+ }
+ opt->ts = optptr - iph;
+ {
+ __u32 addr;
+ memcpy(&addr, &optptr[ts->ptr-1], 4);
+ if (ip_chk_addr(addr) == 0)
+ break;
+ if (skb)
+ timeptr = (__u32*)&optptr[ts->ptr+3];
+ }
+ opt->ts_needaddr = 1;
+ opt->ts_needtime = 1;
+ ts->ptr += 8;
+ break;
+ default:
+ pp_ptr = optptr + 3;
+ break;
+ }
+ if (timeptr) {
+ struct timeval tv;
+ __u32 midtime;
+ do_gettimeofday(&tv);
+ midtime = htonl((tv.tv_sec % 86400) * 1000 + tv.tv_usec / 1000);
+ memcpy(timeptr, &midtime, sizeof(__u32));
+ opt->is_changed = 1;
+ }
+ } else {
+ struct timestamp * ts = (struct timestamp*)(optptr+1);
+ if (ts->overflow == 15) {
+ pp_ptr = optptr + 3;
+ break;
+ }
+ opt->ts = optptr - iph;
+ if (skb) {
+ ts->overflow++;
+ opt->is_changed = 1;
+ }
+ }
+ break;
+ case IPOPT_SEC:
+ case IPOPT_SID:
+ default:
+ if (!skb) {
+ pp_ptr = optptr;
+ break;
+ }
+ break;
+ }
+ l -= optlen;
+ optptr += optlen;
+ }
+
+eol:
+ if (!pp_ptr)
+ return 0;
+
+ if (skb) {
+ icmp_send(skb, ICMP_PARAMETERPROB, 0, pp_ptr-iph, skb->dev);
+ kfree_skb(skb, FREE_READ);
+ }
+ return -EINVAL;
+}
+
/*
* Handle the issuing of an ioctl() request
* for the ip device. This is scheduled to
int tmp;
__u32 src;
struct iphdr *iph;
+ __u32 final_daddr = daddr;
+
+ if (opt && opt->srr)
+ daddr = opt->faddr;
/*
* See if we need to look up the device.
skb->dev = *dev;
skb->saddr = saddr;
- if (skb->sk)
- skb->sk->saddr = saddr;
/*
* Now build the IP header.
* Build the IP addresses
*/
- iph=(struct iphdr *)skb_put(skb,sizeof(struct iphdr));
+ if (opt)
+ iph=(struct iphdr *)skb_put(skb,sizeof(struct iphdr) + opt->optlen);
+ else
+ iph=(struct iphdr *)skb_put(skb,sizeof(struct iphdr));
iph->version = 4;
iph->ihl = 5;
iph->protocol = type;
skb->ip_hdr = iph;
- return(20 + tmp); /* IP header plus MAC header size */
+ if (!opt || !opt->optlen)
+ return sizeof(struct iphdr) + tmp;
+ if (opt->is_strictroute && rt && rt->rt_gateway) {
+ ip_statistics.IpOutNoRoutes++;
+ return -ENETUNREACH;
+ }
+ iph->ihl += opt->optlen>>2;
+ ip_options_build(skb, opt, final_daddr, (*dev)->pa_addr, 0);
+ return iph->ihl*4 + tmp;
}
iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
+
/************************ Fragment Handlers From NET2E **********************************/
}
offset <<= 3; /* offset is in 8-byte chunks */
+ ihl = iph->ihl * 4;
/*
* If the queue already existed, keep restarting its timer as long
if (qp != NULL)
{
+ /* ANK. If the first fragment is received,
+ * we should remember the correct IP header (with options)
+ */
+ if (offset == 0)
+ {
+ qp->ihlen = ihl;
+ memcpy(qp->iph, iph, ihl+8);
+ }
del_timer(&qp->timer);
qp->timer.expires = jiffies + IP_FRAG_TIME; /* about 30 seconds */
qp->timer.data = (unsigned long) qp; /* pointer to queue */
* Determine the position of this fragment.
*/
- ihl = iph->ihl * 4;
end = offset + ntohs(iph->tot_len) - ihl;
/*
*
* Yes this is inefficient, feel free to submit a quicker one.
*
- * **Protocol Violation**
- * We copy all the options to each fragment. !FIXME!
*/
-void ip_fragment(struct sock *sk, struct sk_buff *skb, struct device *dev, int is_frag)
+static void ip_fragment(struct sock *sk, struct sk_buff *skb, struct device *dev, int is_frag)
{
struct iphdr *iph;
unsigned char *raw;
if (ntohs(iph->frag_off) & IP_DF)
{
- /*
- * Reply giving the MTU of the failed hop.
- */
ip_statistics.IpFragFails++;
- icmp_send(skb,ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, dev->mtu, dev);
+ printk("ip_queue_xmit: frag needed\n");
return;
}
iph = (struct iphdr *)(skb2->h.raw/*+dev->hard_header_len*/);
iph->frag_off = htons((offset >> 3));
skb2->ip_hdr = iph;
+
+ /* ANK: dirty, but effective trick. Upgrade options only if
+ * the segment to be fragmented was THE FIRST (otherwise,
+ * options are already fixed) and make it ONCE
+ * on the initial skb, so that all the following fragments
+ * will inherit fixed options.
+ */
+ if (offset == 0)
+ ip_options_fragment(skb);
+
/*
* Added AC : If we are fragmenting a fragment thats not the
* last fragment then keep MF on each bit
* Forward an IP datagram to its next destination.
*/
-int ip_forward(struct sk_buff *skb, struct device *dev, int is_frag, unsigned long target_addr, int target_strict)
+int ip_forward(struct sk_buff *skb, struct device *dev, int is_frag,
+ __u32 target_addr)
{
struct device *dev2; /* Output device */
struct iphdr *iph; /* Our header */
struct rtable *rt; /* Route we use */
unsigned char *ptr; /* Data pointer */
unsigned long raddr; /* Router IP address */
+ struct options * opt = (struct options*)skb->proto_priv;
#ifdef CONFIG_IP_FIREWALL
int fw_res = 0; /* Forwarding result */
#ifdef CONFIG_IP_MASQUERADE
/*
* Strict routing permits no gatewaying
*/
-
- if(target_strict)
+
+ if (opt->is_strictroute)
{
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_SR_FAILED, 0, dev);
return -1;
* Gateways cannot in turn be gatewayed.
*/
+#if 0
rt = ip_rt_route(raddr, NULL, NULL);
if (rt == NULL)
{
}
if (rt->rt_gateway != 0)
raddr = rt->rt_gateway;
+#endif
}
else
raddr = target_addr;
* we calculated.
*/
#ifndef CONFIG_IP_NO_ICMP_REDIRECT
- if (dev == dev2 && !((iph->saddr^iph->daddr)&dev->pa_mask) && (rt->rt_flags&RTF_MODIFIED))
+ if (dev == dev2 && !((iph->saddr^iph->daddr)&dev->pa_mask) &&
+ (rt->rt_flags&RTF_MODIFIED) && !opt->srr)
icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, raddr, dev);
#endif
#endif
IS_SKB(skb);
+ if (skb->len > dev2->mtu && (ntohs(iph->frag_off) & IP_DF)) {
+ ip_statistics.IpFragFails++;
+ icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, dev2->mtu, dev);
+ return -1;
+ }
+
if(skb_headroom(skb)<dev2->hard_header_len)
{
skb2 = alloc_skb(dev2->hard_header_len + skb->len + 15, GFP_ATOMIC);
* Copy the packet data into the new buffer.
*/
memcpy(ptr, skb->h.raw, skb->len);
+ memcpy(skb2->proto_priv, skb->proto_priv, sizeof(skb->proto_priv));
+ iph = skb2->ip_hdr = skb2->h.iph;
}
else
{
}
ip_statistics.IpForwDatagrams++;
}
+
+ if (opt->optlen) {
+ unsigned char * optptr;
+ if (opt->rr_needaddr) {
+ optptr = (unsigned char *)iph + opt->rr;
+ memcpy(&optptr[optptr[2]-5], &dev2->pa_addr, 4);
+ opt->is_changed = 1;
+ }
+ if (opt->srr_is_hit) {
+ int srrptr, srrspace;
+
+ optptr = (unsigned char *)iph + opt->srr;
+
+ for ( srrptr=optptr[2], srrspace = optptr[1];
+ srrptr <= srrspace;
+ srrptr += 4
+ ) {
+ if (srrptr + 3 > srrspace)
+ break;
+ if (memcmp(&target_addr, &optptr[srrptr-1], 4) == 0)
+ break;
+ }
+ if (srrptr + 3 <= srrspace) {
+ opt->is_changed = 1;
+ memcpy(&optptr[srrptr-1], &dev2->pa_addr, 4);
+ iph->daddr = target_addr;
+ optptr[2] = srrptr+4;
+ } else
+ printk("ip_forward(): Argh! Destination lost!\n");
+ }
+ if (opt->ts_needaddr) {
+ optptr = (unsigned char *)iph + opt->ts;
+ memcpy(&optptr[optptr[2]-9], &dev2->pa_addr, 4);
+ opt->is_changed = 1;
+ }
+ if (opt->is_changed) {
+ opt->is_changed = 0;
+ ip_send_check(iph);
+ }
+ }
+/*
+ * ANK: this is point of "no return", we cannot send an ICMP,
+ * because we changed SRR option.
+ */
+
/*
* See if it needs fragmenting. Note in ip_rcv we tagged
* the fragment type. This must be right so that
#endif
+
/*
* This function receives all incoming IP datagrams.
*
unsigned char flag = 0;
struct inet_protocol *ipprot;
int brd=IS_MYADDR;
- unsigned long target_addr;
- int target_strict=0;
+ struct options * opt = NULL;
int is_frag=0;
#ifdef CONFIG_IP_FIREWALL
int err;
*/
skb_trim(skb,ntohs(iph->tot_len));
+
+ if (iph->ihl > 5) {
+ skb->ip_summed = 0;
+ if (ip_options_compile(NULL, skb))
+ return(0);
+ opt = (struct options*)skb->proto_priv;
+#ifdef CONFIG_IP_NOSR
+ if (opt->srr) {
+ kfree_skb(skb, FREE_READ);
+ return -EINVAL;
+ }
+#endif
+ }
/*
* See if the firewall wants to dispose of the packet.
#endif
-
- /*
- * Next analyse the packet for options. Studies show under one packet in
- * a thousand have options....
- */
-
- target_addr = iph->daddr;
-
- if (iph->ihl != 5)
- {
- /* Humph.. options. Lots of annoying fiddly bits */
-
- /*
- * This is straight from the RFC. It might even be right ;)
- *
- * RFC 1122: 3.2.1.8 STREAMID option is obsolete and MUST be ignored.
- * RFC 1122: 3.2.1.8 MUST NOT crash on a zero length option.
- * RFC 1122: 3.2.1.8 MUST support acting as final destination of a source route.
- */
-
- int opt_space=4*(iph->ihl-5);
- int opt_size;
- unsigned char *opt_ptr=skb->h.raw+sizeof(struct iphdr);
-
- skb->ip_summed=0; /* Our free checksum is bogus for this case */
-
- while(opt_space>0)
- {
- if(*opt_ptr==IPOPT_NOOP)
- {
- opt_ptr++;
- opt_space--;
- continue;
- }
- if(*opt_ptr==IPOPT_END)
- break; /* Done */
- if(opt_space<2 || (opt_size=opt_ptr[1])<2 || opt_ptr[1]>opt_space)
- {
- /*
- * RFC 1122: 3.2.2.5 SHOULD send parameter problem reports.
- */
- icmp_send(skb, ICMP_PARAMETERPROB, 0, 0, skb->dev);
- kfree_skb(skb, FREE_READ);
- return -EINVAL;
- }
- switch(opt_ptr[0])
- {
- case IPOPT_SEC:
- /* Should we drop this ?? */
- break;
- case IPOPT_SSRR: /* These work almost the same way */
- target_strict=1;
- /* Fall through */
- case IPOPT_LSRR:
-#ifdef CONFIG_IP_NOSR
- kfree_skb(skb, FREE_READ);
- return -EINVAL;
-#endif
- case IPOPT_RR:
- /*
- * RFC 1122: 3.2.1.8 Support for RR is OPTIONAL.
- */
- if (iph->daddr!=skb->dev->pa_addr && (brd = ip_chk_addr(iph->daddr)) == 0)
- break;
- if((opt_size<3) || ( opt_ptr[0]==IPOPT_RR && opt_ptr[2] > opt_size-4 ))
- {
- if(ip_chk_addr(iph->daddr))
- icmp_send(skb, ICMP_PARAMETERPROB, 0, 0, skb->dev);
- kfree_skb(skb, FREE_READ);
- return -EINVAL;
- }
- if(opt_ptr[2] > opt_size-4 )
- break;
- /* Bytes are [IPOPT_xxRR][Length][EntryPointer][Entry0][Entry1].... */
- /* This isn't going to be too portable - FIXME */
- if(opt_ptr[0]!=IPOPT_RR)
- {
- int t;
- target_addr=*(u32 *)(&opt_ptr[opt_ptr[2]]); /* Get hop */
- t=ip_chk_addr(target_addr);
- if(t==IS_MULTICAST||t==IS_BROADCAST)
- {
- if(ip_chk_addr(iph->daddr))
- icmp_send(skb, ICMP_PARAMETERPROB, 0, 0, skb->dev);
- kfree_skb(skb,FREE_READ);
- return -EINVAL;
- }
- }
- *(u32 *)(&opt_ptr[opt_ptr[2]])=skb->dev->pa_addr; /* Record hop */
- break;
- case IPOPT_TIMESTAMP:
- /*
- * RFC 1122: 3.2.1.8 The timestamp option is OPTIONAL but if implemented
- * MUST meet various rules (read the spec).
- */
- NETDEBUG(printk("ICMP: Someone finish the timestamp routine ;)\n"));
- break;
- default:
- break;
- }
- opt_ptr+=opt_size;
- opt_space-=opt_size;
- }
-
- }
-
-
/*
* Remember if the frame is fragmented.
*/
if ( iph->daddr == skb->dev->pa_addr || (brd = ip_chk_addr(iph->daddr)) != 0)
{
+ if (opt && opt->srr) {
+ int srrspace, srrptr;
+ __u32 nexthop;
+ unsigned char * optptr = ((unsigned char *)iph) + opt->srr;
+
+ if (brd != IS_MYADDR || skb->pkt_type != PACKET_HOST) {
+ kfree_skb(skb, FREE_WRITE);
+ return 0;
+ }
+
+ for ( srrptr=optptr[2], srrspace = optptr[1];
+ srrptr <= srrspace;
+ srrptr += 4
+ ) {
+ int brd2;
+ if (srrptr + 3 > srrspace) {
+ icmp_send(skb, ICMP_PARAMETERPROB, 0, opt->srr+2,
+ skb->dev);
+ kfree_skb(skb, FREE_WRITE);
+ return 0;
+ }
+ memcpy(&nexthop, &optptr[srrptr-1], 4);
+ if ((brd2 = ip_chk_addr(nexthop)) == 0)
+ break;
+ if (brd2 != IS_MYADDR) {
+/* ANK: should we implement weak tunneling of multicasts?
+ * Are they obsolete? DVMRP specs (RFC-1075) is old enough...
+ */
+ kfree_skb(skb, FREE_WRITE);
+ return -EINVAL;
+ }
+ }
+ if (srrptr <= srrspace) {
+ opt->srr_is_hit = 1;
+ opt->is_changed = 1;
+#ifdef CONFIG_IP_FORWARD
+ if (ip_forward(skb, dev, is_frag, nexthop))
+ kfree_skb(skb, FREE_WRITE);
+#else
+ ip_statistics.IpInAddrErrors++;
+ kfree_skb(skb, FREE_WRITE);
+#endif
+ return 0;
+ }
+ }
+
#ifdef CONFIG_IP_MULTICAST
if(!(dev->flags&IFF_ALLMULTI) && brd==IS_MULTICAST && iph->daddr!=IGMP_ALL_HOSTS && !(dev->flags&IFF_LOOPBACK))
{
if (ip_fw_demasquerade(skb))
{
struct iphdr *iph=skb->h.iph;
- if(ip_forward(skb, dev, is_frag|4, iph->daddr, 0))
+ if (ip_forward(skb, dev, is_frag|4, iph->daddr))
kfree_skb(skb, FREE_WRITE);
return(0);
}
* check the protocol handler's return values here...
*/
- ipprot->handler(skb2, dev, NULL, iph->daddr,
+ ipprot->handler(skb2, dev, opt, iph->daddr,
(ntohs(iph->tot_len) - (iph->ihl * 4)),
iph->saddr, 0, ipprot);
-
}
/*
*/
#ifdef CONFIG_IP_FORWARD
- if(ip_forward(skb, dev, is_frag, target_addr, target_strict))
+ if (opt && opt->is_strictroute) {
+ icmp_send(skb, ICMP_PARAMETERPROB, 0, 16, skb->dev);
+ kfree_skb(skb, FREE_WRITE);
+ return -1;
+ }
+ if (ip_forward(skb, dev, is_frag, iph->daddr))
kfree_skb(skb, FREE_WRITE);
#else
/* printk("Machine %lx tried to use us as a forwarder to %lx but we have forwarding disabled!\n",
* Add the rest of the data space.
*/
newskb->ip_hdr=(struct iphdr *)skb_put(newskb, len);
+ memcpy(newskb->proto_priv, skb->proto_priv, sizeof(skb->proto_priv));
+
/*
* Copy the data
*/
switch(optname)
{
+ case IP_OPTIONS:
+ {
+ struct options * opt = NULL;
+ struct options * old_opt;
+ if (optlen > 40 || optlen < 0)
+ return -EINVAL;
+ err = verify_area(VERIFY_READ, optval, optlen);
+ if (err)
+ return err;
+ opt = kmalloc(sizeof(struct options)+((optlen+3)&~3), GFP_KERNEL);
+ if (!opt)
+ return -ENOMEM;
+ memset(opt, 0, sizeof(struct options));
+ if (optlen)
+ memcpy_fromfs(opt->__data, optval, optlen);
+ while (optlen & 3)
+ opt->__data[optlen++] = IPOPT_END;
+ opt->optlen = optlen;
+ opt->is_data = 1;
+ opt->is_setbyuser = 1;
+ if (optlen && ip_options_compile(opt, NULL)) {
+ kfree_s(opt, sizeof(struct options) + optlen);
+ return -EINVAL;
+ }
+ /*
+ * ANK: I'm afraid that receive handler may change
+ * options from under us.
+ */
+ cli();
+ old_opt = sk->opt;
+ sk->opt = opt;
+ sti();
+ if (old_opt)
+ kfree_s(old_opt, sizeof(struct options) + old_opt->optlen);
+ return 0;
+ }
case IP_TOS:
if(val<0||val>255)
return -EINVAL;
switch(optname)
{
+ case IP_OPTIONS:
+ {
+ unsigned char optbuf[sizeof(struct options)+40];
+ struct options * opt = (struct options*)optbuf;
+ err = verify_area(VERIFY_WRITE, optlen, sizeof(int));
+ if (err)
+ return err;
+ cli();
+ opt->optlen = 0;
+ if (sk->opt)
+ memcpy(optbuf, sk->opt, sizeof(struct options)+sk->opt->optlen);
+ sti();
+ if (opt->optlen == 0) {
+ put_fs_long(0,(unsigned long *) optlen);
+ return 0;
+ }
+ err = verify_area(VERIFY_WRITE, optval, opt->optlen);
+ if (err)
+ return err;
+/*
+ * Now we should undo all the changes done by ip_options_compile().
+ */
+ if (opt->srr) {
+ unsigned char * optptr = opt->__data+opt->srr-sizeof(struct iphdr);
+ memmove(optptr+7, optptr+4, optptr[1]-7);
+ memcpy(optptr+3, &opt->faddr, 4);
+ }
+ if (opt->rr_needaddr) {
+ unsigned char * optptr = opt->__data+opt->rr-sizeof(struct iphdr);
+ memset(&optptr[optptr[2]-1], 0, 4);
+ optptr[2] -= 4;
+ }
+ if (opt->ts) {
+ unsigned char * optptr = opt->__data+opt->ts-sizeof(struct iphdr);
+ if (opt->ts_needtime) {
+ memset(&optptr[optptr[2]-1], 0, 4);
+ optptr[2] -= 4;
+ }
+ if (opt->ts_needaddr) {
+ memset(&optptr[optptr[2]-1], 0, 4);
+ optptr[2] -= 4;
+ }
+ }
+ put_fs_long(opt->optlen, (unsigned long *) optlen);
+ memcpy_tofs(optval, opt->__data, opt->optlen);
+ }
+ return 0;
case IP_TOS:
val=sk->ip_tos;
break;
const void *frag,
unsigned short int length,
__u32 daddr,
+ __u32 user_saddr,
+ struct options * opt,
int flags,
int type)
{
int local=0;
struct device *dev;
int nfrags=0;
+ __u32 true_daddr = daddr;
+
+ if (opt && opt->srr && !sk->ip_hdrincl)
+ daddr = opt->faddr;
ip_statistics.IpOutRequests++;
*/
saddr=sk->ip_route_saddr;
- if(!rt || sk->ip_route_stamp != rt_stamp || daddr!=sk->ip_route_daddr || sk->ip_route_local!=local || sk->saddr!=sk->ip_route_saddr)
+ if(!rt || sk->ip_route_stamp != rt_stamp ||
+ daddr!=sk->ip_route_daddr || sk->ip_route_local!=local ||
+ (sk->saddr && sk->saddr != saddr))
{
if(local)
rt = ip_rt_local(daddr, NULL, &saddr);
#ifdef CONFIG_IP_MULTICAST
}
#endif
+ if (user_saddr)
+ saddr = user_saddr;
/*
* Now compute the buffer space we require
* Try the simple case first. This leaves broadcast, multicast, fragmented frames, and by
* choice RAW frames within 20 bytes of maximum size(rare) to the long path
*/
-
- if(length+20 <= dev->mtu && !MULTICAST(daddr) && daddr!=0xFFFFFFFF && daddr!=dev->pa_brdaddr)
+
+ length += 20;
+ if (!sk->ip_hdrincl && opt) {
+ length += opt->optlen;
+ if (opt->is_strictroute && rt && rt->rt_gateway) {
+ ip_statistics.IpOutNoRoutes++;
+ return -ENETUNREACH;
+ }
+ }
+ if(length <= dev->mtu && !MULTICAST(daddr) && daddr!=0xFFFFFFFF && daddr!=dev->pa_brdaddr)
{
int error;
- struct sk_buff *skb=sock_alloc_send_skb(sk, length+20+15+dev->hard_header_len,0, 0,&error);
+ struct sk_buff *skb=sock_alloc_send_skb(sk, length+15+dev->hard_header_len,0, 0,&error);
if(skb==NULL)
{
ip_statistics.IpOutDiscards++;
skb->sk=sk;
skb->arp=0;
skb->saddr=saddr;
- length+=20; /* We do this twice so the subtract once is quicker */
skb->raddr=(rt&&rt->rt_gateway)?rt->rt_gateway:daddr;
skb_reserve(skb,(dev->hard_header_len+15)&~15);
if(sk->ip_hcache_state>0)
iph->protocol=type;
iph->saddr=saddr;
iph->daddr=daddr;
+ if (opt) {
+ iph->ihl += opt->optlen>>2;
+ ip_options_build(skb, opt,
+ true_daddr, dev->pa_addr, 0);
+ }
iph->check=0;
iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
- getfrag(frag,saddr,(void *)(iph+1),0, length-20);
+ getfrag(frag,saddr,((char *)iph)+iph->ihl*4,0, length-iph->ihl*4);
}
else
getfrag(frag,saddr,(void *)iph,0,length-20);
}
return 0;
}
-
-
- fragheaderlen = dev->hard_header_len;
- if(!sk->ip_hdrincl)
- fragheaderlen += 20;
+ length-=20;
+ if (sk && !sk->ip_hdrincl && opt) {
+ length -= opt->optlen;
+ fragheaderlen = dev->hard_header_len + sizeof(struct iphdr) + opt->optlen;
+ maxfraglen = ((dev->mtu-sizeof(struct iphdr)-opt->optlen) & ~7) + fragheaderlen;
+ } else {
+ fragheaderlen = dev->hard_header_len;
+ if(!sk->ip_hdrincl)
+ fragheaderlen += 20;
/*
* Fragheaderlen is the size of 'overhead' on each buffer. Now work
* out the size of the frames to send.
*/
- maxfraglen = ((dev->mtu-20) & ~7) + fragheaderlen;
+ maxfraglen = ((dev->mtu-20) & ~7) + fragheaderlen;
+ }
/*
* Start at the end of the frame by handling the remainder.
iph->version = 4;
iph->ihl = 5; /* ugh */
+ if (opt) {
+ iph->ihl += opt->optlen>>2;
+ ip_options_build(skb, opt,
+ true_daddr, dev->pa_addr, offset);
+ }
iph->tos = sk->ip_tos;
iph->tot_len = htons(fraglen - fragheaderlen + iph->ihl*4);
iph->id = id;
#include <linux/version.h>
static char kernel_version[] = UTS_RELEASE;
-
#else
#define MOD_INC_USE_COUNT
#define MOD_DEC_USE_COUNT
skb->h.iph=(struct iphdr *)skb->data;
skb->ip_hdr=(struct iphdr *)skb->data;
+ memset(skb->proto_priv, 0, sizeof(struct options));
+ if (skb->ip_hdr->ihl > 5) {
+ if (ip_options_compile(NULL, skb))
+ return 0;
+ }
#ifdef CONFIG_IP_FIREWALL
/*
* Feed to IP forward.
*/
- if(ip_forward(skb, dev, 0, daddr, 0))
+ if(ip_forward(skb, dev, 0, daddr))
kfree_skb(skb, FREE_READ);
MOD_DEC_USE_COUNT;
return(0);
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
+ *
+ *
+ * Fixes:
+ * Michael Chastain : Incorrect size of copying.
+ *
+ *
+ * Status:
+ * Tree building works. Cache manager to be added next.
*/
#include <asm/system.h>
err=verify_area(VERIFY_WRITE, (void *)arg, sizeof(vr));
if(err)
return err;
- memcpy_fromfs(&vr,(void *)arg,sizeof(sr));
+ memcpy_fromfs(&vr,(void *)arg,sizeof(vr));
if(vr.vifi>=MAXVIFS)
return -EINVAL;
vif=&vif_table[vr.vifi];
vr.ocount=vif->pkt_out;
vr.ibytes=vif->bytes_in;
vr.obytes=vif->bytes_out;
- memcpy_tofs((void *)arg,&vr,sizeof(sr));
+ memcpy_tofs((void *)arg,&vr,sizeof(vr));
return 0;
}
return -EADDRNOTAVAIL;
return -EACCES;
if(sk->ip_hdrincl)
- err=ip_build_xmit(sk, raw_getrawfrag, from, len, sin.sin_addr.s_addr, flags, sin.sin_port);
+ {
+ if(len>65535)
+ return -EMSGSIZE;
+ err=ip_build_xmit(sk, raw_getrawfrag, from, len, sin.sin_addr.s_addr, 0, sk->opt, flags, sin.sin_port);
+ }
else
- err=ip_build_xmit(sk, raw_getfrag, from, len, sin.sin_addr.s_addr, flags, sin.sin_port);
+ {
+ if(len>65535-sizeof(struct iphdr))
+ return -EMSGSIZE;
+ err=ip_build_xmit(sk, raw_getfrag, from, len, sin.sin_addr.s_addr, 0, sk->opt, flags, sin.sin_port);
+ }
return err<0?err:len;
}
}
memcpy(newsk, sk, sizeof(*newsk));
+ newsk->opt = NULL;
+ if (opt && opt->optlen) {
+ sk->opt = (struct options*)kmalloc(sizeof(struct options)+opt->optlen, GFP_ATOMIC);
+ if (!sk->opt) {
+ kfree_s(newsk, sizeof(struct sock));
+ tcp_statistics.TcpAttemptFails++;
+ kfree_skb(skb, FREE_READ);
+ return;
+ }
+ if (ip_options_echo(sk->opt, opt, daddr, saddr, skb)) {
+ kfree_s(sk->opt, sizeof(struct options)+opt->optlen);
+ kfree_s(newsk, sizeof(struct sock));
+ tcp_statistics.TcpAttemptFails++;
+ kfree_skb(skb, FREE_READ);
+ return;
+ }
+ }
skb_queue_head_init(&newsk->write_queue);
skb_queue_head_init(&newsk->receive_queue);
newsk->send_head = NULL;
* Put in the IP header and routing stuff.
*/
- rt=ip_rt_route(sk->daddr, NULL, NULL);
-
+ if (sk->localroute)
+ rt=ip_rt_local(sk->daddr, NULL, sk->saddr ? NULL : &sk->saddr);
+ else
+ rt=ip_rt_route(sk->daddr, NULL, sk->saddr ? NULL : &sk->saddr);
/*
* We need to build the routing stuff from the things saved in skb.
*/
if(saddr==th_cache_saddr && daddr==th_cache_daddr && th->dest==th_cache_dport && th->source==th_cache_sport)
+ {
sk=(struct sock *)th_cache_sk;
+ /*
+ * We think this cache is causing the bug, so cross-check it against get_sock():
+ */
+ if(sk!=get_sock(&tcp_prot,th->dest, saddr, th->source, daddr))
+ printk("Cache mismatch on TCP.\n");
+ }
else
{
sk = get_sock(&tcp_prot, th->dest, saddr, th->source, daddr);
/*
* Send UDP frames.
*/
-
+
static int udp_send(struct sock *sk, struct sockaddr_in *sin,
- const unsigned char *from, int len, int rt)
+ const unsigned char *from, int len, int rt,
+ __u32 saddr)
{
int ulen = len + sizeof(struct udphdr);
int a;
struct udpfakehdr ufh;
+
+ if(ulen>65535-sizeof(struct iphdr))
+ return -EMSGSIZE;
ufh.uh.source = sk->dummy_th.source;
ufh.uh.dest = sin->sin_port;
if(sk->no_check)
a = ip_build_xmit(sk, udp_getfrag_nosum, &ufh, ulen,
- sin->sin_addr.s_addr, rt, IPPROTO_UDP);
+ sin->sin_addr.s_addr, saddr, sk->opt, rt, IPPROTO_UDP);
else
a = ip_build_xmit(sk, udp_getfrag, &ufh, ulen,
- sin->sin_addr.s_addr, rt, IPPROTO_UDP);
+ sin->sin_addr.s_addr, saddr, sk->opt, rt, IPPROTO_UDP);
if(a<0)
return a;
udp_statistics.UdpOutDatagrams++;
{
struct sockaddr_in sin;
int tmp;
+ __u32 saddr=0;
/*
* Check the flags. We support no flags for UDP sending
*/
+
if (flags&~MSG_DONTROUTE)
return(-EINVAL);
/*
sk->inuse = 1;
/* Send the packet. */
- tmp = udp_send(sk, usin, from, len, flags);
+ tmp = udp_send(sk, usin, from, len, flags, saddr);
/* The datagram has been sent off. Release the socket. */
release_sock(sk);
O_TARGET := netrom.o
O_OBJS := af_netrom.o
-ifdef CONFIG_AX25
+ifdef CONFIG_NETROM
O_OBJS += nr_dev.o nr_in.o nr_out.o nr_route.o nr_subr.o nr_timer.o
endif
* Alan(GW4PTS) Trivial tweaks into new format.
* NET/ROM 003 Jonathan(G4KLX) Added G8BPQ extensions.
* Added NET/ROM routing ioctl.
- *
- * To do:
- * Fix non-blocking connect failure.
+ * Darryl(G7LED) Fix autobinding (on connect).
+ * Fixed nr_release(), set TCP_CLOSE, wakeup app
+ * context, THEN make the sock dead.
+ * Circuit ID check before allocating it on
+ * a connection.
*/
#include <linux/config.h>
#include <net/ip.h>
#include <net/arp.h>
#include <linux/if_arp.h>
-#include <linux/proc_fs.h>
/************************************************************************\
* *
struct sock *sk;
nr_cb *nr;
+ if (sock->type != SOCK_SEQPACKET || protocol != 0)
+ return -ESOCKTNOSUPPORT;
+
if ((sk = (struct sock *)kmalloc(sizeof(*sk), GFP_ATOMIC)) == NULL)
return -ENOMEM;
return -ENOMEM;
}
- sk->type = sock->type;
-
- switch (sock->type) {
- case SOCK_SEQPACKET:
- break;
- default:
- kfree_s((void *)sk, sizeof(*sk));
- kfree_s((void *)nr, sizeof(*nr));
- return -ESOCKTNOSUPPORT;
- }
-
skb_queue_head_init(&sk->receive_queue);
skb_queue_head_init(&sk->write_queue);
skb_queue_head_init(&sk->back_log);
init_timer(&sk->timer);
sk->socket = sock;
+ sk->type = sock->type;
sk->protocol = protocol;
sk->dead = 0;
sk->next = NULL;
sk->rmem_alloc = 0;
sk->inuse = 0;
sk->debug = 0;
+ sk->destroy = 0;
sk->prot = NULL; /* So we use default free mechanisms */
sk->err = 0;
sk->localroute = 0;
nr->my_index = 0;
nr->my_id = 0;
- nr->rtt = nr_default.timeout;
+ nr->rtt = nr_default.timeout / 2;
nr->t1 = nr_default.timeout;
nr->t2 = nr_default.ack_delay;
nr->n2 = nr_default.tries;
struct sock *sk;
nr_cb *nr;
+ if (osk->type != SOCK_SEQPACKET)
+ return NULL;
+
if ((sk = (struct sock *)kmalloc(sizeof(*sk), GFP_ATOMIC)) == NULL)
return NULL;
return NULL;
}
- sk->type = osk->type;
- sk->socket = osk->socket;
-
- switch (osk->type) {
- case SOCK_SEQPACKET:
- break;
- default:
- kfree_s((void *)sk, sizeof(*sk));
- kfree_s((void *)nr, sizeof(*nr));
- return NULL;
- }
-
skb_queue_head_init(&sk->receive_queue);
skb_queue_head_init(&sk->write_queue);
skb_queue_head_init(&sk->back_log);
init_timer(&sk->timer);
+ sk->type = osk->type;
+ sk->socket = osk->socket;
sk->dead = 0;
sk->next = NULL;
sk->priority = osk->priority;
sk->rmem_alloc = 0;
sk->inuse = 0;
sk->ack_backlog = 0;
+ sk->destroy = 0;
sk->prot = NULL; /* So we use default free mechanisms */
sk->err = 0;
sk->localroute = 0;
if (sk->type == SOCK_SEQPACKET) {
switch (sk->nr->state) {
case NR_STATE_0:
- sk->dead = 1;
+ sk->state = TCP_CLOSE;
sk->state_change(sk);
+ sk->dead = 1;
nr_destroy_socket(sk);
break;
case NR_STATE_1:
sk->nr->state = NR_STATE_0;
- sk->dead = 1;
+ sk->state = TCP_CLOSE;
sk->state_change(sk);
+ sk->dead = 1;
nr_destroy_socket(sk);
break;
case NR_STATE_2:
nr_write_internal(sk, NR_DISCACK);
sk->nr->state = NR_STATE_0;
- sk->dead = 1;
+ sk->state = TCP_CLOSE;
sk->state_change(sk);
+ sk->dead = 1;
nr_destroy_socket(sk);
break;
sk->nr->t2timer = 0;
sk->nr->t4timer = 0;
sk->nr->state = NR_STATE_2;
+ sk->state = TCP_CLOSE;
sk->state_change(sk);
sk->dead = 1;
+ sk->destroy = 1;
break;
default:
break;
}
} else {
- sk->dead = 1;
+ sk->state = TCP_CLOSE;
sk->state_change(sk);
+ sk->dead = 1;
nr_destroy_socket(sk);
}
sock->data = NULL;
+ sk->socket = NULL; /* Not used, but we should do this. **/
return 0;
}
memcpy(&sk->nr->user_addr, user, sizeof(ax25_address));
memcpy(&sk->nr->source_addr, source, sizeof(ax25_address));
+ sk->nr->device = dev;
+
nr_insert_socket(sk); /* Finish the bind */
}
-
+
memcpy(&sk->nr->dest_addr, &addr->sax25_call, sizeof(ax25_address));
+ while (nr_find_socket((unsigned char)circuit / 256, (unsigned char)circuit % 256, SOCK_SEQPACKET) != NULL)
+ circuit++;
+
sk->nr->my_index = circuit / 256;
sk->nr->my_id = circuit % 256;
sk = nr_find_listener(dest, SOCK_SEQPACKET);
+ user = (ax25_address *)(skb->data + 21);
+
if (sk == NULL || sk->ack_backlog == sk->max_ack_backlog || (make = nr_make_new(sk)) == NULL) {
nr_transmit_dm(skb);
return 0;
}
- user = (ax25_address *)(skb->data + 21);
window = skb->data[20];
skb->sk = make;
memcpy(&make->nr->source_addr, dest, sizeof(ax25_address));
memcpy(&make->nr->dest_addr, src, sizeof(ax25_address));
memcpy(&make->nr->user_addr, user, sizeof(ax25_address));
-
+
make->nr->your_index = circuit_index;
make->nr->your_id = circuit_id;
if (sk->debug)
printk("NET/ROM: sendto: building packet.\n");
- size = len + AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + 3 + NR_NETWORK_LEN + NR_TRANSPORT_LEN;
+ size = len + AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + NR_NETWORK_LEN + NR_TRANSPORT_LEN;
if ((skb = sock_alloc_send_skb(sk, size, 0, 0, &err)) == NULL)
return err;
{
struct iovec iov;
struct msghdr msg;
- iov.iov_base=(void *)ubuf;
- iov.iov_len=size;
- msg.msg_name=(void *)sa;
- msg.msg_namelen=addr_len;
- msg.msg_accrights=NULL;
- msg.msg_iov=&iov;
- msg.msg_iovlen=1;
- return nr_sendmsg(sock,&msg,size,noblock,flags);
+
+ iov.iov_base = (void *)ubuf;
+ iov.iov_len = size;
+
+ msg.msg_name = (void *)sa;
+ msg.msg_namelen = addr_len;
+ msg.msg_accrights = NULL;
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+ return nr_sendmsg(sock, &msg, size, noblock, flags);
}
static int nr_send(struct socket *sock, const void *ubuf, int size, int noblock, unsigned flags)
{
struct iovec iov;
struct msghdr msg;
- iov.iov_base=ubuf;
- iov.iov_len=size;
- msg.msg_name=(void *)sa;
- msg.msg_namelen=0;
+
+ iov.iov_base = ubuf;
+ iov.iov_len = size;
+
+ msg.msg_name = (void *)sa;
+ msg.msg_namelen = 0;
if (addr_len)
msg.msg_namelen = *addr_len;
- msg.msg_accrights=NULL;
- msg.msg_iov=&iov;
- msg.msg_iovlen=1;
- return nr_recvmsg(sock,&msg,size,noblock,flags,addr_len);
+ msg.msg_accrights = NULL;
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+
+ return nr_recvmsg(sock, &msg, size, noblock, flags, addr_len);
}
dev->stop = nr_close;
dev->hard_header = nr_header;
- dev->hard_header_len = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + 3 + NR_NETWORK_LEN + NR_TRANSPORT_LEN;
+ dev->hard_header_len = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + NR_NETWORK_LEN + NR_TRANSPORT_LEN;
dev->addr_len = AX25_ADDR_LEN;
dev->type = ARPHRD_NETROM;
dev->rebuild_header = nr_rebuild_header;
* History
* NET/ROM 001 Jonathan(G4KLX) Cloned from ax25_in.c
* NET/ROM 003 Jonathan(G4KLX) Added NET/ROM fragment reception.
+ * Darryl(G7LED) Added missing INFO with NAK case, optimized
+ * INFOACK handling, removed reconnect on error.
*/
#include <linux/config.h>
case NR_DISCREQ:
nr_write_internal(sk, NR_DISCACK);
- break;
case NR_DISCACK:
sk->nr->state = NR_STATE_0;
case NR_CONNREQ:
nr_write_internal(sk, NR_CONNACK);
- sk->nr->condition = 0x00;
- sk->nr->t1timer = 0;
- sk->nr->t2timer = 0;
- sk->nr->t4timer = 0;
- sk->nr->vs = 0;
- sk->nr->va = 0;
- sk->nr->vr = 0;
- sk->nr->vl = 0;
break;
case NR_DISCREQ:
case NR_INFOACK:
case NR_INFOACK | NR_CHOKE_FLAG:
+ case NR_INFOACK | NR_NAK_FLAG:
+ case NR_INFOACK | NR_NAK_FLAG | NR_CHOKE_FLAG:
if (frametype & NR_CHOKE_FLAG) {
sk->nr->condition |= PEER_RX_BUSY_CONDITION;
sk->nr->t4timer = nr_default.busy_delay;
sk->nr->t4timer = 0;
}
if (!nr_validate_nr(sk, nr)) {
- nr_nr_error_recovery(sk);
- sk->nr->state = NR_STATE_1;
break;
}
- if (sk->nr->condition & PEER_RX_BUSY_CONDITION) {
- nr_frames_acked(sk, nr);
- } else {
- nr_check_iframes_acked(sk, nr);
- }
- break;
-
- case NR_INFOACK | NR_NAK_FLAG:
- case NR_INFOACK | NR_NAK_FLAG | NR_CHOKE_FLAG:
- if (frametype & NR_CHOKE_FLAG) {
- sk->nr->condition |= PEER_RX_BUSY_CONDITION;
- sk->nr->t4timer = nr_default.busy_delay;
- } else {
- sk->nr->condition &= ~PEER_RX_BUSY_CONDITION;
- sk->nr->t4timer = 0;
- }
- if (nr_validate_nr(sk, nr)) {
+ if (frametype & NR_NAK_FLAG) {
nr_frames_acked(sk, nr);
nr_send_nak_frame(sk);
} else {
- nr_nr_error_recovery(sk);
- sk->nr->state = NR_STATE_1;
+ if (sk->nr->condition & PEER_RX_BUSY_CONDITION) {
+ nr_frames_acked(sk, nr);
+ } else {
+ nr_check_iframes_acked(sk, nr);
+ }
}
break;
case NR_INFO:
+ case NR_INFO | NR_NAK_FLAG:
case NR_INFO | NR_CHOKE_FLAG:
case NR_INFO | NR_MORE_FLAG:
+ case NR_INFO | NR_NAK_FLAG | NR_CHOKE_FLAG:
case NR_INFO | NR_CHOKE_FLAG | NR_MORE_FLAG:
+ case NR_INFO | NR_NAK_FLAG | NR_MORE_FLAG:
+ case NR_INFO | NR_NAK_FLAG | NR_CHOKE_FLAG | NR_MORE_FLAG:
if (frametype & NR_CHOKE_FLAG) {
sk->nr->condition |= PEER_RX_BUSY_CONDITION;
sk->nr->t4timer = nr_default.busy_delay;
sk->nr->condition &= ~PEER_RX_BUSY_CONDITION;
sk->nr->t4timer = 0;
}
- if (!nr_validate_nr(sk, nr)) {
- nr_nr_error_recovery(sk);
- sk->nr->state = NR_STATE_1;
- break;
- }
- if (sk->nr->condition & PEER_RX_BUSY_CONDITION) {
- nr_frames_acked(sk, nr);
- } else {
- nr_check_iframes_acked(sk, nr);
+ if (nr_validate_nr(sk, nr)) {
+ if (frametype & NR_NAK_FLAG) {
+ nr_frames_acked(sk, nr);
+ nr_send_nak_frame(sk);
+ } else {
+ if (sk->nr->condition & PEER_RX_BUSY_CONDITION) {
+ nr_frames_acked(sk, nr);
+ } else {
+ nr_check_iframes_acked(sk, nr);
+ }
+ }
}
queued = 1;
skb_queue_head(&sk->nr->reseq_queue, skb);
int nr_process_rx_frame(struct sock *sk, struct sk_buff *skb)
{
int queued = 0, frametype;
+
+ if (sk->nr->state == NR_STATE_0 && sk->dead)
+ return queued;
if (sk->nr->state != NR_STATE_1 && sk->nr->state != NR_STATE_2 &&
sk->nr->state != NR_STATE_3) {
* History
* NET/ROM 001 Jonathan(G4KLX) Cloned from ax25_out.c
* NET/ROM 003 Jonathan(G4KLX) Added NET/ROM fragmentation.
+ * Darryl(G7LED) Fixed NAK, to give out correct response.
*/
#include <linux/config.h>
if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL)
return;
- nr_send_iframe(sk, skbn);
+ skbn->data[2] = sk->nr->va;
+ skbn->data[3] = sk->nr->vr;
+
+ if (sk->nr->condition & OWN_RX_BUSY_CONDITION)
+ skbn->data[4] |= NR_CHOKE_FLAG;
+
+ nr_transmit_buffer(sk, skbn);
sk->nr->condition &= ~ACK_PENDING_CONDITION;
sk->nr->vl = sk->nr->vr;
* Transmit data until either we're out of data to send or
* the window is full.
*/
- do {
- /*
- * Dequeue the frame and copy it.
- */
- skb = skb_dequeue(&sk->write_queue);
+ /*
+ * Dequeue the frame and copy it.
+ */
+ skb = skb_dequeue(&sk->write_queue);
+
+ do {
if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
skb_queue_head(&sk->write_queue, skb);
return;
*/
skb_queue_tail(&sk->nr->ack_queue, skb);
- } while (!last && skb_peek(&sk->write_queue) != NULL);
+ } while (!last && (skb = skb_dequeue(&sk->write_queue)) != NULL);
sk->nr->vl = sk->nr->vr;
sk->nr->condition &= ~ACK_PENDING_CONDITION;
* Networking Conference paper, as is the whole state machine.
*/
-void nr_nr_error_recovery(struct sock *sk)
-{
- nr_establish_data_link(sk);
-}
-
void nr_establish_data_link(struct sock *sk)
{
sk->nr->condition = 0x00;
{
if (sk->nr->vs == nr) {
nr_frames_acked(sk, nr);
- nr_requeue_frames(sk);
nr_calculate_rtt(sk);
sk->nr->t1timer = 0;
sk->nr->n2count = 0;
} else {
if (sk->nr->va != nr) {
nr_frames_acked(sk, nr);
- nr_requeue_frames(sk);
sk->nr->t1timer = sk->nr->t1 = nr_calculate_t1(sk);
}
}
*/
int nr_in_rx_window(struct sock *sk, unsigned short ns)
{
- unsigned short vc = sk->nr->vl;
+ unsigned short vc = sk->nr->vr;
unsigned short vt = (sk->nr->vl + sk->window) % NR_MODULUS;
while (vc != vt) {
if (ns == vc) return 1;
vc = (vc + 1) % NR_MODULUS;
}
-
- if (ns == vt) return 1;
return 0;
}
unsigned char *dptr;
int len, timeout;
- len = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + 3 + NR_NETWORK_LEN + NR_TRANSPORT_LEN;
+ len = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + NR_NETWORK_LEN + NR_TRANSPORT_LEN;
switch (frametype & 0x0F) {
case NR_CONNREQ:
/*
* Space for AX.25 and NET/ROM network header
*/
- skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + 2 + NR_NETWORK_LEN);
+ skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + NR_NETWORK_LEN);
dptr = skb_put(skb, skb_tailroom(skb));
unsigned char *dptr;
int len;
- len = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + 3 + NR_NETWORK_LEN + NR_TRANSPORT_LEN + 1;
+ len = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + NR_NETWORK_LEN + NR_TRANSPORT_LEN + 1;
if ((skbn = alloc_skb(len, GFP_ATOMIC)) == NULL)
return;
- skb_reserve(skbn, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + 2);
+ skb_reserve(skbn, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN);
dptr = skb_put(skbn, NR_NETWORK_LEN + NR_TRANSPORT_LEN);
if (sk->nr->t1timer > 0 && sk->nr->n2count == 0)
sk->nr->rtt = (9 * sk->nr->rtt + sk->nr->t1 - sk->nr->t1timer) / 10;
- /* Don't go below one second */
- if (sk->nr->rtt < 1 * PR_SLOWHZ)
- sk->nr->rtt = 1 * PR_SLOWHZ;
+#ifdef NR_T1CLAMPLO
+ /* Don't go below one tenth of a second */
+ if (sk->nr->rtt < (NR_T1CLAMPLO))
+ sk->nr->rtt = (NR_T1CLAMPLO);
+#else /* Failsafe - some people might have sub 1/10th RTTs :-) **/
+ if (sk->nr->rtt == 0)
+ sk->nr->rtt = PR_SLOWHZ;
+#endif
+#ifdef NR_T1CLAMPHI
+ /* OR above clamped seconds **/
+ if (sk->nr->rtt > (NR_T1CLAMPHI))
+ sk->nr->rtt = (NR_T1CLAMPHI);
+#endif
}
#endif
case NR_STATE_0:
/* Magic here: If we listen() and a new link dies before it
is accepted() it isnt 'dead' so doesnt get removed. */
- if (sk->dead) {
+ if (sk->destroy || (sk->state == TCP_LISTEN && sk->dead)) {
del_timer(&sk->timer);
nr_destroy_socket(sk);
return;