S: Germany
N: David Mosberger-Tang
-E: davidm@azstarnet.com
+E: David.Mosberger@acm.org
D: Linux/Alpha
-S: 2552 E. Copper Street
-S: Tucson, Arizona 85716-2406
+S: 35706 Runckel Lane
+S: Fremont, CA 94536
S: USA
N: Ian A. Murdock
S: 6525 EZ Nijmegen
S: The Netherlands
+N: Ulrich Windl
+E: Ulrich.Windl@rz.uni-regensburg.de
+P: 1024/E843660D CF D7 43 A1 5A 49 14 25 7C 04 A0 6E 4C 3A AC 6D
+D: Bug fixes for adjtimex() and some other time stuff.
+S: Alte Regensburger Str. 11a
+S: 93149 Nittenau
+S: Germany
+
N: Lars Wirzenius
E: liw@iki.fi
D: Linux System Administrator's Guide
VERSION = 2
PATCHLEVEL = 0
-SUBLEVEL = 31
+SUBLEVEL = 32
ARCH = i386
* High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
*/
-#include <string.h>
-
#include <asm/segment.h>
#include <asm/io.h>
+#include <linux/types.h>
/*
* gzip declarations
#define OF(args) args
#define STATIC static
+void* memset(void* s, int c, size_t n);
+void* memcpy(void* __dest, __const void* __src,
+ size_t __n);
+
#define memzero(s, n) memset ((s), 0, (n))
+
typedef unsigned char uch;
typedef unsigned short ush;
typedef unsigned long ulg;
static void gzip_mark(void **);
static void gzip_release(void **);
-#ifndef STANDALONE_DEBUG
static void puts(const char *);
extern int end;
outb_p(0xff & (pos >> 1), vidport+1);
}
-__ptr_t memset(__ptr_t s, int c, size_t n)
+void* memset(void* s, int c, size_t n)
{
int i;
char *ss = (char*)s;
for (i=0;i<n;i++) ss[i] = c;
}
-__ptr_t memcpy(__ptr_t __dest, __const __ptr_t __src,
+void* memcpy(void* __dest, __const void* __src,
size_t __n)
{
int i;
for (i=0;i<__n;i++) d[i] = s[i];
}
-#endif
/* ===========================================================================
* Fill the input buffer. This is called only when the buffer is empty
short b;
} stack_start = { & user_stack [STACK_SIZE] , KERNEL_DS };
-#ifdef STANDALONE_DEBUG
-
-static void gzip_mark(void **ptr)
-{
-}
-
-static void gzip_release(void **ptr)
-{
-}
-
-char output_buffer[1024 * 800];
-
-int
-main(argc, argv)
- int argc;
- char **argv;
-{
- output_data = output_buffer;
-
- makecrc();
- puts("Uncompressing Linux...");
- gunzip();
- puts("done.\n");
- return 0;
-}
-
-#else
void setup_normal_output_buffer()
{
if (high_loaded) close_output_buffer_if_we_run_high(mv);
return high_loaded;
}
-#endif
-
-
-
-
{
int fpvalid;
-/* Flag indicating the math stuff is valid. We don't support this for the
- soft-float routines yet */
if (hard_math) {
if ((fpvalid = current->used_math) != 0) {
- if (last_task_used_math == current)
+#ifdef __SMP__
+ if (current->flags & PF_USEDFPU)
+#else
+ if (last_task_used_math == current)
+#endif
__asm__("clts ; fnsave %0": :"m" (*fpu));
else
			memcpy(fpu,&current->tss.i387.hard,sizeof(*fpu));
}
- } else {
- /* we should dump the emulator state here, but we need to
- convert it into standard 387 format first.. */
+ } else {
+ /* We dump the emulator state here.
+ We convert it into standard 387 format first.. */
+#ifdef CONFIG_MATH_EMULATION
+ int i;
+ unsigned long top;
+ char (*hardreg)[10];
+	struct i387_soft_struct *soft_fpu = &current->tss.i387.soft;
+ struct fpu_reg* softreg;
+ long int control_word = soft_fpu->cwd;
+
+ fpu->cwd = soft_fpu->cwd;
+ fpu->swd = soft_fpu->swd;
+ fpu->twd = soft_fpu->twd;
+ fpu->fip = soft_fpu->fip;
+ fpu->fcs = soft_fpu->fcs;
+ fpu->foo = soft_fpu->foo;
+ fpu->fos = soft_fpu->fos;
+ hardreg = (char (*)[10]) &fpu->st_space[0];
+ top = (unsigned long) soft_fpu->top % 8;
+ softreg = &soft_fpu->regs[top];
+ for (i = top ; i < 8; i ++) {
+ softreg_to_hardreg(softreg, *hardreg, control_word);
+ hardreg++;
+ softreg++;
+ }
+ softreg = &soft_fpu->regs[0];
+ for (i = 0; i < top; i++) {
+ softreg_to_hardreg(softreg, *hardreg, control_word);
+ hardreg++;
+ softreg++;
+ }
+ fpvalid = 1;
+#else /* defined(CONFIG_MATH_EMULATION) */
fpvalid = 0;
+#endif /* !defined(CONFIG_MATH_EMULATION) */
}
return fpvalid;
put_long(tsk, vma, addr, data);
return 0;
}
+#ifdef CONFIG_MATH_EMULATION
+/* Splice one 32-bit word of ptrace data into the math emulator's soft
+ * FPU state of `child'.  register_offset is the byte offset into the
+ * 8 x 10-byte st_space image: the affected soft register(s) are
+ * converted to the 10-byte hardware format, patched, and converted
+ * back. */
+static void write_emulator_word(struct task_struct *child,
+				unsigned long register_offset,
+				long data)
+{
+	int i, j;
+	struct i387_soft_struct *soft_fpu;
+	struct fpu_reg *this_fpreg, *next_fpreg;
+	char hard_reg[2][10];
+	int control_word;
+	unsigned long top;
+	i = register_offset / 10;	/* which 10-byte register */
+	j = register_offset % 10;	/* byte offset within it */
+	soft_fpu = &child->tss.i387.soft;
+	top = i + (unsigned long) soft_fpu->top;
+	control_word = soft_fpu->cwd;
+	/* NOTE(review): `i' is folded into `top' above and added again
+	 * in the index below, unlike get_emulator_word() which adds it
+	 * only once -- confirm the double offset is intended. */
+	this_fpreg = &soft_fpu->regs[(top + i) % 8];
+	next_fpreg = &soft_fpu->regs[(top + i + 1) % 8];
+	softreg_to_hardreg(this_fpreg, hard_reg[0], control_word);
+	/* A 4-byte store at offset 7..9 spills into the next register. */
+	if (j > 6)
+		softreg_to_hardreg(next_fpreg, hard_reg[1], control_word);
+	*(long *) &hard_reg[0][j] = data;
+	hardreg_to_softreg(hard_reg[0], this_fpreg);
+	if (j > 6)
+		hardreg_to_softreg(hard_reg[1], next_fpreg);
+}
+#endif /* defined(CONFIG_MATH_EMULATION) */
+
+/* Put a word to the part of the user structure containing
+ * floating point registers
+ * Floating point support added to ptrace by Ramon Garcia,
+ * ramon@juguete.quim.ucm.es
+ */
+
+static int put_fpreg_word (struct task_struct *child,
+			   unsigned long addr, long data)
+{
+	struct user *dummy = NULL;	/* only used to compute offsets */
+	if (addr < (long) (&dummy->i387.st_space))
+		return -EIO;
+	/* Make addr relative to the start of the FP register file. */
+	addr -= (long) (&dummy->i387.st_space);
+
+	if (!hard_math) {
+#ifdef CONFIG_MATH_EMULATION
+		write_emulator_word(child, addr, data);
+		/* NOTE(review): on UP kernels control falls through to
+		 * the store into i387.hard.st_space below; if i387.hard
+		 * and i387.soft share storage this may clobber the soft
+		 * state just written -- confirm intended. */
+#else
+		return 0;
+#endif
+	}
+	else
+#ifndef __SMP__
+	/* On UP, if the child still owns the FPU, flush its registers
+	 * to memory first so the store below hits current data. */
+	if (last_task_used_math == child) {
+		clts();
+		__asm__("fsave %0; fwait":"=m" (child->tss.i387));
+		last_task_used_math = current;
+		stts();
+	}
+#endif
+	*(long *)
+		((char *) (child->tss.i387.hard.st_space) + addr) = data;
+	return 0;
+}
+
+#ifdef CONFIG_MATH_EMULATION
+
+/* Read one 32-bit word of FP data from the math emulator's soft FPU
+ * state of `child'.  register_offset is the byte offset into the
+ * 8 x 10-byte st_space image: the affected soft register(s) are
+ * converted to the 10-byte hardware format and the word is read from
+ * the resulting byte image. */
+static unsigned long get_emulator_word(struct task_struct *child,
+				       unsigned long register_offset)
+{
+	char hard_reg[2][10];
+	int i, j;
+	struct fpu_reg *this_fpreg, *next_fpreg;
+	struct i387_soft_struct *soft_fpu;
+	long int control_word;
+	unsigned long top;
+	unsigned long tmp;
+	i = register_offset / 10;	/* which 10-byte register */
+	j = register_offset % 10;	/* byte offset within it */
+	soft_fpu = &child->tss.i387.soft;
+	top = (unsigned long) soft_fpu->top;
+	this_fpreg = &soft_fpu->regs[(top + i) % 8];
+	next_fpreg = &soft_fpu->regs[(top + i + 1) % 8];
+	control_word = soft_fpu->cwd;
+	softreg_to_hardreg(this_fpreg, hard_reg[0], control_word);
+	/* A 4-byte read at offset 7..9 spills into the next register. */
+	if (j > 6)
+		softreg_to_hardreg(next_fpreg, hard_reg[1], control_word);
+	tmp = *(long *)
+		&hard_reg[0][j];
+	return tmp;
+}
+
+#endif /* defined(CONFIG_MATH_EMULATION) */
+/* Get a word from the part of the user structure containing
+ * floating point registers
+ */
+static unsigned long get_fpreg_word(struct task_struct *child,
+				    unsigned long addr)
+{
+	struct user *dummy = NULL;	/* only used to compute offsets */
+	unsigned long tmp;
+	/* NOTE(review): unlike put_fpreg_word() there is no lower-bound
+	 * check here; the sys_ptrace() caller admits addr anywhere in
+	 * i387, including the header words before st_space, in which
+	 * case this subtraction underflows -- confirm callers only
+	 * pass st_space offsets. */
+	addr -= (long) (&dummy->i387.st_space);
+	if (!hard_math) {
+#ifdef CONFIG_MATH_EMULATION
+		tmp = get_emulator_word(child, addr);
+#else
+		tmp = 0;	/* no FPU and no emulator: nothing to read */
+#endif /* !defined(CONFIG_MATH_EMULATION) */
+	} else {
+#ifndef __SMP__
+		/* On UP, if the child still owns the FPU, flush its
+		 * registers to memory so we read current data. */
+		if (last_task_used_math == child) {
+			clts();
+			__asm__("fsave %0; fwait":"=m" (child->tss.i387));
+			last_task_used_math = current;
+			stts();
+		}
+#endif
+		tmp = *(long *)
+			((char *) (child->tss.i387.hard.st_space) +
+			 addr);
+	}
+	return tmp;
+}
asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
{
case PTRACE_PEEKUSR: {
unsigned long tmp;
int res;
-
- if ((addr & 3) || addr < 0 ||
- addr > sizeof(struct user) - 3)
+
+ if ((addr & 3 &&
+ (addr < (long) (&dummy->i387) ||
+ addr > (long) (&dummy->i387.st_space[20]) )) ||
+ addr < 0 || addr > sizeof(struct user) - 3)
return -EIO;
-
+
res = verify_area(VERIFY_WRITE, (void *) data, sizeof(long));
if (res)
return res;
tmp = 0; /* Default return condition */
+ if (addr >= (long) (&dummy->i387) &&
+ addr < (long) (&dummy->i387.st_space[20]) ) {
+#ifndef CONFIG_MATH_EMULATION
+ if (!hard_math)
+ return -EIO;
+#endif /* defined(CONFIG_MATH_EMULATION) */
+ tmp = get_fpreg_word(child, addr);
+ }
if(addr < 17*sizeof(long)) {
addr = addr >> 2; /* temporary hack. */
return write_long(child,addr,data);
case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
- if ((addr & 3) || addr < 0 ||
+ if ((addr & 3 &&
+ (addr < (long) (&dummy->i387.st_space[0]) ||
+ addr > (long) (&dummy->i387.st_space[20]) )) ||
+ addr < 0 ||
addr > sizeof(struct user) - 3)
return -EIO;
-
+
+ if (addr >= (long) (&dummy->i387.st_space[0]) &&
+ addr < (long) (&dummy->i387.st_space[20]) ) {
+#ifndef CONFIG_MATH_EMULATION
+ if (!hard_math)
+ return -EIO;
+#endif /* defined(CONFIG_MATH_EMULATION) */
+ return put_fpreg_word(child, addr, data);
+ }
addr = addr >> 2; /* temporary hack. */
-
+
if (addr == ORIG_EAX)
return -EIO;
if (addr == DS || addr == ES ||
* precision CMOS clock update
* 1996-05-03 Ingo Molnar
* fixed time warps in do_[slow|fast]_gettimeoffset()
+ * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
+ * "A Kernel Model for Precision Timekeeping" by Dave Mills
*/
#include <linux/errno.h>
#include <linux/sched.h>
}
xtime = *tv;
- time_state = TIME_BAD;
- time_maxerror = MAXPHASE;
- time_esterror = MAXPHASE;
+ time_adjust = 0; /* stop active adjtime() */
+ time_status |= STA_UNSYNC;
+ time_state = TIME_ERROR; /* p. 24, (a) */
+ time_maxerror = NTP_PHASE_LIMIT;
+ time_esterror = NTP_PHASE_LIMIT;
sti();
}
* nowtime is written into the registers of the CMOS clock, it will
* jump to the next second precisely 500 ms later. Check the Motorola
* MC146818A or Dallas DS12887 data sheet for details.
+ *
+ * BUG: This routine does not handle hour overflow properly; it just
+ * sets the minutes. Usually you'll only notice that after reboot!
*/
static int set_rtc_mmss(unsigned long nowtime)
{
}
CMOS_WRITE(real_seconds,RTC_SECONDS);
CMOS_WRITE(real_minutes,RTC_MINUTES);
- } else
+ } else {
+ printk(KERN_WARNING
+ "set_rtc_mmss: can't update from %d to %d\n",
+ cmos_minutes, real_minutes);
retval = -1;
+ }
/* The following flags have to be released exactly in this order,
* otherwise the DS12887 (popular MC146818A clone with integrated
* CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
* called as close as possible to 500 ms before the new second starts.
*/
- if (time_state != TIME_BAD && xtime.tv_sec > last_rtc_update + 660 &&
+ if ((time_status & STA_UNSYNC) == 0 &&
+ xtime.tv_sec > last_rtc_update + 660 &&
xtime.tv_usec > 500000 - (tick >> 1) &&
xtime.tv_usec < 500000 + (tick >> 1))
if (set_rtc_mmss(xtime.tv_sec) == 0)
reg_round.o \
wm_shrx.o wm_sqrt.o \
div_Xsig.o polynom_Xsig.o round_Xsig.o \
- shr_Xsig.o mul_Xsig.o
+ shr_Xsig.o mul_Xsig.o \
+ fpu_debug.o
include $(TOPDIR)/Rules.make
--- /dev/null
+/* Interface with ptrace and core-dumping routines */
+
+
+#include "fpu_system.h"
+#include "exception.h"
+#include "reg_constant.h"
+#include "fpu_emu.h"
+#include "control_w.h"
+#include "status_w.h"
+
+
+#define EXTENDED_Ebias 0x3fff
+#define EXTENDED_Emin (-0x3ffe) /* smallest valid exponent */
+
+#define DOUBLE_Emax 1023 /* largest valid exponent */
+#define DOUBLE_Ebias 1023
+#define DOUBLE_Emin (-1022) /* smallest valid exponent */
+
+#define SINGLE_Emax 127 /* largest valid exponent */
+#define SINGLE_Ebias 127
+#define SINGLE_Emin (-126) /* smallest valid exponent */
+
+
+/* Copy and paste from round_to_int. Original comments maintained */
+/*===========================================================================*/
+
+/* r gets mangled such that sig is int, sign:
+ it is NOT normalized */
+/* The return value (in eax) is zero if the result is exact,
+ if bits are changed due to rounding, truncation, etc, then
+ a non-zero value is returned */
+/* Overflow is signalled by a non-zero return value (in eax).
+ In the case of overflow, the returned significand always has the
+ largest possible value */
+
+static int round_to_int_cwd(FPU_REG *r, long int user_control_word)
+{
+  char very_big;
+  unsigned eax;
+
+  if (r->tag == TW_Zero)
+    {
+      /* Make sure that zero is returned */
+      significand(r) = 0;
+      return 0;        /* o.k. */
+    }
+
+  if (r->exp > EXP_BIAS + 63)
+    {
+      r->sigl = r->sigh = ~0;      /* The largest representable number */
+      return 1;        /* overflow */
+    }
+
+  /* Shift the 64-bit significand down so the binary point lands after
+   * bit 0; the bits shifted out (the fraction) come back in eax. */
+  eax = shrxs(&r->sigl, EXP_BIAS + 63 - r->exp);
+  very_big = !(~(r->sigh) | ~(r->sigl)); /* test for 0xfff...fff */
+#define half_or_more (eax & 0x80000000)
+#define frac_part (eax)
+#define more_than_half ((eax & 0x80000001) == 0x80000001)
+  /* Apply the rounding mode from the RC field of the caller-supplied
+   * control word. */
+  switch (user_control_word & CW_RC)
+    {
+    case RC_RND:
+      if ( more_than_half  	       /* nearest */
+	  || (half_or_more && (r->sigl & 1)) ) /* odd -> even */
+	{
+	  if ( very_big ) return 1;        /* overflow */
+	  significand(r) ++;
+	  return PRECISION_LOST_UP;
+	}
+      break;
+    case RC_DOWN:    /* towards -infinity: bump magnitude if negative */
+      if (frac_part && r->sign)
+	{
+	  if ( very_big ) return 1;        /* overflow */
+	  significand(r) ++;
+	  return PRECISION_LOST_UP;
+	}
+      break;
+    case RC_UP:      /* towards +infinity: bump magnitude if positive */
+      if (frac_part && !r->sign)
+	{
+	  if ( very_big ) return 1;        /* overflow */
+	  significand(r) ++;
+	  return PRECISION_LOST_UP;
+	}
+      break;
+    case RC_CHOP:    /* truncate: keep the shifted significand as-is */
+      break;
+    }
+
+  return eax ? PRECISION_LOST_DOWN : 0;
+
+}
+
+
+
+/* Convert a number in the hardware (80-bit extended real) format to
+ * the emulator format.
+ * Taken from the emulator sources, function reg_load_extended
+ */
+
+/* Get a long double from the debugger */
+/* Load a 10-byte 80x87 extended-real image (sigl, sigh, sign+exponent)
+ * into an emulator FPU_REG, classifying zero, denormal, infinity, NaN
+ * and unsupported encodings on the way. */
+void hardreg_to_softreg(const char hardreg[10],
+			FPU_REG *soft_reg)
+
+{
+  unsigned long sigl, sigh, exp;
+
+  sigl = *((unsigned long *) hardreg);
+  sigh = *(1 + (unsigned long *) hardreg);
+  exp = *(4 + (unsigned short *) hardreg);  /* sign bit + 15-bit exponent */
+
+  soft_reg->tag = TW_Valid;	/* Default */
+  soft_reg->sigl = sigl;
+  soft_reg->sigh = sigh;
+  if (exp & 0x8000)
+    soft_reg->sign = SIGN_NEG;
+  else
+    soft_reg->sign = SIGN_POS;
+  exp &= 0x7fff;
+  /* Re-bias from the extended-real exponent bias to the emulator's. */
+  soft_reg->exp = exp - EXTENDED_Ebias + EXP_BIAS;
+
+  if ( exp == 0 )
+    {
+      if ( !(sigh | sigl) )
+	{
+	  soft_reg->tag = TW_Zero;
+	  return;
+	}
+      /* The number is a de-normal or pseudodenormal. */
+      if (sigh & 0x80000000)
+	{
+	  /* Is a pseudodenormal. */
+	  /* Convert it for internal use. */
+	  /* This is non-80486 behaviour because the number
+	     loses its 'denormal' identity. */
+	  soft_reg->exp++;
+	  return;
+	}
+      else
+	{
+	  /* Is a denormal. */
+	  /* Convert it for internal use. */
+	  soft_reg->exp++;
+	  normalize_nuo(soft_reg);
+	  return;
+	}
+    }
+  else if ( exp == 0x7fff )
+    {
+      if ( !((sigh ^ 0x80000000) | sigl) )
+	{
+	  /* Matches the bit pattern for Infinity. */
+	  soft_reg->exp = EXP_Infinity;
+	  soft_reg->tag = TW_Infinity;
+	  return;
+	}
+
+      soft_reg->exp = EXP_NaN;
+      soft_reg->tag = TW_NaN;
+      if ( !(sigh & 0x80000000) )
+	{
+	  /* NaNs have the ms bit set to 1. */
+	  /* This is therefore an Unsupported NaN data type. */
+	  /* This is non 80486 behaviour */
+	  /* This should generate an Invalid Operand exception
+	     later, so we convert it to a SNaN */
+	  soft_reg->sigh = 0x80000000;
+	  soft_reg->sigl = 0x00000001;
+	  soft_reg->sign = SIGN_NEG;
+	  return;
+	}
+      return;
+    }
+
+  if ( !(sigh & 0x80000000) )
+    {
+      /* Unsupported data type. */
+      /* Valid numbers have the ms bit set to 1. */
+      /* Unnormal. */
+      /* Convert it for internal use. */
+      /* This is non-80486 behaviour */
+      /* This should generate an Invalid Operand exception
+	 later, so we convert it to a SNaN */
+      soft_reg->sigh = 0x80000000;
+      soft_reg->sigl = 0x00000001;
+      soft_reg->sign = SIGN_NEG;
+      soft_reg->exp = EXP_NaN;
+      soft_reg->tag = TW_NaN;
+      return;
+    }
+  return;
+}
+
+/* Convert a number in the emulator format to the
+ * hardware format.
+ * Adapted from function write_to_extended
+ */
+
+
+/* Store emulator register *rp as a 10-byte 80x87 extended-real image
+ * in d[0..9].  user_control_word supplies the rounding mode used when
+ * a denormal has to be re-created. */
+void softreg_to_hardreg(const FPU_REG *rp, char d[10], long int user_control_word)
+{
+  long e;
+  FPU_REG tmp;
+  e = rp->exp - EXP_BIAS + EXTENDED_Ebias;  /* re-bias the exponent */
+
+  /*
+    All numbers except denormals are stored internally in a
+    format which is compatible with the extended real number
+    format.
+    */
+  if (e > 0) {
+    *(unsigned long *) d = rp->sigl;
+    *(unsigned long *) (d + 4) = rp->sigh;
+  } else {
+    /*
+      The number is a de-normal stored as a normal using our
+      extra exponent range, or is Zero.
+      Convert it back to a de-normal, or leave it as Zero.
+      */
+    reg_move(rp, &tmp);
+    tmp.exp += -EXTENDED_Emin + 63;  /* largest exp to be 63 */
+    round_to_int_cwd(&tmp, user_control_word);
+    e = 0;
+    *(unsigned long *) d= tmp.sigl;
+    *(unsigned long *) (d + 4) = tmp.sigh;
+  }
+  /* Pack the sign into bit 15 alongside the 15-bit exponent. */
+  e |= rp->sign == SIGN_POS ? 0 : 0x8000;
+  *(unsigned short *) (d + 8) = e;
+}
+
* User-defined bell sound, new setterm control sequences and printk
* redirection by Martin Mares <mj@k332.feld.cvut.cz> 19-Nov-95
*
+ * APM screenblank bug fixed Takashi Manabe <manabe@roy.dsl.tutics.tut.jp>
+ * Backported to 2.0.31 by Adam Bradley <artdodge@cs.bu.edu>
*/
#define BLANK 0x0020
hide_cursor();
console_blanked = fg_console + 1;
+ if(!nopowersave)
+ {
#ifdef CONFIG_APM
- if (apm_display_blank())
- return;
+ if (apm_display_blank())
+ return;
#endif
- if(!nopowersave)
- vesa_blank();
+ vesa_blank();
+ }
}
void do_unblank_screen(void)
paging), then only one adapter can be supported. */
/* determine how much of total RAM is mapped into PC space */
- ti->mapped_ram_size=1<<(((readb(ti->mmio+ ACA_OFFSET + ACA_RW + RRR_ODD)) >>2) +4);
+ ti->mapped_ram_size=1<<((((readb(ti->mmio+ ACA_OFFSET + ACA_RW + RRR_ODD)) >>2) & 0x03) + 4);
ti->page_mask=0;
if (ti->shared_ram_paging == 0xf) { /* No paging in adapter */
ti->mapped_ram_size = ti->avail_shared_ram;
if (cardpresent==TR_ISA) {
static unsigned char ram_bndry_mask[]={0xfe, 0xfc, 0xf8, 0xf0};
unsigned char new_base, rrr_32, chk_base, rbm;
- rrr_32 = (readb(ti->mmio+ ACA_OFFSET + ACA_RW + RRR_ODD))>>2;
+ rrr_32 = ((readb(ti->mmio+ ACA_OFFSET + ACA_RW + RRR_ODD))>>2) & 0x03;
rbm = ram_bndry_mask[rrr_32];
new_base = (Shared_Ram_Base + (~rbm)) & rbm; /* up to boundary */
chk_base = new_base + (ti->mapped_ram_size>>3);
DEVICE( VIA, VIA_82C576, "VT 82C576 3V"),
DEVICE( VIA, VIA_82C585, "VT 82C585VP Apollo VP-1"),
DEVICE( VIA, VIA_82C586_0, "VT 82C586 Apollo VP-1"),
- DEVICE( VIA, VIA_82C416, "VT 82C416MV"),
DEVICE( VIA, VIA_82C926, "VT 82C926 Amazon"),
+ DEVICE( VIA, VIA_82C416, "VT 82C416MV"),
DEVICE( VORTEX, VORTEX_GDT60x0, "GDT 60x0"),
DEVICE( VORTEX, VORTEX_GDT6000B,"GDT 6000b"),
DEVICE( VORTEX, VORTEX_GDT6x10, "GDT 6110/6510"),
{ "Future Domain Corp. V1.0008/18/93", 5, 33, 3, 4, 0 },
{ "Future Domain Corp. V1.0008/18/93", 26, 33, 3, 4, 1 },
{ "Adaptec AHA-2920 PCI-SCSI Card", 42, 31, 3, -1, 1 },
+ { "IBM F1 P264/32", 5, 14, 3, -1, 1 },
/* This next signature may not be a 3.5 bios */
{ "Future Domain Corp. V2.0108/18/93", 5, 33, 3, 5, 0 },
{ "FUTURE DOMAIN CORP. V3.5008/18/93", 5, 34, 3, 5, 0 },
if (filp->f_mode & 2)
return -EROFS;
+ if(sr_template.usage_count) (*sr_template.usage_count)++;
+
sr_ioctl(inode,filp,CDROMCLOSETRAY,0);
check_disk_change(inode->i_rdev);
sr_ioctl(inode, NULL, SCSI_IOCTL_DOORLOCK, 0);
if (scsi_CDs[MINOR(inode->i_rdev)].device->host->hostt->usage_count)
(*scsi_CDs[MINOR(inode->i_rdev)].device->host->hostt->usage_count)++;
- if(sr_template.usage_count) (*sr_template.usage_count)++;
sr_photocd(inode);
int dummy3; /* unused */
} b_un;
unsigned int data[N_PARAM];
-} bdf_prm = {{60, 500, 64, 256, 15, 30*HZ, 5*HZ, 1884, 2}};
+} bdf_prm = {{40, 500, 64, 64, 15, 30*HZ, 5*HZ, 1884, 2}};
/* These are the min and max parameter values that we will allow to be assigned */
int bdflush_min[N_PARAM] = { 0, 10, 5, 25, 0, 100, 100, 1, 1};
return NULL;
}
+/* Return a buffer_head to the unused_list cache, or free it outright
+ * once the cache already holds MAX_UNUSED_BUFFERS entries.  The head
+ * is zeroed before being cached, and a sleeper on buffer_wait is
+ * woken since a buffer head has just become available. */
+static void put_unused_buffer_head(struct buffer_head * bh)
+{
+	if (nr_unused_buffer_heads >= MAX_UNUSED_BUFFERS) {
+		nr_buffer_heads--;
+		kfree(bh);
+		return;
+	}
+	memset(bh,0,sizeof(*bh));
+	nr_unused_buffer_heads++;
+	bh->b_next_free = unused_list;
+	unused_list = bh;
+	wake_up(&buffer_wait);
+}
+
+/*
+ * We can't put completed temporary IO buffer_heads directly onto the
+ * unused_list when they become unlocked, since the device driver
+ * end_request routines still expect access to the buffer_head's
+ * fields after the final unlock. So, the device driver puts them on
+ * the reuse_list instead once IO completes, and we recover these to
+ * the unused_list here.
+ *
+ * The reuse_list receives buffers from interrupt routines, so we need
+ * to be IRQ-safe here (but note that interrupts only _add_ to the
+ * reuse_list, never take away. So we don't need to worry about the
+ * reuse_list magically emptying).
+ */
+static inline void recover_reusable_buffer_heads(void)
+{
+	if (reuse_list) {
+		struct buffer_head *head;
+
+		/* xchg atomically detaches the whole chain, so heads
+		 * added by interrupt handlers after this point land on
+		 * a fresh reuse_list and are not lost. */
+		head = xchg(&reuse_list, NULL);
+
+		do {
+			struct buffer_head *bh = head;
+			head = head->b_next_free;
+			put_unused_buffer_head(bh);
+		} while (head);
+	}
+}
+
extern void allow_interrupts(void);
static void refill_freelist(int size)
repeat:
allow_interrupts();
+ recover_reusable_buffer_heads();
if(needed <= 0)
return;
goto repeat;
}
- /* Dirty buffers should not overtake, wakeup_bdflush(1) calls
- bdflush and sleeps, therefore kswapd does his important work. */
- if (nr_buffers_type[BUF_DIRTY] > nr_buffers * bdf_prm.b_un.nfract/100)
- wakeup_bdflush(1);
-
/* Too bad, that was not enough. Try a little harder to grow some. */
if (nr_free_pages > limit) {
/* If we are not bdflush we should wake up bdflush and try it again. */
- if (current != bdflush_tsk) {
+ if (current != bdflush_tsk &&
+ (buffermem >> PAGE_SHIFT) > (MAP_NR(high_memory) >> 2) &&
+ nr_buffers_type[BUF_DIRTY] > bdf_prm.b_un.nref_dirt) {
wakeup_bdflush(1);
needed -= PAGE_SIZE;
goto repeat;
}
- /* We are bdflush: let's try our best */
-
/*
* In order to protect our reserved pages,
* return now if we got any buffers.
return;
/* and repeat until we find something good */
- grow_buffers(GFP_BUFFER, size);
+ i = grow_buffers(GFP_BUFFER, size);
+
+ if (current != bdflush_tsk && !i && nr_buffers_type[BUF_DIRTY] > 0)
+ wakeup_bdflush(1);
+ else if (!i)
+ grow_buffers(GFP_IO, size);
/* decrease needed even if there is no success */
needed -= PAGE_SIZE;
return bh;
}
- while(!free_list[isize]) {
- allow_interrupts();
- refill_freelist(size);
- }
-
- if (find_buffer(dev,block,size))
- goto repeat;
-
+get_free:
bh = free_list[isize];
+ if (!bh)
+ goto refill;
remove_from_free_list(bh);
-/* OK, FINALLY we know that this buffer is the only one of its kind, */
-/* and that it's unused (b_count=0), unlocked (buffer_locked=0), and clean */
+ /* OK, FINALLY we know that this buffer is the only one of its kind,
+ * and that it's unused (b_count=0), unlocked (buffer_locked=0),
+ * and clean */
bh->b_count=1;
+ bh->b_list=BUF_CLEAN;
bh->b_flushtime=0;
bh->b_state=(1<<BH_Touched);
bh->b_dev=dev;
bh->b_blocknr=block;
insert_into_queues(bh);
return bh;
+
+refill:
+ allow_interrupts();
+ refill_freelist(size);
+ if (!find_buffer(dev,block,size))
+ goto get_free;
+ goto repeat;
}
void set_writetime(struct buffer_head * buf, int flag)
return NULL;
}
-static void put_unused_buffer_head(struct buffer_head * bh)
-{
- if (nr_unused_buffer_heads >= MAX_UNUSED_BUFFERS) {
- nr_buffer_heads--;
- kfree(bh);
- return;
- }
- memset(bh,0,sizeof(*bh));
- nr_unused_buffer_heads++;
- bh->b_next_free = unused_list;
- unused_list = bh;
- wake_up(&buffer_wait);
-}
-
-/*
- * We can't put completed temporary IO buffer_heads directly onto the
- * unused_list when they become unlocked, since the device driver
- * end_request routines still expect access to the buffer_head's
- * fields after the final unlock. So, the device driver puts them on
- * the reuse_list instead once IO completes, and we recover these to
- * the unused_list here.
- *
- * The reuse_list receives buffers from interrupt routines, so we need
- * to be IRQ-safe here (but note that interrupts only _add_ to the
- * reuse_list, never take away. So we don't need to worry about the
- * reuse_list magically emptying).
- */
-static inline void recover_reusable_buffer_heads(void)
-{
- if (reuse_list) {
- struct buffer_head *head;
-
- head = xchg(&reuse_list, NULL);
-
- do {
- struct buffer_head *bh = head;
- head = head->b_next_free;
- put_unused_buffer_head(bh);
- } while (head);
- }
-}
-
static void get_more_buffer_heads(void)
{
struct buffer_head * bh;
if (wait) {
run_task_queue(&tq_disk);
sleep_on(&bdflush_done);
+ recover_reusable_buffer_heads();
}
}
for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
if (bprm->page[i]) {
free_page(bprm->page[i]);
- bprm->page[i]=NULL;
+ bprm->page[i] = 0;
}
}
}
return 0;
}
+ flush_cache_mm(current->mm);
exit_mmap(current->mm);
clear_page_tables(current);
+ flush_tlb_mm(current->mm);
return 0;
}
struct task_struct *blocked_task);
static void posix_remove_locks(struct file_lock **before, struct task_struct *task);
static void flock_remove_locks(struct file_lock **before, struct file *filp);
-
-static struct file_lock *locks_alloc_lock(struct file_lock *fl);
+static struct file_lock *locks_empty_lock(void);
+static struct file_lock *locks_init_lock(struct file_lock *,
+ struct file_lock *);
static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl);
static void locks_delete_lock(struct file_lock **thisfl_p, unsigned int wait);
static char *lock_get_status(struct file_lock *fl, int id, char *pfx);
static struct file_lock *file_lock_table = NULL;
+/* Allocate a new lock, and initialize its fields from fl.
+ * The lock is not inserted into any lists until locks_insert_lock() or
+ * locks_insert_block() are called.
+ */
+static inline struct file_lock *locks_alloc_lock(struct file_lock *fl)
+{
+	/* Returns NULL if the kmalloc() in locks_empty_lock() fails;
+	 * locks_init_lock() passes a NULL first argument through. */
+	return locks_init_lock(locks_empty_lock(), fl);
+}
+
/* Free lock not inserted in any queue.
*/
static inline void locks_free_lock(struct file_lock *fl)
if (!filp->f_inode || !posix_make_lock(filp, &file_lock, &flock))
return (-EINVAL);
+ flock.l_type = F_UNLCK;
for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
if (!(fl->fl_flags & FL_POSIX))
break;
fl->fl_end - fl->fl_start + 1;
flock.l_whence = 0;
flock.l_type = fl->fl_type;
- memcpy_tofs(l, &flock, sizeof(flock));
- return (0);
+ break;
}
}
- flock.l_type = F_UNLCK; /* no conflict found */
memcpy_tofs(l, &flock, sizeof(flock));
return (0);
}
if (!(inode = filp->f_inode))
return (-EINVAL);
+ /*
+ * This might block, so we do it before checking the inode.
+ */
+ memcpy_fromfs(&flock, l, sizeof(flock));
+
/* Don't allow mandatory locks on files that may be memory mapped
* and shared.
*/
} while (vma != inode->i_mmap);
}
- memcpy_fromfs(&flock, l, sizeof(flock));
if (!posix_make_lock(filp, &file_lock, &flock))
return (-EINVAL);
unsigned int wait)
{
struct file_lock *fl;
- struct file_lock *new_fl;
+ struct file_lock *new_fl = NULL;
struct file_lock **before;
- int change = 0;
+ int error;
+ int change;
+ int unlock = (caller->fl_type == F_UNLCK);
+
+ /*
+ * If we need a new lock, get it in advance to avoid races.
+ */
+ if (!unlock) {
+ error = -ENOLCK;
+ new_fl = locks_alloc_lock(caller);
+ if (!new_fl)
+ goto out;
+ }
+
+
+ error = 0;
+search:
+ change = 0;
before = &filp->f_inode->i_flock;
- if ((fl = *before) && (fl->fl_flags & FL_POSIX))
- return (-EBUSY);
+ if ((fl = *before) && (fl->fl_flags & FL_POSIX)) {
+ error = -EBUSY;
+ goto out;
+ }
while ((fl = *before) != NULL) {
if (caller->fl_file == fl->fl_file) {
if (caller->fl_type == fl->fl_type)
- return (0);
+ goto out;
change = 1;
break;
}
/* change means that we are changing the type of an existing lock, or
* or else unlocking it.
*/
- if (change)
- locks_delete_lock(before, caller->fl_type != F_UNLCK);
- if (caller->fl_type == F_UNLCK)
- return (0);
- if ((new_fl = locks_alloc_lock(caller)) == NULL)
- return (-ENOLCK);
-repeat:
- if ((fl = filp->f_inode->i_flock) && (fl->fl_flags & FL_POSIX)) {
- locks_free_lock(new_fl);
- return (-EBUSY);
+ if (change) {
+ /* N.B. What if the wait argument is false? */
+ locks_delete_lock(before, !unlock);
+ /*
+ * If we waited, another lock may have been added ...
+ */
+ if (!unlock)
+ goto search;
}
+ if (unlock)
+ goto out;
+
+repeat:
+ /* Check signals each time we start */
+ error = -ERESTARTSYS;
+ if (current->signal & ~current->blocked)
+ goto out;
+ error = -EBUSY;
+ if ((fl = filp->f_inode->i_flock) && (fl->fl_flags & FL_POSIX))
+ goto out;
while (fl != NULL) {
if (!flock_locks_conflict(new_fl, fl)) {
fl = fl->fl_next;
continue;
}
- if (!wait) {
- locks_free_lock(new_fl);
- return (-EAGAIN);
- }
- if (current->signal & ~current->blocked) {
- /* Note: new_fl is not in any queue at this
- * point, so we must use locks_free_lock()
- * instead of locks_delete_lock()
- * Dmitry Gorodchanin 09/02/96.
- */
- locks_free_lock(new_fl);
- return (-ERESTARTSYS);
- }
+ error = -EAGAIN;
+ if (!wait)
+ goto out;
locks_insert_block(fl, new_fl);
interruptible_sleep_on(&new_fl->fl_wait);
locks_delete_block(fl, new_fl);
- if (current->signal & ~current->blocked) {
- /* Awakened by a signal. Free the new
- * lock and return an error.
- */
- locks_free_lock(new_fl);
- return (-ERESTARTSYS);
- }
goto repeat;
}
locks_insert_lock(&filp->f_inode->i_flock, new_fl);
- return (0);
+ new_fl = NULL;
+ error = 0;
+
+out:
+ if (new_fl)
+ locks_free_lock(new_fl);
+ return (error);
}
/* Add a POSIX style lock to a file.
unsigned int wait)
{
struct file_lock *fl;
- struct file_lock *new_fl;
+ struct file_lock *new_fl, *new_fl2;
struct file_lock *left = NULL;
struct file_lock *right = NULL;
struct file_lock **before;
+ int error;
int added = 0;
+ /*
+ * We may need two file_lock structures for this operation,
+ * so we get them in advance to avoid races.
+ */
+ new_fl = locks_empty_lock();
+ new_fl2 = locks_empty_lock();
+ error = -ENOLCK; /* "no luck" */
+ if (!(new_fl && new_fl2))
+ goto out;
+
if (caller->fl_type != F_UNLCK) {
repeat:
+ error = -EBUSY;
if ((fl = filp->f_inode->i_flock) && (fl->fl_flags & FL_FLOCK))
- return (-EBUSY);
+ goto out;
while (fl != NULL) {
if (!posix_locks_conflict(caller, fl)) {
fl = fl->fl_next;
continue;
}
+ error = -EAGAIN;
if (!wait)
- return (-EAGAIN);
- if (current->signal & ~current->blocked)
- return (-ERESTARTSYS);
+ goto out;
+ error = -EDEADLK;
if (posix_locks_deadlock(caller->fl_owner, fl->fl_owner))
- return (-EDEADLK);
+ goto out;
+ error = -ERESTARTSYS;
+ if (current->signal & ~current->blocked)
+ goto out;
locks_insert_block(fl, caller);
interruptible_sleep_on(&caller->fl_wait);
locks_delete_block(fl, caller);
- if (current->signal & ~current->blocked)
- return (-ERESTARTSYS);
goto repeat;
}
}
- /* Find the first old lock with the same owner as the new lock.
+ /*
+ * We've allocated the new locks in advance, so there are no
+ * errors possible (and no blocking operations) from here on.
+ *
+ * Find the first old lock with the same owner as the new lock.
*/
before = &filp->f_inode->i_flock;
+ error = -EBUSY;
if ((*before != NULL) && ((*before)->fl_flags & FL_FLOCK))
- return (-EBUSY);
+ goto out;
/* First skip locks owned by other processes.
*/
before = &fl->fl_next;
}
+ error = 0;
if (!added) {
if (caller->fl_type == F_UNLCK)
- return (0);
- if ((new_fl = locks_alloc_lock(caller)) == NULL)
- return (-ENOLCK);
+ goto out;
+ locks_init_lock(new_fl, caller);
locks_insert_lock(before, new_fl);
+ new_fl = NULL;
}
if (right) {
if (left == right) {
- /* The new lock breaks the old one in two pieces, so we
- * have to allocate one more lock (in this case, even
- * F_UNLCK may fail!).
- */
- if ((left = locks_alloc_lock(right)) == NULL) {
- if (!added)
- locks_delete_lock(before, 0);
- return (-ENOLCK);
- }
+ /* The new lock breaks the old one in two pieces,
+ * so we have to use the second new lock (in this
+			 * case, even F_UNLCK may fail!).
+			 */
+ left = locks_init_lock(new_fl2, right);
locks_insert_lock(before, left);
+ new_fl2 = NULL;
}
right->fl_start = caller->fl_end + 1;
locks_wake_up_blocks(right, 0);
left->fl_end = caller->fl_start - 1;
locks_wake_up_blocks(left, 0);
}
- return (0);
+out:
+ /*
+ * Free any unused locks. (They haven't
+ * ever been used, so we use kfree().)
+ */
+ if (new_fl)
+ kfree(new_fl);
+ if (new_fl2)
+ kfree(new_fl2);
+ return error;
}
-/* Allocate new lock.
- * Initialize its fields from fl. The lock is not inserted into any
- * lists until locks_insert_lock() or locks_insert_block() are called.
+/*
+ * Allocate an empty lock structure. We can use GFP_KERNEL now that
+ * all allocations are done in advance.
*/
-static struct file_lock *locks_alloc_lock(struct file_lock *fl)
+static struct file_lock *locks_empty_lock(void)
{
- struct file_lock *tmp;
-
- /* Okay, let's make a new file_lock structure... */
- if ((tmp = (struct file_lock *)kmalloc(sizeof(struct file_lock),
- GFP_ATOMIC)) == NULL)
- return (tmp);
-
- memset(tmp, 0, sizeof(*tmp));
-
- tmp->fl_flags = fl->fl_flags;
- tmp->fl_owner = fl->fl_owner;
- tmp->fl_file = fl->fl_file;
- tmp->fl_type = fl->fl_type;
- tmp->fl_start = fl->fl_start;
- tmp->fl_end = fl->fl_end;
+ return ((struct file_lock *) kmalloc(sizeof(struct file_lock),
+ GFP_KERNEL));
+}
- return (tmp);
+/*
+ * Initialize a new lock from an existing file_lock structure.
+ */
+static struct file_lock *locks_init_lock(struct file_lock *new,
+					 struct file_lock *fl)
+{
+	/* NULL-safe: if the upstream allocation failed, just return
+	 * NULL so the caller can report -ENOLCK. */
+	if (new) {
+		memset(new, 0, sizeof(*new));
+		new->fl_owner = fl->fl_owner;
+		new->fl_file = fl->fl_file;
+		new->fl_flags = fl->fl_flags;
+		new->fl_type = fl->fl_type;
+		new->fl_start = fl->fl_start;
+		new->fl_end = fl->fl_end;
+	}
+	return new;
+}
/* Insert file lock fl into an inode's lock list at the position indicated
if (!pid || i >= NR_TASKS)
return -ENOENT;
- if (fd >= NR_OPEN || !p->files->fd[fd] || !p->files->fd[fd]->f_inode)
- return -ENOENT;
+ if (fd >= NR_OPEN || !p->files || !p->files->fd[fd] || !p->files->fd[fd]->f_inode)
+ return -ENOENT;
ino = (pid << 16) + (PROC_PID_FD_DIR << 8) + fd;
switch (ino >> 8) {
case PROC_PID_FD_DIR:
ino &= 0xff;
- if (ino >= NR_OPEN || !p->files->fd[ino])
+ if (ino >= NR_OPEN || !p->files || !p->files->fd[ino])
return;
inode->i_op = &proc_link_inode_operations;
inode->i_size = 64;
if (!p->files)
break;
ino &= 0xff;
- if (ino < NR_OPEN && p->files->fd[ino]) {
+ if (ino < NR_OPEN && p->files && p->files->fd[ino]) {
new_inode = p->files->fd[ino]->f_inode;
}
break;
#define ERESTART 127 /* Interrupted system call should be restarted */
#define ESTRPIPE 128 /* Streams pipe error */
+#define ENOMEDIUM 129 /* No medium found */
+#define EMEDIUMTYPE 130 /* Wrong medium type */
+
#endif
#ifndef _ASM_SOCKET_H
#define _ASM_SOCKET_H
-#include <linux/types.h>
-#include <asm/ioctl.h>
#include <asm/sockios.h>
/* For setsockoptions(2) */
#define EREMOTEIO 121 /* Remote I/O error */
#define EDQUOT 122 /* Quota exceeded */
+#define ENOMEDIUM 123 /* No medium found */
+#define EMEDIUMTYPE 124 /* Wrong medium type */
+
#endif
long ___vm86_gs;
};
+/* Interface for converting data between the emulator format
+ * and the hardware format. Used for core dumping and for
+ * ptrace(2) */
+void hardreg_to_softreg(const char hardreg[10],
+ struct fpu_reg *soft_reg);
+
+void softreg_to_hardreg(const struct fpu_reg *rp, char d[10],
+ long int control_word);
+
#endif
#define EREMOTEIO 121 /* Remote I/O error */
#define EDQUOT 122 /* Quota exceeded */
+#define ENOMEDIUM 123 /* No medium found */
+#define EMEDIUMTYPE 124 /* Wrong medium type */
+
#endif /* _M68K_ERRNO_H */
#define EINPROGRESS 150 /* Operation now in progress */
#define ESTALE 151 /* Stale NFS file handle */
#define ECANCELED 158 /* AIO operation canceled */
+#define ENOMEDIUM 159 /* No medium found */
+#define EMEDIUMTYPE 160 /* Wrong medium type */
#define EDQUOT 1133 /* Quota exceeded */
#define ENFSREMOTE 1134 /* ??? */
#define EISNAM 120 /* Is a named type file */
#define EREMOTEIO 121 /* Remote I/O error */
#define EDQUOT 122 /* Quota exceeded */
+#define ENOMEDIUM 123 /* No medium found */
+#define EMEDIUMTYPE 124 /* Wrong medium type */
/* Should never be seen by user programs */
#define ERESTARTSYS 512
#define ELIBMAX 123 /* Atmpt to link in too many shared libs */
#define ELIBSCN 124 /* .lib section in a.out corrupted */
+#define ENOMEDIUM 125 /* No medium found */
+#define EMEDIUMTYPE 126 /* Wrong medium type */
+
#endif
#define GFP_KERNEL 0x03
#define GFP_NOBUFFER 0x04
#define GFP_NFS 0x05
+#define GFP_IO 0x06
/* Flag - indicates that the buffer will be suitable for DMA. Ignored on some
platforms, used as appropriate on others */
#undef __FDMASK
#define __FDMASK(d) (1UL << ((d) % __NFDBITS))
-typedef struct fd_set {
+typedef struct {
unsigned long fds_bits [__FDSET_LONGS];
} __kernel_fd_set;
* Derived linux/timex.h
* 1995-08-13 Torsten Duwe
* kernel PLL updated to 1994-12-13 specs (rfc-1589)
+ * 1997-08-30 Ulrich Windl
+ * Added new constant NTP_PHASE_LIMIT. Corrected MAXFREQ.
*/
#ifndef _LINUX_TIMEX_H
#define _LINUX_TIMEX_H
#define FINEUSEC (1L << SHIFT_SCALE) /* 1 us in phase units */
#define MAXPHASE 512000L /* max phase error (us) */
-#define MAXFREQ (512L << SHIFT_USEC) /* max frequency error (ppm) */
+#define MAXFREQ (200L << SHIFT_USEC) /* max frequency error (ppm) */
#define MAXTIME (200L << PPS_AVG) /* max PPS error (jitter) (200 us) */
#define MINSEC 16L /* min interval between updates (s) */
#define MAXSEC 1200L /* max interval between updates (s) */
+#define NTP_PHASE_LIMIT (MAXPHASE << 5) /* beyond max. dispersion */
/*
* The following defines are used only if a pulse-per-second (PPS)
tmp->vm_flags &= ~VM_LOCKED;
tmp->vm_mm = mm;
tmp->vm_next = NULL;
- if (copy_page_range(mm, current->mm, tmp)) {
- kfree(tmp);
- exit_mmap(mm);
- return -ENOMEM;
- }
if (tmp->vm_inode) {
tmp->vm_inode->i_count++;
/* insert tmp into the share list, just after mpnt */
mpnt->vm_next_share = tmp;
tmp->vm_prev_share = mpnt;
}
+ if (copy_page_range(mm, current->mm, tmp)) {
+ if (mpnt->vm_next_share == tmp) {
+ tmp->vm_prev_share->vm_next_share = tmp->vm_next_share;
+ tmp->vm_next_share->vm_prev_share = tmp->vm_prev_share;
+ }
+ kfree(tmp);
+ exit_mmap(mm);
+ return -ENOMEM;
+ }
if (tmp->vm_ops && tmp->vm_ops->open)
tmp->vm_ops->open(tmp);
*p = tmp;
p = &tmp->vm_next;
}
build_mmap_avl(mm);
+ flush_tlb_mm(current->mm);
return 0;
}
if (!(clone_flags & CLONE_VM)) {
struct mm_struct * mm = kmalloc(sizeof(*tsk->mm), GFP_KERNEL);
if (!mm)
- return -1;
+ return -ENOMEM;
*mm = *current->mm;
mm->count = 1;
mm->def_flags = 0;
tsk->min_flt = tsk->maj_flt = 0;
tsk->cmin_flt = tsk->cmaj_flt = 0;
tsk->nswap = tsk->cnswap = 0;
- if (new_page_tables(tsk))
+ if (new_page_tables(tsk)) {
+ tsk->mm = NULL;
+ mm->pgd = NULL;
+ exit_mmap(mm);
goto free_mm;
+ }
if (dup_mmap(mm)) {
+ tsk->mm = NULL;
free_page_tables(mm);
free_mm:
kfree(mm);
- return -1;
+ return -ENOMEM;
}
return 0;
}
X(sys_call_table),
X(hard_reset_now),
X(_ctype),
+ X(get_random_bytes),
/* Signal interfaces */
X(send_sig),
* 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and
* make semaphores SMP safe
* 1997-01-28 Modified by Finn Arne Gangstad to make timers scale better.
+ * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
+ * "A Kernel Model for Precision Timekeeping" by Dave Mills
*/
/*
long time_constant = 2; /* pll time constant */
long time_tolerance = MAXFREQ; /* frequency tolerance (ppm) */
long time_precision = 1; /* clock precision (us) */
-long time_maxerror = MAXPHASE; /* maximum error (us) */
-long time_esterror = MAXPHASE; /* estimated error (us) */
+long time_maxerror = NTP_PHASE_LIMIT; /* maximum error (us) */
+long time_esterror = NTP_PHASE_LIMIT; /* estimated error (us) */
long time_phase = 0; /* phase offset (scaled us) */
long time_freq = ((1000000 + HZ/2) % HZ - HZ/2) << SHIFT_USEC; /* frequency offset (scaled ppm) */
long time_adj = 0; /* tick adjust (scaled 1 / HZ) */
/* Bump the maxerror field */
time_maxerror += time_tolerance >> SHIFT_USEC;
- if ( time_maxerror > MAXPHASE )
- time_maxerror = MAXPHASE;
+ if ( time_maxerror > NTP_PHASE_LIMIT ) {
+ time_maxerror = NTP_PHASE_LIMIT;
+ time_state = TIME_ERROR; /* p. 17, sect. 4.3, (b) */
+ time_status |= STA_UNSYNC;
+ }
/*
* Leap second processing. If in leap-insert state at
if (xtime.tv_sec % 86400 == 0) {
xtime.tv_sec--;
time_state = TIME_OOP;
- printk("Clock: inserting leap second 23:59:60 UTC\n");
+ printk(KERN_NOTICE "Clock: inserting leap second 23:59:60 UTC\n");
}
break;
if ((xtime.tv_sec + 1) % 86400 == 0) {
xtime.tv_sec++;
time_state = TIME_WAIT;
- printk("Clock: deleting leap second 23:59:59 UTC\n");
+ printk(KERN_NOTICE "Clock: deleting leap second 23:59:59 UTC\n");
}
break;
* the pll and the PPS signal.
*/
pps_valid++;
- if (pps_valid == PPS_VALID) {
+ if (pps_valid == PPS_VALID) { /* PPS signal lost */
pps_jitter = MAXTIME;
pps_stabil = MAXFREQ;
time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
}
ltemp = time_freq + pps_freq;
if (ltemp < 0)
- time_adj -= -ltemp >>
- (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
+ time_adj -= -ltemp >> (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
else
- time_adj += ltemp >>
- (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
+ time_adj += ltemp >> (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
#if HZ == 100
- /* compensate for (HZ==100) != 128. Add 25% to get 125; => only 3% error */
+ /* Compensate for (HZ==100) != (1 << SHIFT_HZ).
+ * Add 25% and 3.125% to get 128.125; => only 0.125% error (p. 14)
+ */
if (time_adj < 0)
- time_adj -= -time_adj >> 2;
+ time_adj -= (-time_adj >> 2) + (-time_adj >> 5);
else
- time_adj += time_adj >> 2;
+ time_adj += (time_adj >> 2) + (time_adj >> 5);
#endif
}
/* in the NTP reference this is called "hardclock()" */
static void update_wall_time_one_tick(void)
{
+ if ( (time_adjust_step = time_adjust) != 0 ) {
+ /* We are doing an adjtime thing.
+ *
+ * Prepare time_adjust_step to be within bounds.
+ * Note that a positive time_adjust means we want the clock
+ * to run faster.
+ *
+ * Limit the amount of the step to be in the range
+ * -tickadj .. +tickadj
+ */
+ if (time_adjust > tickadj)
+ time_adjust_step = tickadj;
+ else if (time_adjust < -tickadj)
+ time_adjust_step = -tickadj;
+
+ /* Reduce by this step the amount of time left */
+ time_adjust -= time_adjust_step;
+ }
+ xtime.tv_usec += tick + time_adjust_step;
/*
* Advance the phase, once it gets to one microsecond, then
* advance the tick more.
if (time_phase <= -FINEUSEC) {
long ltemp = -time_phase >> SHIFT_SCALE;
time_phase += ltemp << SHIFT_SCALE;
- xtime.tv_usec += tick + time_adjust_step - ltemp;
+ xtime.tv_usec -= ltemp;
}
else if (time_phase >= FINEUSEC) {
long ltemp = time_phase >> SHIFT_SCALE;
time_phase -= ltemp << SHIFT_SCALE;
- xtime.tv_usec += tick + time_adjust_step + ltemp;
- } else
- xtime.tv_usec += tick + time_adjust_step;
-
- if (time_adjust) {
- /* We are doing an adjtime thing.
- *
- * Modify the value of the tick for next time.
- * Note that a positive delta means we want the clock
- * to run fast. This means that the tick should be bigger
- *
- * Limit the amount of the step for *next* tick to be
- * in the range -tickadj .. +tickadj
- */
- if (time_adjust > tickadj)
- time_adjust_step = tickadj;
- else if (time_adjust < -tickadj)
- time_adjust_step = -tickadj;
- else
- time_adjust_step = time_adjust;
-
- /* Reduce by this step the amount of time left */
- time_adjust -= time_adjust_step;
+ xtime.tv_usec += ltemp;
}
- else
- time_adjust_step = 0;
}
/*
* adjtime interface update and CMOS clock write code
* 1995-08-13 Torsten Duwe
* kernel PLL updated to 1994-12-13 specs (rfc-1589)
+ * 1996-10-22, 1997-09-13 Ulrich Windl
+ * support for external PPS signal, error checking in adjtimex()
+ * Updated NTP code according to technical memorandum Jan '96
+ * "A Kernel Model for Precision Timekeeping" by Dave Mills
*/
#include <linux/errno.h>
cli();
xtime.tv_sec = value;
xtime.tv_usec = 0;
- time_state = TIME_ERROR;
- time_maxerror = MAXPHASE;
- time_esterror = MAXPHASE;
+ time_adjust = 0; /* stop active adjtime() */
+ time_status |= STA_UNSYNC;
+ time_state = TIME_ERROR; /* p. 24, (a) */
+ time_maxerror = NTP_PHASE_LIMIT;
+ time_esterror = NTP_PHASE_LIMIT;
sti();
return 0;
}
asmlinkage int sys_adjtimex(struct timex *txc_p)
{
long ltemp, mtemp, save_adjust;
- int error;
+ int error = 0;
/* Local copy of parameter */
struct timex txc;
error = verify_area(VERIFY_WRITE, txc_p, sizeof(struct timex));
if (error)
- return error;
+ return error; /* do not write results */
/* Copy the user data space into the kernel copy
* structure. But bear in mind that the structures
/* Now we validate the data before disabling interrupts
*/
-
if (txc.modes != ADJ_OFFSET_SINGLESHOT && (txc.modes & ADJ_OFFSET))
- /* adjustment Offset limited to +- .512 seconds */
- if (txc.offset <= - MAXPHASE || txc.offset >= MAXPHASE )
- return -EINVAL;
-
- /* if the quartz is off by more than 10% something is VERY wrong ! */
- if (txc.modes & ADJ_TICK)
- if (txc.tick < 900000/HZ || txc.tick > 1100000/HZ)
- return -EINVAL;
+ /* adjustment Offset limited to +- .512 seconds */
+ if (txc.offset <= - MAXPHASE || txc.offset >= MAXPHASE )
+ return -EINVAL;
cli();
- /* Save for later - semantics of adjtime is to return old value */
+ /* Save for later - the semantics of adjtime() are to return the old value */
save_adjust = time_adjust;
/* If there are input parameters, then process them */
+#if 0 /* STA_CLOCKERR is never set yet */
+ time_status &= ~STA_CLOCKERR; /* reset STA_CLOCKERR */
+#endif
if (txc.modes)
{
- if (time_state == TIME_BAD)
- time_state = TIME_OK;
+ if (time_state == TIME_ERROR)
+ time_state = TIME_OK; /* reset error -- why? */
- if (txc.modes & ADJ_STATUS)
- time_status = txc.status;
+ if (txc.modes & ADJ_STATUS) /* only set allowed bits */
+ time_status = (txc.status & ~STA_RONLY) |
+ (time_status & STA_RONLY);
- if (txc.modes & ADJ_FREQUENCY)
- time_freq = txc.freq;
+ if (txc.modes & ADJ_FREQUENCY) { /* p. 22 */
+ if (txc.freq > MAXFREQ || txc.freq < -MAXFREQ) {
+ error = -EINVAL;
+ goto leave;
+ }
+ time_freq = txc.freq - pps_freq;
+ }
- if (txc.modes & ADJ_MAXERROR)
+ if (txc.modes & ADJ_MAXERROR) {
+ if (txc.maxerror < 0 || txc.maxerror >= NTP_PHASE_LIMIT) {
+ error = -EINVAL;
+ goto leave;
+ }
time_maxerror = txc.maxerror;
+ }
- if (txc.modes & ADJ_ESTERROR)
+ if (txc.modes & ADJ_ESTERROR) {
+ if (txc.esterror < 0 || txc.esterror >= NTP_PHASE_LIMIT) {
+ error = -EINVAL;
+ goto leave;
+ }
time_esterror = txc.esterror;
+ }
- if (txc.modes & ADJ_TIMECONST)
+ if (txc.modes & ADJ_TIMECONST) { /* p. 24 */
+ if (txc.constant < 0 || txc.constant > MAXTC) {
+ error = -EINVAL;
+ goto leave;
+ }
time_constant = txc.constant;
+ }
- if (txc.modes & ADJ_OFFSET)
- if ((txc.modes == ADJ_OFFSET_SINGLESHOT)
- || !(time_status & STA_PLL))
- {
- time_adjust = txc.offset;
+ if (txc.modes & ADJ_OFFSET) { /* values checked earlier */
+ if (txc.modes == ADJ_OFFSET_SINGLESHOT) {
+ /* adjtime() is independent from ntp_adjtime() */
+ time_adjust = txc.offset;
}
- else if ((time_status & STA_PLL)||(time_status & STA_PPSTIME))
- {
- ltemp = (time_status & STA_PPSTIME &&
- time_status & STA_PPSSIGNAL) ?
- pps_offset : txc.offset;
-
- /*
- * Scale the phase adjustment and
- * clamp to the operating range.
- */
- if (ltemp > MAXPHASE)
- time_offset = MAXPHASE << SHIFT_UPDATE;
- else if (ltemp < -MAXPHASE)
- time_offset = -(MAXPHASE << SHIFT_UPDATE);
- else
- time_offset = ltemp << SHIFT_UPDATE;
-
- /*
- * Select whether the frequency is to be controlled and in which
- * mode (PLL or FLL). Clamp to the operating range. Ugly
- * multiply/divide should be replaced someday.
- */
-
- if (time_status & STA_FREQHOLD || time_reftime == 0)
+ else if ( time_status & (STA_PLL | STA_PPSTIME) ) {
+ ltemp = (time_status & (STA_PPSTIME | STA_PPSSIGNAL)) ==
+ (STA_PPSTIME | STA_PPSSIGNAL) ?
+ pps_offset : txc.offset;
+
+ /*
+ * Scale the phase adjustment and
+ * clamp to the operating range.
+ */
+ if (ltemp > MAXPHASE)
+ time_offset = MAXPHASE << SHIFT_UPDATE;
+ else if (ltemp < -MAXPHASE)
+ time_offset = -(MAXPHASE << SHIFT_UPDATE);
+ else
+ time_offset = ltemp << SHIFT_UPDATE;
+
+ /*
+ * Select whether the frequency is to be controlled
+ * and in which mode (PLL or FLL). Clamp to the operating
+ * range. Ugly multiply/divide should be replaced someday.
+ */
+
+ if (time_status & STA_FREQHOLD || time_reftime == 0)
+ time_reftime = xtime.tv_sec;
+ mtemp = xtime.tv_sec - time_reftime;
time_reftime = xtime.tv_sec;
- mtemp = xtime.tv_sec - time_reftime;
- time_reftime = xtime.tv_sec;
- if (time_status & STA_FLL)
- {
- if (mtemp >= MINSEC)
- {
- ltemp = ((time_offset / mtemp) << (SHIFT_USEC -
- SHIFT_UPDATE));
- if (ltemp < 0)
- time_freq -= -ltemp >> SHIFT_KH;
- else
- time_freq += ltemp >> SHIFT_KH;
- }
- }
- else
- {
- if (mtemp < MAXSEC)
- {
- ltemp *= mtemp;
- if (ltemp < 0)
- time_freq -= -ltemp >> (time_constant +
- time_constant + SHIFT_KF -
- SHIFT_USEC);
- else
- time_freq += ltemp >> (time_constant +
- time_constant + SHIFT_KF -
- SHIFT_USEC);
- }
+ if (time_status & STA_FLL) {
+ if (mtemp >= MINSEC) {
+ ltemp = (time_offset / mtemp) << (SHIFT_USEC -
+ SHIFT_UPDATE);
+ if (ltemp < 0)
+ time_freq -= -ltemp >> SHIFT_KH;
+ else
+ time_freq += ltemp >> SHIFT_KH;
+ } else /* calibration interval too short (p. 12) */
+ time_state = TIME_ERROR;
+ } else { /* PLL mode */
+ if (mtemp < MAXSEC) {
+ ltemp *= mtemp;
+ if (ltemp < 0)
+ time_freq -= -ltemp >> (time_constant +
+ time_constant +
+ SHIFT_KF - SHIFT_USEC);
+ else
+ time_freq += ltemp >> (time_constant +
+ time_constant +
+ SHIFT_KF - SHIFT_USEC);
+ } else /* calibration interval too long (p. 12) */
+ time_state = TIME_ERROR;
}
- if (time_freq > time_tolerance)
- time_freq = time_tolerance;
- else if (time_freq < -time_tolerance)
- time_freq = -time_tolerance;
+ if (time_freq > time_tolerance)
+ time_freq = time_tolerance;
+ else if (time_freq < -time_tolerance)
+ time_freq = -time_tolerance;
} /* STA_PLL || STA_PPSTIME */
- if (txc.modes & ADJ_TICK)
- tick = txc.tick;
-
+ } /* txc.modes & ADJ_OFFSET */
+ if (txc.modes & ADJ_TICK) {
+ /* if the quartz is off by more than 10% something is
+ VERY wrong ! */
+ if (txc.tick < 900000/HZ || txc.tick > 1100000/HZ) {
+ error = -EINVAL;
+ goto leave;
+ }
+ tick = txc.tick;
+ }
+ } /* txc.modes */
+leave: if ((time_status & (STA_UNSYNC|STA_CLOCKERR)) != 0
+ || ((time_status & (STA_PPSFREQ|STA_PPSTIME)) != 0
+ && (time_status & STA_PPSSIGNAL) == 0)
+ /* p. 24, (b) */
+ || ((time_status & (STA_PPSTIME|STA_PPSJITTER))
+ == (STA_PPSTIME|STA_PPSJITTER))
+ /* p. 24, (c) */
+ || ((time_status & STA_PPSFREQ) != 0
+ && (time_status & (STA_PPSWANDER|STA_PPSERROR)) != 0))
+ /* p. 24, (d) */
+ time_state = TIME_ERROR;
+
+ if ((txc.modes & ADJ_OFFSET_SINGLESHOT) == ADJ_OFFSET_SINGLESHOT)
+ txc.offset = save_adjust;
+ else {
+ if (time_offset < 0)
+ txc.offset = -(-time_offset >> SHIFT_UPDATE);
+ else
+ txc.offset = time_offset >> SHIFT_UPDATE;
}
- txc.offset = save_adjust;
- txc.freq = time_freq;
+ txc.freq = time_freq + pps_freq;
txc.maxerror = time_maxerror;
txc.esterror = time_esterror;
txc.status = time_status;
txc.constant = time_constant;
txc.precision = time_precision;
txc.tolerance = time_tolerance;
- txc.time = xtime;
+ do_gettimeofday(&txc.time);
txc.tick = tick;
txc.ppsfreq = pps_freq;
- txc.jitter = pps_jitter;
+ txc.jitter = pps_jitter >> PPS_AVG;
txc.shift = pps_shift;
txc.stabil = pps_stabil;
txc.jitcnt = pps_jitcnt;
sti();
memcpy_tofs(txc_p, &txc, sizeof(struct timex));
- return time_state;
+ return(error < 0 ? error : time_state);
}
}
}
-int shrink_mmap(int priority, int dma, int can_do_io)
+int shrink_mmap(int priority, int dma, int free_buf)
{
static int clock = 0;
struct page * page;
}
/* is it a buffer cache page? */
- if (can_do_io && bh && try_to_free_buffer(bh, &bh, 6))
+ if (free_buf && bh && try_to_free_buffer(bh, &bh, 6))
return 1;
break;
pos += nr;
read += nr;
count -= nr;
- if (count)
+ if (count) {
+ /*
+ * To prevent hogging the CPU on well-cached systems,
+ * schedule if needed; it is safe to do so here:
+ */
+ if (need_resched)
+ schedule();
continue;
+ }
break;
}
}
{
static unsigned long last = 0;
- if (priority != GFP_BUFFER && (last + 10 * HZ < jiffies)) {
+ if (priority != GFP_BUFFER && priority != GFP_IO &&
+ (last + 10 * HZ < jiffies)) {
last = jiffies;
printk("Couldn't get a free page.....\n");
}
long freepages;
freepages = buffermem >> PAGE_SHIFT;
freepages += page_cache_size;
- freepages >>= 1;
+ if (freepages <= (MAP_NR(high_memory) >> 4) + 48)
+ freepages >>= 1;
freepages += nr_free_pages;
freepages += nr_swap_pages;
freepages -= MAP_NR(high_memory) >> 4;
reserved_pages = 5;
if (priority != GFP_NFS)
reserved_pages = min_free_pages;
+ if ((priority == GFP_BUFFER || priority == GFP_IO) && reserved_pages >= 48)
+ reserved_pages -= (12 + (reserved_pages>>3));
save_flags(flags);
repeat:
cli();
return 0;
}
restore_flags(flags);
- if (try_to_free_page(priority, dma, 1))
+ if (priority != GFP_BUFFER && try_to_free_page(priority, dma, 1))
goto repeat;
return 0;
}
/*
* select nr of pages we try to keep free for important stuff
- * with a minimum of 24 pages. This is totally arbitrary
+ * with a minimum of 48 pages. This is totally arbitrary
*/
i = (end_mem - PAGE_OFFSET) >> (PAGE_SHIFT+7);
if (i < 24)
i = 24;
+ i += 24; /* The limit for buffer pages in __get_free_pages is
+ * decreased by 12+(i>>3) */
min_free_pages = i;
free_pages_low = i + (i>>1);
free_pages_high = i + i;
can_do_io = 1;
if (wait)
stop = 0;
- if (priority == GFP_BUFFER)
+ if (priority == GFP_IO)
can_do_io = 0;
switch (state) {
do {
*/
int kswapd(void *unused)
{
- int i;
+ int i, j;
current->session = 1;
current->pgrp = 1;
swapstats.wakeups++;
/* Protect our reserved pages: */
i = 0;
- if (nr_free_pages <= min_free_pages)
- i = (1+min_free_pages) - nr_free_pages;
+ j = (min_free_pages >= 48 ? min_free_pages-12 : min_free_pages);
+ if (nr_free_pages <= j)
+ i = (1+j) - nr_free_pages;
/* Do the background pageout: */
for (i += kswapd_ctl.maxpages; i > 0; i--)
- try_to_free_page(GFP_KERNEL, 0,
- (nr_free_pages <= min_free_pages));
+ try_to_free_page(GFP_KERNEL, 0, (nr_free_pages <= j));
}
}
{
/* The buffer get won't block, or use the atomic queue. It does
produce annoying no free page messages still.... */
- skb = sock_wmalloc(sk, size, 0 , GFP_BUFFER);
+ skb = sock_wmalloc(sk, size, 0 , GFP_IO);
if(!skb)
skb=sock_wmalloc(sk, fallback, 0, GFP_KERNEL);
}
dev->pa_addr, skb->redirport, dev);
else
#endif
- sk = NULL;
+ sk = __tcp_v4_lookup(th, saddr, th->source, daddr, th->dest, dev);
/* this is not really correct: we should check sk->users */
if (sk && sk->state==TCP_LISTEN)
{