VERSION = 2
PATCHLEVEL = 1
-SUBLEVEL = 4
+SUBLEVEL = 5
ARCH = i386
lda $30,0x10($30)
ret ($26)
.end wrfpcr
-
-#define ex_count 0($16)
-#define ex_r9 8($16)
-#define ex_r10 16($16)
-#define ex_r11 24($16)
-#define ex_r12 32($16)
-#define ex_r13 40($16)
-#define ex_r14 48($16)
-#define ex_r15 56($16)
-#define ex_r26 64($16)
-#define ex_r30 72($16)
-
- .align 3
- .globl __exception
- .ent __exception
-__exception:
- ldq $1,ex_count
- bis $31,$31,$0 /* return 0 */
- addq $1,1,$2
- bne $1,1f /* don't save state if orig_count != 0 */
- stq $9,ex_r9
- stq $10,ex_r10
- stq $11,ex_r11
- stq $12,ex_r12
- stq $13,ex_r13
- stq $14,ex_r14
- stq $15,ex_r15
- stq $26,ex_r26
- stq $30,ex_r30
-1: stq $2,ex_count
- ret ($26)
- .end __exception
-
- .align 3
- .globl __handle_exception
- .ent __handle_exception
-__handle_exception:
- ldq $9,ex_r9
- ldq $10,ex_r10
- ldq $11,ex_r11
- ldq $12,ex_r12
- ldq $13,ex_r13
- ldq $14,ex_r14
- ldq $15,ex_r15
- ldq $26,ex_r26
- ldq $30,ex_r30
- bis $31,1,$0 /* return 1 */
- ret ($26)
- .end __handle_exception
dma_outb(0, DMA1_CLR_MASK_REG);
dma_outb(0, DMA2_CLR_MASK_REG);
#if NR_IRQS == 48
- *(unsigned int *)GRU_INT_MASK = ~(irq_mask >> 16); mb();/* invert */
+ *(unsigned int *)GRU_INT_MASK = ~(irq_mask >> 16); mb();/* invert */
+ *(unsigned int *)GRU_INT_EDGE = 0UL; mb();/* all are level */
+ *(unsigned int *)GRU_INT_HILO = 0x80000000UL; mb();/* ISA only HI */
+ *(unsigned int *)GRU_INT_CLEAR = 0UL; mb();/* all clear */
enable_irq(16 + 31); /* enable (E)ISA PIC cascade */
#elif NR_IRQS == 33
outl(irq_mask >> 16, 0x804);
/* $16-$18 are PAL-saved, and are offset by 19 entries */
if (reg >= 16 && reg <= 18)
reg += 19;
- switch (opcode) {
- case 0x28: /* ldl */
- *(reg+regs.regs) = get_unaligned((int *)va);
- return;
- case 0x29: /* ldq */
- *(reg+regs.regs) = get_unaligned((long *)va);
- return;
- case 0x2c: /* stl */
- put_unaligned(*(reg+regs.regs), (int *)va);
- return;
- case 0x2d: /* stq */
- put_unaligned(*(reg+regs.regs), (long *)va);
- return;
+
+ {
+ /* Set up an exception handler address just in case we are
+ handling an unaligned fixup within get_user(). Notice
+ that we do *not* change the exception count because we
+ only want to bounce possible exceptions on through. */
+
+ __label__ handle_ex;
+ register void *ex_vector __asm__("$28");
+ __asm__ __volatile__ ("" : "=r"(ex_vector) : "0"(&&handle_ex));
+
+ switch (opcode) {
+ case 0x28: /* ldl */
+ *(reg+regs.regs) = get_unaligned((int *)va);
+ return;
+ case 0x29: /* ldq */
+ *(reg+regs.regs) = get_unaligned((long *)va);
+ return;
+ case 0x2c: /* stl */
+ put_unaligned(*(reg+regs.regs), (int *)va);
+ return;
+ case 0x2d: /* stq */
+ put_unaligned(*(reg+regs.regs), (long *)va);
+ return;
+
+ /* We'll only get back here if we are handling a
+ valid exception. */
+ handle_ex:
+ (&regs)->pc = *(28+regs.regs);
+ return;
+ }
}
printk("Bad unaligned kernel access at %016lx: %p %lx %ld\n",
regs.pc, va, opcode, reg);
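
The `__label__`/`&&handle_ex` construct in the hunk above is plain GNU C: a local label whose address is parked in a pinned register so the fault path can resume there. A minimal stand-alone sketch of the idea (function and names hypothetical; a computed goto stands in for the real trap path):

	static int guarded_access(int simulate_fault)
	{
		__label__ fixup;		/* local label, visible to && */
		void *vector = &&fixup;		/* GNU C address-of-label */

		if (simulate_fault)
			goto *vector;		/* what the trap path would do */
		return 0;			/* the access succeeded */
	fixup:
		return 1;			/* reached only via the vector */
	}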
reg_addr += (reg - 9);
break;
- case 16: case 17: case 18:
+ case 16: case 17: case 18:
/* a0-a2 in PAL frame */
reg_addr += 7 + 20 + 3 + (reg - 16);
break;
- case 19: case 20: case 21: case 22: case 23:
+ case 19: case 20: case 21: case 22: case 23:
case 24: case 25: case 26: case 27: case 28:
/* a3-at in SAVE_ALL frame */
reg_addr += 7 + 9 + (reg - 19);
case 0x23: /* ldt */
alpha_write_fp_reg(reg, get_unaligned((unsigned long *)va));
- break;
+ break;
case 0x27: /* stt */
put_unaligned(alpha_read_fp_reg(reg), (unsigned long *)va);
break;
if (opcode >= 0x28 && reg == 30 && dir == VERIFY_WRITE) {
wrusp(usp);
- }
+ }
}
/*
OBJS = __divqu.o __remqu.o __divlu.o __remlu.o memset.o memcpy.o io.o \
checksum.o csum_partial_copy.o strlen.o \
- get_user.o put_user.o copy_user.o
+ strcat.o strcpy.o strncat.o strncpy.o stxcpy.o stxncpy.o \
+ strchr.o strrchr.o \
+ copy_user.o clear_user.o strncpy_from_user.o
lib.a: $(OBJS)
$(AR) rcs lib.a $(OBJS)
--- /dev/null
+/*
+ * arch/alpha/lib/clear_user.S
+ * Contributed by Richard Henderson <rth@tamu.edu>
+ *
+ * Zero user space, handling exceptions as we go.
+ *
+ * We have to make sure that $0 is always up-to-date and contains the
+ * right "bytes left to zero" value (and that it is updated only _after_
+ * a successful copy). There is also some rather minor exception setup
+ * stuff.
+ *
+ * NOTE! This is not directly C-callable, because the calling semantics
+ * are different:
+ *
+ * Inputs:
+ * length in $0
+ * destination address in $6
+ * exception pointer in $7
+ * return address in $28 (exceptions expect it there)
+ *
+ * Outputs:
+ * bytes left to copy in $0
+ *
+ * Clobbers:
+ * $1,$2,$3,$4,$5,$6
+ */
+
+ .set noat
+ .set noreorder
+ .align 4
+
+ .globl __clear_user
+ .ent __clear_user
+ .frame $30, 0, $28
+ .prologue 0
+
+$loop:
+ and $1, 3, $4 # e0 :
+ beq $4, 1f # .. e1 :
+
+0: stq_u $31, 0($6) # e0 : zero one word
+ subq $0, 8, $0 # .. e1 :
+ subq $4, 1, $4 # e0 :
+ addq $6, 8, $6 # .. e1 :
+ bne $4, 0b # e1 :
+ unop # :
+
+1: bic $1, 3, $1 # e0 :
+ beq $1, $tail # .. e1 :
+
+2: stq_u $31, 0($6) # e0 : zero four words
+ subq $0, 8, $0 # .. e1 :
+ stq_u $31, 8($6) # e0 :
+ subq $0, 8, $0 # .. e1 :
+ stq_u $31, 16($6) # e0 :
+ subq $0, 8, $0 # .. e1 :
+ stq_u $31, 24($6) # e0 :
+ subq $0, 8, $0 # .. e1 :
+ subq $1, 4, $1 # e0 :
+ addq $6, 32, $6 # .. e1 :
+ bne $1, 2b # e1 :
+
+$tail:
+ bne $2, 1f # e1 : is there a tail to do?
+
+ stq $3, 0($7) # e0 : decrement exception count
+ ret $31, ($28), 1 # .. e1 :
+
+1: ldq_u $5, 0($6) # e1 :
+ mskqh $5, $0, $5 # e0 :
+ stq_u $5, 0($6) # e0 :
+ clr $0 # .. e1 :
+ stq $3, 0($7) # e0 : decrement exception count
+ ret $31, ($28), 1 # .. e1 :
+
+__clear_user:
+ ldq $3, 0($7) # e0 : load exception count for increment
+ beq $0, $zerolength # .. e1 :
+ and $6, 7, $4 # e0 : find dest misalignment
+ addq $0, $4, $1 # e1 : bias counter
+ addq $3, 1, $5 # e0 :
+ and $1, 7, $2 # .. e1 : number of bytes in tail
+ srl $1, 3, $1 # e0 :
+ unop # :
+ stq $5, 0($7) # e0 : increment exception count
+ beq $4, $loop # .. e1 :
+
+ ldq_u $5, 0($6) # e0 : load dst word to mask back in
+ beq $1, $oneword # .. e1 : sub-word store?
+
+ mskql $5, $6, $5 # e0 : take care of misaligned head
+ addq $6, 8, $6 # .. e1 :
+ stq_u $5, -8($6) # e0 :
+ addq $0, $4, $0 # .. e1 : bytes left -= 8 - misalignment
+ subq $1, 1, $1 # e0 :
+ subq $0, 8, $0 # .. e1 :
+ br $loop # e1 :
+ unop # :
+
+$oneword:
+ mskql $5, $6, $4 # e0 :
+ mskqh $5, $2, $5 # e0 :
+ or $5, $4, $5 # e1 :
+ stq_u $5, 0($6) # e0 :
+ clr $0 # .. e1 :
+ stq $3, 0($7) # e0 : decrement exception count
+
+$zerolength:
+ ret $31, ($28), 1 # .. e1 :
+
+ .end __clear_user
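
Since __clear_user is not C-callable, a wrapper macro in the style of the __copy_tofrom_user macro visible near the end of this patch would be needed. A hedged sketch, assuming only the register contract documented in the header above (the macro name __do_clear_user is hypothetical):

	extern void __clear_user(void);
	#define __do_clear_user(to,n) ({ \
	register void * __cl_to __asm__("$6") = (to); \
	register long __cl_len __asm__("$0") = (n); \
	register void * __cl_ex __asm__("$7") = &current->tss.ex; \
	__asm__ __volatile__("jsr $28,(%3),__clear_user" \
		:"=r" (__cl_len), "=r" (__cl_to) \
		:"0" (__cl_len), "r" (__clear_user), "1" (__cl_to), "r" (__cl_ex) \
		:"$1","$2","$3","$4","$5","$28","memory"); \
	__cl_len; })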
+++ /dev/null
-/*
- * arch/alpha/lib/get_user.S
- *
- * (C) Copyright 1996 Linus Torvalds
- */
-
-/*
- * This does simple reads from user mode, returning zero for
- * success and -EINVAL for a fault. Note that we may NOT do
- * unaligned accesses, because the unaligned fault handler
- * must not take the exception..
- *
- * NOTE! These are NOT normal function calls callable from C.
- * As we have two return values (the actual value gotten from
- * user space, and the error return value) the calling sequence
- * is different.
- *
- * Input:
- * user address in $2
- * exception structure address in $3
- * return address in $28 (exceptions expect it there)
- *
- * Output:
- * error number in $0
- * actual result in $1
- *
- * Clobbers:
- * $4,$5
- */
- .set noat
- .align 3
- .globl __get_user_8
- .ent __get_user_8
-__get_user_8:
- ldq $4,0($3)
- lda $0,-14
- addq $4,1,$5
- stq $5,0($3)
- ldq_u $1,0($2)
- stq $4,0($3)
- bis $31,$31,$0
- extbl $1,$2,$1
- ret $31,($28),1
- .end __get_user_8
-
- .align 3
- .globl __get_user_16
- .ent __get_user_16
-__get_user_16:
- ldq $4,0($3)
- lda $0,-14
- addq $4,1,$5
- stq $5,0($3)
- ldq_u $1,0($2)
- ldq_u $5,1($2)
- stq $4,0($3)
- extwl $1,$2,$1
- bis $31,$31,$0
- extwh $5,$2,$5
- bis $1,$5,$1
- ret $31,($28),1
- .end __get_user_16
-
- .align 3
- .globl __get_user_32
- .ent __get_user_32
-__get_user_32:
- ldq $4,0($3)
- lda $0,-14
- addq $4,1,$5
- stq $5,0($3)
- ldq_u $1,0($2)
- ldq_u $5,3($2)
- stq $4,0($3)
- extll $1,$2,$1
- bis $31,$31,$0
- extlh $5,$2,$5
- bis $1,$5,$1
- ret $31,($28),1
- .end __get_user_32
-
- .align 3
- .globl __get_user_64
- .ent __get_user_64
-__get_user_64:
- ldq $4,0($3)
- lda $0,-14
- addq $4,1,$5
- stq $5,0($3)
- ldq_u $1,0($2)
- ldq_u $5,7($2)
- stq $4,0($3)
- extql $1,$2,$1
- bis $31,$31,$0
- extqh $5,$2,$5
- bis $1,$5,$1
- ret $31,($28),1
- .end __get_user_64
+++ /dev/null
-/*
- * arch/alpha/lib/put_user.S
- *
- * (C) Copyright 1996 Linus Torvalds
- */
-
-/*
- * This does simple writes to user mode, returning zero for
- * success and -EINVAL for a fault. Note that we may NOT do
- * unaligned accesses, because the unaligned fault handler
- * must not take the exception..
- *
- * NOTE! These are NOT normal function calls callable from C.
- * As we have two return values (the actual value gotten from
- * user space, and the error return value) the calling sequence
- * is different.
- *
- * Input:
- * value to be written in $6
- * user address in $7
- * exception pointer in $8
- * return address in $28 (exceptions expect it there)
- * Output:
- * return value in $0
- * Clobbers:
- * $1,$2,$3,$4,$5,$6
- */
- .set noat
- .align 3
- .globl __put_user_8
- .ent __put_user_8
-__put_user_8:
- ldq $2,0($8)
- lda $0,-14
- addq $2,1,$1
- stq $1,0($8)
- ldq_u $1,0($7)
- insbl $6,$7,$6
- mskbl $1,$7,$1
- bis $6,$1,$6
- stq_u $6,0($7)
- stq $2,0($8)
- bis $31,$31,$0
- ret $31,($28),1
- .end __put_user_8
-
- .align 3
- .globl __put_user_16
- .ent __put_user_16
-__put_user_16:
- ldq $2,0($8)
- lda $0,-14
- addq $2,1,$1
- stq $1,0($8)
- ldq_u $4,1($7)
- ldq_u $5,0($7)
- inswh $6,$7,$1
- inswl $6,$7,$3
- mskwh $4,$7,$4
- mskwl $5,$7,$5
- bis $4,$1,$4
- bis $5,$3,$5
- stq_u $4,1($7)
- stq_u $5,0($7)
- stq $2,0($8)
- bis $31,$31,$0
- ret $31,($28),1
- .end __put_user_16
-
- .align 3
- .globl __put_user_32
- .ent __put_user_32
-__put_user_32:
- ldq $5,0($8)
- lda $0,-14
- and $7,3,$2
- addq $5,1,$1
- stq $1,0($8)
- bne $2,__una32
- stl $6,0($7)
- stq $5,0($8)
- bis $31,$31,$0
- ret $31,($28),1
- .align 4
-__una32:
- ldq_u $3,3($7)
- ldq_u $4,0($7)
- insll $6,$7,$2
- inslh $6,$7,$1
- msklh $3,$7,$3
- mskll $4,$7,$4
- bis $3,$1,$3
- bis $4,$2,$4
- stq_u $3,3($7)
- stq_u $4,0($7)
- stq $5,0($8)
- bis $31,$31,$0
- ret $31,($28),1
- .end __put_user_32
-
- .align 3
- .globl __put_user_64
- .ent __put_user_64
-__put_user_64:
- ldq $5,0($8)
- lda $0,-14
- and $7,7,$2
- addq $5,1,$1
- stq $1,0($8)
- bne $2,__una64
- stq $6,0($7)
- stq $5,0($8)
- bis $31,$31,$0
- ret $31,($28),1
- .align 4
-__una64:
- ldq_u $4,0($7)
- ldq_u $3,8($7)
- insql $6,$7,$2
- insqh $6,$7,$1
- mskql $4,$7,$4
- mskqh $3,$7,$3
- bis $4,$2,$4
- bis $3,$1,$3
- stq_u $4,0($7)
- stq_u $3,8($7)
- stq $5,0($8)
- bis $31,$31,$0
- ret $31,($28),1
- .end __put_user_64
--- /dev/null
+/*
+ * arch/alpha/lib/strcat.S
+ * Contributed by Richard Henderson (rth@tamu.edu)
+ *
+ * Append a null-terminated string from SRC to DST.
+ */
+
+ .text
+
+ .align 3
+ .globl strcat
+ .ent strcat
+strcat:
+ .frame $30, 0, $26
+ .prologue 0
+
+ mov $16, $0 # set up return value
+
+ /* Find the end of the string. */
+
+ ldq_u $1, 0($16) # load first quadword (a0 may be misaligned)
+ lda $2, -1
+ insqh $2, $16, $2
+ andnot $16, 7, $16
+ or $2, $1, $1
+ cmpbge $31, $1, $2 # bits set iff byte == 0
+ bne $2, $found
+
+$loop: ldq $1, 8($16)
+ addq $16, 8, $16
+ cmpbge $31, $1, $2
+ beq $2, $loop
+
+$found: negq $2, $3 # clear all but least set bit
+ and $2, $3, $2
+
+ and $2, 0xf0, $3 # binary search for that set bit
+ and $2, 0xcc, $4
+ and $2, 0xaa, $5
+ cmovne $3, 4, $3
+ cmovne $4, 2, $4
+ cmovne $5, 1, $5
+ addq $3, $4, $3
+ addq $16, $5, $16
+ addq $16, $3, $16
+
+ /* Now do the append. */
+
+ mov $26, $23
+ br __stxcpy
+
+ .end strcat
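
The cmovne sequence at $found is a branch-free binary search: given the cmpbge result, it isolates the least set bit and recovers its byte index. Restated in C for orientation (helper name hypothetical):

	/* return the index of the least significant set bit of an 8-bit mask */
	static inline int first_bit_index(unsigned long mask)
	{
		int r = 0;
		mask &= -mask;			/* negq/and: keep least set bit */
		if (mask & 0xf0) r += 4;	/* the three cmovne pairs, as C */
		if (mask & 0xcc) r += 2;
		if (mask & 0xaa) r += 1;
		return r;
	}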
--- /dev/null
+/*
+ * arch/alpha/lib/strchr.S
+ * Contributed by Richard Henderson (rth@tamu.edu)
+ *
+ * Return the address of a given character within a null-terminated
+ * string, or null if it is not found.
+ */
+
+#include <alpha/regdef.h>
+
+ .set noreorder
+ .set noat
+
+ .align 3
+ .globl strchr
+ .ent strchr
+strchr:
+ .frame sp, 0, ra
+ .prologue 0
+
+ zapnot a1, 1, a1 # e0 : zero extend the search character
+ ldq_u t0, 0(a0) # .. e1 : load first quadword
+ sll a1, 8, t5 # e0 : replicate the search character
+ andnot a0, 7, v0 # .. e1 : align our loop pointer
+ or t5, a1, a1 # e0 :
+ lda t4, -1 # .. e1 : build garbage mask
+ sll a1, 16, t5 # e0 :
+ cmpbge zero, t0, t2 # .. e1 : bits set iff byte == zero
+ mskqh t4, a0, t4 # e0 :
+ or t5, a1, a1 # .. e1 :
+ sll a1, 32, t5 # e0 :
+ cmpbge zero, t4, t4 # .. e1 : bits set iff byte is garbage
+ or t5, a1, a1 # e0 :
+ xor t0, a1, t1 # .. e1 : make bytes == c zero
+ cmpbge zero, t1, t3 # e0 : bits set iff byte == c
+ or t2, t3, t0 # e1 : bits set iff char match or zero match
+ andnot t0, t4, t0 # e0 : clear garbage bits
+ bne t0, $found # .. e1 (zdb)
+
+$loop: ldq t0, 8(v0) # e0 :
+ addq v0, 8, v0 # .. e1 :
+ nop # e0 :
+ xor t0, a1, t1 # .. e1 (ev5 data stall)
+ cmpbge zero, t0, t2 # e0 : bits set iff byte == 0
+ cmpbge zero, t1, t3 # .. e1 : bits set iff byte == c
+ or t2, t3, t0 # e0 :
+ beq t0, $loop # .. e1 (zdb)
+
+$found: negq t0, t1 # e0 : clear all but least set bit
+ and t0, t1, t0 # e1 (stall)
+
+ and t0, t3, t1 # e0 : bit set iff byte was the char
+ beq t1, $retnull # .. e1 (zdb)
+
+ and t0, 0xf0, t2 # e0 : binary search for that set bit
+ and t0, 0xcc, t3 # .. e1 :
+ and t0, 0xaa, t4 # e0 :
+ cmovne t2, 4, t2 # .. e1 :
+ cmovne t3, 2, t3 # e0 :
+ cmovne t4, 1, t4 # .. e1 :
+ addq t2, t3, t2 # e0 :
+ addq v0, t4, v0 # .. e1 :
+ addq v0, t2, v0 # e0 :
+ ret # .. e1 :
+
+$retnull:
+ mov zero, v0 # e0 :
+ ret # .. e1 :
+
+ .end strchr
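
cmpbge zero, x is the workhorse here: it yields one bit per byte, set iff that byte of x is zero. A portable C approximation of the same test (the usual bit-trick; bytes beyond the first zero byte may be flagged spuriously, which is harmless when, as at $found above, only the least significant set bit is consumed):

	#include <stdint.h>

	static inline uint64_t has_zero_byte(uint64_t x)
	{
		return (x - 0x0101010101010101ULL) & ~x & 0x8080808080808080ULL;
	}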
--- /dev/null
+/*
+ * arch/alpha/lib/strcpy.S
+ * Contributed by Richard Henderson (rth@tamu.edu)
+ *
+ * Copy a null-terminated string from SRC to DST. Return a pointer
+ * to the null-terminator in the source.
+ */
+
+ .text
+
+ .align 3
+ .globl strcpy
+ .ent strcpy
+strcpy:
+ .frame $30, 0, $26
+ .prologue 0
+
+ mov $16, $0 # set up return value
+ mov $26, $23 # set up return address
+ br __stxcpy # do the copy
+
+ .end strcpy
--- /dev/null
+/*
+ * arch/alpha/lib/strncat.S
+ * Contributed by Richard Henderson (rth@tamu.edu)
+ *
+ * Append no more than COUNT characters from the null-terminated string SRC
+ * to the null-terminated string DST. Always null-terminate the new DST.
+ *
+ * This differs slightly from the semantics in libc in that we never write
+ * past count, whereas libc may write to count+1. This follows the generic
+ * implementation in lib/string.c and is, IMHO, more sensible.
+ */
+
+ .text
+
+ .align 3
+ .globl strncat
+ .ent strncat
+strncat:
+ .frame $30, 0, $26
+ .prologue 0
+
+ mov $16, $0 # set up return value
+ beq $18, $zerocount
+
+ /* Find the end of the string. */
+
+ ldq_u $1, 0($16) # load first quadword ($16 may be misaligned)
+ lda $2, -1($31)
+ insqh $2, $16, $2
+ andnot $16, 7, $16
+ or $2, $1, $1
+ cmpbge $31, $1, $2 # bits set iff byte == 0
+ bne $2, $found
+
+$loop: ldq $1, 8($16)
+ addq $16, 8, $16
+ cmpbge $31, $1, $2
+ beq $2, $loop
+
+$found: negq $2, $3 # clear all but least set bit
+ and $2, $3, $2
+
+ and $2, 0xf0, $3 # binary search for that set bit
+ and $2, 0xcc, $4
+ and $2, 0xaa, $5
+ cmovne $3, 4, $3
+ cmovne $4, 2, $4
+ cmovne $5, 1, $5
+ addq $3, $4, $3
+ addq $16, $5, $16
+ addq $16, $3, $16
+
+ /* Now do the append. */
+
+ bsr $23, __stxncpy
+
+ /* Worry about the null termination. */
+
+ zapnot $1, $22, $2 # was last byte a null?
+ bne $2, 0f
+ ret
+
+0: cmplt $22, $24, $2 # did we fill the buffer completely?
+ or $2, $18, $2
+ bne $2, 2f
+
+ and $24, 0x80, $2 # no zero next byte
+ bne $2, 1f
+
+ /* Here there are bytes left in the current word. Clear one. */
+ addq $24, $24, $24 # end-of-count bit <<= 1
+2: zap $1, $24, $1
+ stq_u $1, 0($16)
+ ret
+
+1: /* Here we must read the next DST word and clear the first byte. */
+ ldq_u $1, 8($16)
+ zap $1, 1, $1
+ stq_u $1, 8($16)
+
+$zerocount:
+ ret
+
+ .end strncat
--- /dev/null
+/*
+ * arch/alpha/lib/strncpy.S
+ * Contributed by Richard Henderson (rth@tamu.edu)
+ *
+ * Copy no more than COUNT bytes of the null-terminated string from
+ * SRC to DST. If SRC does not cover all of COUNT, the balance is
+ * zeroed.
+ *
+ * Or, rather, if the kernel cared about that weird ANSI quirk. This
+ * version has cropped that bit o' nastiness as well as assuming that
+ * __stxncpy is in range of a branch.
+ */
+
+ .set noat
+ .set noreorder
+
+ .text
+
+ .align 3
+ .globl strncpy
+ .ent strncpy
+strncpy:
+ .frame $30, 0, $26
+ .prologue 0
+
+ mov $16, $0 # set return value now
+ beq $18, 0f
+ mov $26, $23 # set return address
+ br __stxncpy # do the work of the copy
+0: ret
+
+ .end strncpy
--- /dev/null
+/*
+ * arch/alpha/lib/strncpy_from_user.S
+ * Contributed by Richard Henderson (rth@tamu.edu)
+ *
+ * Just like strncpy except in the return value:
+ *
+ * -EFAULT if an exception occurs before the terminator is copied.
+ * N if the buffer filled.
+ *
+ * Otherwise the length of the string is returned.
+ *
+ * Additionally, the fourth argument should be `&current->tss.ex'.
+ */
+
+#include <asm/errno.h>
+
+ .set noat
+ .set noreorder
+
+ .text
+
+ .align 3
+ .globl __strncpy_from_user
+ .ent __strncpy_from_user
+
+__strncpy_from_user:
+ .frame $30, 0, $26
+ .prologue 0
+
+ ldq $20, 0($19)
+ beq $18, 9f
+ br $28, 1f # set up exception return address
+
+ lda $0, -EFAULT
+ ret
+
+1: addq $20, 1, $21
+ mov $16, $0 # save the string start
+ stq $21, 0($19) # increment exception count
+ bsr $23, __stxncpy # do the work of the copy
+
+ zapnot $1, $22, $5 # was last byte written null?
+ stq $20, 0($19) # decrement exception count
+ cmovne $5, 1, $5
+
+ and $22, 0xf0, $4 # binary search for the address of the
+ and $22, 0xcc, $3 # last byte written
+ and $22, 0xaa, $2
+ bic $16, 7, $1
+ cmovne $4, 4, $4
+ cmovne $3, 2, $3
+ cmovne $2, 1, $2
+ addq $1, $4, $1
+ addq $2, $3, $2
+ addq $1, $2, $1
+ addq $1, $5, $1 # add one if we filled the buffer
+
+ subq $1, $0, $0 # find string length
+ ret
+
+9: clr $0
+ ret
+
+ .end __strncpy_from_user
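
Assuming a C-callable strncpy_from_user wrapper around the routine above (the wrapper itself is not part of this hunk), the return contract plays out like this hedged caller fragment (u_name is a hypothetical user pointer, inside some syscall-style function):

	char name[32];
	long n = strncpy_from_user(name, u_name, sizeof(name));
	if (n < 0)
		return n;		/* -EFAULT: fault hit first */
	if (n == sizeof(name))
		return -ENAMETOOLONG;	/* buffer filled; maybe unterminated */
	/* else n == strlen(name) and the terminator was copied */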
--- /dev/null
+/*
+ * arch/alpha/lib/strrchr.S
+ * Contributed by Richard Henderson (rth@tamu.edu)
+ *
+ * Return the address of the last occurrence of a given character
+ * within a null-terminated string, or null if it is not found.
+ */
+
+#include <alpha/regdef.h>
+
+ .set noreorder
+ .set noat
+
+ .align 3
+ .ent strrchr
+ .globl strrchr
+strrchr:
+ .frame sp, 0, ra
+ .prologue 0
+
+ zapnot a1, 1, a1 # e0 : zero extend our test character
+ mov zero, t6 # .. e1 : t6 is last match aligned addr
+ sll a1, 8, t5 # e0 : replicate our test character
+ mov zero, t7 # .. e1 : t7 is last match byte compare mask
+ or t5, a1, a1 # e0 :
+ ldq_u t0, 0(a0) # .. e1 : load first quadword
+ sll a1, 16, t5 # e0 :
+ andnot a0, 7, v0 # .. e1 : align source addr
+ or t5, a1, a1 # e0 :
+ lda t4, -1 # .. e1 : build garbage mask
+ sll a1, 32, t5 # e0 :
+ cmpbge zero, t0, t1 # .. e1 : bits set iff byte == zero
+ mskqh t4, a0, t4 # e0 :
+ or t5, a1, a1 # .. e1 : character replication complete
+ xor t0, a1, t2 # e0 : make bytes == c zero
+ cmpbge zero, t4, t4 # .. e1 : bits set iff byte is garbage
+ cmpbge zero, t2, t3 # e0 : bits set iff byte == c
+ andnot t1, t4, t1 # .. e1 : clear garbage from null test
+ andnot t3, t4, t3 # e0 : clear garbage from char test
+ bne t1, $eos # .. e1 : did we already hit the terminator?
+
+ /* Character search main loop */
+$loop:
+ ldq t0, 8(v0) # e0 : load next quadword
+ cmovne t3, v0, t6 # .. e1 : save previous comparisons match
+ cmovne t3, t3, t7 # e0 :
+ addq v0, 8, v0 # .. e1 :
+ xor t0, a1, t2 # e0 :
+ cmpbge zero, t0, t1 # .. e1 : bits set iff byte == zero
+ cmpbge zero, t2, t3 # e0 : bits set iff byte == c
+ beq t1, $loop # .. e1 : if we haven't seen a null, loop
+
+ /* Mask out character matches after terminator */
+$eos:
+ negq t1, t4 # e0 : isolate first null byte match
+ and t1, t4, t4 # e1 :
+ subq t4, 1, t5 # e0 : build a mask of the bytes up to...
+ or t4, t5, t4 # e1 : ... and including the null
+
+ and t3, t4, t3 # e0 : mask out char matches after null
+ cmovne t3, t3, t7 # .. e1 : save it, if match found
+ cmovne t3, v0, t6 # e0 :
+
+ /* Locate the address of the last matched character */
+
+ /* Retain the early exit for the ev4 -- the ev5 mispredict penalty
+ is 5 cycles -- the same as just falling through. */
+ beq t7, $retnull # .. e1 :
+
+ and t7, 0xf0, t2 # e0 : binary search for the high bit set
+ cmovne t2, t2, t7 # .. e1 (zdb)
+ cmovne t2, 4, t2 # e0 :
+ and t7, 0xcc, t1 # .. e1 :
+ cmovne t1, t1, t7 # e0 :
+ cmovne t1, 2, t1 # .. e1 :
+ and t7, 0xaa, t0 # e0 :
+ cmovne t0, 1, t0 # .. e1 (zdb)
+ addq t2, t1, t1 # e0 :
+ addq t6, t0, v0 # .. e1 : add our aligned base ptr to the mix
+ addq v0, t1, v0 # e0 :
+ ret # .. e1 :
+
+$retnull:
+ mov zero, v0 # e0 :
+ ret # .. e1 :
+
+ .end strrchr
--- /dev/null
+/* stxcpy.S
+ * Contributed by Richard Henderson (rth@tamu.edu)
+ *
+ * Copy a null-terminated string from SRC to DST.
+ *
+ * This is an internal routine used by strcpy, stpcpy, and strcat.
+ * As such, it uses special linkage conventions to make implementation
+ * of these public functions more efficient.
+ *
+ * On input:
+ * t9 = return address
+ * a0 = DST
+ * a1 = SRC
+ *
+ * On output:
+ * t8 = bitmask (with one bit set) indicating the last byte written
+ * a0 = unaligned address of the last *word* written
+ *
+ * Furthermore, v0, a3-a5, t11, and t12 are untouched.
+ */
+
+#include <alpha/regdef.h>
+
+ .set noat
+ .set noreorder
+
+ .text
+
+/* There is a problem with either gdb (as of 4.16) or gas (as of 2.7) that
+ doesn't like putting the entry point for a procedure somewhere in the
+ middle of the procedure descriptor. Work around this by putting the
+ aligned copy in its own procedure descriptor */
+
+ .ent stxcpy_aligned
+ .align 3
+stxcpy_aligned:
+ .frame sp, 0, t9
+ .prologue 0
+
+ /* On entry to this basic block:
+ t0 == the first destination word for masking back in
+ t1 == the first source word. */
+
+ /* Create the 1st output word and detect 0's in the 1st input word. */
+ lda t2, -1 # e1 : build a mask against false zero
+ mskqh t2, a1, t2 # e0 : detection in the src word
+ mskqh t1, a1, t3 # e0 :
+ ornot t1, t2, t2 # .. e1 :
+ mskql t0, a1, t0 # e0 : assemble the first output word
+ cmpbge zero, t2, t7 # .. e1 : bits set iff null found
+ or t0, t3, t1 # e0 :
+ bne t7, $a_eos # .. e1 :
+
+ /* On entry to this basic block:
+ t0 == the first destination word for masking back in
+ t1 == a source word not containing a null. */
+
+$a_loop:
+ stq_u t1, 0(a0) # e0 :
+ addq a0, 8, a0 # .. e1 :
+ ldq_u t1, 0(a1) # e0 :
+ addq a1, 8, a1 # .. e1 :
+ cmpbge zero, t1, t7 # e0 (stall)
+ beq t7, $a_loop # .. e1 (zdb)
+
+ /* Take care of the final (partial) word store.
+ On entry to this basic block we have:
+ t1 == the source word containing the null
+ t7 == the cmpbge mask that found it. */
+$a_eos:
+ negq t7, t6 # e0 : find low bit set
+ and t7, t6, t8 # e1 (stall)
+
+ /* For the sake of the cache, don't read a destination word
+ if we're not going to need it. */
+ and t8, 0x80, t6 # e0 :
+ bne t6, 1f # .. e1 (zdb)
+
+ /* We're doing a partial word store and so need to combine
+ our source and original destination words. */
+ ldq_u t0, 0(a0) # e0 :
+ subq t8, 1, t6 # .. e1 :
+ zapnot t1, t6, t1 # e0 : clear src bytes >= null
+ or t8, t6, t7 # .. e1 :
+ zap t0, t7, t0 # e0 : clear dst bytes <= null
+ or t0, t1, t1 # e1 :
+
+1: stq_u t1, 0(a0) # e0 :
+ ret (t9) # .. e1 :
+
+ .end stxcpy_aligned
+
+ .align 3
+ .ent __stxcpy
+ .globl __stxcpy
+__stxcpy:
+ .frame sp, 0, t9
+ .prologue 0
+
+ /* Are source and destination co-aligned? */
+ xor a0, a1, t0 # e0 :
+ unop # :
+ and t0, 7, t0 # e0 :
+ bne t0, $unaligned # .. e1 :
+
+ /* We are co-aligned; take care of a partial first word. */
+ ldq_u t1, 0(a1) # e0 : load first src word
+ and a0, 7, t0 # .. e1 : take care not to load a word ...
+ addq a1, 8, a1 # e0 :
+ beq t0, stxcpy_aligned # .. e1 : ... if we won't need it
+ ldq_u t0, 0(a0) # e0 :
+ br stxcpy_aligned # .. e1 :
+
+
+/* The source and destination are not co-aligned. Align the destination
+ and cope. We have to be very careful about not reading too much and
+ causing a SEGV. */
+
+ .align 3
+$u_head:
+ /* We know just enough now to be able to assemble the first
+ full source word. We can still find a zero at the end of it
+ that prevents us from outputting the whole thing.
+
+ On entry to this basic block:
+ t0 == the first dest word, for masking back in, if needed else 0
+ t1 == the low bits of the first source word
+ t6 == bytemask that is -1 in dest word bytes */
+
+ ldq_u t2, 8(a1) # e0 :
+ addq a1, 8, a1 # .. e1 :
+
+ extql t1, a1, t1 # e0 :
+ extqh t2, a1, t4 # e0 :
+ mskql t0, a0, t0 # e0 :
+ or t1, t4, t1 # .. e1 :
+ mskqh t1, a0, t1 # e0 :
+ or t0, t1, t1 # e1 :
+
+ or t1, t6, t6 # e0 :
+ cmpbge zero, t6, t7 # .. e1 :
+ lda t6, -1 # e0 : for masking just below
+ bne t7, $u_final # .. e1 :
+
+ mskql t6, a1, t6 # e0 : mask out the bits we have
+ or t6, t2, t2 # e1 : already extracted before
+ cmpbge zero, t2, t7 # e0 : testing eos
+ bne t7, $u_late_head_exit # .. e1 (zdb)
+
+ /* Finally, we've got all the stupid leading edge cases taken care
+ of and we can set up to enter the main loop. */
+
+ stq_u t1, 0(a0) # e0 : store first output word
+ addq a0, 8, a0 # .. e1 :
+ extql t2, a1, t0 # e0 : position hi-bits of lo word
+ ldq_u t2, 8(a1) # .. e1 : read next high-order source word
+ addq a1, 8, a1 # e0 :
+ cmpbge zero, t2, t7 # .. e1 :
+ nop # e0 :
+ bne t7, $u_eos # .. e1 :
+
+ /* Unaligned copy main loop. In order to avoid reading too much,
+ the loop is structured to detect zeros in aligned source words.
+ This has, unfortunately, effectively pulled half of a loop
+ iteration out into the head and half into the tail, but it does
+ prevent nastiness from accumulating in the very thing we want
+ to run as fast as possible.
+
+ On entry to this basic block:
+ t0 == the shifted high-order bits from the previous source word
+ t2 == the unshifted current source word
+
+ We further know that t2 does not contain a null terminator. */
+
+ .align 3
+$u_loop:
+ extqh t2, a1, t1 # e0 : extract high bits for current word
+ addq a1, 8, a1 # .. e1 :
+ extql t2, a1, t3 # e0 : extract low bits for next time
+ addq a0, 8, a0 # .. e1 :
+ or t0, t1, t1 # e0 : current dst word now complete
+ ldq_u t2, 0(a1) # .. e1 : load high word for next time
+ stq_u t1, -8(a0) # e0 : save the current word
+ mov t3, t0 # .. e1 :
+ cmpbge zero, t2, t7 # e0 : test new word for eos
+ beq t7, $u_loop # .. e1 :
+
+ /* We've found a zero somewhere in the source word we just read.
+ If it resides in the lower half, we have one (probably partial)
+ word to write out, and if it resides in the upper half, we
+ have one full and one partial word left to write out.
+
+ On entry to this basic block:
+ t0 == the shifted high-order bits from the previous source word
+ t2 == the unshifted current source word. */
+$u_eos:
+ extqh t2, a1, t1 # e0 :
+ or t0, t1, t1 # e1 : first (partial) source word complete
+
+ cmpbge zero, t1, t7 # e0 : is the null in this first bit?
+ bne t7, $u_final # .. e1 (zdb)
+
+$u_late_head_exit:
+ stq_u t1, 0(a0) # e0 : the null was in the high-order bits
+ addq a0, 8, a0 # .. e1 :
+ extql t2, a1, t1 # e0 :
+ cmpbge zero, t1, t7 # .. e1 :
+
+ /* Take care of a final (probably partial) result word.
+ On entry to this basic block:
+ t1 == assembled source word
+ t7 == cmpbge mask that found the null. */
+$u_final:
+ negq t7, t6 # e0 : isolate low bit set
+ and t6, t7, t8 # e1 :
+
+ and t8, 0x80, t6 # e0 : avoid dest word load if we can
+ bne t6, 1f # .. e1 (zdb)
+
+ ldq_u t0, 0(a0) # e0 :
+ subq t8, 1, t6 # .. e1 :
+ or t6, t8, t7 # e0 :
+ zapnot t1, t6, t1 # .. e1 : kill source bytes >= null
+ zap t0, t7, t0 # e0 : kill dest bytes <= null
+ or t0, t1, t1 # e1 :
+
+1: stq_u t1, 0(a0) # e0 :
+ ret (t9) # .. e1 :
+
+ /* Unaligned copy entry point. */
+ .align 3
+$unaligned:
+
+ ldq_u t1, 0(a1) # e0 : load first source word
+
+ and a0, 7, t4 # .. e1 : find dest misalignment
+ and a1, 7, t5 # e0 : find src misalignment
+
+ /* Conditionally load the first destination word and a bytemask
+ with 0xff indicating that the destination byte is sacrosanct. */
+
+ mov zero, t0 # .. e1 :
+ mov zero, t6 # e0 :
+ beq t4, 1f # .. e1 :
+ ldq_u t0, 0(a0) # e0 :
+ lda t6, -1 # .. e1 :
+ mskql t6, a0, t6 # e0 :
+1:
+ subq a1, t4, a1 # .. e1 : sub dest misalignment from src addr
+
+ /* If source misalignment is larger than dest misalignment, we need
+ extra startup checks to avoid SEGV. */
+
+ cmplt t4, t5, t8 # e0 :
+ beq t8, $u_head # .. e1 (zdb)
+
+ lda t2, -1 # e1 : mask out leading garbage in source
+ mskqh t2, t5, t2 # e0 :
+ nop # e0 :
+ ornot t1, t2, t3 # .. e1 :
+ cmpbge zero, t3, t7 # e0 : is there a zero?
+ beq t7, $u_head # .. e1 (zdb)
+
+ /* At this point we've found a zero in the first partial word of
+ the source. We need to isolate the valid source data and mask
+ it into the original destination data. (Incidentally, we know
+ that we'll need at least one byte of that original dest word.) */
+
+ ldq_u t0, 0(a0) # e0 :
+
+ negq t7, t6 # .. e1 : build bitmask of bytes <= zero
+ and t6, t7, t8 # e0 :
+ and a1, 7, t5 # .. e1 :
+ subq t8, 1, t6 # e0 :
+ or t6, t8, t7 # e1 :
+ srl t8, t5, t8 # e0 : adjust final null return value
+
+ zapnot t2, t7, t2 # .. e1 : prepare source word; mirror changes
+ and t1, t2, t1 # e1 : to source validity mask
+ extql t2, a1, t2 # .. e0 :
+ extql t1, a1, t1 # e0 :
+
+ andnot t0, t2, t0 # .. e1 : zero place for source to reside
+ or t0, t1, t1 # e1 : and put it there
+ stq_u t1, 0(a0) # .. e0 :
+ ret (t9) # e1 :
+
+ .end __stxcpy
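
The steady state of $u_loop, modeled in C for orientation (little-endian; head and tail partial-word handling elided; has_zero_byte is the sketch after strchr.S above; shift = 8 * (original src & 7), nonzero on this path):

	static void unaligned_copy_body(uint64_t *adst, const uint64_t *asrc,
					unsigned shift)
	{
		uint64_t low = *asrc++ >> shift;	/* extql: low part of output */
		uint64_t w;
		while (!has_zero_byte(w = *asrc++)) {	/* aligned loads, no over-read */
			*adst++ = low | (w << (64 - shift));	/* extqh merge */
			low = w >> shift;		/* spill for the next round */
		}
		/* the word containing the null is finished by the tail code */
	}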
--- /dev/null
+/* stxncpy.S
+ * Contributed by Richard Henderson (rth@tamu.edu)
+ *
+ * Copy no more than COUNT bytes of the null-terminated string from
+ * SRC to DST.
+ *
+ * This is an internal routine used by strncpy, stpncpy, and strncat.
+ * As such, it uses special linkage conventions to make implementation
+ * of these public functions more efficient.
+ *
+ * On input:
+ * t9 = return address
+ * a0 = DST
+ * a1 = SRC
+ * a2 = COUNT
+ *
+ * Furthermore, COUNT may not be zero.
+ *
+ * On output:
+ * t0 = last word written
+ * t8 = bitmask (with one bit set) indicating the last byte written
+ * t10 = bitmask (with one bit set) indicating the byte position of
+ * the end of the range specified by COUNT
+ * a0 = unaligned address of the last *word* written
+ * a2 = the number of full words left in COUNT
+ *
+ * Furthermore, v0, a3-a5, t11, t12, and $at are untouched.
+ */
+
+#include <alpha/regdef.h>
+
+ .set noat
+ .set noreorder
+
+ .text
+
+/* There is a problem with either gdb (as of 4.16) or gas (as of 2.7) that
+ doesn't like putting the entry point for a procedure somewhere in the
+ middle of the procedure descriptor. Work around this by putting the
+ aligned copy in its own procedure descriptor */
+
+ .ent stxncpy_aligned
+ .align 3
+stxncpy_aligned:
+ .frame sp, 0, t9, 0
+ .prologue 0
+
+ /* On entry to this basic block:
+ t0 == the first destination word for masking back in
+ t1 == the first source word. */
+
+ /* Create the 1st output word and detect 0's in the 1st input word. */
+ lda t2, -1 # e1 : build a mask against false zero
+ mskqh t2, a1, t2 # e0 : detection in the src word
+ mskqh t1, a1, t3 # e0 :
+ ornot t1, t2, t2 # .. e1 :
+ mskql t0, a1, t0 # e0 : assemble the first output word
+ cmpbge zero, t2, t7 # .. e1 : bits set iff null found
+ or t0, t3, t0 # e0 :
+ beq a2, $a_eoc # .. e1 :
+ bne t7, $a_eos # .. e1 :
+
+ /* On entry to this basic block:
+ t0 == a source word not containing a null. */
+
+$a_loop:
+ stq_u t0, 0(a0) # e0 :
+ addq a0, 8, a0 # .. e1 :
+ ldq_u t0, 0(a1) # e0 :
+ addq a1, 8, a1 # .. e1 :
+ subq a2, 1, a2 # e0 :
+ cmpbge zero, t0, t7 # .. e1 (stall)
+ beq a2, $a_eoc # e1 :
+ beq t7, $a_loop # e1 :
+
+ /* Take care of the final (partial) word store. At this point
+ the end-of-count bit is set in t7 iff it applies.
+
+ On entry to this basic block we have:
+ t0 == the source word containing the null
+ t7 == the cmpbge mask that found it. */
+
+$a_eos:
+ negq t7, t8 # e0 : find low bit set
+ and t7, t8, t8 # e1 (stall)
+
+ /* For the sake of the cache, don't read a destination word
+ if we're not going to need it. */
+ and t8, 0x80, t6 # e0 :
+ bne t6, 1f # .. e1 (zdb)
+
+ /* We're doing a partial word store and so need to combine
+ our source and original destination words. */
+ ldq_u t1, 0(a0) # e0 :
+ subq t8, 1, t6 # .. e1 :
+ or t8, t6, t7 # e0 :
+ unop #
+ zapnot t0, t7, t0 # e0 : clear src bytes > null
+ zap t1, t7, t1 # .. e1 : clear dst bytes <= null
+ or t0, t1, t0 # e1 :
+
+1: stq_u t0, 0(a0) # e0 :
+ ret (t9) # e1 :
+
+ /* Add the end-of-count bit to the eos detection bitmask. */
+$a_eoc:
+ or t10, t7, t7
+ br $a_eos
+
+ .end stxncpy_aligned
+
+ .align 3
+ .ent __stxncpy
+ .globl __stxncpy
+__stxncpy:
+ .frame sp, 0, t9, 0
+ .prologue 0
+
+ /* Are source and destination co-aligned? */
+ xor a0, a1, t1 # e0 :
+ and a0, 7, t0 # .. e1 : find dest misalignment
+ and t1, 7, t1 # e0 :
+ addq a2, t0, a2 # .. e1 : bias count by dest misalignment
+ subq a2, 1, a2 # e0 :
+ and a2, 7, t2 # e1 :
+ srl a2, 3, a2 # e0 : a2 = loop counter = (count - 1)/8
+ addq zero, 1, t10 # .. e1 :
+ sll t10, t2, t10 # e0 : t10 = bitmask of last count byte
+ bne t1, $unaligned # .. e1 :
+
+ /* We are co-aligned; take care of a partial first word. */
+
+ ldq_u t1, 0(a1) # e0 : load first src word
+ addq a1, 8, a1 # .. e1 :
+
+ beq t0, stxncpy_aligned # avoid loading dest word if not needed
+ ldq_u t0, 0(a0) # e0 :
+ br stxncpy_aligned # .. e1 :
+
+
+/* The source and destination are not co-aligned. Align the destination
+ and cope. We have to be very careful about not reading too much and
+ causing a SEGV. */
+
+ .align 3
+$u_head:
+ /* We know just enough now to be able to assemble the first
+ full source word. We can still find a zero at the end of it
+ that prevents us from outputting the whole thing.
+
+ On entry to this basic block:
+ t0 == the first dest word, unmasked
+ t1 == the shifted low bits of the first source word
+ t6 == bytemask that is -1 in dest word bytes */
+
+ ldq_u t2, 8(a1) # e0 : load second src word
+ addq a1, 8, a1 # .. e1 :
+ mskql t0, a0, t0 # e0 : mask trailing garbage in dst
+ extqh t2, a1, t4 # e0 :
+ or t1, t4, t1 # e1 : first aligned src word complete
+ mskqh t1, a0, t1 # e0 : mask leading garbage in src
+ or t0, t1, t0 # e0 : first output word complete
+ or t0, t6, t6 # e1 : mask original data for zero test
+ cmpbge zero, t6, t7 # e0 :
+ beq a2, $u_eocfin # .. e1 :
+ bne t7, $u_final # e1 :
+
+ lda t6, -1 # e1 : mask out the bits we have
+ mskql t6, a1, t6 # e0 : already seen
+ stq_u t0, 0(a0) # e0 : store first output word
+ or t6, t2, t2 # .. e1 :
+ cmpbge zero, t2, t7 # e0 : find nulls in second partial
+ addq a0, 8, a0 # .. e1 :
+ subq a2, 1, a2 # e0 :
+ bne t7, $u_late_head_exit # .. e1 :
+
+ /* Finally, we've got all the stupid leading edge cases taken care
+ of and we can set up to enter the main loop. */
+
+ extql t2, a1, t1 # e0 : position hi-bits of lo word
+ ldq_u t2, 8(a1) # .. e1 : read next high-order source word
+ addq a1, 8, a1 # e0 :
+ cmpbge zero, t2, t7 # e1 (stall)
+ beq a2, $u_eoc # e1 :
+ bne t7, $u_eos # e1 :
+
+ /* Unaligned copy main loop. In order to avoid reading too much,
+ the loop is structured to detect zeros in aligned source words.
+ This has, unfortunately, effectively pulled half of a loop
+ iteration out into the head and half into the tail, but it does
+ prevent nastiness from accumulating in the very thing we want
+ to run as fast as possible.
+
+ On entry to this basic block:
+ t1 == the shifted high-order bits from the previous source word
+ t2 == the unshifted current source word
+
+ We further know that t2 does not contain a null terminator. */
+
+ .align 3
+$u_loop:
+ extqh t2, a1, t0 # e0 : extract high bits for current word
+ addq a1, 8, a1 # .. e1 :
+ extql t2, a1, t3 # e0 : extract low bits for next time
+ addq a0, 8, a0 # .. e1 :
+ or t0, t1, t0 # e0 : current dst word now complete
+ ldq_u t2, 0(a1) # .. e1 : load high word for next time
+ stq_u t0, -8(a0) # e0 : save the current word
+ mov t3, t1 # .. e1 :
+ subq a2, 1, a2 # e0 :
+ cmpbge zero, t2, t7 # .. e1 : test new word for eos
+ beq a2, $u_eoc # e1 :
+ beq t7, $u_loop # e1 :
+
+ /* We've found a zero somewhere in the source word we just read.
+ If it resides in the lower half, we have one (probably partial)
+ word to write out, and if it resides in the upper half, we
+ have one full and one partial word left to write out.
+
+ On entry to this basic block:
+ t1 == the shifted high-order bits from the previous source word
+ t2 == the unshifted current source word. */
+$u_eos:
+ extqh t2, a1, t0 # e0 :
+ or t0, t1, t0 # e1 : first (partial) source word complete
+
+ cmpbge zero, t0, t7 # e0 : is the null in this first bit?
+ bne t7, $u_final # .. e1 (zdb)
+
+ stq_u t0, 0(a0) # e0 : the null was in the high-order bits
+ addq a0, 8, a0 # .. e1 :
+ subq a2, 1, a2 # e1 :
+
+$u_late_head_exit:
+ extql t2, a1, t0 # .. e0 :
+ cmpbge zero, t0, t7 # e0 :
+ or t7, t10, t6 # e1 :
+ cmoveq a2, t6, t7 # e0 :
+ nop # .. e1 :
+
+ /* Take care of a final (probably partial) result word.
+ On entry to this basic block:
+ t0 == assembled source word
+ t7 == cmpbge mask that found the null. */
+$u_final:
+ negq t7, t6 # e0 : isolate low bit set
+ and t6, t7, t8 # e1 :
+
+ and t8, 0x80, t6 # e0 : avoid dest word load if we can
+ bne t6, 1f # .. e1 (zdb)
+
+ ldq_u t1, 0(a0) # e0 :
+ subq t8, 1, t6 # .. e1 :
+ or t6, t8, t7 # e0 :
+ zapnot t0, t7, t0 # .. e1 : kill source bytes > null
+ zap t1, t7, t1 # e0 : kill dest bytes <= null
+ or t0, t1, t0 # e1 :
+
+1: stq_u t0, 0(a0) # e0 :
+ ret (t9) # .. e1 :
+
+$u_eoc: # end-of-count
+ extqh t2, a1, t0
+ or t0, t1, t0
+ cmpbge zero, t0, t7
+
+$u_eocfin: # end-of-count, final word
+ or t10, t7, t7
+ br $u_final
+
+ /* Unaligned copy entry point. */
+ .align 3
+$unaligned:
+
+ ldq_u t1, 0(a1) # e0 : load first source word
+
+ and a0, 7, t4 # .. e1 : find dest misalignment
+ and a1, 7, t5 # e0 : find src misalignment
+
+ /* Conditionally load the first destination word and a bytemask
+ with 0xff indicating that the destination byte is sacrosanct. */
+
+ mov zero, t0 # .. e1 :
+ mov zero, t6 # e0 :
+ beq t4, 1f # .. e1 :
+ ldq_u t0, 0(a0) # e0 :
+ lda t6, -1 # .. e1 :
+ mskql t6, a0, t6 # e0 :
+1:
+ subq a1, t4, a1 # .. e1 : sub dest misalignment from src addr
+
+ /* If source misalignment is larger than dest misalignment, we need
+ extra startup checks to avoid SEGV. */
+
+ cmplt t4, t5, t8 # e1 :
+ extql t1, a1, t1 # .. e0 : shift src into place
+ lda t2, -1 # e0 : for creating masks later
+ beq t8, $u_head # e1 :
+
+ mskqh t2, t5, t2 # e0 : begin src byte validity mask
+ cmpbge zero, t1, t7 # .. e1 : is there a zero?
+ extql t2, a1, t2 # e0 :
+ or t7, t10, t6 # .. e1 : test for end-of-count too
+ cmpbge zero, t2, t3 # e0 :
+ cmoveq a2, t6, t7 # .. e1 :
+ andnot t7, t3, t7 # e0 :
+ beq t7, $u_head # .. e1 (zdb)
+
+ /* At this point we've found a zero in the first partial word of
+ the source. We need to isolate the valid source data and mask
+ it into the original destination data. (Incidentally, we know
+ that we'll need at least one byte of that original dest word.) */
+
+ ldq_u t0, 0(a0) # e0 :
+ negq t7, t6 # .. e1 : build bitmask of bytes <= zero
+ mskqh t1, t4, t1 # e0 :
+ and t6, t7, t8 # .. e1 :
+ subq t8, 1, t6 # e0 :
+ or t6, t8, t7 # e1 :
+
+ zapnot t2, t7, t2 # e0 : prepare source word; mirror changes
+ zapnot t1, t7, t1 # .. e1 : to source validity mask
+
+ andnot t0, t2, t0 # e0 : zero place for source to reside
+ or t0, t1, t0 # e1 : and put it there
+ stq_u t0, 0(a0) # e0 :
+ ret (t9) # .. e1 :
+
+ .end __stxncpy
do_exit(SIGSEGV);
}
set_flags(regs->eflags, VEFLAGS, VIF_MASK | current->tss.v86mask);
- copy_to_user(¤t->tss.vm86_info->regs,regs,sizeof(*regs));
- put_user(current->tss.screen_bitmap,¤t->tss.vm86_info->screen_bitmap);
+ tmp = copy_to_user(¤t->tss.vm86_info->regs,regs,sizeof(*regs));
+ tmp += put_user(current->tss.screen_bitmap,¤t->tss.vm86_info->screen_bitmap);
+ if (tmp) {
+ printk("vm86: could not access userspace vm86_info\n");
+ do_exit(SIGSEGV);
+ }
tmp = current->tss.esp0;
current->tss.esp0 = current->saved_kernel_stack;
current->saved_kernel_stack = 0;
struct vm86_struct info;
struct task_struct *tsk = current;
struct pt_regs * pt_regs = (struct pt_regs *) &v86;
- int error;
if (tsk->saved_kernel_stack)
return -EPERM;
- /* v86 must be readable (now) and writable (for save_v86_state) */
- error = verify_area(VERIFY_WRITE,v86,sizeof(*v86));
- if (error)
- return error;
- copy_from_user(&info,v86,sizeof(info));
+ if (copy_from_user(&info,v86,sizeof(info)))
+ return -EFAULT;
/*
* make sure the vm86() system call doesn't try to do anything silly
*/
info.regs.__null_ds = 0;
info.regs.__null_es = 0;
- info.regs.__null_fs = 0;
- info.regs.__null_gs = 0;
+
+/* we are clearing fs,gs later just before "jmp ret_from_sys_call",
+ * because starting with Linux 2.1.x they are no longer saved/restored
+ */
+
/*
* The eflags register is also special: we cannot trust that the user
* has set it up safely, so this makes sure interrupt etc flags are
tsk->tss.screen_bitmap = info.screen_bitmap;
if (info.flags & VM86_SCREEN_BITMAP)
mark_screen_rdonly(tsk);
- __asm__ __volatile__("movl %0,%%esp\n\t"
+ __asm__ __volatile__(
+ "xorl %%eax,%%eax; mov %%ax,%%fs; mov %%ax,%%gs\n\t"
+ "movl %0,%%esp\n\t"
"jmp ret_from_sys_call"
: /* no outputs */
- :"r" (&info.regs), "b" (tsk));
+ :"r" (&info.regs), "b" (tsk) : "ax");
return 0;
}
static inline int is_revectored(int nr, struct revectored_struct * bitmap)
{
- if (verify_area(VERIFY_READ, bitmap, 256/8) < 0)
+ unsigned long map;
+ if (get_user(map, bitmap->__map + (nr >> 5)))
return 1;
- return test_bit(nr, bitmap);
+ return test_bit(nr & ((1 << 5)-1), &map);
}
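
The index arithmetic above: __map is stored as 32-bit words, so vector nr lives in word nr >> 5, bit nr & 31. Worked example for nr = 0x21 (DOS int 21h):

	word index = 0x21 >> 5     = 1
	bit index  = 0x21 & (32-1) = 1

so a single get_user of that word replaces verifying the whole 256-bit bitmap.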
/*
static void do_int(struct vm86_regs *regs, int i, unsigned char * ssp, unsigned long sp)
{
- unsigned short *intr_ptr, seg;
-
+ unsigned long *intr_ptr, segoffs;
+
if (regs->cs == BIOSSEG)
goto cannot_handle;
if (is_revectored(i, &current->tss.vm86_info->int_revectored))
goto cannot_handle;
if (i==0x21 && is_revectored(AH(regs),&current->tss.vm86_info->int21_revectored))
goto cannot_handle;
- intr_ptr = (unsigned short *) (i << 2);
- if (verify_area(VERIFY_READ, intr_ptr, 4) < 0)
+ intr_ptr = (unsigned long *) (i << 2);
+ if (get_user(segoffs, intr_ptr))
goto cannot_handle;
- get_user(seg, intr_ptr+1);
- if (seg == BIOSSEG)
+ if ((segoffs >> 16) == BIOSSEG)
goto cannot_handle;
pushw(ssp, sp, get_vflags(regs));
pushw(ssp, sp, regs->cs);
pushw(ssp, sp, IP(regs));
- regs->cs = seg;
+ regs->cs = segoffs >> 16;
SP(regs) -= 6;
- get_user(IP(regs), intr_ptr+0);
+ IP(regs) = segoffs & 0xffff;
clear_TF(regs);
clear_IF(regs);
return;
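
The single-load trick works because a real-mode IVT entry packs offset and segment little-endian: entry i sits at linear address i*4 with the offset in the low 16 bits and the segment in the high 16, so one 32-bit get_user fetches both halves. Worked example for a hypothetical entry reading 0xC0001234:

	segoffs  = 0xC0001234;		/* one 32-bit user load */
	IP(regs) = segoffs & 0xffff;	/* 0x1234: new instruction pointer */
	regs->cs = segoffs >> 16;	/* 0xC000: new code segment */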
#include <asm/pgtable.h>
#include <asm/dma.h>
-/*
- * The SMP kernel can't handle the 4MB page table optimizations yet
- */
-#ifdef __SMP__
-#undef USE_PENTIUM_MM
-#endif
-
extern void die_if_kernel(char *,struct pt_regs *,long);
extern void show_net_buffers(void);
/* unmap the original low memory mappings */
pgd_val(pg_dir[0]) = 0;
while (address < end_mem) {
-#ifdef USE_PENTIUM_MM
+ /*
+ * The following code enables 4MB page tables for the
+ * Intel Pentium cpu; unfortunately the SMP kernel can't
+ * handle the 4MB page table optimizations yet
+ */
+#ifndef __SMP__
/*
* This will create page tables that
* span up to the next 4MB virtual
#include <linux/fd.h>
-
+#include <linux/hdreg.h>
#define OLDFDRAWCMD 0x020d /* send a raw command to the FDC */
static struct tq_struct floppy_tq =
{ 0, 0, 0, 0 };
+static void schedule_bh( void (*handler)(void*) )
+{
+ floppy_tq.routine = (void *)(void *) handler;
+ queue_task_irq(&floppy_tq, &tq_immediate);
+ mark_bh(IMMEDIATE_BH);
+}
+
static struct timer_list fd_timer ={ NULL, NULL, 0, 0, 0 };
static void cancel_activity(void)
} while ((ST0 & 0x83) != UNIT(current_drive) && inr == 2);
}
if (handler) {
- if(intr_count >= 2) {
- /* expected interrupt */
- floppy_tq.routine = (void *)(void *) handler;
- queue_task_irq(&floppy_tq, &tq_immediate);
- mark_bh(IMMEDIATE_BH);
- } else
+ if(intr_count >= 2)
+ schedule_bh( (void *)(void *) handler);
+ else
handler();
} else
FDCS->reset = 1;
int ret;
unsigned long flags;
- floppy_tq.routine = (void *)(void *) handler;
- queue_task(&floppy_tq, &tq_immediate);
- mark_bh(IMMEDIATE_BH);
+ schedule_bh((void *)(void *)handler);
INT_OFF;
while(command_status < 2 && NO_SIGNAL){
is_alive("wait_til_done");
if (TESTF(FD_NEED_TWADDLE))
twaddle();
- floppy_tq.routine = (void *)(void *) floppy_start;
- queue_task(&floppy_tq, &tq_immediate);
- mark_bh(IMMEDIATE_BH);
+ schedule_bh( (void *)(void *) floppy_start);
#ifdef DEBUGT
debugt("queue fd request");
#endif
bad_flp_intr,
request_done };
-static struct tq_struct request_tq =
-{ 0, 0, (void *) (void *) redo_fd_request, 0 };
-
static void process_fd_request(void)
{
cont = &rw_cont;
- queue_task(&request_tq, &tq_immediate);
- mark_bh(IMMEDIATE_BH);
+ schedule_bh( (void *)(void *) redo_fd_request);
}
static void do_fd_request(void)
{
+ if(usage_count == 0) {
+ printk("warning: usage count=0, CURRENT=%p exiting\n", CURRENT);
+ printk("sect=%ld cmd=%d\n", CURRENT->sector, CURRENT->cmd);
+ return;
+ }
sti();
if (fdc_busy){
/* fdc busy, this new request will be treated when the
* Misc Ioctl's and support
* ========================
*/
-static int fd_copyout(void *param, const void *address, int size)
+static inline int fd_copyout(void *param, const void *address, int size)
{
- int ret;
-
- ECALL(verify_area(VERIFY_WRITE,param,size));
- copy_to_user(param,(void *) address, size);
- return 0;
+ return copy_to_user(param,address, size) ? -EFAULT : 0;
}
-static int fd_copyin(void *param, void *address, int size)
+static inline int fd_copyin(void *param, void *address, int size)
{
- int ret;
+ return copy_from_user(address, param, size) ? -EFAULT : 0;
+}
- ECALL(verify_area(VERIFY_READ,param,size));
- copy_from_user((void *) address, param, size);
- return 0;
+static inline int write_user_long(unsigned long useraddr, unsigned long value)
+{
+ return put_user(value, (unsigned long *)useraddr) ? -EFAULT : 0;
}
#define COPYOUT(x) ECALL(fd_copyout((void *)param, &(x), sizeof(x)))
return -EINVAL;
}
+static int get_floppy_geometry(int drive, int type, struct floppy_struct **g)
+{
+ if (type)
+ *g = &floppy_type[type];
+ else {
+ LOCK_FDC(drive,0);
+ CALL(poll_drive(0,0));
+ process_fd_request();
+ *g = current_type[drive];
+ }
+ if(!*g)
+ return -ENODEV;
+ return 0;
+}
+
static int fd_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long param)
{
cmd = FDEJECT;
}
+ /* generic block device ioctls */
+ switch(cmd) {
+ /* the following have been inspired by the corresponding
+ * code for other block devices. */
+ struct floppy_struct *g;
+ struct hd_geometry *loc;
+
+ case HDIO_GETGEO:
+ loc = (struct hd_geometry *) param;
+ ECALL(get_floppy_geometry(drive, type, &g));
+ ECALL(verify_area(VERIFY_WRITE, loc, sizeof(*loc)));
+ put_user(g->head, &loc->heads);
+ put_user(g->sect, &loc->sectors);
+ put_user(g->track, &loc->cylinders);
+ put_user(0,&loc->start);
+ return 0;
+ case BLKRASET:
+ if(!suser()) return -EACCES;
+ if(param > 0xff) return -EINVAL;
+ read_ahead[MAJOR(inode->i_rdev)] = param;
+ return 0;
+ case BLKRAGET:
+ return write_user_long(param,
+ read_ahead[MAJOR(inode->i_rdev)]);
+ case BLKFLSBUF:
+ if(!suser()) return -EACCES;
+ fsync_dev(inode->i_rdev);
+ invalidate_buffers(inode->i_rdev);
+ return 0;
+
+ case BLKGETSIZE:
+ ECALL(get_floppy_geometry(drive, type, &g));
+ return write_user_long(param, g->size);
+ /* BLKRRPART is not defined as floppies don't have
+ * partition tables */
+ }
+
/* convert the old style command into a new style command */
if ((cmd & 0xff00) == 0x0200) {
ECALL(normalize_0x02xx_ioctl(&cmd, &size));
return set_geometry(cmd, & inparam.g,
drive, type, device);
case FDGETPRM:
- LOCK_FDC(drive,1);
- CALL(poll_drive(1,0));
- process_fd_request();
- if (type)
- outparam = (char *) &floppy_type[type];
- else
- outparam = (char *) current_type[drive];
- if(!outparam)
- return -ENODEV;
+ ECALL(get_floppy_geometry(drive, type,
+ (struct floppy_struct**)
+ &outparam));
break;
case FDMSGON:
j=1;
for (i=current->mm->env_start; i< current->mm->env_end; i ++){
- c= get_fs_byte(i);
+ get_user(c, (char *)i);
if (match){
if (j==99)
c='\0';
case BLKGETSIZE: /* Return device size */
if (!lo->lo_inode)
return -ENXIO;
- if (!arg) return -EINVAL;
- err = verify_area(VERIFY_WRITE, (long *) arg, sizeof(long));
- if (err)
- return err;
- put_fs_long(loop_sizes[lo->lo_number] << 1, (long *) arg);
- return 0;
- default:
+ if (!arg)
return -EINVAL;
+ return put_user(loop_sizes[lo->lo_number] << 1, (int *) arg);
+ default:
+ return -EINVAL;
}
return 0;
}
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
+ *
+ * Credits:
+ * Heiko Eissfeldt <heiko@colossus.escape.de>
+ * For finding a bug in the return of the track numbers.
*/
/*
i=verify_area(VERIFY_WRITE, hdr, sizeof(*hdr));
if(i<0)
return i;
- loc_hdr.cdth_trk0 = bcd_to_int(sony_toc.first_track_num);
- loc_hdr.cdth_trk1 = bcd_to_int(sony_toc.last_track_num);
+ loc_hdr.cdth_trk0 = sony_toc.first_track_num;
+ loc_hdr.cdth_trk1 = sony_toc.last_track_num;
copy_to_user(hdr, &loc_hdr, sizeof(*hdr));
}
return 0;
* If we want to stop after the last track, use the lead-out
* MSF to do that.
*/
- if (ti.cdti_trk1 >= bcd_to_int(sony_toc.last_track_num))
+ if (ti.cdti_trk1 >= sony_toc.last_track_num)
{
log_to_msf(msf_to_log(sony_toc.lead_out_start_msf)-1,
&(params[4]));
TRACE_EXIT;
return error;
}
- memcpy_fromfs(&krnl_arg.mtop, arg, arg_size);
+ copy_from_user(&krnl_arg.mtop, arg, arg_size);
}
TRACEx1(5, "called with ioctl command: 0x%08x", command);
switch (command) {
TRACE_EXIT;
return error;
}
- memcpy_tofs(arg, &krnl_arg, arg_size);
+ copy_to_user(arg, &krnl_arg, arg_size);
}
TRACE_EXIT;
return result;
TRACE_EXIT;
return -EIO;
}
- memcpy_tofs(buff, deblock_buffer + buf_pos_rd, cnt);
+ copy_to_user(buff, deblock_buffer + buf_pos_rd, cnt);
buff += cnt;
to_do -= cnt; /* what's left from req_len */
remaining -= cnt; /* what remains on this tape */
TRACE_EXIT;
return result;
}
- memcpy_fromfs(deblock_buffer + buf_pos_wr, buff, cnt);
+ copy_from_user(deblock_buffer + buf_pos_wr, buff, cnt);
buff += cnt;
req_len -= cnt;
buf_pos_wr += cnt;
}
/* copy buffer to user-space in one go */
if (bytes_done>0)
- copy_to_user( (void *) buf, (void *) buffaddr, bytes_done);
+ copy_to_user( (void *) buf, (void *) bus_to_virt(buffaddr), bytes_done);
#if 1
/* Checks Ton's patch below */
if ((return_read_eof == NO) && (status_eof_detected == YES)) {
/* copy from user to DMA buffer and initiate transfer. */
if (bytes_todo>0) {
- copy_from_user( (void *) buffaddr, (const void *) buf, bytes_todo);
+ copy_from_user( (void *) bus_to_virt(buffaddr), (const void *) buf, bytes_todo);
/****************** similar problem with read() at FM could happen here at EOT.
******************/
return -EPERM;
error = verify_area(VERIFY_READ, (int *) ioarg, sizeof(int));
if (error) return error;
- c = get_user((int *) ioarg);
+ get_user(c, (int *) ioarg);
if (c==0) {
QIC02_TAPE_DEBUG = 0;
return 0;
/* copy struct from user space to kernel space */
stp = (char *) &qic02_tape_dynconf;
argp = (char *) ioarg;
- for (i=0; i<sizeof(qic02_tape_dynconf); i++)
- *stp++ = get_user(argp++);
+ copy_from_user(stp, argp, sizeof(qic02_tape_dynconf));
if (status_zombie==NO)
qic02_release_resources(); /* and go zombie */
if (update_ifc_masks(qic02_tape_dynconf.ifc_type))
/* copy mtop struct from user space to kernel space */
stp = (char *) &operation;
argp = (char *) ioarg;
- for (i=0; i<sizeof(operation); i++)
- *stp++ = get_user(argp++);
+ copy_from_user(stp, argp, sizeof(operation));
/* ---note: mt_count is signed, negative seeks must be
* --- translated to seeks in opposite direction!
* This assumes a one-to-one identity mapping between
* kernel addresses and physical memory.
*/
- buffaddr = align_buffer((unsigned long) &qic02_tape_buf, TAPE_BLKSIZE);
+ buffaddr = align_buffer(virt_to_bus(qic02_tape_buf), TAPE_BLKSIZE);
printk(", at address 0x%lx (0x%lx)\n", buffaddr, (unsigned long) &qic02_tape_buf);
#ifndef CONFIG_MAX_16M
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/sched.h>
+#include <linux/mm.h>
#include <asm/segment.h>
#include "vt_kern.h"
#include "selection.h"
error = verify_area (VERIFY_READ, (void *) param3,
sizeof (temp_i));
if (error == 0) {
- temp_i = get_user ((int *) param3);
+ get_user (temp_i, (int *) param3);
if (ppp->flags & SC_DEBUG)
printk (KERN_INFO
"ppp_tty_ioctl: set mru to %x\n", temp_i);
error = verify_area (VERIFY_READ, (void *) param3,
sizeof (temp_i));
if (error == 0) {
- temp_i = get_user ((int *) param3) & SC_MASK;
+ get_user (temp_i, (int *) param3);
+ temp_i &= SC_MASK;
temp_i |= (ppp->flags & ~SC_MASK);
if ((ppp->flags & SC_CCP_OPEN) &&
error = verify_area (VERIFY_READ, (void *) param3,
sizeof (temp_i));
if (error == 0) {
- ppp->xmit_async_map[0] = get_user ((int *) param3);
+ get_user (ppp->xmit_async_map[0],(int *) param3);
if (ppp->flags & SC_DEBUG)
printk (KERN_INFO
"ppp_tty_ioctl: set xmit asyncmap %x\n",
error = verify_area (VERIFY_READ, (void *) param3,
sizeof (temp_i));
if (error == 0) {
- ppp->recv_async_map = get_user ((int *) param3);
+ get_user (ppp->recv_async_map,(int *) param3);
if (ppp->flags & SC_DEBUG)
printk (KERN_INFO
"ppp_tty_ioctl: set rcv asyncmap %x\n",
error = verify_area (VERIFY_READ, (void *) param3,
sizeof (temp_i));
if (error == 0) {
- temp_i = (get_user ((int *) param3) & 0x1F) << 16;
+ get_user (temp_i, (int *) param3);
+ temp_i = (temp_i & 0x1F) << 16;
temp_i |= (ppp->flags & ~0x1F0000);
if ((ppp->flags | temp_i) & SC_DEBUG)
error = verify_area (VERIFY_READ, (void *) param3,
sizeof (temp_i));
if (error == 0) {
- temp_i = get_user ((int *) param3) + 1;
+ get_user (temp_i, (int *) param3);
+ ++temp_i;
if (ppp->flags & SC_DEBUG)
printk (KERN_INFO
"ppp_tty_ioctl: set maxcid to %d\n",
if (err) {
return err;
}
- tmp = get_user((int *)arg);
+ get_user(tmp,(int *)arg);
#ifndef SL_INCLUDE_CSLIP
if (tmp & (SL_MODE_CSLIP|SL_MODE_ADAPTIVE)) {
return -EINVAL;
if (err) {
return -err;
}
- tmp = get_user((int *)arg);
+ get_user(tmp,(int *)arg);
if (tmp > 255) /* max for unchar */
return -EINVAL;
if ((sl->keepalive = (unchar) tmp) != 0) {
if (err) {
return -err;
}
- tmp = get_user((int *)arg);
+ get_user(tmp,(int *)arg);
if (tmp > 255) /* max for unchar */
- return -EINVAL;
+ return -EINVAL;
if ((sl->outfill = (unchar) tmp) != 0){
sl->outfill_timer.expires=jiffies+sl->outfill*HZ;
add_timer(&sl->outfill_timer);
static void padzero(unsigned long elf_bss)
{
unsigned long nbyte;
- char * fpnt;
nbyte = elf_bss & (PAGE_SIZE-1);
if (nbyte) {
nbyte = PAGE_SIZE - nbyte;
- /* FIXME: someone should investigate, why a bad binary
- is allowed to bring a wrong elf_bss until here,
- and how to react. Suffice the plain return?
- rossius@hrz.tu-chemnitz.de */
- if (verify_area(VERIFY_WRITE, (void *) elf_bss, nbyte)) {
- return;
- }
- fpnt = (char *) elf_bss;
- do {
- put_user(0, fpnt++);
- } while (--nbyte);
+ clear_user((void *) elf_bss, nbyte);
}
}
next->b_count--;
retry = 1;
}
-
+
+ repeat2:
+ bh = lru_list[BUF_LOCKED];
+ if (!bh)
+ break;
+ for (i = nr_buffers_type[BUF_LOCKED]*2 ; i-- > 0 ; bh = next) {
+ if (bh->b_list != BUF_LOCKED)
+ goto repeat2;
+ next = bh->b_next_free;
+ if (!lru_list[BUF_LOCKED])
+ break;
+ if (dev && bh->b_dev != dev)
+ continue;
+ if (buffer_locked(bh)) {
+ /* Buffer is locked; skip it unless wait is
+ requested AND pass > 0. */
+ if (!wait || !pass) {
+ retry = 1;
+ continue;
+ }
+ wait_on_buffer (bh);
+ goto repeat2;
+ }
+ }
+
/* If we are waiting for the sync to succeed, and if any dirty
blocks were written, then repeat; on the second pass, only
wait for buffers being written (do not pass to write any
#include <linux/fs.h>
#include <linux/ext2_fs.h>
#include <linux/sched.h>
+#include <linux/mm.h>
#include <linux/stat.h>
static int ext2_readlink (struct inode *, char *, int);
#define ACC_MODE(x) ("\000\004\002\006"[(x)&O_ACCMODE])
-/*
- * How long a filename can we get from user space?
- * -EFAULT if invalid area
- * 0 if ok (ENAMETOOLONG before EFAULT)
- * >0 EFAULT after xx bytes
- */
-static inline int get_max_filename(unsigned long address)
-{
- struct vm_area_struct * vma;
-
- if (get_fs() == KERNEL_DS)
- return 0;
- vma = find_vma(current->mm, address);
- if (!vma || vma->vm_start > address || !(vma->vm_flags & VM_READ))
- return -EFAULT;
- address = vma->vm_end - address;
- if (address > PAGE_SIZE)
- return 0;
- if (vma->vm_next && vma->vm_next->vm_start == vma->vm_end &&
- (vma->vm_next->vm_flags & VM_READ))
- return 0;
- return address;
-}
-
/*
* In order to reduce some races, while at the same time doing additional
* checking and hopefully speeding things up, we copy filenames to the
*
* POSIX.1 2.4: an empty pathname is invalid (ENOENT).
*/
+static inline int do_getname(const char *filename, char *page)
+{
+ int retval;
+ unsigned long len = PAGE_SIZE;
+
+ if ((unsigned long) filename >= TASK_SIZE) {
+ if (get_fs() != KERNEL_DS)
+ return -EFAULT;
+ } else if (TASK_SIZE - (unsigned long) filename < PAGE_SIZE)
+ len = TASK_SIZE - (unsigned long) filename;
+
+ retval = strncpy_from_user((char *)page, filename, len);
+ if (retval > 0) {
+ if (retval < len)
+ return 0;
+ return -ENAMETOOLONG;
+ } else if (!retval)
+ retval = -ENOENT;
+ return retval;
+}
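do_getname() is written entirely against the strncpy_from_user() contract introduced later in this patch: negative on a fault, otherwise the number of characters copied, where a result equal to the buffer length means the terminator was never seen. A checkable restatement of the case analysis, with a toy copy routine in place of the real one (error numbers are the usual Linux values):

	#include <stdio.h>

	/* Toy: copy at most len bytes, return the string length if the NUL
	   was reached, len if it was not, mirroring the kernel contract. */
	static long toy_strncpy_from_user(char *dst, const char *src, long len)
	{
		long n = 0;
		while (n < len && (dst[n] = src[n]) != '\0')
			n++;
		return n;
	}

	static int toy_do_getname(const char *name, char *page, long len)
	{
		long ret = toy_strncpy_from_user(page, name, len);
		if (ret > 0)
			return ret < len ? 0 : -36;	/* -ENAMETOOLONG */
		if (!ret)
			return -2;			/* -ENOENT: empty name */
		return ret;				/* fault: -EFAULT */
	}

	int main(void)
	{
		char page[8];
		printf("%d\n", toy_do_getname("etc", page, 8));		  /* 0 */
		printf("%d\n", toy_do_getname("much-too-long", page, 8)); /* -36 */
		printf("%d\n", toy_do_getname("", page, 8));		  /* -2 */
		return 0;
	}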
+
int getname(const char * filename, char **result)
{
- int i, error;
unsigned long page;
- char * tmp, c;
-
- i = get_max_filename((unsigned long) filename);
- if (i < 0)
- return i;
- error = -EFAULT;
- if (!i) {
- error = -ENAMETOOLONG;
- i = PAGE_SIZE;
- }
- get_user(c, filename++);
- if (!c)
- return -ENOENT;
- if(!(page = __get_free_page(GFP_KERNEL)))
- return -ENOMEM;
- *result = tmp = (char *) page;
- while (--i) {
- *(tmp++) = c;
- get_user(c, filename++);
- if (!c) {
- *tmp = '\0';
- return 0;
- }
+ int retval;
+
+ page = __get_free_page(GFP_KERNEL);
+ retval = -ENOMEM;
+ if (page) {
+ *result = (char *)page;
+ retval = do_getname(filename, (char *) page);
+ if (retval < 0)
+ free_page(page);
}
- free_page(page);
- return error;
+ return retval;
}
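Callers keep the old contract: on success *result points at a full page holding the copied name and must be released with putname(); on failure the page has already been freed inside getname(). The shape of the new flow, restated as a small user-space toy (malloc/free stand in for __get_free_page/free_page, and a length check stands in for do_getname):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	static int toy_getname(const char *src, char **result)
	{
		char *page = malloc(4096);
		if (!page)
			return -12;		/* -ENOMEM */
		*result = page;
		int retval = strlen(src) < 4096 ? 0 : -36;
		if (!retval)
			strcpy(page, src);
		else
			free(page);		/* failure: freed right here */
		return retval;
	}

	int main(void)
	{
		char *name;
		if (toy_getname("/etc/fstab", &name) == 0) {
			printf("%s\n", name);
			free(name);		/* success: caller's putname() */
		}
		return 0;
	}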
void putname(char * name)
#ifndef _ASM_SEGMENT_H
#define _ASM_SEGMENT_H
-#include <linux/string.h>
-
/*
* The fs value determines whether argument validity checking should be
* performed or not. If get_fs() == USER_DS, checking is performed, with
* - OR we are in kernel mode.
*/
#define __access_ok(addr,size,mask) \
-(((mask)&((addr | size | (addr+size)) >> 42))==0)
+ (((mask)&((addr | size | (addr+size)) >> 42))==0)
#define __access_mask (-(long)get_fs())
#define access_ok(type,addr,size) \
-__access_ok(((unsigned long)(addr)),(size),__access_mask)
+ __access_ok(((unsigned long)(addr)),(size),__access_mask)
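The check works because user addresses on the alpha fit below 1 << 42, while the mask is 0 for KERNEL_DS and all ones for USER_DS (that is what -(long)get_fs() produces, assuming the segment values are 0 and 1 as elsewhere in this tree). OR-ing addr, size, and addr+size before the shift also catches a range that wraps past the boundary. A worked restatement, assuming 64-bit longs as on the alpha:

	#include <stdio.h>

	#define toy_access_ok(addr, size, mask) \
		(((mask) & (((addr) | (size) | ((addr) + (size))) >> 42)) == 0)

	int main(void)
	{
		unsigned long user = ~0UL, kernel = 0;	/* -(long)get_fs() */

		/* in range for user mode: */
		printf("%d\n", toy_access_ok(0x100000000UL, 4096UL, user));	/* 1 */
		/* beyond 1<<42: fails for user, passes for kernel: */
		printf("%d\n", toy_access_ok(1UL << 42, 8UL, user));		/* 0 */
		printf("%d\n", toy_access_ok(1UL << 42, 8UL, kernel));		/* 1 */
		return 0;
	}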
/*
- * Uh, these should become the main single-value transfer routines..
- * They automatically use the right size if we just have the right
- * pointer type..
+ * These are the main single-value transfer routines. They automatically
+ * use the right size if we just have the right pointer type.
*
* As the alpha uses the same address space for kernel and user
- * data, we can just do these as direct assignments.
+ * data, we can just do these as direct assignments. (Of course, the
+ * exception handling means that it's no longer "just"...)
*
* Careful to not
- * (a) re-use the arguments for side effects (sizeof is ok)
+ * (a) re-use the arguments for side effects (sizeof/typeof is ok)
* (b) require any knowledge of processes at this stage
*/
#define put_user(x,ptr) __put_user((x),(ptr),sizeof(*(ptr)),__access_mask)
*/
extern void __copy_user(void);
-#define __copy_tofrom_user(to,from,n,v) ({ \
-register void * __cu_to __asm__("$6"); \
-register const void * __cu_from __asm__("$7"); \
-register long __cu_len __asm__("$0"); \
-__cu_to = (to); __cu_from = (from); \
-__cu_len = (n); \
-if (__access_ok(((unsigned long)(v)),__cu_len,__access_mask)) { \
-register void * __cu_ex __asm__("$8"); \
-__cu_ex = &current->tss.ex; \
-__asm__ __volatile__("jsr $28,(%7),__copy_user" \
-:"=r" (__cu_len), "=r" (__cu_from), "=r" (__cu_to) \
-:"0" (__cu_len), "1" (__cu_from), "2" (__cu_to), \
- "r" (__cu_ex), "r" (__copy_user) \
-:"$1","$2","$3","$4","$5","memory"); \
-} __cu_len; })
-
-#define __get_user(x,ptr,size,mask) ({ \
-register long __gu_err __asm__("$0"); \
-register long __gu_val __asm__("$1"); \
-register long __gu_addr __asm__("$2"); \
-register void * __gu_ex __asm__("$3"); \
-__gu_addr = (long) (ptr); \
-__gu_ex = &current->tss.ex; \
-__gu_err = -EFAULT; \
-__asm__("":"=r" (__gu_val)); \
-if (__access_ok(__gu_addr,size,mask)) { \
-switch (size) { \
-case 1: __get_user_asm(8); break; \
-case 2: __get_user_asm(16); break; \
-case 4: __get_user_asm(32); break; \
-case 8: __get_user_asm(64); break; \
-default: __get_user_asm(unknown); break; \
-} } x = (__typeof__(*(ptr))) __gu_val; __gu_err; })
-
-extern void __get_user_8(void);
-extern void __get_user_16(void);
-extern void __get_user_32(void);
-extern void __get_user_64(void);
+#define __copy_tofrom_user(to,from,n,v) \
+({ \
+ register void * __cu_to __asm__("$6") = (to); \
+ register const void * __cu_from __asm__("$7") = (from); \
+ register long __cu_len __asm__("$0") = (n); \
+ if (__access_ok(((long)(v)),__cu_len,__access_mask)) { \
+ register void * __cu_ex __asm__("$8"); \
+ __cu_ex = &current->tss.ex; \
+ __asm__ __volatile__( \
+ "jsr $28,(%7),__copy_user" \
+ : "=r" (__cu_len), "=r" (__cu_from), "=r" (__cu_to) \
+ : "0" (__cu_len), "1" (__cu_from), "2" (__cu_to), \
+ "r" (__cu_ex), "r" (__copy_user) \
+ : "$1","$2","$3","$4","$5","$28","memory"); \
+ } \
+ __cu_len; \
+})
+
extern void __get_user_unknown(void);
-#define __get_user_asm(x) \
-__asm__ __volatile__("jsr $28,(%4),__get_user_" #x \
-:"=r" (__gu_err),"=r" (__gu_val) \
-:"r" (__gu_ex), "r" (__gu_addr),"r" (__get_user_##x) \
-:"$4","$5","$28")
-
-#define __put_user(x,ptr,size,mask) ({ \
-register long __pu_err __asm__("$0"); \
-register __typeof__(*(ptr)) __pu_val __asm__("$6"); \
-register long __pu_addr __asm__("$7"); \
-__pu_val = (x); \
-__pu_addr = (long) (ptr); \
-__pu_err = -EFAULT; \
-if (__access_ok(__pu_addr,size,mask)) { \
-register void * __pu_ex __asm__("$8"); \
-__pu_ex = &current->tss.ex; \
-switch (size) { \
-case 1: __put_user_asm(8); break; \
-case 2: __put_user_asm(16); break; \
-case 4: __put_user_asm(32); break; \
-case 8: __put_user_asm(64); break; \
-default: __put_user_asm(unknown); break; \
-} } __pu_err; })
-
-extern void __put_user_8(void);
-extern void __put_user_16(void);
-extern void __put_user_32(void);
-extern void __put_user_64(void);
+#define __get_user(x,ptr,size,mask) \
+({ \
+ long __gu_err = -EFAULT, __gu_val = 0; \
+ const __typeof__(*(ptr)) *__gu_addr = (ptr); \
+ if (__access_ok((long)__gu_addr,size,mask)) { \
+ long __gu_ex_count = current->tss.ex.count; \
+ switch (size) { \
+ case 1: __get_user_8; break; \
+ case 2: __get_user_16; break; \
+ case 4: __get_user_32; break; \
+ case 8: __get_user_64; break; \
+ default: __get_user_unknown(); break; \
+ } \
+ } \
+ (x) = (__typeof__(*(ptr))) __gu_val; \
+ __gu_err; \
+})
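Every one of the size-specific fragments below follows the same lazy-exception protocol as the out-of-line helpers: point $28 at the local recovery label, store an incremented exception count so the trap path knows a recoverable access is in flight, perform the one real access, then restore the count; on a fault the handler resumes at $28 with __gu_err still holding its -EFAULT preset. A user-space analogy using setjmp/longjmp in place of $28 and the count (purely illustrative, nothing kernel-specific):

	#include <setjmp.h>
	#include <stdio.h>

	static jmp_buf recover;		/* plays the role of $28 */
	static int in_flight;		/* plays the role of tss.ex.count */

	static long toy_get_user(long *val, const long *addr)
	{
		long err = -14;			/* preset -EFAULT */
		if (!setjmp(recover)) {		/* "br $28,1f" */
			in_flight++;		/* store inc'ed count */
			*val = *addr;		/* the one real access */
			in_flight--;		/* restore count */
			err = 0;		/* "clr %0" */
		}
		return err;	/* a fault handler would longjmp(recover, 1) */
	}

	int main(void)
	{
		long v = 0, x = 7;
		printf("%ld %ld\n", toy_get_user(&v, &x), v);	/* 0 7 */
		return 0;
	}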
+
+#define __get_user_64 \
+ __asm__("/* Inline __get_user_64 */\n\t" \
+ "br $28,1f\n\t" /* set up exception address */ \
+ "br 2f\n" /* exception! */ \
+ "1:\t" \
+ "stq %5,%3\n\t" /* store inc'ed exception count */ \
+ "ldq %1,%2\n\t" /* actual data load */ \
+ "stq %4,%3\n\t" /* restore exception count */ \
+ "clr %0\n" /* no exception: error = 0 */ \
+ "2:\t/* End __get_user_64 */" \
+ : "=r"(__gu_err), "=r"(__gu_val) \
+ : "m"(*__gu_addr), "m"(current->tss.ex.count), \
+ "r"(__gu_ex_count), "r"(__gu_ex_count+1), \
+ "0"(__gu_err), "1"(__gu_val) \
+ : "$28")
+
+#define __get_user_32 \
+ __asm__("/* Inline __get_user_32 */\n\t" \
+ "br $28,1f\n\t" /* set up exception address */ \
+ "br 2f\n" /* exception! */ \
+ "1:\t" \
+ "stq %5,%3\n\t" /* store inc'ed exception count */ \
+ "ldl %1,%2\n\t" /* actual data load */ \
+ "stq %4,%3\n\t" /* restore exception count */ \
+ "clr %0\n" /* no exception: error = 0 */ \
+ "2:\t/* End __get_user_32 */" \
+ : "=r"(__gu_err), "=r"(__gu_val) \
+ : "m"(*__gu_addr), "m"(current->tss.ex.count), \
+ "r"(__gu_ex_count), "r"(__gu_ex_count+1), \
+ "0"(__gu_err), "1"(__gu_val) \
+ : "$28")
+
+#define __get_user_16 \
+ __asm__("/* Inline __get_user_16 */\n\t" \
+ "br $28,1f\n\t" /* set up exception address */ \
+ "br 2f\n" /* exception! */ \
+ "1:\t" \
+ "stq %6,%4\n\t" /* store inc'ed exception count */ \
+ "ldq_u %1,%2\n\t" /* actual data load */ \
+ "stq %5,%4\n\t" /* restore exception count */ \
+ "clr %0\n\t" /* no exception: error = 0 */ \
+ "extwl %1,%3,%1\n" /* extract the short */ \
+ "2:\t/* End __get_user_16 */" \
+ : "=r"(__gu_err), "=r"(__gu_val) \
+ : "m"(*__gu_addr), "r"(__gu_addr), \
+ "m"(current->tss.ex.count), "r"(__gu_ex_count), \
+ "r"(__gu_ex_count+1), "0"(__gu_err), "1"(__gu_val) \
+ : "$28")
+
+#define __get_user_8 \
+ __asm__("/* Inline __get_user_8 */\n\t" \
+ "br $28,1f\n\t" /* set up exception address */ \
+ "br 2f\n" /* exception! */ \
+ "1:\t" \
+ "stq %6,%4\n\t" /* store inc'ed exception count */ \
+ "ldq_u %1,%2\n\t" /* actual data load */ \
+ "stq %5,%4\n\t" /* restore exception count */ \
+ "clr %0\n\t" /* no exception: error = 0 */ \
+ "extbl %1,%3,%1\n" /* extract the byte */ \
+ "2:\t/* End __get_user_8 */" \
+ : "=r"(__gu_err), "=r"(__gu_val) \
+ : "m"(*__gu_addr), "r"(__gu_addr), \
+ "m"(current->tss.ex.count), "r"(__gu_ex_count), \
+ "r"(__gu_ex_count+1), "0"(__gu_err), "1"(__gu_val) \
+ : "$28")
+
extern void __put_user_unknown(void);
-#define __put_user_asm(x) \
-__asm__ __volatile__("jsr $28,(%5),__put_user_" #x \
-:"=r" (__pu_err),"=r" (__pu_val) \
-:"1" (__pu_val), "r" (__pu_ex), "r" (__pu_addr), "r" (__put_user_##x) \
-:"$2","$3","$4","$5","$6","$28")
+#define __put_user(x,ptr,size,mask) \
+({ \
+ long __pu_err = -EFAULT; \
+ __typeof__(*(ptr)) *__pu_addr = (ptr); \
+ __typeof__(*(ptr)) __pu_val = (x); \
+ if (__access_ok((long)__pu_addr,size,mask)) { \
+ long __pu_ex_count = current->tss.ex.count; \
+ switch (size) { \
+ case 1: __put_user_8; break; \
+ case 2: __put_user_16; break; \
+ case 4: __put_user_32; break; \
+ case 8: __put_user_64; break; \
+ default: __put_user_unknown(); break; \
+ } \
+ } \
+ __pu_err; \
+})
+
+#define __put_user_64 \
+ __asm__("/* Inline __put_user_64 */\n\t" \
+ "br $28,1f\n\t" /* set up exception address */ \
+ "br 2f\n" /* exception! */ \
+ "1:\t" \
+ "stq %5,%3\n\t" /* store inc'ed exception count */ \
+ "stq %2,%1\n\t" /* actual data store */ \
+ "stq %4,%3\n\t" /* restore exception count */ \
+ "clr %0\n" /* no exception: error = 0 */ \
+ "2:\t/* End __put_user_64 */" \
+ : "=r"(__pu_err), "=m"(*__pu_addr) \
+ : "r"(__pu_val), "m"(current->tss.ex.count), \
+ "r"(__pu_ex_count), "r"(__pu_ex_count+1), \
+ "0"(__pu_err) \
+ : "$28")
+
+#define __put_user_32 \
+ __asm__("/* Inline __put_user_32 */\n\t" \
+ "br $28,1f\n\t" /* set up exception address */ \
+ "br 2f\n" /* exception! */ \
+ "1:\t" \
+ "stq %5,%3\n\t" /* store inc'ed exception count */ \
+ "stl %2,%1\n\t" /* actual data store */ \
+ "stq %4,%3\n\t" /* restore exception count */ \
+ "clr %0\n" /* no exception: error = 0 */ \
+ "2:\t/* End __put_user_32 */" \
+ : "=r"(__pu_err), "=m"(*__pu_addr) \
+ : "r"(__pu_val), "m"(current->tss.ex.count), \
+ "r"(__pu_ex_count), "r"(__pu_ex_count+1), \
+ "0"(__pu_err) \
+ : "$28")
+
+#define __put_user_16 \
+ __asm__("/* Inline __put_user_16 */\n\t" \
+ "br $28,1f\n\t" /* set up exception address */ \
+ "lda %0,%7\n\t" /* exception! error = -EFAULT */ \
+ "br 2f\n" \
+ "1:\t" \
+ "stq %6,%4\n\t" /* store inc'ed exception count */ \
+ "ldq_u %0,%1\n\t" /* masked data store */ \
+ "inswl %2,%3,%2\n\t" \
+ "mskwl %0,%3,%0\n\t" \
+ "or %0,%2,%2\n\t" \
+ "stq_u %2,%1\n\t" \
+ "stq %5,%4\n\t" /* restore exception count */ \
+ "clr %0\n" /* no exception: error = 0 */ \
+ "2:\t/* End __put_user_16 */" \
+ : "=r"(__pu_err), "=m"(*__pu_addr), "=r"(__pu_val) \
+ : "r"(__pu_addr), "m"(current->tss.ex.count), \
+ "r"(__pu_ex_count), "r"(__pu_ex_count+1), "i"(-EFAULT), \
+ "2"(__pu_val) \
+ : "$28")
+
+#define __put_user_8 \
+ __asm__("/* Inline __put_user_8 */\n\t" \
+ "br $28,1f\n\t" /* set up exception address */ \
+ "lda %0,%7\n\t" /* exception! error = -EFAULT */ \
+ "br 2f\n" \
+ "1:\t" \
+ "stq %6,%4\n\t" /* store inc'ed exception count */ \
+ "ldq_u %0,%1\n\t" /* masked data store */ \
+ "insbl %2,%3,%2\n\t" \
+ "mskbl %0,%3,%0\n\t" \
+ "or %0,%2,%2\n\t" \
+ "stq_u %2,%1\n\t" \
+ "stq %5,%4\n\t" /* restore exception count */ \
+ "clr %0\n" /* no exception: error = 0 */ \
+ "2:\t/* End __put_user_8 */" \
+ : "=r"(__pu_err), "=m"(*__pu_addr), "=r"(__pu_val) \
+ : "r"(__pu_addr), "m"(current->tss.ex.count), \
+ "r"(__pu_ex_count), "r"(__pu_ex_count+1), "i"(-EFAULT), \
+ "2"(__pu_val) \
+ : "$28")
+
+
+extern void __clear_user(void);
+
+#define clear_user(to,n) \
+({ \
+ register void * __cl_to __asm__("$6") = (to); \
+ register long __cl_len __asm__("$0") = (n); \
+ if (__access_ok(((long)__cl_to),__cl_len,__access_mask)) { \
+ register void * __cl_ex __asm__("$7"); \
+ __cl_ex = &current->tss.ex; \
+ __asm__ __volatile__( \
+ "jsr $28,(%2),__clear_user" \
+ : "=r"(__cl_len), "=r"(__cl_to) \
+ : "r"(__clear_user), "r"(__cl_ex), \
+ "0"(__cl_len), "1"(__cl_to) \
+ : "$1","$2","$3","$4","$5","$28","memory"); \
+ } \
+ __cl_len; \
+})
+
+
+/* Returns: -EFAULT if exception before terminator, N if the entire
+ buffer filled, else strlen. */
+
+struct exception_struct;
+extern long __strncpy_from_user(char *__to, const char *__from,
+ long __to_len, struct exception_struct *);
+
+#define strncpy_from_user(to,from,n) \
+({ \
+ char * __sfu_to = (to); \
+ const char * __sfu_from = (from); \
+ long __sfu_len = (n), __sfu_ret = -EFAULT; \
+ if (__access_ok(((long)__sfu_from),__sfu_len,__access_mask)) { \
+ __sfu_ret = __strncpy_from_user(__sfu_to,__sfu_from, \
+ __sfu_len, &current->tss.ex); \
+ } \
+ __sfu_ret; \
+})
#endif /* _ASM_SEGMENT_H */
__constant_c_memset((s),(0x0101010101010101UL*(unsigned char)c),(count)) : \
__memset((s),(c),(count)))
+#define __HAVE_ARCH_STRCPY
+#define __HAVE_ARCH_STRNCPY
+#define __HAVE_ARCH_STRCAT
+#define __HAVE_ARCH_STRNCAT
+#define __HAVE_ARCH_STRCHR
+#define __HAVE_ARCH_STRRCHR
#define __HAVE_ARCH_STRLEN
#endif /* __KERNEL__ */
typedef unsigned long elf_greg_t;
-#define ELF_NGREG (sizeof (struct pt_regs) / sizeof(elf_greg_t))
+#define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t))
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef struct user_i387_struct elf_fpregset_t;
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
+/* Wow, the "main" arch needs arch dependent functions too.. :) */
+
+/* regs is struct pt_regs, pr_reg is elf_gregset_t (which is
+ now struct user_regs_struct, they are different) */
+
+#define ELF_CORE_COPY_REGS(pr_reg, regs) \
+ pr_reg[0] = regs->ebx; \
+ pr_reg[1] = regs->ecx; \
+ pr_reg[2] = regs->edx; \
+ pr_reg[3] = regs->esi; \
+ pr_reg[4] = regs->edi; \
+ pr_reg[5] = regs->ebp; \
+ pr_reg[6] = regs->eax; \
+ pr_reg[7] = regs->xds; \
+ pr_reg[8] = regs->xes; \
+ /* fake once used fs and gs selectors? */ \
+ pr_reg[9] = regs->xds; /* was fs and __fs */ \
+ pr_reg[10] = regs->xds; /* was gs and __gs */ \
+ pr_reg[11] = regs->orig_eax; \
+ pr_reg[12] = regs->eip; \
+ pr_reg[13] = regs->xcs; \
+ pr_reg[14] = regs->eflags; \
+ pr_reg[15] = regs->esp; \
+ pr_reg[16] = regs->xss;
+
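Note that ELF_CORE_COPY_REGS expands to a bare run of assignments with no do { } while (0) wrapper, so it is only safe in full-statement position, which is how the core-dump code uses it. A toy demonstration of the constraint (types and names invented for the example):

	#include <stdio.h>

	struct toy_regs { long ebx, ecx; };
	typedef long toy_gregset_t[2];

	#define TOY_COPY_REGS(pr_reg, regs) \
		pr_reg[0] = (regs)->ebx; \
		pr_reg[1] = (regs)->ecx;

	void fill_note(toy_gregset_t pr, struct toy_regs *regs)
	{
		TOY_COPY_REGS(pr, regs)	/* fine: full-statement position */
		/* "if (dumping) TOY_COPY_REGS(pr, regs);" would copy only
		   ebx under the if: the classic multi-statement macro trap. */
	}

	int main(void)
	{
		struct toy_regs r = { 1, 2 };
		toy_gregset_t pr;
		fill_note(pr, &r);
		printf("%ld %ld\n", pr[0], pr[1]);	/* 1 2 */
		return 0;
	}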
#endif
#include <linux/config.h>
-/*
- * Define USE_PENTIUM_MM if you want the 4MB page table optimizations.
- * This works only on an intel Pentium.
- */
-#define USE_PENTIUM_MM 1
-
/*
* The Linux memory management assumes a three-level page table setup. On
* the i386, we use that, but "fold" the mid level into the top-level page
#else
#define __access_ok(type,addr,size) \
(__kernel_ok || (__user_ok(addr,size) && \
- ((type) == VERIFY_READ || __verify_write((void *)(addr),(size)))))
+ ((type) == VERIFY_READ || wp_works_ok || __verify_write((void *)(addr),(size)))))
#endif /* CPU */
#define access_ok(type,addr,size) \
"decl %2\n" \
"3:\tlea 0(%3,%1,4),%0" \
:"=d" (size) \
- :"c" (size >> 2), "m" (current->tss.ex), "r" (size & 3), \
+ :"c" (size >> 2), "m" (current->tss.ex), "q" (size & 3), \
"D" (to), "S" (from), "0" (size) \
:"cx","di","si","memory");
__copy_user(to,__cu_from,__cu_size); \
__cu_size; })
+#define __clear_user(addr,size) \
+__asm__ __volatile__( \
+ "movl $3f,%0\n\t" \
+ "incl %2\n\t" \
+ "rep; stosl\n\t" \
+ "testl $2,%3\n\t" \
+ "je 1f\n\t" \
+ "stosw\n\t" \
+ "subl $2,%3\n" \
+ "1:\t" \
+ "testl $1,%3\n\t" \
+ "je 2f\n\t" \
+ "stosb\n\t" \
+ "decl %3\n" \
+ "2:\t" \
+ "decl %2\n" \
+ "3:\tlea 0(%3,%1,4),%0" \
+ :"=d" (size) \
+ :"c" (size >> 2), "m" (current->tss.ex), "r" (size & 3), \
+ "D" (addr), "0" (size), "a" (0) \
+ :"cx","di","memory");
+
+#define clear_user(addr,n) ({ \
+void * __cl_addr = (addr); \
+unsigned long __cl_size = (n); \
+if (__cl_size && __access_ok(VERIFY_WRITE, ((unsigned long)(__cl_addr)), __cl_size)) \
+__clear_user(__cl_addr, __cl_size); \
+__cl_size; })
+
+#define __strncpy_from_user(dst,src,count,res) \
+__asm__ __volatile__( \
+ "cld\n\t" \
+ "movl $3f,%0\n\t" \
+ "incl %2\n" \
+ "1:\tdecl %1\n\t" \
+ "js 2f\n\t" \
+ "lodsb\n\t" \
+ "stosb\n\t" \
+ "testb %%al,%%al\n\t" \
+ "jne 1b\n" \
+ "2:\t" \
+ "incl %1\n\t" \
+ "xorl %0,%0\n\t" \
+ "decl %2\n" \
+ "3:" \
+ :"=d" (res), "=r" (count) \
+ :"m" (current->tss.ex), "1" (count), "S" (src),"D" (dst),"0" (res) \
+ :"si","di","ax","cx","memory")
+
+#define strncpy_from_user(dest,src,count) ({ \
+const void * __sc_src = (src); \
+unsigned long __sc_count = (count); \
+long __sc_res = -EFAULT; \
+if (__access_ok(VERIFY_READ, ((unsigned long)(__sc_src)), __sc_count)) { \
+ unsigned long __sc_residue = __sc_count; \
+ __strncpy_from_user(dest,__sc_src,__sc_count,__sc_res); \
+ if (!__sc_res) __sc_res = __sc_residue - __sc_count; \
+} __sc_res; })
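The residue arithmetic recovers the copied length: the loop decrements the running count once per byte moved (terminator included), adds one back at the end, and the wrapper computes original minus remaining. For src = "hi" and count = 10 the counter ends at 8, so the result is 10 - 8 = 2 == strlen("hi"); if the buffer fills without a NUL the result is the full count, matching the contract stated in the alpha header above. A toy restatement of the count bookkeeping:

	#include <stdio.h>

	int main(void)
	{
		const char *src = "hi";
		long count = 10, remaining = count;

		while (remaining-- > 0) {	/* decl %1 / js 2f */
			char c = *src++;	/* lodsb */
			if (c == '\0')		/* testb %%al,%%al */
				break;
		}
		remaining++;			/* incl %1 */
		printf("copied %ld\n", count - remaining);	/* 2 */
		return 0;
	}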
+
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_SEGMENT_H */
long eax;
long __null_ds;
long __null_es;
- long __null_fs;
- long __null_gs;
long orig_eax;
long eip;
unsigned short cs, __csh;
unsigned long pos, ppos, page_cache;
int reada_ok;
- if (!access_ok(VERIFY_WRITE, buf,count))
+ if (!access_ok(VERIFY_WRITE, buf, count))
return -EFAULT;
+ if (!count)
+ return 0;
error = 0;
read = 0;
page_cache = 0;
sk->rcvbuf = SK_RMEM_MAX;
sk->rto = TCP_TIMEOUT_INIT; /*TCP_WRITE_TIME*/
sk->cong_window = 1; /* start with only sending one packet at a time. */
+ sk->ssthresh = 0x7fffffff;
sk->priority = 1;
sk->state = TCP_CLOSE;