--- /dev/null
+Kernel 2.2.xx Mainline
+----------------------
+Person: Marc-Christian Petersen
+EMail: m.c.p@wolk-project.de
+Mailinglist: linux-kernel@vger.kernel.org
+Website: http://www.kernel.org/pub/linux/kernel/people/mcp/
+Status: Maintained
+Employed by: Linux-Systeme GmbH, http://www.linux-systeme.de/
+
+
+ Thanks to Alan Cox 8-)
+
+
+-------------------------------------------------------------------------
+If you need a more secure, more stable, even faster kernel with better SMP
+support, IDE LBA48 support, LFS support, IPSec support, HTB support, IPVS
+support etc., with many more features and important fixes, please use my
+2.2-secure tree.
+You may find more information about it at http://www.wolk-project.de.
+-------------------------------------------------------------------------
+
+
+
+2.2.26
+------
+o CAN-2004-0077: behave safely in case of do_munmap() (Solar Designer)
+ failures in mremap(2)
+o CAN-2003-0984: /dev/rtc can leak parts of kernel (Solar Designer)
+ memory to unprivileged users (2.4 backport)
+o CAN-2003-0244: hashing exploits in network stack (David S. Miller)
+o update_atime() performance improvement (2.4 backport) (Solar Designer)
+o ability to swapoff after a device file might (Solar Designer)
+ have been re-created
+o MAINTAINERS correction for Kernel 2.2 and 2.2 fixes (me)
+o fixed some typos (Solar Designer, me)
+
S: Maintained
KERNEL (2.2.XX TREE)
-P: Alan Cox
-M: Alan.Cox@linux.org
-L: linux-kernel@vger.kernel.org
-W: http://www.kernel.org/pub/linux/kernel/alan/
-S: Maintained
+P: Marc-Christian Petersen
+M: m.c.p@wolk-project.de
+L: linux-kernel@vger.kernel.org
+W: http://www.kernel.org/pub/linux/kernel/people/mcp/
+S: Maintained
KERNEL AUTOMOUNTER (AUTOFS)
P: H. Peter Anvin
S: Maintained
LINUX 2.2 FIXES
-P: Alan Cox
-M: alan@lxorguk.ukuu.org.uk
+P: Marc-Christian Petersen
+M: m.c.p@wolk-project.de
S: Maintained
THE REST
VERSION = 2
PATCHLEVEL = 2
-SUBLEVEL = 25
+SUBLEVEL = 26
EXTRAVERSION =
ARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ -e s/arm.*/arm/ -e s/sa110/arm/)
goto clear_TF;
}
- /* Mast out spurious debug traps due to lazy DR7 setting */
+ /* Mask out spurious debug traps due to lazy DR7 setting */
if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
if (!tsk->tss.debugreg[7])
goto clear_dr7;
/* User mode accesses just cause a SIGSEGV */
if (error_code & 4) {
tsk->tss.cr2 = address;
- tsk->tss.error_code = error_code;
+ tsk->tss.error_code = error_code | (address >= TASK_SIZE);
tsk->tss.trap_no = 14;
force_sig(SIGSEGV, tsk);
return;
/* Ensure clock and real-time-mode-register are accessible */
msr = rtc->msr & 0xc0;
rtc->msr = 0x40;
+ memset(&wtime, 0, sizeof(wtime));
do {
wtime.tm_sec = BCD2BIN(rtc->bcd_sec);
wtime.tm_min = BCD2BIN(rtc->bcd_min);
cli();
/* Ensure clock and real-time-mode-register are accessible */
rtc->ctrl = RTC_READ;
+ memset(&wtime, 0, sizeof(wtime));
wtime.tm_sec = BCD2BIN(rtc->bcd_sec);
wtime.tm_min = BCD2BIN(rtc->bcd_min);
wtime.tm_hour = BCD2BIN(rtc->bcd_hr);
* tm_min, and tm_sec values are filled in.
*/
+ memset(&wtime, 0, sizeof(wtime));
get_rtc_alm_time(&wtime);
break;
}
}
case RTC_RD_TIME: /* Read the time/date from RTC */
{
+ memset(&wtime, 0, sizeof(wtime));
get_rtc_time(&wtime);
break;
}
case RTC_RD_TIME:
if (ppc_md.get_rtc_time)
{
+ memset(&rtc_tm, 0, sizeof(rtc_tm));
get_rtc_time(&rtc_tm);
copy_to_user_ret((struct rtc_time*)arg, &rtc_tm, sizeof(struct rtc_time), -EFAULT);
switch (cmd)
{
case RTCGET:
+ memset(&rtc_tm, 0, sizeof(rtc_tm));
get_rtc_time(&rtc_tm);
copy_to_user_ret((struct rtc_time*)arg, &rtc_tm, sizeof(struct rtc_time), -EFAULT);
* tm_min, and tm_sec values are filled in.
*/
+ memset(&wtime, 0, sizeof(wtime));
get_rtc_alm_time(&wtime);
break;
}
}
case RTC_RD_TIME: /* Read the time/date from RTC */
{
+ memset(&wtime, 0, sizeof(wtime));
get_rtc_time(&wtime);
break;
}
if (board >= boards)
return -EINVAL;
- if (max_len < sizeof (struct gfx_getboardinfo_args))
+ if (max_len < (int)sizeof (struct gfx_getboardinfo_args))
return -EINVAL;
if (max_len > cards [board].g_board_info_len)
max_len = cards [boards].g_board_info_len;
i_pos = MSDOS_I(inode)->i_location;
if (inode->i_ino == MSDOS_ROOT_INO || !i_pos) return;
if (!(bh = fat_bread(sb, ((unsigned long long) i_pos) >> MSDOS_DPB_BITS))) {
- printk("dev = %s, ino = %ld\n", kdevname(inode->i_dev), i_pos);
+ printk("dev = %s, ino = %ld\n",
+ kdevname(inode->i_dev), (long)i_pos);
fat_fs_panic(sb, "msdos_write_inode: unable to read i-node block");
return;
}
void update_atime (struct inode *inode)
{
+ if ( inode->i_atime == CURRENT_TIME ) return;
if ( IS_NOATIME (inode) ) return;
if ( IS_NODIRATIME (inode) && S_ISDIR (inode->i_mode) ) return;
if ( IS_RDONLY (inode) ) return;
int size = 0, result = 0;
char c;
- if (start >= end)
+ if (!start || start >= end)
return result;
for (;;) {
addr = get_phys_addr(p, start);
start = NULL;
dp = (struct proc_dir_entry *) inode->u.generic_ip;
- if (pid && process_unauthorized(type, pid))
- {
+ if (pid && process_unauthorized(type, pid)) {
free_page(page);
return -EIO;
}
free_page(page);
return length;
}
+ if (pid && process_unauthorized(type, pid)) {
+ free_page(page);
+ return -EIO;
+ }
if (start != NULL) {
/* We have had block-adjusting processing! */
copy_to_user(buf, start, length);
}
/*
- * Thsi function is called by register_tty_driver() to handle
+ * This function is called by tty_register_driver() to handle
* registering the driver's /proc handler into /proc/tty/driver/<foo>
*/
void proc_tty_register_driver(struct tty_driver *driver)
}
/*
- * This function is called by unregister_tty_driver()
+ * This function is called by tty_unregister_driver()
*/
void proc_tty_unregister_driver(struct tty_driver *driver)
{
fd_set_bits fds;
char *bits;
long timeout;
- int ret, size;
+ int ret, size, max_fdset;
timeout = MAX_SCHEDULE_TIMEOUT;
if (tvp) {
if (n < 0)
goto out_nofds;
- if (n > current->files->max_fdset)
- n = current->files->max_fdset;
+ max_fdset = current->files->max_fdset;
+ if (n > max_fdset)
+ n = max_fdset;
+ if (n > NR_OPEN)
+ n = NR_OPEN;
/*
* We need 6 bitmaps (in/out/ex for both incoming and outgoing),
lock_kernel();
/* Do a sanity check on nfds ... */
err = -EINVAL;
- if (nfds > current->files->max_fds || nfds > 0x100000)
+ if (nfds > current->files->max_fds || nfds > NR_OPEN)
goto out;
if (timeout) {
--- /dev/null
+#ifndef _LINUX_JHASH_H
+#define _LINUX_JHASH_H
+
+/* jhash.h: Jenkins hash support.
+ *
+ * Copyright (C) 1996 Bob Jenkins (bob_jenkins@burtleburtle.net)
+ *
+ * http://burtleburtle.net/bob/hash/
+ *
+ * These are the credits from Bob's sources:
+ *
+ * lookup2.c, by Bob Jenkins, December 1996, Public Domain.
+ * hash(), hash2(), hash3, and mix() are externally useful functions.
+ * Routines to test the hash are included if SELF_TEST is defined.
+ * You can use this free for any purpose. It has no warranty.
+ *
+ * Copyright (C) 2003 David S. Miller (davem@redhat.com)
+ *
+ * I've modified Bob's hash to be useful in the Linux kernel, and
+ * any bugs present are surely my fault. -DaveM
+ */
+
+/* NOTE: Arguments are modified in place; callers must pass scratch
+ * variables, never live state they still need. */
+#define __jhash_mix(a, b, c) \
+{ \
+	a -= b; a -= c; a ^= (c>>13); \
+	b -= c; b -= a; b ^= (a<<8); \
+	c -= a; c -= b; c ^= (b>>13); \
+	a -= b; a -= c; a ^= (c>>12); \
+	b -= c; b -= a; b ^= (a<<16); \
+	c -= a; c -= b; c ^= (b>>5); \
+	a -= b; a -= c; a ^= (c>>3); \
+	b -= c; b -= a; b ^= (a<<10); \
+	c -= a; c -= b; c ^= (b>>15); \
+}
+
+/* The golden ratio: an arbitrary value (2^32 / phi, as used by Knuth) */
+#define JHASH_GOLDEN_RATIO	0x9e3779b9
+
+/* The most generic version: hashes an arbitrary sequence of bytes.
+ * No alignment or length assumptions are made about the input key;
+ * bytes are picked up individually.  Returns a 32-bit hash seeded
+ * with initval.
+ */
+static inline u32 jhash(void *key, u32 length, u32 initval)
+{
+	u32 a, b, c, len;
+	u8 *k = key;
+
+	len = length;
+	a = b = JHASH_GOLDEN_RATIO;
+	c = initval;
+
+	/* Consume the key in 12-byte (three 32-bit word) chunks. */
+	while (len >= 12) {
+		a += (k[0] +((u32)k[1]<<8) +((u32)k[2]<<16) +((u32)k[3]<<24));
+		b += (k[4] +((u32)k[5]<<8) +((u32)k[6]<<16) +((u32)k[7]<<24));
+		c += (k[8] +((u32)k[9]<<8) +((u32)k[10]<<16)+((u32)k[11]<<24));
+
+		__jhash_mix(a,b,c);
+
+		k += 12;
+		len -= 12;
+	}
+
+	c += length;	/* fold the total byte count into the hash */
+	/* Handle the trailing 0..11 bytes; every case deliberately
+	 * falls through to accumulate all remaining bytes. */
+	switch (len) {
+	case 11: c += ((u32)k[10]<<24);
+	case 10: c += ((u32)k[9]<<16);
+	case 9 : c += ((u32)k[8]<<8);
+	case 8 : b += ((u32)k[7]<<24);
+	case 7 : b += ((u32)k[6]<<16);
+	case 6 : b += ((u32)k[5]<<8);
+	case 5 : b += k[4];
+	case 4 : a += ((u32)k[3]<<24);
+	case 3 : a += ((u32)k[2]<<16);
+	case 2 : a += ((u32)k[1]<<8);
+	case 1 : a += k[0];
+	};
+
+	__jhash_mix(a,b,c);
+
+	return c;
+}
+
<content>
+/* A special optimized version that handles 1 or more u32s.
+ * The length parameter here is the number of u32s in the key,
+ * NOT the byte count.
+ */
+static inline u32 jhash2(u32 *k, u32 length, u32 initval)
+{
+	u32 a, b, c, len;
+
+	a = b = JHASH_GOLDEN_RATIO;
+	c = initval;
+	len = length;
+
+	/* Consume the key three words at a time. */
+	while (len >= 3) {
+		a += k[0];
+		b += k[1];
+		c += k[2];
+		__jhash_mix(a, b, c);
+		k += 3; len -= 3;
+	}
+
+	c += length * 4;	/* byte count, matching jhash()'s length fold */
+
+	/* Trailing 0..2 words; cases deliberately fall through. */
+	switch (len) {
+	case 2 : b += k[1];
+	case 1 : a += k[0];
+	};
+
+	__jhash_mix(a,b,c);
+
+	return c;
+}
</content>
+
+
+/* A special ultra-optimized version that knows it is hashing exactly
+ * 3, 2 or 1 word(s).
+ *
+ * NOTE: In particular the "c += length; __jhash_mix(a,b,c);" normally
+ * done at the end is not done here.
+ */
+static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
+{
+	a += JHASH_GOLDEN_RATIO;
+	b += JHASH_GOLDEN_RATIO;
+	c += initval;
+
+	__jhash_mix(a, b, c);
+
+	return c;
+}
+
+/* Hash exactly two words; degenerate case of jhash_3words(). */
+static inline u32 jhash_2words(u32 a, u32 b, u32 initval)
+{
+	return jhash_3words(a, b, 0, initval);
+}
+
+/* Hash exactly one word; degenerate case of jhash_3words(). */
+static inline u32 jhash_1word(u32 a, u32 initval)
+{
+	return jhash_3words(a, 0, 0, initval);
+}
+
+#endif /* _LINUX_JHASH_H */
NET_IPV4_ROUTE_ERROR_COST=12,
NET_IPV4_ROUTE_ERROR_BURST=13,
NET_IPV4_ROUTE_GC_ELASTICITY=14,
- NET_IPV4_ROUTE_MTU_EXPIRES=15
+ NET_IPV4_ROUTE_MTU_EXPIRES=15,
+ NET_IPV4_ROUTE_SECRET_INTERVAL=16,
};
enum
struct mm_struct * mm;
struct vm_area_struct *mpnt, *prev, **npp, *free, *extra;
- if ((addr & ~PAGE_MASK) || addr > TASK_SIZE || len > TASK_SIZE-addr)
+ if ((addr & ~PAGE_MASK) || addr >= TASK_SIZE || len > TASK_SIZE-addr)
return -EINVAL;
if ((len = PAGE_ALIGN(len)) == 0)
unsigned long offset = len;
flush_cache_range(mm, old_addr, old_addr + len);
- flush_tlb_range(mm, old_addr, old_addr + len);
/*
* This is not the clever way to do this, but we're taking the
if (move_one_page(mm, old_addr + offset, new_addr + offset))
goto oops_we_failed;
}
+ flush_tlb_range(mm, old_addr, old_addr + len);
return 0;
/*
new_vma->vm_ops->open(new_vma);
insert_vm_struct(current->mm, new_vma);
merge_segments(current->mm, new_vma->vm_start, new_vma->vm_end);
+ /* XXX: possible errors masked, mapping might remain */
do_munmap(addr, old_len);
current->mm->total_vm += new_len >> PAGE_SHIFT;
if (new_vma->vm_flags & VM_LOCKED) {
old_len = PAGE_ALIGN(old_len);
new_len = PAGE_ALIGN(new_len);
+ if (old_len > TASK_SIZE || addr > TASK_SIZE - old_len)
+ goto out;
+
+ if (addr >= TASK_SIZE)
+ goto out;
+
/*
* Always allow a shrinking remap: that just unmaps
* the unnecessary pages..
*/
- ret = addr;
if (old_len >= new_len) {
- do_munmap(addr+new_len, old_len - new_len);
+ ret = do_munmap(addr+new_len, old_len - new_len);
+ if (!ret || old_len == new_len)
+ ret = addr;
goto out;
}
+ if (new_len > TASK_SIZE || addr > TASK_SIZE - new_len)
+ goto out;
+
/*
* Ok, we need to grow..
*/
for (type = swap_list.head; type >= 0; type = swap_info[type].next) {
p = swap_info + type;
if ((p->flags & SWP_WRITEOK) == SWP_WRITEOK) {
- if (p->swap_file) {
- if (p->swap_file == dentry)
- break;
- } else {
- if (S_ISBLK(dentry->d_inode->i_mode)
- && (p->swap_device == dentry->d_inode->i_rdev))
- break;
- }
+ if (p->swap_file == dentry)
+ break;
+ if (S_ISBLK(dentry->d_inode->i_mode) &&
+ p->swap_device == dentry->d_inode->i_rdev)
+ break;
}
prev = type;
}
extern void tcp_init(void);
extern void tcp_v4_init(struct net_proto_family *);
+extern void ipfrag_init(void);
/*
proc_net_register(&proc_net_tcp);
proc_net_register(&proc_net_udp);
#endif /* CONFIG_PROC_FS */
+
+ ipfrag_init();
}
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/netdevice.h>
+#include <linux/jhash.h>
+#include <linux/random.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#define IPQ_HASHSZ 64
struct ipq *ipq_hash[IPQ_HASHSZ];
+/* Secret key for the fragment hash; re-randomized periodically by
+ * ipfrag_secret_rebuild(). */
+static u32 ipfrag_hash_rnd;
-#define ipqhashfn(id, saddr, daddr, prot) \
-	((((id) >> 1) ^ (saddr) ^ (daddr) ^ (prot)) & (IPQ_HASHSZ - 1))
+/* Keyed hash over (id, protocol, saddr, daddr).  With a secret key an
+ * attacker cannot predict bucket placement and force all fragments
+ * into one chain (CAN-2003-0244 hash-collision DoS hardening). */
+static unsigned int ipqhashfn(u16 id, u32 saddr, u32 daddr, u8 prot)
+{
+	return jhash_3words((u32)id << 16 | prot, saddr, daddr,
+			    ipfrag_hash_rnd) & (IPQ_HASHSZ - 1);
+}
+
+static struct timer_list ipfrag_secret_timer;
+/* Interval between rekeyings of the fragment hash secret (jiffies). */
+static int ipfrag_secret_interval = 10 * 60 * HZ;
+
+/* Timer callback: draw a fresh random hash secret and move every
+ * queued fragment chain to the bucket the new secret selects, so a
+ * learned secret cannot be exploited indefinitely.
+ * NOTE(review): relies on the timer/bottom-half execution context to
+ * exclude concurrent ipq_hash writers -- confirm against the
+ * surrounding locking rules. */
+static void ipfrag_secret_rebuild(unsigned long dummy)
+{
+	unsigned long now = jiffies;
+	int i;
+
+	get_random_bytes(&ipfrag_hash_rnd, sizeof(u32));
+	for (i = 0; i < IPQ_HASHSZ; i++) {
+		struct ipq *q;
+
+		q = ipq_hash[i];
+		while (q) {
+			struct ipq *next = q->next;
+			unsigned int hval = ipqhashfn(q->iph->id,
+						      q->iph->saddr,
+						      q->iph->daddr,
+						      q->iph->protocol);
+
+			if (hval != i) {
+				/* Unlink. */
+				if (q->next)
+					q->next->pprev = q->pprev;
+				*q->pprev = q->next;
+
+				/* Relink to new hash chain. */
+				if ((q->next = ipq_hash[hval]) != NULL)
+					q->next->pprev = &q->next;
+				ipq_hash[hval] = q;
+				q->pprev = &ipq_hash[hval];
+			}
+
+			q = next;
+		}
+	}
+
+	/* Re-arm ourselves for the next rekey. */
+	mod_timer(&ipfrag_secret_timer, now + ipfrag_secret_interval);
+}
atomic_t ip_frag_mem = ATOMIC_INIT(0); /* Memory used for fragments */
return NULL;
}
+/* Boot-time setup: seed the fragment hash secret from boot-time state
+ * (presumably the random pool is not yet useful this early -- the
+ * secret is re-randomized by ipfrag_secret_rebuild() anyway) and
+ * start the periodic rekey timer. */
+void ipfrag_init(void)
+{
+	ipfrag_hash_rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^
+				 (jiffies ^ (jiffies >> 6)));
+
+	init_timer(&ipfrag_secret_timer);
+	ipfrag_secret_timer.function = ipfrag_secret_rebuild;
+	ipfrag_secret_timer.expires = jiffies + ipfrag_secret_interval;
+	add_timer(&ipfrag_secret_timer);
+}
+
/* Process an incoming IP datagram fragment. */
struct sk_buff *ip_defrag(struct sk_buff *skb)
{
#include <linux/igmp.h>
#include <linux/pkt_sched.h>
#include <linux/mroute.h>
+#include <linux/random.h>
+#include <linux/jhash.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/route.h>
int ip_rt_error_burst = 5*HZ;
int ip_rt_gc_elasticity = 8;
int ip_rt_mtu_expires = 10*60*HZ;
-
+int ip_rt_secret_interval = 10*60*HZ;
static unsigned long rt_deadline = 0;
#define RTprint(a...) printk(KERN_DEBUG a)
{ NULL, NULL, 0, 0L, rt_run_flush };
static struct timer_list rt_periodic_timer =
{ NULL, NULL, 0, 0L, NULL };
+static struct timer_list rt_secret_timer =
+ { NULL, NULL, 0, 0L, NULL };
/*
* Interface to generic destination cache.
* Route cache.
*/
-struct rtable *rt_hash_table[RT_HASH_DIVISOR];
+struct rtable *rt_hash_table[RT_HASH_DIVISOR];
+static unsigned int rt_hash_rnd;
static int rt_intern_hash(unsigned hash, struct rtable * rth, struct rtable ** res);
-static __inline__ unsigned rt_hash_code(u32 daddr, u32 saddr, u8 tos)
+/* Keyed route-cache hash over (daddr, saddr, tos): rt_hash_rnd makes
+ * bucket selection unpredictable to remote attackers (hash-collision
+ * DoS hardening, CAN-2003-0244). */
+static unsigned rt_hash_code(u32 daddr, u32 saddr, u8 tos)
{
-	unsigned hash = ((daddr&0xF0F0F0F0)>>4)|((daddr&0x0F0F0F0F)<<4);
-	hash = hash^saddr^tos;
-	hash = hash^(hash>>16);
-	return (hash^(hash>>8)) & 0xFF;
+	return (jhash_3words(daddr, saddr, (u32) tos, rt_hash_rnd) & 0xFF);
}
#ifdef CONFIG_PROC_FS
rt_deadline = 0;
+ get_random_bytes(&rt_hash_rnd, 4);
+
start_bh_atomic();
for (i=0; i<RT_HASH_DIVISOR; i++) {
if ((rth = xchg(&rt_hash_table[i], NULL)) == NULL)
end_bh_atomic();
}
+/* Timer callback: flush the route cache so entries are rebuilt under a
+ * fresh hash secret (rt_hash_rnd is re-seeded via get_random_bytes()
+ * in the flush path), then re-arm for the next interval. */
+static void rt_secret_rebuild(unsigned long dummy)
+{
+	unsigned long now = jiffies;
+
+	rt_cache_flush(0);
+	mod_timer(&rt_secret_timer, now + ip_rt_secret_interval);
+}
+
/*
Short description of GC goals.
{NET_IPV4_ROUTE_MTU_EXPIRES, "mtu_expires",
&ip_rt_mtu_expires, sizeof(int), 0644, NULL,
&proc_dointvec_jiffies, &sysctl_jiffies},
+ {NET_IPV4_ROUTE_SECRET_INTERVAL, "secret_interval",
+ &ip_rt_secret_interval, sizeof(int), 0644, NULL,
+ &proc_dointvec_jiffies, &sysctl_jiffies},
{0}
};
#endif
devinet_init();
ip_fib_init();
rt_periodic_timer.function = rt_check_expire;
+ rt_secret_timer.function = rt_secret_rebuild;
/* All the timers, started at system startup tend
to synchronize. Perturb it a bit.
*/
+ ip_rt_gc_interval;
add_timer(&rt_periodic_timer);
+	rt_secret_timer.expires = jiffies + net_random()%ip_rt_secret_interval
+		+ ip_rt_secret_interval;
+ add_timer(&rt_secret_timer);
+
#ifdef CONFIG_PROC_FS
proc_net_register(&(struct proc_dir_entry) {
PROC_NET_RTCACHE, 8, "rt_cache",