*/
static void update_hw(unsigned long irq, unsigned long mask)
{
+#ifdef CONFIG_ALPHA_ALCOR
+ /* always mask out 20..30 (which are unused) */
+ mask |= 0x7ff00000UL << 16;
+#endif
switch (irq) {
#if NR_IRQS == 48
default:
DEVICE( S3, S3_TRIO64V2, "Trio64V2/DX or /GX"),
DEVICE( S3, S3_PLATO_PXG, "PLATO/PX (graphics)"),
DEVICE( S3, S3_ViRGE_DXGX, "ViRGE/DX or /GX"),
+ DEVICE( S3, S3_ViRGE_GX2, "ViRGE/GX2"),
DEVICE( INTEL, INTEL_82375, "82375EB"),
BRIDGE( INTEL, INTEL_82424, "82424ZX Saturn", 0x00),
DEVICE( INTEL, INTEL_82378, "82378IB"),
if (ex.a_data + ex.a_bss > rlim)
return -ENOMEM;
+ if (flush_old_exec(bprm))
+ return -ENOMEM;
+
/* OK, This is the point of no return */
- flush_old_exec(bprm);
current->mm->end_code = ex.a_text +
(current->mm->start_code = N_TXTADDR(ex));
}
}
+ if (flush_old_exec(bprm))
+ return -ENOMEM;
+
/* OK, This is the point of no return */
- flush_old_exec(bprm);
current->mm->end_data = 0;
current->mm->end_code = 0;
mpnt->vm_pte = 0;
insert_vm_struct(current->mm, mpnt);
current->mm->total_vm = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
- }
- for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
- if (bprm->page[i]) {
- current->mm->rss++;
- put_dirty_page(current,bprm->page[i],stack_base);
+ for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
+ if (bprm->page[i]) {
+ current->mm->rss++;
+ put_dirty_page(current,bprm->page[i],stack_base);
+ }
+ stack_base += PAGE_SIZE;
+ }
+ } else {
+ /*
+ * This one is tricky. We are already in the new context, so we cannot
+ * return with -ENOMEM. So we _have_ to deallocate argument pages here,
+ * if there is no VMA, they won't be freed at exit_mmap() -> memory leak.
+ *
+ * User space then gets a SIGSEGV when it tries to access argument pages.
+ */
+ for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
+ if (bprm->page[i]) {
+ free_page(bprm->page[i]);
+ bprm->page[i]=NULL;
+ }
}
- stack_base += PAGE_SIZE;
}
+
return p;
}
return result;
}
-static void exec_mmap(void)
+static int exec_mmap(void)
{
/*
* The clear_page_tables done later on exec does the right thing
* to the page directory when shared, except for graceful abort
- * (the oom is wrong there, too, IMHO)
*/
if (current->mm->count > 1) {
- struct mm_struct *mm = kmalloc(sizeof(*mm), GFP_KERNEL);
- if (!mm) {
- /* this is wrong, I think. */
- oom(current);
- return;
- }
+ struct mm_struct *old_mm, *mm = kmalloc(sizeof(*mm), GFP_KERNEL);
+ if (!mm)
+ return -ENOMEM;
+
*mm = *current->mm;
mm->def_flags = 0; /* should future lockings be kept? */
mm->count = 1;
mm->mmap_avl = NULL;
mm->total_vm = 0;
mm->rss = 0;
- current->mm->count--;
+
+ old_mm = current->mm;
current->mm = mm;
new_page_tables(current);
- return;
+
+ if ((old_mm != &init_mm) && (!--old_mm->count)) {
+ /*
+ * all threads exited while we were sleeping, 'old_mm' is held
+ * by us exclusively, let's get rid of it:
+ */
+ exit_mmap(old_mm);
+ free_page_tables(old_mm);
+ kfree(old_mm);
+ }
+
+ return 0;
}
exit_mmap(current->mm);
clear_page_tables(current);
+
+ return 0;
}
/*
}
}
-void flush_old_exec(struct linux_binprm * bprm)
+int flush_old_exec(struct linux_binprm * bprm)
{
int i;
int ch;
current->comm[i] = '\0';
/* Release all of the old mmap stuff. */
- exec_mmap();
+ if (exec_mmap())
+ return -ENOMEM;
flush_thread();
flush_old_signals(current->sig);
flush_old_files(current->files);
+
+ return 0;
}
/*
inode->i_count--;
+ if (inode->i_count)
+ /*
+ * Huoh, we were supposed to be the last user, but someone has
+ * grabbed it while we were sleeping. Don't destroy inode VM
+ * mappings, it might cause a memory leak.
+ */
+ return;
+
if (inode->i_mmap) {
printk("iput: inode %lu on device %s still has mappings.\n",
inode->i_ino, kdevname(inode->i_dev));
extern int prepare_binprm(struct linux_binprm *);
extern void remove_arg_zero(struct linux_binprm *);
extern int search_binary_handler(struct linux_binprm *,struct pt_regs *);
-extern void flush_old_exec(struct linux_binprm * bprm);
+extern int flush_old_exec(struct linux_binprm * bprm);
extern unsigned long setup_arg_pages(unsigned long p, struct linux_binprm * bprm);
extern unsigned long copy_strings(int argc,char ** argv,unsigned long *page,
unsigned long p, int from_kmem);
#define PCI_DEVICE_ID_S3_TRIO64V2 0x8901
#define PCI_DEVICE_ID_S3_PLATO_PXG 0x8902
#define PCI_DEVICE_ID_S3_ViRGE_DXGX 0x8a01
+#define PCI_DEVICE_ID_S3_ViRGE_GX2 0x8a10
#define PCI_VENDOR_ID_INTEL 0x8086
#define PCI_DEVICE_ID_INTEL_82375 0x0482
tmp->vm_flags &= ~VM_LOCKED;
tmp->vm_mm = mm;
tmp->vm_next = NULL;
+ if (copy_page_range(mm, current->mm, tmp)) {
+ kfree(tmp);
+ exit_mmap(mm);
+ return -ENOMEM;
+ }
if (tmp->vm_inode) {
tmp->vm_inode->i_count++;
/* insert tmp into the share list, just after mpnt */
mpnt->vm_next_share = tmp;
tmp->vm_prev_share = mpnt;
}
- if (copy_page_range(mm, current->mm, tmp)) {
- exit_mmap(mm);
- return -ENOMEM;
- }
if (tmp->vm_ops && tmp->vm_ops->open)
tmp->vm_ops->open(tmp);
*p = tmp;
return;
}
#endif
- if (p->counter > current->counter + 3)
+ if (p->policy != SCHED_OTHER || p->counter > current->counter + 3)
need_resched = 1;
nr_running++;
(p->prev_run = init_task.prev_run)->next_run = p;
pages = -pages;
vma->vm_mm->locked_vm += pages;
- if (newflags & VM_LOCKED)
+ if ((newflags & VM_LOCKED) && (newflags & VM_READ))
while (start < end) {
- char c = get_user((char *) start);
+ int c = get_user((int *) start);
__asm__ __volatile__("": :"r" (c));
start += PAGE_SIZE;
}
*/
for (;;) {
+ int was_locked;
struct sk_buff * skb = sk->send_head;
if (!skb)
break;
* We may need to remove this from the dev send list.
*/
cli();
- if (skb->next)
+ was_locked = skb_device_locked(skb);
+
+ if (was_locked) {
+ /* In this case, we are relying on the fact that kfree_skb
+ * will just set the free flag to be 3, and increment
+ * a counter. It will not actually free anything, and
+ * will not take much time
+ */
+ kfree_skb(skb, FREE_WRITE);
+ } else {
skb_unlink(skb);
+ }
sti();
- kfree_skb(skb, FREE_WRITE); /* write. */
+
+ if (!was_locked)
+ kfree_skb(skb, FREE_WRITE); /* write. */
if (!sk->dead)
sk->write_space(sk);
}
struct tcphdr *th;
struct iphdr *iph;
int size;
+ unsigned long flags;
dev = skb->dev;
IS_SKB(skb);
/* effect is that we'll send some unnecessary data, */
/* but the alternative is disastrous... */
- if (skb_device_locked(skb))
+ save_flags(flags);
+ cli();
+
+ if (skb_device_locked(skb)) {
+ restore_flags(flags);
break;
+ }
+
+ /* Unlink from any chain */
+ skb_unlink(skb);
+
+ restore_flags(flags);
/*
* Discard the surplus MAC header
* We still add up the counts as the round trip time wants
* adjusting.
*/
- if (sk && !skb_device_locked(skb))
+ if (!skb_device_locked(skb))
{
- /* Remove it from any existing driver queue first! */
- skb_unlink(skb);
/* Now queue it */
ip_statistics.IpOutRequests++;
dev_queue_xmit(skb, dev, sk->priority);
sk->packets_out++;
+ } else {
+ /* This shouldn't happen as we skip out above if the buffer is locked */
+ printk(KERN_WARNING "tcp_do_retransmit: sk_buff (%p) became locked\n", skb);
}
}
}
unsigned int minor = MINOR(inode->i_rdev);
struct sk_buff *skb;
skb=alloc_skb(count, GFP_KERNEL);
+ if (!skb)
+ return -ENOBUFS;
skb->free=1;
memcpy_fromfs(skb_put(skb,count),buf, count);
return (netlink_handler[minor])(skb);