git.neil.brown.name Git - history.git/commitdiff
[PATCH] ia64: Enable cpu_vm_mask maintenance and improve SN2 TLB flushing
author: Jack Steiner <steiner@sgi.com>
Tue, 10 Feb 2004 04:16:10 +0000 (20:16 -0800)
committer: David Mosberger <davidm@tiger.hpl.hp.com>
Tue, 10 Feb 2004 04:16:10 +0000 (20:16 -0800)
arch/ia64/sn/kernel/sn2/sn2_smp.c
include/asm-ia64/mmu_context.h

index 14b3f21fc9be2cb85781c0265d79b6a3fae8a3fb..3cfb3cd74d51d68f177de9c567be888d88e9dd84 100644 (file)
@@ -5,7 +5,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
  */
 
 #include <linux/init.h>
@@ -27,6 +27,8 @@
 #include <asm/delay.h>
 #include <asm/io.h>
 #include <asm/smp.h>
+#include <asm/numa.h>
+#include <asm/bitops.h>
 #include <asm/hw_irq.h>
 #include <asm/current.h>
 #include <asm/sn/sn_cpuid.h>
@@ -67,14 +69,56 @@ wait_piowc(void)
  *
  * Purges the translation caches of all processors of the given virtual address
  * range.
+ *
+ * Note:
+ *     - cpu_vm_mask is a bit mask that indicates which cpus have loaded the context.
+ *     - cpu_vm_mask is converted into a nodemask of the nodes containing the
+ *       cpus in cpu_vm_mask.
+ *     - if only one bit is set in cpu_vm_mask & it is the current cpu,
+ *       then only the local TLB needs to be flushed. This flushing can be done
+ *       using ptc.l. This is the common case & avoids the global spinlock.
+ *     - if multiple cpus have loaded the context, then flushing has to be
+ *       done with ptc.g/MMRs under protection of the global ptc_lock.
  */
 
 void
 sn2_global_tlb_purge (unsigned long start, unsigned long end, unsigned long nbits)
 {
-       int                     cnode, mycnode, nasid, flushed=0;
+       int                     i, cnode, mynasid, cpu, lcpu=0, nasid, flushed=0;
        volatile unsigned       long    *ptc0, *ptc1;
        unsigned long           flags=0, data0, data1;
+       struct mm_struct        *mm=current->active_mm;
+       short                   nasids[NR_NODES], nix;
+       DECLARE_BITMAP(nodes_flushed, NR_NODES);
+
+       CLEAR_BITMAP(nodes_flushed, NR_NODES);
+
+       i = 0;
+
+       for_each_cpu_mask(cpu, mm->cpu_vm_mask) {
+               cnode = cpu_to_node(cpu);
+               __set_bit(cnode, nodes_flushed);
+               lcpu = cpu;
+               i++;
+       }
+
+       preempt_disable();
+
+       if (likely(i == 1 && lcpu == smp_processor_id())) {
+               do {
+                       ia64_ptcl(start, nbits<<2);
+                       start += (1UL << nbits);
+               } while (start < end);
+               ia64_srlz_i();
+               preempt_enable();
+               return;
+       }
+
+       nix = 0;
+       for (cnode=find_first_bit(&nodes_flushed, NR_NODES); cnode < NR_NODES; 
+                       cnode=find_next_bit(&nodes_flushed, NR_NODES, ++cnode))
+               nasids[nix++] = cnodeid_to_nasid(cnode);
+
 
        data0 = (1UL<<SH_PTC_0_A_SHFT) |
                (nbits<<SH_PTC_0_PS_SHFT) |
@@ -84,20 +128,19 @@ sn2_global_tlb_purge (unsigned long start, unsigned long end, unsigned long nbit
        ptc0 = (long*)GLOBAL_MMR_PHYS_ADDR(0, SH_PTC_0);
        ptc1 = (long*)GLOBAL_MMR_PHYS_ADDR(0, SH_PTC_1);
 
-       mycnode = numa_node_id();
+
+       mynasid = smp_physical_node_id();
 
        spin_lock_irqsave(&sn2_global_ptc_lock, flags);
 
        do {
                data1 = start | (1UL<<SH_PTC_1_START_SHFT);
-               for (cnode = 0; cnode < numnodes; cnode++) {
-                       if (is_headless_node(cnode))
-                               continue;
-                       if (cnode == mycnode) {
+               for (i=0; i<nix; i++) {
+                       nasid = nasids[i];
+                       if (likely(nasid == mynasid)) {
                                ia64_ptcga(start, nbits<<2);
                                ia64_srlz_i();
                        } else {
-                               nasid = cnodeid_to_nasid(cnode);
                                ptc0 = CHANGE_NASID(nasid, ptc0);
                                ptc1 = CHANGE_NASID(nasid, ptc1);
                                pio_atomic_phys_write_mmrs(ptc0, data0, ptc1, data1);
@@ -115,6 +158,7 @@ sn2_global_tlb_purge (unsigned long start, unsigned long end, unsigned long nbit
 
        spin_unlock_irqrestore(&sn2_global_ptc_lock, flags);
 
+       preempt_enable();
 }
 
 /*
index 3a3ea55e9ab3b593c150b5d0da302d1ee647702d..41d65da53e859c57904b3339a0730a60eb9a59dd 100644 (file)
@@ -106,6 +106,7 @@ get_mmu_context (struct mm_struct *mm)
                /* re-check, now that we've got the lock: */
                context = mm->context;
                if (context == 0) {
+                       cpus_clear(mm->cpu_vm_mask);
                        if (ia64_ctx.next >= ia64_ctx.limit)
                                wrap_mmu_context(mm);
                        mm->context = context = ia64_ctx.next++;
@@ -170,6 +171,8 @@ activate_context (struct mm_struct *mm)
        do {
                context = get_mmu_context(mm);
                MMU_TRACE('A', smp_processor_id(), mm, context);
+               if (!cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
+                       cpu_set(smp_processor_id(), mm->cpu_vm_mask);
                reload_context(context);
                MMU_TRACE('a', smp_processor_id(), mm, context);
                /* in the unlikely event of a TLB-flush by another thread, redo the load: */