git.neil.brown.name Git - history.git/commitdiff
[PATCH] 995/1: better EBSA110 idle
author: Nicolas Pitre <nico@cam.org>
Sun, 10 Mar 2002 14:44:30 +0000 (14:44 +0000)
committer: Russell King <src@flint.arm.linux.org.uk>
Sun, 10 Mar 2002 14:44:30 +0000 (14:44 +0000)
This should bring better performance, since all interrupts now run with
clock switching enabled, while the idle spinning always runs with the lower clock.

This also keeps the brokenness of that architecture localized while
preserving the sanity of the common SA idle function.

include/asm-arm/arch-ebsa110/system.h

index e33951995e689f20827d542c64b58c3280674ed3..d813800e86e3e457fa7c906a70f8664ebd6f77f7 100644 (file)
  * will stop our MCLK signal (which provides the clock for the glue
  * logic, and therefore the timer interrupt).
  *
- * Instead, we spin, waiting for either hlt_counter or need_resched()
- * to be set.  If we have been spinning for 2cs, then we drop the
- * core clock down to the memory clock.
+ * Instead, we spin, polling the IRQ_STAT register for the occurrence
+ * of any interrupt with core clock down to the memory clock.
  */
 static void arch_idle(void)
 {
-       unsigned long start_idle;
+       const char *irq_stat = (char *)0xff000000;
+       long flags;
 
-       start_idle = jiffies;
+       if (!hlt_counter)
+               return;
 
        do {
-               if (need_resched() || hlt_counter)
-                       goto slow_out;
-       } while (time_before(jiffies, start_idle + HZ/50));
-
-       cpu_do_idle(IDLE_CLOCK_SLOW);
-
-       while (!need_resched() && !hlt_counter) {
-               /* do nothing slowly */
-       }
-
-       cpu_do_idle(IDLE_CLOCK_FAST);
-slow_out:
+               /* disable interrupts */
+               cli();
+               /* check need_resched here to avoid races */
+               if (need_resched()) {
+                       sti();
+                       return;
+               }
+               /* disable clock switching */
+               asm volatile ("mcr%? p15, 0, ip, c15, c2, 2");
+               /* wait for an interrupt to occur */
+               while (!*irq_stat);
+               /* enable clock switching */
+               asm volatile ("mcr%? p15, 0, ip, c15, c1, 2");
+               /* allow the interrupt to happen */
+               sti();
+       } while (!need_resched());
 }
 
 #define arch_reset(mode)       cpu_reset(0x80000000)