static int batch_requests;
unsigned long blk_max_low_pfn, blk_max_pfn;
-atomic_t nr_iowait_tasks = ATOMIC_INIT(0);
int blk_nohighio = 0;
static struct congestion_state {
atomic_inc(&congestion_states[rw].nr_congested_queues);
}
-/*
- * This task is about to go to sleep on IO. Increment nr_iowait_tasks so
- * that process accounting knows that this is a task in IO wait state.
- *
- * But don't do that if it is a deliberate, throttling IO wait (this task
- * has set its backing_dev_info: the queue against which it should throttle)
- */
-void io_schedule(void)
-{
- atomic_inc(&nr_iowait_tasks);
- schedule();
- atomic_dec(&nr_iowait_tasks);
-}
-
-void io_schedule_timeout(long timeout)
-{
- atomic_inc(&nr_iowait_tasks);
- schedule_timeout(timeout);
- atomic_dec(&nr_iowait_tasks);
-}
-
/**
* blk_get_backing_dev_info - get the address of a queue's backing_dev_info
* @dev: device
jiffies_to_clock_t(kstat_cpu(i).cpustat.nice),
jiffies_to_clock_t(kstat_cpu(i).cpustat.system),
jiffies_to_clock_t(kstat_cpu(i).cpustat.idle),
- jiffies_to_clock_t(kstat_cpu(i).cpustat.idle));
+ jiffies_to_clock_t(kstat_cpu(i).cpustat.iowait));
}
len += sprintf(page + len, "intr %u", sum);
"btime %lu\n"
"processes %lu\n"
"procs_running %lu\n"
- "procs_blocked %u\n",
+ "procs_blocked %lu\n",
nr_context_switches(),
xtime.tv_sec - jif / HZ,
total_forks,
nr_running(),
- atomic_read(&nr_iowait_tasks));
+ nr_iowait());
return proc_calc_metrics(page, start, off, count, eof, len);
}
extern int last_pid;
extern unsigned long nr_running(void);
extern unsigned long nr_uninterruptible(void);
+extern unsigned long nr_iowait(void);
#include <linux/time.h>
#include <linux/param.h>
extern void show_stack(unsigned long *stack);
extern void show_regs(struct pt_regs *);
+void io_schedule(void);
+void io_schedule_timeout(long timeout);
extern void cpu_init (void);
extern void trap_init(void);
task_t *migration_thread;
struct list_head migration_queue;
+ atomic_t nr_iowait;
} ____cacheline_aligned;
static struct runqueue runqueues[NR_CPUS] __cacheline_aligned;
{
unsigned long i, sum = 0;
- for (i = 0; i < NR_CPUS; i++)
+ for (i = 0; i < NR_CPUS; i++) {
+ if (!cpu_online(i))
+ continue;
sum += cpu_rq(i)->nr_uninterruptible;
-
+ }
return sum;
}
{
unsigned long i, sum = 0;
- for (i = 0; i < NR_CPUS; i++)
+ for (i = 0; i < NR_CPUS; i++) {
+ if (!cpu_online(i))
+ continue;
sum += cpu_rq(i)->nr_switches;
+ }
+ return sum;
+}
+
+/*
+ * Total number of tasks currently accounted as sleeping in IO wait
+ * (incremented/decremented by io_schedule()/io_schedule_timeout()),
+ * summed over the runqueues of all online CPUs.  Feeds the
+ * "procs_blocked" line in /proc/stat.
+ */
+unsigned long nr_iowait(void)
+{
+	unsigned long i, sum = 0;
+	for (i = 0; i < NR_CPUS; ++i) {
+		if (!cpu_online(i))
+			continue;
+		sum += atomic_read(&cpu_rq(i)->nr_iowait);
+	}
+	return sum;
+}
/* note: this timer irq context must be accounted for as well */
if (irq_count() - HARDIRQ_OFFSET >= SOFTIRQ_OFFSET)
kstat_cpu(cpu).cpustat.system += sys_ticks;
- else if (atomic_read(&nr_iowait_tasks) > 0)
+ else if (atomic_read(&rq->nr_iowait) > 0)
kstat_cpu(cpu).cpustat.iowait += sys_ticks;
else
kstat_cpu(cpu).cpustat.idle += sys_ticks;
sys_sched_yield();
}
+/*
+ * This task is about to go to sleep on IO. Increment rq->nr_iowait so
+ * that process accounting knows that this is a task in IO wait state.
+ *
+ * But don't do that if it is a deliberate, throttling IO wait (this task
+ * has set its backing_dev_info: the queue against which it should throttle)
+ */
+void io_schedule(void)
+{
+	struct runqueue *rq = this_rq();
+
+	/*
+	 * NOTE(review): rq is sampled once, before blocking; if the task
+	 * resumes on a different CPU after schedule(), the decrement still
+	 * hits the original runqueue, so individual per-CPU counters may
+	 * skew transiently.  Only the global sum (nr_iowait()) is meant to
+	 * be meaningful -- confirm this is intentional.
+	 */
+	atomic_inc(&rq->nr_iowait);
+	schedule();
+	atomic_dec(&rq->nr_iowait);
+}
+
+/*
+ * Timeout variant of io_schedule(): account this task as being in IO
+ * wait for the duration of a schedule_timeout() sleep.  Same per-CPU
+ * accounting caveats as io_schedule() apply.
+ */
+void io_schedule_timeout(long timeout)
+{
+	struct runqueue *rq = this_rq();
+
+	atomic_inc(&rq->nr_iowait);
+	schedule_timeout(timeout);
+	atomic_dec(&rq->nr_iowait);
+}
+
/**
* sys_sched_get_priority_max - return maximum RT priority.
* @policy: scheduling class.
rq->expired = rq->arrays + 1;
spin_lock_init(&rq->lock);
INIT_LIST_HEAD(&rq->migration_queue);
+ atomic_set(&rq->nr_iowait, 0);
for (j = 0; j < 2; j++) {
array = rq->arrays + j;