kernel/sched_stats.h

#ifdef CONFIG_SCHEDSTATS
/*
 * bump this up when changing the output format or the meaning of an existing
 * format, so that tools can adapt (or abort)
 */
#define SCHEDSTAT_VERSION 15
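
/*
 * Illustrative only (editor's sketch, not part of the original file):
 * the shape of the output show_schedstat() below produces, with made-up
 * numbers.  After "cpu%d" the nine fields are yld_count, sched_switch,
 * sched_count, sched_goidle, ttwu_count, ttwu_local, rq_cpu_time,
 * rq_sched_info.run_delay and rq_sched_info.pcount, in that order:
 *
 *	version 15
 *	timestamp 4294937431
 *	cpu0 12 0 4321 1234 567 89 123456789 987654321 4000
 *	domain0 00000003 <8 lb_* counters per idle type> <12 alb/sbe/sbf/ttwu counters>
 */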

static int show_schedstat(struct seq_file *seq, void *v)
{
    int cpu;
    int mask_len = DIV_ROUND_UP(NR_CPUS, 32) * 9;
    char *mask_str = kmalloc(mask_len, GFP_KERNEL);

    if (mask_str == NULL)
        return -ENOMEM;

    seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
    seq_printf(seq, "timestamp %lu\n", jiffies);
    for_each_online_cpu(cpu) {
        struct rq *rq = cpu_rq(cpu);
#ifdef CONFIG_SMP
        struct sched_domain *sd;
        int dcount = 0;
#endif

        /* runqueue-specific stats */
        seq_printf(seq,
            "cpu%d %u %u %u %u %u %u %llu %llu %lu",
            cpu, rq->yld_count,
            rq->sched_switch, rq->sched_count, rq->sched_goidle,
            rq->ttwu_count, rq->ttwu_local,
            rq->rq_cpu_time,
            rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);

        seq_printf(seq, "\n");

#ifdef CONFIG_SMP
        /* domain-specific stats */
        preempt_disable();
        for_each_domain(cpu, sd) {
            enum cpu_idle_type itype;

            cpumask_scnprintf(mask_str, mask_len,
                      sched_domain_span(sd));
            seq_printf(seq, "domain%d %s", dcount++, mask_str);
            for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
                    itype++) {
                seq_printf(seq, " %u %u %u %u %u %u %u %u",
                    sd->lb_count[itype],
                    sd->lb_balanced[itype],
                    sd->lb_failed[itype],
                    sd->lb_imbalance[itype],
                    sd->lb_gained[itype],
                    sd->lb_hot_gained[itype],
                    sd->lb_nobusyq[itype],
                    sd->lb_nobusyg[itype]);
            }
            seq_printf(seq,
                   " %u %u %u %u %u %u %u %u %u %u %u %u\n",
                sd->alb_count, sd->alb_failed, sd->alb_pushed,
                sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
                sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
                sd->ttwu_wake_remote, sd->ttwu_move_affine,
                sd->ttwu_move_balance);
        }
        preempt_enable();
#endif
    }
    kfree(mask_str);
    return 0;
}

static int schedstat_open(struct inode *inode, struct file *file)
{
    unsigned int size = PAGE_SIZE * (1 + num_online_cpus() / 32);
    char *buf = kmalloc(size, GFP_KERNEL);
    struct seq_file *m;
    int res;

    if (!buf)
        return -ENOMEM;
    res = single_open(file, show_schedstat, NULL);
    if (!res) {
        m = file->private_data;
        m->buf = buf;
        m->size = size;
    } else
        kfree(buf);
    return res;
}
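
/*
 * Editorial note (not from the original file): single_open() normally
 * lets seq_read() start from a single page and repeatedly grow the
 * buffer when show_schedstat() overflows it.  Preallocating roughly a
 * page per 32 online cpus and handing it to the seq_file above lets
 * most reads of /proc/schedstat complete in a single pass.
 */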

static const struct file_operations proc_schedstat_operations = {
    .open = schedstat_open,
    .read = seq_read,
    .llseek = seq_lseek,
    .release = single_release,
};

static int __init proc_schedstat_init(void)
{
    proc_create("schedstat", 0, NULL, &proc_schedstat_operations);
    return 0;
}
module_init(proc_schedstat_init);

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
    if (rq) {
        rq->rq_sched_info.run_delay += delta;
        rq->rq_sched_info.pcount++;
    }
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
    if (rq)
        rq->rq_cpu_time += delta;
}

static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
    if (rq)
        rq->rq_sched_info.run_delay += delta;
}
# define schedstat_inc(rq, field) do { (rq)->field++; } while (0)
# define schedstat_add(rq, field, amt) do { (rq)->field += (amt); } while (0)
# define schedstat_set(var, val) do { var = (val); } while (0)
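
/*
 * Illustrative usage (editor's sketch, not from this file): call sites
 * in the scheduler proper use these macros unconditionally, e.g.
 *
 *	schedstat_inc(rq, yld_count);
 *	schedstat_set(se->wait_start, rq->clock);
 *
 * When CONFIG_SCHEDSTATS is off, the #else branch below turns them into
 * empty statements, so no #ifdef guards are needed at the call sites.
 */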
#else /* !CONFIG_SCHEDSTATS */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{}
# define schedstat_inc(rq, field) do { } while (0)
# define schedstat_add(rq, field, amt) do { } while (0)
# define schedstat_set(var, val) do { } while (0)
#endif

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
    t->sched_info.last_queued = 0;
}

/*
 * Called when a process is dequeued from the active array and given
 * the cpu. We should note that with the exception of interactive
 * tasks, the expired queue will become the active queue after the active
 * queue is empty, without explicitly dequeuing and requeuing tasks in the
 * expired queue. (Interactive tasks may be requeued directly to the
 * active queue, thus delaying tasks in the expired queue from running;
 * see scheduler_tick()).
 *
 * Though we are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a cpu, we call this routine
 * from dequeue_task() to account for possible rq->clock skew across cpus. The
 * delta taken on each cpu would annul the skew.
 */
static inline void sched_info_dequeued(struct task_struct *t)
{
    unsigned long long now = task_rq(t)->clock, delta = 0;

    if (unlikely(sched_info_on()))
        if (t->sched_info.last_queued)
            delta = now - t->sched_info.last_queued;
    sched_info_reset_dequeued(t);
    t->sched_info.run_delay += delta;

    rq_sched_info_dequeued(task_rq(t), delta);
}
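
/*
 * Worked example (editor's sketch with made-up clock values): a task is
 * stamped with last_queued = 100 from CPU0's rq->clock, then migrated
 * while runnable.  dequeue_task() on CPU0 takes its delta against CPU0's
 * clock and clears last_queued; the enqueue on CPU1 restamps last_queued
 * from CPU1's clock, so sched_info_arrive() takes its delta against
 * CPU1's clock.  Every partial delta uses a single cpu's clock for both
 * endpoints, which is how the cross-cpu skew mentioned above cancels.
 */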

/*
 * Called when a task finally hits the cpu. We can now calculate how
 * long it was waiting to run. We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct task_struct *t)
{
    unsigned long long now = task_rq(t)->clock, delta = 0;

    if (t->sched_info.last_queued)
        delta = now - t->sched_info.last_queued;
    sched_info_reset_dequeued(t);
    t->sched_info.run_delay += delta;
    t->sched_info.last_arrival = now;
    t->sched_info.pcount++;

    rq_sched_info_arrive(task_rq(t), delta);
}

/*
 * Called when a process is queued into either the active or expired
 * array. The time is noted and later used to determine how long the
 * task had to wait to reach the cpu. Since the expired queue will
 * become the active queue after the active queue is empty, without
 * dequeuing and requeuing any tasks, we are interested in queuing to
 * either. It is unusual but not impossible for tasks to be dequeued
 * and immediately requeued in the same or another array: this can
 * happen in sched_yield(), set_user_nice(), and even load_balance() as
 * it moves tasks from runqueue to runqueue.
 *
 * This function is only called from enqueue_task(), and it only updates
 * the timestamp if one is not already set. It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct task_struct *t)
{
    if (unlikely(sched_info_on()))
        if (!t->sched_info.last_queued)
            t->sched_info.last_queued = task_rq(t)->clock;
}

/*
 * Called when a process ceases being the active-running process, either
 * voluntarily or involuntarily. Now we can calculate how long we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_queued() to mark that it has now again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct task_struct *t)
{
    unsigned long long delta = task_rq(t)->clock -
                    t->sched_info.last_arrival;

    rq_sched_info_depart(task_rq(t), delta);

    if (t->state == TASK_RUNNING)
        sched_info_queued(t);
}

/*
 * Called when tasks are switched, typically involuntarily when their
 * time slice expires. (This may also be called when switching to or
 * from the idle task.) We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct task_struct *prev, struct task_struct *next)
{
    struct rq *rq = task_rq(prev);

    /*
     * prev now departs the cpu. It's not interesting to record
     * stats about how efficient we were at scheduling the idle
     * process, however.
     */
    if (prev != rq->idle)
        sched_info_depart(prev);

    if (next != rq->idle)
        sched_info_arrive(next);
}
static inline void
sched_info_switch(struct task_struct *prev, struct task_struct *next)
{
    if (unlikely(sched_info_on()))
        __sched_info_switch(prev, next);
}
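
/*
 * Editorial note (not from the original file): in kernels of this
 * vintage, sched_info_switch() is invoked from schedule() just before
 * context_switch(), under the runqueue lock and only when prev != next,
 * matching the assumption documented above.
 */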
#else
#define sched_info_queued(t) do { } while (0)
#define sched_info_reset_dequeued(t) do { } while (0)
#define sched_info_dequeued(t) do { } while (0)
#define sched_info_switch(t, next) do { } while (0)
#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */

/*
 * The following are functions that support scheduler-internal time accounting.
 * These functions are generally called at the timer tick. None of this depends
 * on CONFIG_SCHEDSTATS.
 */

/**
 * account_group_user_time - Maintain utime for a thread group.
 *
 * @tsk: Pointer to task structure.
 * @cputime: Time value by which to increment the utime field of the
 * thread_group_cputime structure.
 *
 * If thread group time is being maintained, lock the group's cputimer
 * and update the utime field there.
 */
static inline void account_group_user_time(struct task_struct *tsk,
                       cputime_t cputime)
{
    struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

    if (!cputimer->running)
        return;

    spin_lock(&cputimer->lock);
    cputimer->cputime.utime =
        cputime_add(cputimer->cputime.utime, cputime);
    spin_unlock(&cputimer->lock);
}

/**
 * account_group_system_time - Maintain stime for a thread group.
 *
 * @tsk: Pointer to task structure.
 * @cputime: Time value by which to increment the stime field of the
 * thread_group_cputime structure.
 *
 * If thread group time is being maintained, lock the group's cputimer
 * and update the stime field there.
 */
static inline void account_group_system_time(struct task_struct *tsk,
                         cputime_t cputime)
{
    struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

    if (!cputimer->running)
        return;

    spin_lock(&cputimer->lock);
    cputimer->cputime.stime =
        cputime_add(cputimer->cputime.stime, cputime);
    spin_unlock(&cputimer->lock);
}
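
/*
 * Illustrative call sites (editor's note, not from this file): the
 * tick-time accounting paths, e.g. account_user_time() and
 * account_system_time() in kernel/sched.c, are expected to forward the
 * same cputime increments here, so the per-thread-group totals stay in
 * step with the per-task utime/stime fields.
 */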

/**
 * account_group_exec_runtime - Maintain exec runtime for a thread group.
 *
 * @tsk: Pointer to task structure.
 * @ns: Time value by which to increment the sum_exec_runtime field
 * of the thread_group_cputime structure.
 *
 * If thread group time is being maintained, lock the group's cputimer
 * and update the sum_exec_runtime field there.
 */
static inline void account_group_exec_runtime(struct task_struct *tsk,
                          unsigned long long ns)
{
    struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

    if (!cputimer->running)
        return;

    spin_lock(&cputimer->lock);
    cputimer->cputime.sum_exec_runtime += ns;
    spin_unlock(&cputimer->lock);
}
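
/*
 * Editorial note (assumption, not from this file): scheduler classes
 * that track exec runtime, e.g. update_curr() in kernel/sched_fair.c,
 * are the expected callers here, passing the same nanosecond delta they
 * add to the task's se.sum_exec_runtime.
 */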
