kernel/context_tracking.c

/*
 * Context tracking: Probe on high level context boundaries such as kernel
 * and userspace. This includes syscalls and exceptions entry/exit.
 *
 * This is used by RCU to remove its dependency on the timer tick while a CPU
 * runs in userspace.
 *
 * Started by Frederic Weisbecker:
 *
 * Copyright (C) 2012 Red Hat, Inc., Frederic Weisbecker <fweisbec@redhat.com>
 *
 * Many thanks to Gilad Ben-Yossef, Paul McKenney, Ingo Molnar, Andrew Morton,
 * Steven Rostedt, Peter Zijlstra for suggestions and improvements.
 *
 */

#include <linux/context_tracking.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/hardirq.h>
#include <linux/export.h>

#define CREATE_TRACE_POINTS
#include <trace/events/context_tracking.h>

struct static_key context_tracking_enabled = STATIC_KEY_INIT_FALSE;
EXPORT_SYMBOL_GPL(context_tracking_enabled);

DEFINE_PER_CPU(struct context_tracking, context_tracking);
EXPORT_SYMBOL_GPL(context_tracking);

void context_tracking_cpu_set(int cpu)
{
    if (!per_cpu(context_tracking.active, cpu)) {
        per_cpu(context_tracking.active, cpu) = true;
        static_key_slow_inc(&context_tracking_enabled);
    }
}
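
/*
 * Example (illustrative sketch, not part of the original file): a boot
 * time caller, such as the full dynticks setup code, can mark every CPU
 * of a cpumask for context tracking. The function and parameter names
 * below are made up for the example.
 */
static void __init example_mark_tracked_cpus(const struct cpumask *tracked_mask)
{
    int cpu;

    for_each_cpu(cpu, tracked_mask)
        context_tracking_cpu_set(cpu);
}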

/**
 * context_tracking_user_enter - Inform the context tracking that the CPU is going to
 * enter userspace mode.
 *
 * This function must be called right before we switch from the kernel
 * to userspace, when it's guaranteed the remaining kernel instructions
 * to execute won't use any RCU read side critical section because this
 * function puts RCU in an extended quiescent state.
 */
void context_tracking_user_enter(void)
{
    unsigned long flags;

    /*
     * Repeat the user_enter() check here because some archs may be calling
     * this from asm and if no CPU needs context tracking, they shouldn't
     * go further. Repeat the check here until they support the inline static
     * key check.
     */
    if (!context_tracking_is_enabled())
        return;

    /*
     * Some contexts may involve an exception occurring in an irq,
     * leading to that nesting:
     * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
     * This would mess up the dyntick_nesting count though. And rcu_irq_*()
     * helpers are enough to protect RCU uses inside the exception. So
     * just return immediately if we detect we are in an IRQ.
     */
    if (in_interrupt())
        return;

    /* Kernel threads aren't supposed to go to userspace */
    WARN_ON_ONCE(!current->mm);

    local_irq_save(flags);
    if (__this_cpu_read(context_tracking.state) != IN_USER) {
        if (__this_cpu_read(context_tracking.active)) {
            trace_user_enter(0);
            /*
             * At this stage, only low level arch entry code remains and
             * then we'll run in userspace. We can assume there won't be
             * any RCU read-side critical section until the next call to
             * user_exit() or rcu_irq_enter(). Let's remove RCU's dependency
             * on the tick.
             */
            vtime_user_enter(current);
            rcu_user_enter();
        }
        /*
         * Even if context tracking is disabled on this CPU, because it's outside
         * the full dynticks mask for example, we still have to keep track of the
         * context transitions and states to prevent inconsistency on those of
         * other CPUs.
         * If a task triggers an exception in userspace, sleeps on the exception
         * handler and then migrates to another CPU, that new CPU must know where
         * the exception returns by the time we call exception_exit().
         * This information can only be provided by the previous CPU when it called
         * exception_enter().
         * OTOH we can spare the calls to vtime and RCU when context_tracking.active
         * is false because we know that CPU is not tickless.
         */
        __this_cpu_write(context_tracking.state, IN_USER);
    }
    local_irq_restore(flags);
}
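
/*
 * Example (hedged sketch, not real arch code): an architecture's syscall
 * return path is expected to call user_enter(), the static key guarded
 * wrapper around the function above, as its last high level step before
 * the final return to userspace. The function name below is invented.
 */
static void example_syscall_return_to_user(void)
{
    /* ... handle pending signals, rescheduling, etc. ... */

    /* Last stop before userspace: no RCU read side sections after this */
    user_enter();
}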

#ifdef CONFIG_PREEMPT
/**
 * preempt_schedule_context - preempt_schedule called by tracing
 *
 * The tracing infrastructure uses preempt_enable_notrace to prevent
 * recursion and tracing preempt enabling caused by the tracing
 * infrastructure itself. But as tracing can happen in areas coming
 * from userspace or just about to enter userspace, a preempt enable
 * can occur before user_exit() is called. This will cause the scheduler
 * to be called when the system is still in usermode.
 *
 * To prevent this, preempt_enable_notrace() will use this function
 * instead of preempt_schedule() to exit user context if needed before
 * calling the scheduler.
 */
asmlinkage __visible void __sched notrace preempt_schedule_context(void)
{
    enum ctx_state prev_ctx;

    if (likely(!preemptible()))
        return;

    /*
     * Need to disable preemption in case user_exit() is traced
     * and the tracer calls preempt_enable_notrace() causing
     * an infinite recursion.
     */
    preempt_disable_notrace();
    prev_ctx = exception_enter();
    preempt_enable_no_resched_notrace();

    preempt_schedule();

    preempt_disable_notrace();
    exception_exit(prev_ctx);
    preempt_enable_notrace();
}
EXPORT_SYMBOL_GPL(preempt_schedule_context);
#endif /* CONFIG_PREEMPT */
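
/*
 * Example (hedged sketch): a tracing callback brackets its work with the
 * notrace preemption helpers. If the final preempt_enable_notrace() finds
 * a reschedule pending, it lands in preempt_schedule_context() above,
 * which restores the kernel context first. The probe name is invented.
 */
static void notrace example_trace_probe(void)
{
    preempt_disable_notrace();
    /* ... record an event; this may run just before user_exit() ... */
    preempt_enable_notrace();
}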

/**
 * context_tracking_user_exit - Inform the context tracking that the CPU is
 * exiting userspace mode and entering the kernel.
 *
 * This function must be called after we entered the kernel from userspace
 * before any use of RCU read side critical section. This potentially includes
 * any high level kernel code like syscalls, exceptions, signal handling, etc.
 *
 * This call supports re-entrancy. This way it can be called from any exception
 * handler without needing to know if we came from userspace or not.
 */
void context_tracking_user_exit(void)
{
    unsigned long flags;

    if (!context_tracking_is_enabled())
        return;

    if (in_interrupt())
        return;

    local_irq_save(flags);
    if (__this_cpu_read(context_tracking.state) == IN_USER) {
        if (__this_cpu_read(context_tracking.active)) {
            /*
             * We are going to run code that may use RCU. Inform
             * RCU core about that (ie: we may need the tick again).
             */
            rcu_user_exit();
            vtime_user_exit(current);
            trace_user_exit(0);
        }
        __this_cpu_write(context_tracking.state, IN_KERNEL);
    }
    local_irq_restore(flags);
}
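
/*
 * Example (sketch of the usual arch pattern): exception handlers bracket
 * their work with exception_enter()/exception_exit(), which build on
 * context_tracking_user_exit() and context_tracking_user_enter() and rely
 * on the re-entrancy described above. The handler name is invented.
 */
static void example_do_page_fault(struct pt_regs *regs)
{
    enum ctx_state prev_state;

    prev_state = exception_enter();

    /* ... handle the fault; RCU read side sections are safe here ... */

    exception_exit(prev_state);
}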

/**
 * __context_tracking_task_switch - context switch the syscall callbacks
 * @prev: the task that is being switched out
 * @next: the task that is being switched in
 *
 * The context tracking uses the syscall slow path to implement its user/kernel
 * boundary probes on syscalls. This way it doesn't impact the syscall fast
 * path on CPUs that don't do context tracking.
 *
 * But we need to clear the flag on the previous task because it may later
 * migrate to some CPU that doesn't do the context tracking. As such the TIF
 * flag may not be desired there.
 */
void __context_tracking_task_switch(struct task_struct *prev,
                    struct task_struct *next)
{
    clear_tsk_thread_flag(prev, TIF_NOHZ);
    set_tsk_thread_flag(next, TIF_NOHZ);
}
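
/*
 * Example (sketch): the scheduler reaches the function above through a
 * static key guarded inline wrapper, roughly like the one below, called
 * from context_switch(). See <linux/context_tracking.h> for the real
 * definition.
 */
static inline void sketch_context_tracking_task_switch(struct task_struct *prev,
                    struct task_struct *next)
{
    if (context_tracking_is_enabled())
        __context_tracking_task_switch(prev, next);
}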

#ifdef CONFIG_CONTEXT_TRACKING_FORCE
void __init context_tracking_init(void)
{
    int cpu;

    for_each_possible_cpu(cpu)
        context_tracking_cpu_set(cpu);
}
#endif
