target/linux/ubicom32/files/arch/ubicom32/kernel/ubicom32_context_switch.S

/*
 * arch/ubicom32/kernel/ubicom32_context_switch.S
 * Implements context switch and return functions.
 *
 * (C) Copyright 2009, Ubicom, Inc.
 *
 * This file is part of the Ubicom32 Linux Kernel Port.
 *
 * The Ubicom32 Linux Kernel Port is free software: you can redistribute
 * it and/or modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, either version 2 of the
 * License, or (at your option) any later version.
 *
 * The Ubicom32 Linux Kernel Port is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with the Ubicom32 Linux Kernel Port. If not,
 * see <http://www.gnu.org/licenses/>.
 *
 * Ubicom32 implementation derived from (with many thanks):
 * arch/m68knommu
 * arch/blackfin
 * arch/parisc
 */
#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/ubicom32-common.h>
#include <asm/ip5000.h>
#include <asm/range-protect.h>

/*
 * begin_restore_context()
 * Restore most of the context from sp (struct pt_regs *).
 *
 * This *can* be called without the global atomic lock (because sp is
 * not restored). Only d15 and a3 may be used between this call and the
 * call to complete_restore_context.
 */
.macro begin_restore_context
    move.4 d0, PT_D0(sp)
    move.4 d1, PT_D1(sp)
    move.4 d2, PT_D2(sp)
    move.4 d3, PT_D3(sp)
    move.4 d4, PT_D4(sp)
    move.4 d5, PT_D5(sp)
    move.4 d6, PT_D6(sp)
    move.4 d7, PT_D7(sp)
    move.4 d8, PT_D8(sp)
    move.4 d9, PT_D9(sp)
    move.4 d10, PT_D10(sp)
    move.4 d11, PT_D11(sp)
    move.4 d12, PT_D12(sp)
    move.4 d13, PT_D13(sp)
    move.4 d14, PT_D14(sp)
;; move.4 d15, PT_D15(sp) ; d15 is restored by complete_restore_context
    move.4 a0, PT_A0(sp)
    move.4 a1, PT_A1(sp)
    move.4 a2, PT_A2(sp)
;; move.4 a3, PT_A3(sp) ; a3 is restored by complete_restore_context
    move.4 a4, PT_A4(sp)
    move.4 a5, PT_A5(sp)
    move.4 a6, PT_A6(sp)
    move.4 acc0_hi, PT_ACC0HI(sp)
    move.4 acc0_lo, PT_ACC0LO(sp)
    move.4 mac_rc16, PT_MAC_RC16(sp)
    move.4 acc1_hi, PT_ACC1HI(sp)
    move.4 acc1_lo, PT_ACC1LO(sp)
    move.4 source3, PT_SOURCE3(sp)
    move.4 int_mask0, PT_INT_MASK0(sp)
    move.4 int_mask1, PT_INT_MASK1(sp)
.endm

/*
 * complete_restore_context()
 * Completely restore the context from sp (struct pt_regs *).
 *
 * Note: the recovered PC and CSR are pushed onto the recovered stack
 * and must be popped off before returning to the restored context.
 */
.macro complete_restore_context
    move.4 a3, sp
    move.4 d15, PT_D15(sp)
    move.4 sp, PT_SP(a3) ; Recover stack pointer from the save area
    move.4 -4(sp)++, PT_PC(a3) ; Push the saved PC onto the recovered stack
    move.4 -4(sp)++, PT_CSR(a3) ; Push the saved CSR onto the recovered stack
    move.4 a3, PT_A3(a3)
.endm

/*
 * restore_context()
 * Old single-step restore macro: performs both halves back to back.
 */
.macro restore_context
    begin_restore_context
    complete_restore_context
.endm

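/*
 * Usage note (a summary of the return paths below, not an additional
 * requirement): the restore is split in two, presumably to keep the
 * critical section short. The typical sequence is:
 *
 *    begin_restore_context        ; no lock needed; sp still valid
 *    atomic_lock_acquire          ; enter critical section
 *    complete_restore_context     ; switch sp, push pc/csr
 *    atomic_lock_release          ; leave critical section
 *    move.4 csr, (sp)4++          ; pop the saved csr
 *    ret (sp)4++                  ; pop the saved pc and return
 */
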
/*
 * ldsr_thread_enable_interrupts()
 * An assembly version of the enable interrupts function.
 *
 * The stack is fair game but all registers MUST be preserved.
 */
.macro ldsr_thread_enable_interrupts
    move.4 -4(sp)++, d3 ; Push d3
    move.4 -4(sp)++, a3 ; Push a3

    /*
     * Read the ROSR and obtain ~(1 << tid)
     */
    lsr.4 d3, rosr, #0x2 ; Move the thread portion of ROSR into d3
    lsl.4 d3, #1, d3 ; Perform a (1 << tid)
    not.4 d3, d3 ; Complement to get d3 == ~(1 << tid)

    /*
     * Get the value of the ldsr_soft_irq_mask
     */
    moveai a3, #%hi(ldsr_soft_irq_mask)
    move.4 a3, %lo(ldsr_soft_irq_mask)(a3)

    /*
     * Now re-enable interrupts for this thread and then
     * wake up the LDSR.
     */
    and.4 scratchpad1, scratchpad1, d3
    move.4 int_set0, a3

    /*
     * Restore the registers.
     */
    move.4 a3, (sp)4++
    move.4 d3, (sp)4++
.endm

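/*
 * Worked example (illustrative): if ROSR >> 2 yields thread id 5, then
 * d3 = ~(1 << 5) = 0xffffffdf, and the and.4 clears bit 5 of
 * scratchpad1, marking this thread's interrupts as enabled again.
 * Writing ldsr_soft_irq_mask to int_set0 then raises the LDSR's
 * software interrupt so it re-evaluates pending interrupts.
 */
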
/*
 * ret_from_interrupt_to_kernel()
 * RFI (return from interrupt) path taken when do_IRQ() returns and
 * the interrupted thread was in kernel space.
 */
    .section .text.ret_from_interrupt_to_kernel, "ax", @progbits
    .global ret_from_interrupt_to_kernel
ret_from_interrupt_to_kernel:
    begin_restore_context ; Restore most of the thread context
    atomic_lock_acquire ; Enter critical section
    complete_restore_context ; Finish restoring the thread context
    atomic_lock_release ; Leave critical section
    ldsr_thread_enable_interrupts ; Enable this thread's interrupts
    move.4 csr, (sp)4++ ; Restore csr from the stack
    ret (sp)4++

/*
 * ret_from_interrupt_to_user()
 * RFI (return from interrupt) path taken when do_IRQ() returns and
 * the interrupted thread was in user space.
 *
 * TODO: Do we really need the critical section handling in this code?
 */
    .section .text.ret_from_interrupt_to_user, "ax", @progbits
    .global ret_from_interrupt_to_user
ret_from_interrupt_to_user:
    ldsr_thread_enable_interrupts ; Enable this thread's interrupts
    /*
     * Set a1 to the thread_info pointer; there is no need to save it,
     * as we are restoring user space and will never return.
     */
    movei d0, #(~(ASM_THREAD_SIZE-1))
    and.4 a1, sp, d0

    /*
     * Test if the scheduler needs to be called.
     */
    btst TI_FLAGS(a1), #ASM_TIF_NEED_RESCHED
    jmpeq.t 2f
    call a5, schedule ; Call the scheduler; it will return here.

    /*
     * See if we have pending signals and call do_signal
     * if needed.
     */
2:
    btst TI_FLAGS(a1), #ASM_TIF_SIGPENDING ; Any signals pending?
    jmpeq.t 1f

    /*
     * Now call do_signal()
     */
    move.4 d0, #0 ; oldset pointer is NULL
    move.4 d1, sp ; d1 is the regs pointer
    call a5, do_signal ; Call do_signal()

    /*
     * Back from do_signal(); re-enter the critical section.
     */
1:
    begin_restore_context ; Restore most of the thread context
    atomic_lock_acquire ; Enter critical section
    call a3, __complete_and_return_to_userspace ; Jump to the unprotected section

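/*
 * In rough C-like pseudocode, the user-return path above is
 * (an illustrative sketch only; see the assembly for the exact flow):
 *
 *    if (ti->flags & TIF_NEED_RESCHED)
 *        schedule();
 *    if (ti->flags & TIF_SIGPENDING)
 *        do_signal(NULL, regs);    // d0 = oldset (NULL), d1 = regs
 *    restore context and return to user space
 */
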
/*
 * restore_all_registers()
 *
 * restore_all_registers is the alternate exit route for preempted
 * processes that have called a signal handler and are returning to
 * user space.
 */
    .section .text.restore_all_registers, "ax", @progbits
    .global restore_all_registers
restore_all_registers:
    begin_restore_context ; Restore most of the thread context
    atomic_lock_acquire ; Enter critical section
    call a3, __complete_and_return_to_userspace

/*
 * __complete_and_return_to_userspace()
 *
 * Restores the second half of the context and returns to user space.
 * The atomic lock must be held when this function is called.
 */
    .section .kernel_unprotected, "ax", @progbits
__complete_and_return_to_userspace:
    disable_kernel_ranges_for_current d15 ; Disable kernel ranges
    complete_restore_context ; Restore the rest of the context
    atomic_lock_release ; Leave critical section
    move.4 csr, (sp)4++ ; Restore csr from the stack
    ret (sp)4++

/*
 * ret_from_fork()
 * Called on the child's return from the fork system call.
 */
    .section .text.ret_from_fork, "ax", @progbits
    .global ret_from_fork
ret_from_fork:
    ;;; d0 contains the argument for schedule_tail;
    ;;; the other registers do not matter, as they are in pt_regs at (sp).
    call a5, schedule_tail

    atomic_lock_acquire ; Enter critical section

    move.4 a3, sp
    move.4 d0, PT_D0(a3) ; Restore D0
    move.4 d1, PT_D1(a3) ; Restore D1
    move.4 d2, PT_D2(a3) ; Restore D2
    move.4 d3, PT_D3(a3) ; Restore D3
    move.4 d10, PT_D10(a3) ; Restore D10
    move.4 d11, PT_D11(a3) ; Restore D11
    move.4 d12, PT_D12(a3) ; Restore D12
    move.4 d13, PT_D13(a3) ; Restore D13
    move.4 a1, PT_A1(a3) ; Restore A1
    move.4 a2, PT_A2(a3) ; Restore A2
    move.4 a5, PT_A5(a3) ; Restore A5
    move.4 a6, PT_A6(a3) ; Restore A6
    ;; Note: atomic_lock_acquire could possibly be moved down to here.
    move.4 sp, PT_SP(a3) ; Restore sp
    move.4 a4, PT_PC(a3) ; Restore pc into register a4
    move.4 PT_FRAME_TYPE(a3), #0 ; Clear frame_type to mark the frame invalid

#ifdef CONFIG_PROTECT_KERNEL
    call a3, __ret_from_fork_bottom_half
    .section .kernel_unprotected, "ax", @progbits
__ret_from_fork_bottom_half:
    disable_kernel_ranges_for_current d15
#endif
    atomic_lock_release ; Leave critical section
    calli a4, 0(a4) ; Return.

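/*
 * Note: with CONFIG_PROTECT_KERNEL enabled, the tail of ret_from_fork
 * runs from the .kernel_unprotected section, presumably so that
 * execution can continue once disable_kernel_ranges_for_current has
 * shut off this thread's access to the protected kernel ranges; the
 * same pattern is used by __complete_and_return_to_userspace above.
 */
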
/*
 * __switch_to()
 *
 * Call with:
 *	void *__switch_to(struct task_struct *prev,
 *			  struct thread_struct *prev_switch,
 *			  struct thread_struct *next_switch)
 */
    .section .text.__switch_to, "ax", @progbits
    .global __switch_to
__switch_to:

    /*
     * Set up register a3 to point to the save area.
     */
    movea a3, d1 ; a3 now holds prev_switch
    move.4 (a3)4++, d10
    move.4 (a3)4++, d11
    move.4 (a3)4++, d12
    move.4 (a3)4++, d13
    move.4 (a3)4++, a1
    move.4 (a3)4++, a2
    move.4 (a3)4++, a5
    move.4 (a3)4++, a6
    move.4 (a3)4++, a7

    /*
     * Set up register a3 to point to the restore area.
     */
    movea a3, d2 ; a3 now holds next_switch
    move.4 d10, (a3)4++
    move.4 d11, (a3)4++
    move.4 d12, (a3)4++
    move.4 d13, (a3)4++
    move.4 a1, (a3)4++
    move.4 a2, (a3)4++
    move.4 a5, (a3)4++
    move.4 a6, (a3)4++
    move.4 a7, (a3)4++

    /*
     * Load sw_ksp with the proper thread_info pointer.
     */
    movei d15, #(~(ASM_THREAD_SIZE-1))
    and.4 a3, sp, d15 ; a3 now has the thread_info pointer
    moveai a4, #%hi(sw_ksp)
    lea.1 a4, %lo(sw_ksp)(a4) ; a4 now has the base address of the sw_ksp array
    lsr.4 d15, ROSR, #2 ; Thread number - bits 6 through 31 are zeroes anyway
    move.4 (a4, d15), a3 ; Store the thread_info pointer into the sw_ksp array

    /*
     * We are done with the context switch. Time to return.
     */
    calli a5, 0(a5)
    .size __switch_to, . - __switch_to

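/*
 * Note: the d10-d13/a1/a2/a5/a6/a7 set switched above is presumably
 * the callee-saved register set under the Ubicom32 ABI; everything
 * else is caller-saved or dead at this point. sw_ksp is indexed by
 * hardware thread number (ROSR >> 2), so the store above publishes the
 * incoming task's thread_info pointer for the hardware thread doing
 * the switch, presumably for use by the kernel entry paths.
 */
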
/*
 * ubicom32_emulate_insn()
 * Emulates the instruction.
 *
 * Call with:
 *	unsigned int ubicom32_emulate_insn(int source1, int source2,
 *					   int source3, int *save_acc,
 *					   int *save_csr);
 */
    .section .text.ubicom32_emulate_insn, "ax", @progbits
    .global ubicom32_emulate_insn
    .global trap_emulate
ubicom32_emulate_insn:
    movea a3, d3 ; a3 holds the save_acc pointer
    movea a4, d4 ; a4 holds the save_csr pointer
    move.4 source3, d2
    move.4 acc0_lo, (a3)
    move.4 acc0_hi, 4(a3)
    move.4 acc1_lo, 8(a3)
    move.4 acc1_hi, 12(a3)
    move.4 mac_rc16, 16(a3)
    move.4 CSR, (a4)
    setcsr_flush 0

trap_emulate:
    move.4 d0, d1
    setcsr_flush 0
    move.4 (a4), CSR ; Save csr
    move.4 (a3), acc0_lo
    move.4 4(a3), acc0_hi
    move.4 8(a3), acc1_lo
    move.4 12(a3), acc1_hi
    move.4 16(a3), mac_rc16
    ret a5
    .size ubicom32_emulate_insn, . - ubicom32_emulate_insn
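
/*
 * The offsets above imply the following save_acc layout (an
 * illustrative C view; the caller-side type is an assumption):
 *
 *    int save_acc[5];    byte offsets: 0 = acc0_lo, 4 = acc0_hi,
 *                        8 = acc1_lo, 12 = acc1_hi, 16 = mac_rc16
 *
 * save_csr points to a single int holding the CSR image; setcsr_flush
 * follows each CSR write, apparently to ensure the update takes effect
 * before subsequent instructions execute.
 */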