| 1 | /* |
| 2 | * arch/ubicom32/kernel/ubicom32_syscall.S |
 * System call entry, exit and trampoline code for the Ubicom32 architecture.
| 4 | * |
| 5 | * (C) Copyright 2009, Ubicom, Inc. |
| 6 | * |
| 7 | * This file is part of the Ubicom32 Linux Kernel Port. |
| 8 | * |
| 9 | * The Ubicom32 Linux Kernel Port is free software: you can redistribute |
| 10 | * it and/or modify it under the terms of the GNU General Public License |
| 11 | * as published by the Free Software Foundation, either version 2 of the |
| 12 | * License, or (at your option) any later version. |
| 13 | * |
| 14 | * The Ubicom32 Linux Kernel Port is distributed in the hope that it |
| 15 | * will be useful, but WITHOUT ANY WARRANTY; without even the implied |
| 16 | * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See |
| 17 | * the GNU General Public License for more details. |
| 18 | * |
| 19 | * You should have received a copy of the GNU General Public License |
| 20 | * along with the Ubicom32 Linux Kernel Port. If not, |
| 21 | * see <http://www.gnu.org/licenses/>. |
| 22 | * |
| 23 | * Ubicom32 implementation derived from (with many thanks): |
| 24 | * arch/m68knommu |
| 25 | * arch/blackfin |
| 26 | * arch/parisc |
| 27 | */ |
| 28 | #include <linux/sys.h> |
| 29 | #include <linux/linkage.h> |
| 30 | #include <linux/unistd.h> |
| 31 | |
| 32 | #include <asm/ubicom32-common.h> |
| 33 | #include <asm/thread_info.h> |
| 34 | #include <asm/asm-offsets.h> |
| 35 | #include <asm/range-protect.h> |
| 36 | |
| 37 | /* |
| 38 | * __old_system_call() |
| 39 | */ |
| 40 | .section .old_syscall_entry.text, "ax", @progbits |
| 41 | #ifdef CONFIG_OLD_40400010_SYSTEM_CALL |
| 42 | __old_system_call: |
| 43 | call a3, system_call |
| 44 | .size __old_system_call, . - __old_system_call ; |
| 45 | #else |
| 46 | /* |
| 47 | * something that will crash the userspace application, but |
| 48 | * should not take down the kernel, if protection is enabled |
| 49 | * this will never even get executed. |
| 50 | */ |
| 51 | .long 0xFABBCCDE ; illegal instruction |
| 52 | bkpt #-1 ; we will never get here |
| 53 | #endif |
| 54 | |
| 55 | /* |
| 56 | * system_call() |
| 57 | */ |
| 58 | .section .syscall_entry.text, "ax", @progbits |
| 59 | .global system_call |
| 60 | system_call: |
| 61 | /* |
| 62 | * Regular ABI rules for function calls apply for syscall. d8 holds |
| 63 | * the syscall number. We will use that to index into the syscall table. |
| 64 | * d0 - d5 hold the parameters. |
| 65 | * |
| 66 | * First we get the current thread_info and swap to the kernel stack. |
| 67 | * This is done by reading the current thread and looking up the ksp |
| 68 | * from the sw_ksp array and storing it in a3. |
| 69 | * |
	 * Then we reserve space for the syscall context, a struct pt_regs,
	 * and save into it, addressing it through a4 initially and later
	 * through sp.
| 72 | * Once sp is set to the kernel sp we can leave the critical section. |
| 73 | * |
	 * For the user case the kernel stack will have the following layout:
	 *
	 *   a3 ksp[0]           +-----------------------+
	 *                       | Thread info area      |
	 *                       | struct thread_info    |
	 *                       +-----------------------+
	 *                       :                       :
	 *                       | Kernel Stack Area     |
	 *                       |                       |
	 *   a4 / sp >>>         +-----------------------+
	 *                       | Context save area     |
	 *                       | struct pt_regs        |
	 *   ksp[THREAD_SIZE-8]  +-----------------------+
	 *                       | 8 Byte Buffer Zone    |
	 *   ksp[THREAD_SIZE]    +-----------------------+
	 *
	 * For kernel syscalls the layout is as follows:
	 *
	 *   a3 ksp[0]           +-----------------------+
	 *                       | Thread info area      |
	 *                       | struct thread_info    |
	 *                       +-----------------------+
	 *                       :                       :
	 *                       | Kernel Stack Area     |
	 *                       |                       |
	 *   a4 / sp >>>         +-----------------------+
	 *                       | Context save area     |
	 *                       | struct pt_regs        |
	 *   sp at syscall entry +-----------------------+
	 *                       | Caller's Kernel Stack |
	 *                       :                       :
	 *
	 * Once the context is saved we optionally call syscall_trace, set up
	 * the exit routine and jump to the syscall.
| 109 | */ |
| 110 | |
| 111 | /* |
	 * Load the base address of sw_ksp into a3.
	 * Note: we cannot access it just yet as protection is still on.
| 114 | */ |
| 115 | moveai a3, #%hi(sw_ksp) |
| 116 | lea.1 a3, %lo(sw_ksp)(a3) |
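
	/*
	 * The moveai/lea.1 pair above assembles the full 32-bit address of
	 * sw_ksp from its %hi and %lo parts; a3 now holds &sw_ksp[0], ready
	 * to be indexed by the thread number once we may access it.
	 */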
| 117 | |
| 118 | /* |
| 119 | * Enter critical section . |
| 120 | * |
| 121 | * The 'critical' aspects here are the switching the to the ksp and |
| 122 | * changing the protection registers, these both use per thread |
| 123 | * information so we need to protect from a context switch. For now this |
| 124 | * is done using the global atomic lock. |
| 125 | */ |
| 126 | atomic_lock_acquire |
| 127 | |
| 128 | thread_get_self d15 ; Load current thread number |
| 129 | #ifdef CONFIG_PROTECT_KERNEL |
| 130 | lsl.4 d9, #1, d15 ; Convert to thread bit |
| 131 | enable_kernel_ranges d9 |
| 132 | #endif |
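
	/*
	 * (When CONFIG_PROTECT_KERNEL is set, d9 above holds the thread bit
	 * 1 << thread, the mask the enable_kernel_ranges macro uses to open
	 * the kernel ranges for this hardware thread.)
	 */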
| 133 | /* |
| 134 | * in order to reduce the size of code in the syscall section we get |
| 135 | * out of it right now |
| 136 | */ |
| 137 | call a4, __system_call_bottom_half |
| 138 | .size system_call, . - system_call |
| 139 | |
| 140 | .section .text.__system_call_bottom_half, "ax", @progbits |
| 141 | __system_call_bottom_half: |
| 142 | |
| 143 | /* |
| 144 | * We need to Determine if this is a kernel syscall or user syscall. |
| 145 | * Start by loading the pointer for the thread_info structure for the |
| 146 | * current process in to a3. |
| 147 | */ |
| 148 | move.4 a3, (a3, d15) ; a3 = sw_ksp[d15] |
| 149 | |
| 150 | /* |
| 151 | * Now if this is a kernel thread the same value can be a acheived by |
| 152 | * masking off the lower bits on the current stack pointer. |
| 153 | */ |
| 154 | movei d9, #(~(ASM_THREAD_SIZE-1)) ; load mask |
| 155 | and.4 d9, sp, d9 ; apply mask |
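
	/*
	 * In C terms this mask is the usual current_thread_info() trick:
	 * the thread_info sits at the base of the THREAD_SIZE-aligned
	 * kernel stack, so (sketch only)
	 *
	 *	d9 = sp & ~(THREAD_SIZE - 1);
	 */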
| 156 | |
| 157 | /* |
| 158 | * d9 now has the masked version of the sp. If this is identical to |
| 159 | * what is in a3 then don't switch to ksp as we are already in the |
| 160 | * kernel. |
| 161 | */ |
| 162 | sub.4 #0, a3, d9 |
| 163 | |
| 164 | /* |
| 165 | * if d9 and a3 are not equal. We are usespace and have to shift to |
| 166 | * ksp. |
| 167 | */ |
| 168 | jmpne.t 1f |
| 169 | |
| 170 | /* |
| 171 | * Kernel Syscall. |
| 172 | * |
| 173 | * The kernel has called this routine. We have to pdec space for pt_regs |
| 174 | * from sp. |
| 175 | */ |
| 176 | pdec a4, PT_SIZE(sp) ; a4 = ksp - PT_SIZE |
| 177 | jmpt.t 2f |
| 178 | |
| 179 | /* |
| 180 | * Userspace Syscall. |
| 181 | * |
| 182 | * Add THREAD_SIZE and subtract PT_SIZE to create the proper ksp |
| 183 | */ |
| 184 | 1: movei d15, #(ASM_THREAD_SIZE - 8 - PT_SIZE) |
| 185 | lea.1 a4, (a3, d15) ; a4 = ksp + d15 |
| 186 | |
| 187 | /* |
| 188 | * Replace user stack pointer with kernel stack pointer (a4) |
| 189 | * Load -1 into frame_type in save area to indicate this is system call |
| 190 | * frame. |
| 191 | */ |
| 192 | 2: move.4 PT_A7(a4), a7 ; Save old sp/A7 on kernel stack |
| 193 | move.4 PT_FRAME_TYPE(a4), #-1 ; Set the frame type. |
| 194 | move.4 sp, a4 ; Change to ksp. |
| 195 | /* |
| 196 | * We are now officially back in the kernel! |
| 197 | */ |
| 198 | |
| 199 | /* |
| 200 | * Now that we are on the ksp we can leave the critical section |
| 201 | */ |
| 202 | atomic_lock_release |
| 203 | |
| 204 | /* |
| 205 | * We need to save a0 because we need to be able to restore it in |
| 206 | * the event that we need to handle a signal. It's not generally |
| 207 | * a callee-saved register but is the GOT pointer. |
| 208 | */ |
| 209 | move.4 PT_A0(sp), a0 ; Save A0 on kernel stack |
| 210 | |
| 211 | /* |
| 212 | * We still need to save d10-d13, a1, a2, a5, a6 in the kernel frame |
| 213 | * for this process, we also save the system call params in the case of |
| 214 | * syscall restart. (note a7 was saved above) |
| 215 | */ |
| 216 | move.4 PT_A1(sp), a1 ; Save A1 on kernel stack |
| 217 | move.4 PT_A2(sp), a2 ; Save A2 on kernel stack |
| 218 | move.4 PT_A5(sp), a5 ; Save A5 on kernel stack |
| 219 | move.4 PT_A6(sp), a6 ; Save A6 on kernel stack |
	move.4	PT_PC(sp), a5		; Save A5 (return address) at the PC location
| 221 | move.4 PT_D10(sp), d10 ; Save D10 on kernel stack |
| 222 | move.4 PT_D11(sp), d11 ; Save D11 on kernel stack |
| 223 | move.4 PT_D12(sp), d12 ; Save D12 on kernel stack |
| 224 | move.4 PT_D13(sp), d13 ; Save D13 on kernel stack |
| 225 | |
| 226 | /* |
| 227 | * Now save the syscall parameters |
| 228 | */ |
| 229 | move.4 PT_D0(sp), d0 ; Save d0 on kernel stack |
	move.4	PT_ORIGINAL_D0(sp), d0	; Save original d0 for syscall restart
| 231 | move.4 PT_D1(sp), d1 ; Save d1 on kernel stack |
| 232 | move.4 PT_D2(sp), d2 ; Save d2 on kernel stack |
| 233 | move.4 PT_D3(sp), d3 ; Save d3 on kernel stack |
| 234 | move.4 PT_D4(sp), d4 ; Save d4 on kernel stack |
| 235 | move.4 PT_D5(sp), d5 ; Save d5 on kernel stack |
| 236 | move.4 PT_D8(sp), d8 ; Save d8 on kernel stack |
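
	/*
	 * PT_ORIGINAL_D0 deliberately duplicates d0: d0 itself will be
	 * overwritten by the syscall return value, and (as on other ports)
	 * the saved original first argument is what the restart logic needs
	 * when a signal interrupts the syscall.
	 */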
| 237 | |
| 238 | /* |
| 239 | * Test if syscalls are being traced and if they are jump to syscall |
| 240 | * trace (it will comeback here) |
| 241 | */ |
| 242 | btst TI_FLAGS(a3), #ASM_TIF_SYSCALL_TRACE |
| 243 | jmpne.f .Lsystem_call__trace |
| 244 | .Lsystem_call__trace_complete: |
| 245 | /* |
| 246 | * Check for a valid call number [ 0 <= syscall_number < NR_syscalls ] |
| 247 | */ |
| 248 | cmpi d8, #0 |
| 249 | jmplt.f 3f |
| 250 | cmpi d8, #NR_syscalls |
| 251 | jmplt.t 4f |
| 252 | |
| 253 | /* |
| 254 | * They have passed an invalid number. Call sys_ni_syscall staring by |
| 255 | * load a4 with the base address of sys_ni_syscall |
| 256 | */ |
| 257 | 3: moveai a4, #%hi(sys_ni_syscall) |
| 258 | lea.1 a4, %lo(sys_ni_syscall)(a4) |
| 259 | jmpt.t 5f ; Jump to regular processing |
| 260 | |
| 261 | /* |
| 262 | * Validated syscall, load the syscall table base address into a3 and |
| 263 | * read the syscall ptr out. |
| 264 | */ |
| 265 | 4: moveai a3, #%hi(sys_call_table) |
| 266 | lea.1 a3, %lo(sys_call_table)(a3) ; a3 = sys_call_table |
| 267 | move.4 a4, (a3, d8) ; a4 = sys_call_table[d8] |
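
	/*
	 * In rough C terms the lookup above is
	 *
	 *	a4 = sys_call_table[d8];
	 *
	 * the (a3, d8) addressing mode scales the index by the 4-byte
	 * access size, so d8 indexes the table of pointers directly.
	 */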
| 268 | |
| 269 | /* |
| 270 | * Before calling the syscall, setup a5 so that syscall_exit is called |
| 271 | * on return from syscall |
| 272 | */ |
| 273 | 5: moveai a5, #%hi(syscall_exit) ; Setup return address |
| 274 | lea.1 a5, %lo(syscall_exit)(a5) ; from system call |
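
	/*
	 * Calls in this file link through a5 (see the 'call a5, ...' and
	 * 'calli a5, 0(a5)' patterns), so pre-loading a5 with syscall_exit
	 * makes the C syscall handler return straight into syscall_exit.
	 */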
| 275 | |
| 276 | /* |
| 277 | * If the syscall is __NR_rt_rigreturn then we have to test d1 to |
| 278 | * figure out if we have to change change the return routine to restore |
| 279 | * all registers. |
| 280 | */ |
| 281 | cmpi d8, #__NR_rt_sigreturn |
| 282 | jmpeq.f 6f |
| 283 | |
| 284 | /* |
| 285 | * Launch system call (it will return through a5 - syscall_exit) |
| 286 | */ |
| 287 | calli a3, 0(a4) |
| 288 | |
| 289 | /* |
| 290 | * System call is rt_sigreturn. Test d1. If it is 1 we have to |
| 291 | * change the return address to restore_all_registers |
| 292 | */ |
| 293 | 6: cmpi d1, #1 |
| 294 | jmpne.t 7f |
| 295 | |
| 296 | moveai a5, #%hi(restore_all_registers) ; Setup return address |
| 297 | lea.1 a5, %lo(restore_all_registers)(a5) ; to restore_all_registers. |
| 298 | |
| 299 | /* |
| 300 | * Launch system call (it will return through a5) |
| 301 | */ |
| 302 | 7: calli a3, 0(a4) ; Launch system call |
| 303 | |
| 304 | .Lsystem_call__trace: |
| 305 | /* |
| 306 | * Syscalls are being traced. |
| 307 | * Call syscall_trace, (return here) |
| 308 | */ |
| 309 | call a5, syscall_trace |
| 310 | |
| 311 | /* |
| 312 | * Restore syscall state (it would have been discarded during the |
| 313 | * syscall trace) |
| 314 | */ |
| 315 | move.4 d0, PT_D0(sp) ; Restore d0 from kernel stack |
| 316 | move.4 d1, PT_D1(sp) ; Restore d1 from kernel stack |
| 317 | move.4 d2, PT_D2(sp) ; Restore d2 from kernel stack |
| 318 | move.4 d3, PT_D3(sp) ; Restore d3 from kernel stack |
| 319 | move.4 d4, PT_D4(sp) ; Restore d4 from kernel stack |
| 320 | move.4 d5, PT_D5(sp) ; Restore d5 from kernel stack |
| 321 | /* add this back if we ever have a syscall with 7 args */ |
| 322 | move.4 d8, PT_D8(sp) ; Restore d8 from kernel stack |
| 323 | |
| 324 | /* |
| 325 | * return to syscall |
| 326 | */ |
| 327 | jmpt.t .Lsystem_call__trace_complete |
| 328 | .size __system_call_bottom_half, . - __system_call_bottom_half |
| 329 | |
| 330 | /* |
| 331 | * syscall_exit() |
| 332 | */ |
| 333 | .section .text.syscall_exit |
| 334 | .global syscall_exit |
| 335 | syscall_exit: |
| 336 | /* |
| 337 | * d0 contains the return value. We should move that into the kernel |
| 338 | * stack d0 location. We will be transitioning from kernel to user |
| 339 | * mode. Test the flags and see if we have to call schedule. If we are |
| 340 | * going to truly exit then all that has to be done is that from the |
| 341 | * kernel stack we have to restore d0, a0, a1, a2, a5, a6 and sp (a7)bb |
| 342 | * and then return via a5. |
| 343 | */ |
| 344 | |
| 345 | /* |
| 346 | * Save d0 to pt_regs |
| 347 | */ |
| 348 | move.4 PT_D0(sp), d0 ; Save d0 into the kernel stack |
| 349 | |
| 350 | /* |
| 351 | * load the thread_info structure by masking off the THREAD_SIZE |
| 352 | * bits. |
| 353 | * |
| 354 | * Note: we used to push a1, but now we don't as we are going |
| 355 | * to eventually restore it to the userspace a1. |
| 356 | */ |
| 357 | movei d9, #(~(ASM_THREAD_SIZE-1)) |
| 358 | and.4 a1, sp, d9 |
| 359 | |
| 360 | /* |
| 361 | * Are any interesting bits set on TI flags, if there are jump |
| 362 | * aside to post_processing. |
| 363 | */ |
| 364 | move.4 d9, #(_TIF_SYSCALL_TRACE | _TIF_NEED_RESCHED | _TIF_SIGPENDING) |
| 365 | and.4 #0, TI_FLAGS(a1), d9 |
| 366 | jmpne.f .Lsyscall_exit__post_processing ; jump to handler |
| 367 | .Lsyscall_exit__post_processing_complete: |
| 368 | |
| 369 | move.4 d0, PT_D0(sp) ; Restore D0 from kernel stack |
| 370 | move.4 d1, PT_D1(sp) ; Restore d1 from kernel stack |
| 371 | move.4 d2, PT_D2(sp) ; Restore d2 from kernel stack |
| 372 | move.4 d3, PT_D3(sp) ; Restore d3 from kernel stack |
| 373 | move.4 d4, PT_D4(sp) ; Restore d4 from kernel stack |
| 374 | move.4 d5, PT_D5(sp) ; Restore d5 from kernel stack |
| 375 | move.4 d8, PT_D8(sp) ; Restore d8 from kernel stack |
| 376 | move.4 d10, PT_D10(sp) ; Restore d10 from kernel stack |
| 377 | move.4 d11, PT_D11(sp) ; Restore d11 from kernel stack |
| 378 | move.4 d12, PT_D12(sp) ; Restore d12 from kernel stack |
| 379 | move.4 d13, PT_D13(sp) ; Restore d13 from kernel stack |
| 380 | move.4 a1, PT_A1(sp) ; Restore A1 from kernel stack |
| 381 | move.4 a2, PT_A2(sp) ; Restore A2 from kernel stack |
| 382 | move.4 a5, PT_A5(sp) ; Restore A5 from kernel stack |
| 383 | move.4 a6, PT_A6(sp) ; Restore A6 from kernel stack |
	move.4	a0, PT_A0(sp)		; Restore A0 from kernel stack
| 385 | |
| 386 | /* |
| 387 | * this is only for debug, and could be removed for production builds |
| 388 | */ |
| 389 | move.4 PT_FRAME_TYPE(sp), #0 ; invalidate frame_type |
| 390 | |
| 391 | #ifdef CONFIG_PROTECT_KERNEL |
| 392 | |
| 393 | call a4, __syscall_exit_bottom_half |
| 394 | |
| 395 | .section .kernel_unprotected, "ax", @progbits |
| 396 | __syscall_exit_bottom_half: |
| 397 | /* |
| 398 | * Enter critical section |
| 399 | */ |
| 400 | atomic_lock_acquire |
| 401 | disable_kernel_ranges_for_current d15 |
| 402 | #endif |
| 403 | /* |
| 404 | * Lastly restore userspace stack ptr |
| 405 | * |
| 406 | * Note: that when protection is on we need to hold the lock around the |
| 407 | * stack swap as well because otherwise the protection could get |
| 408 | * inadvertently disabled again at the end of a context switch. |
| 409 | */ |
| 410 | move.4 a7, PT_A7(sp) ; Restore A7 from kernel stack |
| 411 | |
| 412 | /* |
| 413 | * We are now officially back in userspace! |
| 414 | */ |
| 415 | |
| 416 | #ifdef CONFIG_PROTECT_KERNEL |
| 417 | /* |
| 418 | * Leave critical section and return to user space. |
| 419 | */ |
| 420 | atomic_lock_release |
| 421 | #endif |
| 422 | calli a5, 0(a5) ; Back to userspace code. |
| 423 | |
| 424 | bkpt #-1 ; we will never get here |
| 425 | |
| 426 | /* |
| 427 | * Post syscall processing. (unlikely part of syscall_exit) |
| 428 | * |
| 429 | * Are we tracing syscalls. If TIF_SYSCALL_TRACE is set, call |
| 430 | * syscall_trace routine and return here. |
| 431 | */ |
| 432 | .section .text.syscall_exit, "ax", @progbits |
| 433 | .Lsyscall_exit__post_processing: |
| 434 | btst TI_FLAGS(a1), #ASM_TIF_SYSCALL_TRACE |
| 435 | jmpeq.t 1f |
| 436 | call a5, syscall_trace |
| 437 | |
| 438 | /* |
| 439 | * Do we need to resched ie call schedule. If TIF_NEED_RESCHED is set, |
| 440 | * call the scheduler, it will come back here. |
| 441 | */ |
| 442 | 1: btst TI_FLAGS(a1), #ASM_TIF_NEED_RESCHED |
| 443 | jmpeq.t 2f |
| 444 | call a5, schedule |
| 445 | |
| 446 | /* |
| 447 | * Do we need to post a signal, if TIF_SIGPENDING is set call the |
| 448 | * do_signal. |
| 449 | */ |
| 450 | 2: btst TI_FLAGS(a1), #ASM_TIF_SIGPENDING |
| 451 | jmpeq.t .Lsyscall_exit__post_processing_complete |
| 452 | |
| 453 | /* |
| 454 | * setup the do signal call |
| 455 | */ |
| 456 | move.4 d0, #0 ; oldset pointer is NULL |
| 457 | lea.1 d1, (sp) ; d1 is the regs pointer. |
| 458 | call a5, do_signal |
| 459 | |
| 460 | jmpt.t .Lsyscall_exit__post_processing_complete |
| 461 | |
| 462 | /* .size syscall_exit, . - syscall_exit */ |
| 463 | |
| 464 | /* |
| 465 | * kernel_execve() |
 * kernel_execve is called when the kernel is starting a
 * userspace application.
| 468 | */ |
| 469 | .section .kernel_unprotected, "ax", @progbits |
| 470 | .global kernel_execve |
| 471 | kernel_execve: |
| 472 | move.4 -4(sp)++, a5 ; Save return address |
| 473 | /* |
| 474 | * Call execve |
| 475 | */ |
| 476 | movei d8, #__NR_execve ; call execve |
| 477 | call a5, system_call |
| 478 | move.4 a5, (sp)4++ |
| 479 | |
| 480 | /* |
| 481 | * protection was enabled again at syscall exit, but we want |
| 482 | * to return to kernel so we enable it again. |
| 483 | */ |
| 484 | #ifdef CONFIG_PROTECT_KERNEL |
| 485 | /* |
| 486 | * We are entering the kernel so we need to disable the protection. |
| 487 | * Enter critical section, disable ranges and leave critical section. |
| 488 | */ |
| 489 | call a3, __enable_kernel_ranges ; and jump back to kernel |
| 490 | #else |
| 491 | ret a5 ; jump back to the kernel |
| 492 | #endif |
| 493 | |
| 494 | .size kernel_execve, . - kernel_execve |
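
/*
 * In effect kernel_execve() is just a C-callable wrapper (sketch only;
 * the ABI already has name/argv/envp in d0-d2 when we are called):
 *
 *	int kernel_execve(const char *name, char **argv, char **envp)
 *	{
 *		// d8 = __NR_execve; return value comes back in d0
 *		return system_call();
 *	}
 */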
| 495 | |
| 496 | /* |
| 497 | * signal_trampoline() |
| 498 | * |
 * Deals with transitioning to userspace signal handlers and returning
 * to userspace; only called from the kernel.
| 501 | * |
| 502 | */ |
| 503 | .section .kernel_unprotected, "ax", @progbits |
| 504 | .global signal_trampoline |
| 505 | signal_trampoline: |
| 506 | /* |
| 507 | * signal_trampoline is called when we are jumping from the kernel to |
| 508 | * the userspace signal handler. |
| 509 | * |
	 * The following registers are relevant (see setup_rt_frame):
| 511 | * sp is the user space stack not the kernel stack |
| 512 | * d0 = signal number |
| 513 | * d1 = siginfo_t * |
| 514 | * d2 = ucontext * |
| 515 | * d3 = the user space signal handler |
| 516 | * a0 is set to the GOT if userspace application is FDPIC, otherwise 0 |
| 517 | * a3 is set to the FD for the signal if userspace application is FDPIC |
| 518 | */ |
| 519 | #ifdef CONFIG_PROTECT_KERNEL |
| 520 | /* |
| 521 | * We are leaving the kernel so we need to enable the protection. |
| 522 | * Enter critical section, disable ranges and leave critical section. |
| 523 | */ |
| 524 | atomic_lock_acquire ; Enter critical section |
| 525 | disable_kernel_ranges_for_current d15 ; disable kernel ranges |
| 526 | atomic_lock_release ; Leave critical section |
| 527 | #endif |
| 528 | /* |
| 529 | * The signal handler pointer is in register d3 so tranfer it to a4 and |
| 530 | * call it |
| 531 | */ |
| 532 | movea a4, d3 ; signal handler |
| 533 | calli a5, 0(a4) |
| 534 | |
| 535 | /* |
| 536 | * Return to userspace through rt_syscall which is stored on top of the |
| 537 | * stack d1 contains ret_via_interrupt status. |
| 538 | */ |
| 539 | move.4 d8, (sp) ; d8 (syscall #) = rt_syscall |
| 540 | move.4 d1, 4(sp) ; d1 = ret_via_interrupt |
| 541 | call a5, system_call ; as we are 'in' the kernel |
| 542 | ; we can call kernel_syscall |
| 543 | |
| 544 | bkpt #-1 ; will never get here. |
| 545 | .size signal_trampoline, . - signal_trampoline |
| 546 | |
| 547 | /* |
| 548 | * kernel_thread_helper() |
| 549 | * |
| 550 | * Entry point for kernel threads (only referenced by kernel_thread()). |
| 551 | * |
| 552 | * On execution d0 will be 0, d1 will be the argument to be passed to the |
| 553 | * kernel function. |
| 554 | * d2 contains the kernel function that needs to get called. |
| 555 | * d3 will contain address to do_exit which needs to get moved into a5. |
| 556 | * |
 * On return from fork, the child thread's d0 will be 0. We call this
 * dummy function, which in turn loads the argument and calls the kernel
 * function.
| 559 | */ |
| 560 | .section .kernel_unprotected, "ax", @progbits |
| 561 | .global kernel_thread_helper |
| 562 | kernel_thread_helper: |
| 563 | /* |
| 564 | * Create a kernel thread. This is called from ret_from_vfork (a |
| 565 | * userspace return routine) so we need to put it in an unprotected |
| 566 | * section and re-enable protection before calling the vector in d2. |
| 567 | */ |
| 568 | |
| 569 | #ifdef CONFIG_PROTECT_KERNEL |
| 570 | /* |
| 571 | * We are entering the kernel so we need to disable the protection. |
| 572 | * Enter critical section, disable ranges and leave critical section. |
| 573 | */ |
| 574 | call a5, __enable_kernel_ranges |
| 575 | #endif |
| 576 | /* |
| 577 | * Move argument for kernel function into d0, and set a5 return address |
| 578 | * (a5) to do_exit and return through a2 |
| 579 | */ |
| 580 | move.4 d0, d1 ; d0 = arg |
| 581 | move.4 a5, d3 ; a5 = do_exit |
| 582 | ret d2 ; call function ptr in d2 |
| 583 | .size kernel_thread_helper, . - kernel_thread_helper |
| 584 | |
| 585 | #ifdef CONFIG_PROTECT_KERNEL |
| 586 | .section .kernel_unprotected, "ax", @progbits |
| 587 | __enable_kernel_ranges: |
| 588 | atomic_lock_acquire ; Enter critical section |
| 589 | enable_kernel_ranges_for_current d15 |
| 590 | atomic_lock_release ; Leave critical section |
| 591 | calli a5, 0(a5) |
| 592 | .size __enable_kernel_ranges, . - __enable_kernel_ranges |
| 593 | |
| 594 | #endif |
| 595 | |
| 596 | /* |
| 597 | * The following system call intercept functions where we setup the |
| 598 | * input to the real system call. In all cases these are just taking |
| 599 | * the current sp which is pointing to pt_regs and pushing it into the |
| 600 | * last arg of the system call. |
| 601 | * |
| 602 | * i.e. the public definition of sys_execv is |
| 603 | * sys_execve( char *name, |
| 604 | * char **argv, |
| 605 | * char **envp ) |
| 606 | * but process.c defines it as |
| 607 | * sys_execve( char *name, |
| 608 | * char **argv, |
| 609 | * char **envp, |
| 610 | * struct pt_regs *regs ) |
| 611 | * |
| 612 | * so execve_intercept needs to populate the 4th arg with pt_regs*, |
| 613 | * which is the stack pointer as we know we must be coming out of |
| 614 | * system_call |
| 615 | * |
| 616 | * The intercept vectors are referenced by syscalltable.S |
| 617 | */ |
| 618 | |
| 619 | /* |
| 620 | * execve_intercept() |
| 621 | */ |
| 622 | .section .text.execve_intercept, "ax", @progbits |
| 623 | .global execve_intercept |
| 624 | execve_intercept: |
| 625 | move.4 d3, sp ; Save pt_regs address |
| 626 | call a3, sys_execve |
| 627 | |
| 628 | .size execve_intercept, . - execve_intercept |
| 629 | |
| 630 | /* |
| 631 | * vfork_intercept() |
| 632 | */ |
| 633 | .section .text.vfork_intercept, "ax", @progbits |
| 634 | .global vfork_intercept |
| 635 | vfork_intercept: |
| 636 | move.4 d0, sp ; Save pt_regs address |
| 637 | call a3, sys_vfork |
| 638 | |
| 639 | .size vfork_intercept, . - vfork_intercept |
| 640 | |
| 641 | /* |
| 642 | * clone_intercept() |
| 643 | */ |
| 644 | .section .text.clone_intercept, "ax", @progbits |
| 645 | .global clone_intercept |
| 646 | clone_intercept: |
| 647 | move.4 d2, sp ; Save pt_regs address |
| 648 | call a3, sys_clone |
| 649 | |
| 650 | .size clone_intercept, . - clone_intercept |
| 651 | |
| 652 | /* |
| 653 | * sys_sigsuspend() |
| 654 | */ |
| 655 | .section .text.sigclone_intercept, "ax", @progbits |
| 656 | .global sys_sigsuspend |
| 657 | sys_sigsuspend: |
| 658 | move.4 d0, sp ; Pass pointer to pt_regs in d0 |
| 659 | call a3, do_sigsuspend |
| 660 | |
| 661 | .size sys_sigsuspend, . - sys_sigsuspend |
| 662 | |
| 663 | /* |
| 664 | * sys_rt_sigsuspend() |
| 665 | */ |
| 666 | .section .text.sys_rt_sigsuspend, "ax", @progbits |
| 667 | .global sys_rt_sigsuspend |
| 668 | sys_rt_sigsuspend: |
| 669 | move.4 d0, sp ; Pass pointer to pt_regs in d0 |
| 670 | call a3, do_rt_sigsuspend |
| 671 | |
| 672 | .size sys_rt_sigsuspend, . - sys_rt_sigsuspend |
| 673 | |
| 674 | /* |
| 675 | * sys_rt_sigreturn() |
| 676 | */ |
| 677 | .section .text.sys_rt_sigreturn, "ax", @progbits |
| 678 | .global sys_rt_sigreturn |
| 679 | sys_rt_sigreturn: |
| 680 | move.4 d0, sp ; Pass pointer to pt_regs in d0 |
| 681 | call a3, do_rt_sigreturn |
| 682 | |
| 683 | .size sys_rt_sigreturn, . - sys_rt_sigreturn |
| 684 | |
| 685 | /* |
| 686 | * sys_sigaltstack() |
| 687 | */ |
| 688 | .section .text.sys_sigaltstack, "ax", @progbits |
| 689 | .global sys_sigaltstack |
| 690 | sys_sigaltstack: |
| 691 | move.4 d0, sp ; Pass pointer to pt_regs in d0 |
| 692 | call a3, do_sys_sigaltstack |
| 693 | |
| 694 | .size sys_sigaltstack, . - sys_sigaltstack |
| 695 | |