/*
 * arch/ubicom32/kernel/irq.c
 *	Ubicom32 architecture IRQ support.
 *
 * (C) Copyright 2009, Ubicom, Inc.
 * (C) Copyright 2007, Greg Ungerer <gerg@snapgear.com>
 *
 * This file is part of the Ubicom32 Linux Kernel Port.
 *
 * The Ubicom32 Linux Kernel Port is free software: you can redistribute
 * it and/or modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, either version 2 of the
 * License, or (at your option) any later version.
 *
 * The Ubicom32 Linux Kernel Port is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with the Ubicom32 Linux Kernel Port.  If not,
 * see <http://www.gnu.org/licenses/>.
 *
 * Ubicom32 implementation derived from (with many thanks):
 *	arch/m68knommu
 *	arch/blackfin
 *	arch/parisc
 */

#include <linux/types.h>
#include <linux/irq.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <asm/system.h>
#include <asm/traps.h>
#include <asm/ldsr.h>
#include <asm/ip5000.h>
#include <asm/machdep.h>
#include <asm/asm-offsets.h>
#include <asm/thread.h>
#include <asm/devtree.h>

unsigned int irq_soft_avail;
static struct irqaction ubicom32_reserve_action[NR_IRQS];

#if !defined(CONFIG_DEBUG_IRQMEASURE)
#define IRQ_DECLARE_MEASUREMENT
#define IRQ_MEASUREMENT_START()
#define IRQ_MEASUREMENT_END(irq)
#else
#define IRQ_DECLARE_MEASUREMENT \
	int __diff; \
	unsigned int __tstart;

#define IRQ_MEASUREMENT_START() \
	__tstart = UBICOM32_IO_TIMER->sysval;

#define IRQ_MEASUREMENT_END(irq) \
	__diff = (int)UBICOM32_IO_TIMER->sysval - (int)__tstart; \
	irq_measurement_update((irq), __diff);
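/*
 * Note that the signed subtraction in IRQ_MEASUREMENT_END() stays
 * correct across a sysval timer wrap, provided the measured interval
 * is shorter than half the timer period.
 */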

/*
 * We track both the time spent executing each IRQ handler and the
 * time spent in irq_exit() (softirq processing).
 */
#define IRQ_WEIGHT 32

struct irq_measurement {
	volatile unsigned int min;
	volatile unsigned int avg;
	volatile unsigned int max;
};

static DEFINE_SPINLOCK(irq_measurement_lock);

/*
 * One extra entry is added for softirq time, measured around irq_exit().
 */
static struct irq_measurement irq_measurements[NR_IRQS + 1];

/*
 * irq_measurement_update()
 *	Update an entry in the measurement array for this irq.
 */
static void irq_measurement_update(int irq, int sample)
{
	struct irq_measurement *im = &irq_measurements[irq];
	spin_lock(&irq_measurement_lock);
	if ((im->min == 0) || (im->min > sample)) {
		im->min = sample;
	}
	if (im->max < sample) {
		im->max = sample;
	}
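	/*
	 * The average is an exponentially weighted moving average: each
	 * new sample contributes 1/IRQ_WEIGHT of its value, and older
	 * samples decay geometrically.
	 */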
	im->avg = ((im->avg * (IRQ_WEIGHT - 1)) + sample) / IRQ_WEIGHT;
	spin_unlock(&irq_measurement_lock);
}
#endif

/*
 * irq_kernel_stack_check()
 *	See if the kernel stack is within STACK_WARN of the end.
 */
static void irq_kernel_stack_check(int irq, struct pt_regs *regs)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
	unsigned long sp;

	/*
	 * Make sure that we are not so close to the end of the stack
	 * that we cannot really service this interrupt.
	 */
	asm volatile (
		"and.4 %0, SP, %1 \n\t"
		: "=d" (sp)
		: "d" (THREAD_SIZE - 1)
		: "cc"
	);
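	/*
	 * sp now holds the stack pointer's offset within the
	 * THREAD_SIZE-aligned stack area.  The thread_info structure
	 * sits at the bottom of that area, so a small offset means we
	 * are nearly out of stack.
	 */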

	if (sp < (sizeof(struct thread_info) + STACK_WARN)) {
		printk(KERN_WARNING
		       "cpu[%d]: possible stack overflow, sp remaining: %p, "
		       "irq: %d, regs: %p\n",
		       thread_get_self(), (void *)sp, irq, regs);
		dump_stack();
	}

	if (sp < (sizeof(struct thread_info) + 16)) {
		THREAD_STALL;
	}
#endif
}

/*
 * irq_get_lsb()
 *	Return the index of the least-significant set bit in value.
 */
static int irq_get_lsb(unsigned int value)
{
	static unsigned char irq_bits[8] = {
		3, 0, 1, 0, 2, 0, 1, 0
	};
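	/*
	 * irq_bits maps the low three bits of value to the position of
	 * the least-significant set bit; index 0 means bits 0-2 are all
	 * clear, in which case bit 3 must be set because the tests below
	 * leave a non-zero low nibble.
	 */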
	u32_t nextbit = 0;

	/*
	 * It's unlikely that we execute the body of any of these tests:
	 * half the time we take none of them, and in roughly half of the
	 * remaining cases we take only one.
	 */
	if (!(value & 0xffff)) {
		nextbit += 0x10;
		value >>= 16;
	}

	if (!(value & 0xff)) {
		nextbit += 0x08;
		value >>= 8;
	}

	if (!(value & 0xf)) {
		nextbit += 0x04;
		value >>= 4;
	}

	nextbit += irq_bits[value & 0x7];
	if (nextbit > 31) {
		panic("nextbit out of range: %d\n", nextbit);
	}
	return nextbit;
}

/*
 * ubicom32_reserve_handler()
 *	Handler attached to pre-reserved IRQ(s).  A reserved vector
 *	should never actually fire, so BUG() if it does.
 */
static irqreturn_t ubicom32_reserve_handler(int irq, void *dev_id)
{
	BUG();
	return IRQ_HANDLED;
}

/*
 * __irq_disable_vector()
 *	Disable the interrupt by clearing the appropriate bit in the
 *	LDSR Mask Register.
 */
static void __irq_disable_vector(unsigned int irq)
{
	ldsr_disable_vector(irq);
}

/*
 * __irq_ack_vector()
 *	Acknowledge the specified interrupt by clearing the associated
 *	bit in hardware.
 */
static void __irq_ack_vector(unsigned int irq)
{
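	/*
	 * Each INT_CLR register covers 32 vectors; writing a 1 to a bit
	 * position clears that vector's pending state.
	 */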
	if (irq < 32) {
		asm volatile ("move.4 INT_CLR0, %0" : : "d" (1 << irq));
	} else {
		asm volatile ("move.4 INT_CLR1, %0" : : "d" (1 << (irq - 32)));
	}
}

/*
 * __irq_enable_vector()
 *	Clear and then enable the interrupt by setting the appropriate
 *	bit in the LDSR Mask Register.
 */
static void __irq_enable_vector(unsigned int irq)
{
	/*
	 * Acknowledge (i.e. clear) the vector before enabling it.
	 */
	__irq_ack_vector(irq);
	ldsr_enable_vector(irq);
}

/*
 * __irq_mask_vector()
 */
static void __irq_mask_vector(unsigned int irq)
{
	ldsr_mask_vector(irq);
}

/*
 * __irq_unmask_vector()
 */
static void __irq_unmask_vector(unsigned int irq)
{
	ldsr_unmask_vector(irq);
}

/*
 * __irq_end_vector()
 *	Called once an interrupt is completed (reset the LDSR mask).
 */
static void __irq_end_vector(unsigned int irq)
{
	ldsr_unmask_vector(irq);
}

#if defined(CONFIG_SMP)
/*
 * __irq_set_affinity()
 *	Set the CPU affinity for this interrupt; the affinity container
 *	is allocated at boot.
 */
static void __irq_set_affinity(unsigned int irq, const struct cpumask *dest)
{
	smp_set_affinity(irq, dest);
	cpumask_copy(irq_desc[irq].affinity, dest);
}
#endif

/*
 * On-Chip Generic Interrupt function handling.
 */
static struct irq_chip ubicom32_irq_chip = {
	.name		= "Ubicom32",
	.startup	= NULL,
	.shutdown	= NULL,
	.enable		= __irq_enable_vector,
	.disable	= __irq_disable_vector,
	.ack		= __irq_ack_vector,
	.mask		= __irq_mask_vector,
	.unmask		= __irq_unmask_vector,
	.end		= __irq_end_vector,
#if defined(CONFIG_SMP)
	.set_affinity	= __irq_set_affinity,
#endif
};
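
/*
 * With __do_IRQ() dispatch, these callbacks run in a fixed order: ack
 * on entry to the handler and end (which unmasks the vector in the
 * LDSR) once the action chain has completed.
 */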

/*
 * do_IRQ()
 *	Primary interface for handling IRQ requests.
 */
asmlinkage void do_IRQ(int irq, struct pt_regs *regs)
{
	struct pt_regs *oldregs;
	struct thread_info *ti = current_thread_info();

	IRQ_DECLARE_MEASUREMENT;

	/*
	 * Mark that we are inside of an interrupt and
	 * that interrupts are disabled.
	 */
	oldregs = set_irq_regs(regs);
	ti->interrupt_nesting++;
	trace_hardirqs_off();
	irq_kernel_stack_check(irq, regs);

	/*
	 * Start the interrupt sequence.
	 */
	irq_enter();

	/*
	 * Execute the IRQ handler and any pending SoftIRQ requests.
	 */
	BUG_ON(!irqs_disabled());
	IRQ_MEASUREMENT_START();
	__do_IRQ(irq);
	IRQ_MEASUREMENT_END(irq);
	BUG_ON(!irqs_disabled());

	/*
	 * TODO: Since IRQs are disabled when calling irq_exit(), modify
	 * Kconfig to set the __ARCH_IRQ_EXIT_IRQS_DISABLED flag.  This
	 * will slightly improve performance by letting softirq handling
	 * skip a redundant interrupt disable/re-enable.
	 */
	IRQ_MEASUREMENT_START();
	irq_exit();
	IRQ_MEASUREMENT_END(NR_IRQS);
	BUG_ON(!irqs_disabled());

	/*
	 * Outside of an interrupt (or nested exit).
	 */
	set_irq_regs(oldregs);
	trace_hardirqs_on();
	ti->interrupt_nesting--;
}

/*
 * irq_soft_alloc()
 *	Allocate a soft IRQ.
 */
int irq_soft_alloc(unsigned int *soft)
{
	if (irq_soft_avail == 0) {
		printk(KERN_NOTICE "no soft irqs to allocate\n");
		return -EFAULT;
	}

	*soft = irq_get_lsb(irq_soft_avail);
	irq_soft_avail &= ~(1 << *soft);
	return 0;
}
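
/*
 * A hypothetical caller would allocate a soft IRQ and then register a
 * handler for it in the usual way (the names below are illustrative):
 *
 *	unsigned int soft;
 *	if (irq_soft_alloc(&soft) == 0) {
 *		err = request_irq(soft, my_handler, 0, "mydev", dev);
 *	}
 */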

/*
 * ack_bad_irq()
 *	Called to handle a bad irq request.
 */
void ack_bad_irq(unsigned int irq)
{
	printk(KERN_ERR "IRQ: unexpected irq=%d\n", irq);
	__irq_end_vector(irq);
}

/*
 * show_interrupts()
 *	Display the state of each interrupt in the given seq_file.
 */
int show_interrupts(struct seq_file *p, void *v)
{
	struct irqaction *ap;
	int irq = *((loff_t *) v);
	int j;

	if (irq >= NR_IRQS) {
		return 0;
	}

	if (irq == 0) {
		seq_puts(p, " ");
		for_each_online_cpu(j) {
			seq_printf(p, "CPU%d ", j);
		}
		seq_putc(p, '\n');
	}

	ap = irq_desc[irq].action;
	if (ap) {
		seq_printf(p, "%3d: ", irq);
		for_each_online_cpu(j) {
			seq_printf(p, "%10u ", kstat_irqs_cpu(irq, j));
		}
		seq_printf(p, "%14s ", irq_desc[irq].chip->name);
		seq_printf(p, "%s", ap->name);
		for (ap = ap->next; ap; ap = ap->next) {
			seq_printf(p, ", %s", ap->name);
		}
		seq_putc(p, '\n');
	}
	return 0;
}

#if defined(CONFIG_DEBUG_IRQMEASURE)
static unsigned int irq_cycles_to_micro(unsigned int cycles, unsigned int frequency)
{
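	/*
	 * Convert timer cycles to microseconds.  This assumes the system
	 * clock runs at 1 MHz or faster, so the divisor is non-zero.
	 */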
	unsigned int micro = (cycles / (frequency / 1000000));
	return micro;
}

/*
 * irq_measurement_show()
 *	Print out the min, avg, max values for each IRQ.
 *
 *	By request, the max value is reset after each dump.
 */
static int irq_measurement_show(struct seq_file *p, void *v)
{
	struct irqaction *ap;
	unsigned int freq = processor_frequency();
	int irq = *((loff_t *) v);

	if (irq == 0) {
		seq_puts(p, "\tmin\tavg\tmax\t(micro-seconds)\n");
	}

	if (irq > NR_IRQS) {
		return 0;
	}

	if (irq == NR_IRQS) {
		unsigned int min, avg, max;
		spin_lock(&irq_measurement_lock);
		min = irq_cycles_to_micro(irq_measurements[irq].min, freq);
		avg = irq_cycles_to_micro(irq_measurements[irq].avg, freq);
		max = irq_cycles_to_micro(irq_measurements[irq].max, freq);
		irq_measurements[irq].max = 0;
		spin_unlock(&irq_measurement_lock);
		seq_printf(p, " \t%u\t%u\t%u\tsoftirq\n", min, avg, max);
		return 0;
	}

	ap = irq_desc[irq].action;
	if (ap) {
		unsigned int min, avg, max;
		spin_lock(&irq_measurement_lock);
		min = irq_cycles_to_micro(irq_measurements[irq].min, freq);
		avg = irq_cycles_to_micro(irq_measurements[irq].avg, freq);
		max = irq_cycles_to_micro(irq_measurements[irq].max, freq);
		irq_measurements[irq].max = 0;
		spin_unlock(&irq_measurement_lock);
		seq_printf(p, "%2u:\t%u\t%u\t%u\t%s\n", irq, min, avg, max, ap->name);
	}
	return 0;
}

static void *irq_measurement_start(struct seq_file *f, loff_t *pos)
{
	return (*pos <= NR_IRQS) ? pos : NULL;
}

static void *irq_measurement_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos > NR_IRQS)
		return NULL;
	return pos;
}

static void irq_measurement_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

static const struct seq_operations irq_measurement_seq_ops = {
	.start	= irq_measurement_start,
	.next	= irq_measurement_next,
	.stop	= irq_measurement_stop,
	.show	= irq_measurement_show,
};

static int irq_measurement_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &irq_measurement_seq_ops);
}

static const struct file_operations irq_measurement_fops = {
	.open		= irq_measurement_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
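
/*
 * The measurements are published as /proc/irq_measurements and can be
 * dumped with, e.g.:
 *
 *	cat /proc/irq_measurements
 */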

static int __init irq_measurement_init(void)
{
	proc_create("irq_measurements", 0, NULL, &irq_measurement_fops);
	return 0;
}
module_init(irq_measurement_init);
#endif

/*
 * init_IRQ(void)
 *	Initialize the on-chip IRQ subsystem.
 */
void __init init_IRQ(void)
{
	int irq;
	struct devtree_node *p = NULL;
	struct devtree_node *iter = NULL;
	unsigned int mask = 0;
	unsigned int reserved = 0;

	/*
	 * Pull out the list of software interrupts that are available to
	 * Linux and provide an allocation function for them.  The first
	 * 24 interrupts of INT0 are software interrupts.
	 */
	irq_soft_avail = 0;
	if (processor_interrupts(&irq_soft_avail, NULL) < 0) {
		printk(KERN_WARNING "No Soft IRQ(s) available\n");
	}
	irq_soft_avail &= ((1 << 24) - 1);

	/*
	 * Initialize all of the on-chip interrupt handling
	 * to use a common set of interrupt functions.
	 */
	for (irq = 0; irq < NR_IRQS; irq++) {
		irq_desc[irq].status = IRQ_DISABLED;
		irq_desc[irq].action = NULL;
		irq_desc[irq].depth = 1;
		set_irq_chip(irq, &ubicom32_irq_chip);
	}

	/*
	 * The sendirq of a devnode is not registered within Linux but is
	 * instead used by the software I/O thread; these interrupts are
	 * reserved.  The recvirq is used by Linux and registered by a
	 * device driver; these are not reserved.
	 *
	 * recvirq(s) that are in the software interrupt range are not
	 * supposed to be marked as reserved.  We track this while we
	 * scan the device nodes.
	 */
	p = devtree_find_next(&iter);
	while (p) {
		unsigned char sendirq, recvirq;
		devtree_irq(p, &sendirq, &recvirq);

		/*
		 * If the sendirq is valid, mark that irq as taken by the
		 * devtree node.
		 */
		if (sendirq < NR_IRQS) {
			ubicom32_reserve_action[sendirq].handler =
				ubicom32_reserve_handler;
			ubicom32_reserve_action[sendirq].name = p->name;
			irq_desc[sendirq].action =
				&ubicom32_reserve_action[sendirq];
			mask |= (1 << sendirq);
		}

		/*
		 * Track the relevant receive IRQ(s).
		 */
		if (recvirq < 24) {
			mask |= (1 << recvirq);
		}

		/*
		 * Move to the next node.
		 */
		p = devtree_find_next(&iter);
	}

	/*
	 * Any of the first 24 interrupts that are neither available as
	 * soft IRQs nor claimed by a devtree node are treated as
	 * pre-reserved.
	 */
	reserved = ~irq_soft_avail & ~mask;
	for (irq = 0; irq < 24; irq++) {
		if ((reserved & (1 << irq))) {
			ubicom32_reserve_action[irq].handler =
				ubicom32_reserve_handler;
			ubicom32_reserve_action[irq].name = "reserved";
			irq_desc[irq].action = &ubicom32_reserve_action[irq];
		}
	}

	/*
	 * Initialize the LDSR, which is the Ubicom32 programmable
	 * interrupt controller.
	 */
	ldsr_init();

	/*
	 * The Ubicom32 trap code needs a second init after the IRQ(s)
	 * are set up.
	 */
	trap_init_interrupt();
}
| 598 | |