/* arch/sparc/mm/ultra.S */

/*
 * ultra.S: Don't expand these all over the place...
 *
 * Copyright (C) 1997, 2000, 2008 David S. Miller (davem@davemloft.net)
 */

#include <asm/asi.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/spitfire.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
#include <asm/pil.h>
#include <asm/head.h>
#include <asm/thread_info.h>
#include <asm/cacheflush.h>
#include <asm/hypervisor.h>
#include <asm/cpudata.h>

	/* Basically, most of the Spitfire vs. Cheetah madness
	 * has to do with the fact that Cheetah does not support
	 * IMMU flushes out of the secondary context. Someone needs
	 * to throw a south lake birthday party for the folks
	 * in Microelectronics who refused to fix this shit.
	 */

	/* This file is meant to be read efficiently by the CPU, not humans.
	 * Try hard not to screw this up for anyone.  [translated from Polish]
	 */
	.text
	.align	32
	.globl	__flush_tlb_mm
__flush_tlb_mm:		/* 18 insns */
	/* Flush all TLB entries for one user context (generic/Spitfire
	 * version; overwritten at boot with the Cheetah or hypervisor
	 * variant below).  The trailing nops pad the slot so the larger
	 * boot-patched variants fit.
	 * %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT
	 */
	ldxa	[%o1] ASI_DMMU, %g2
	cmp	%g2, %o0			! ctx already the secondary ctx?
	bne,pn	%icc, __spitfire_flush_tlb_mm_slow
	 mov	0x50, %g3			! demap-context op, secondary ctx
	stxa	%g0, [%g3] ASI_DMMU_DEMAP
	stxa	%g0, [%g3] ASI_IMMU_DEMAP
	sethi	%hi(KERNBASE), %g3
	flush	%g3				! synchronize the demap stores
	retl
	 nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
54
	.align	32
	.globl	__flush_tlb_pending
__flush_tlb_pending:	/* 26 insns */
	/* Demap a batch of user virtual addresses (generic/Spitfire
	 * version; boot-patched on Cheetah and sun4v).  The low bit of
	 * each vaddr entry, when set, requests an I-TLB demap as well.
	 * %o0 = context, %o1 = nr, %o2 = vaddrs[]
	 */
	rdpr	%pstate, %g7
	sllx	%o1, 3, %o1			! nr -> byte offset into vaddrs[]
	andn	%g7, PSTATE_IE, %g2
	wrpr	%g2, %pstate			! disable interrupts
	mov	SECONDARY_CONTEXT, %o4
	ldxa	[%o4] ASI_DMMU, %g2		! save current secondary ctx
	stxa	%o0, [%o4] ASI_DMMU		! install target ctx
1:	sub	%o1, (1 << 3), %o1
	ldx	[%o2 + %o1], %o3
	andcc	%o3, 1, %g0			! I-TLB demap requested?
	andn	%o3, 1, %o3
	be,pn	%icc, 2f
	 or	%o3, 0x10, %o3			! demap-page op, secondary ctx
	stxa	%g0, [%o3] ASI_IMMU_DEMAP
2:	stxa	%g0, [%o3] ASI_DMMU_DEMAP
	membar	#Sync
	brnz,pt	%o1, 1b				! more entries?
	 nop
	stxa	%g2, [%o4] ASI_DMMU		! restore secondary ctx
	sethi	%hi(KERNBASE), %o4
	flush	%o4
	retl
	 wrpr	%g7, 0x0, %pstate		! restore interrupt state
	nop
	nop
	nop
	nop
86
	.align	32
	.globl	__flush_tlb_kernel_range
__flush_tlb_kernel_range:	/* 16 insns */
	/* Demap every kernel page in [start, end) from both TLBs,
	 * nucleus context (generic version; boot-patched on sun4v).
	 * %o0=start, %o1=end -- both assumed page aligned.
	 */
	cmp	%o0, %o1
	be,pn	%xcc, 2f			! empty range, nothing to do
	 sethi	%hi(PAGE_SIZE), %o4
	sub	%o1, %o0, %o3
	sub	%o3, %o4, %o3			! %o3 = offset of last page
	or	%o0, 0x20, %o0			! Nucleus
1:	stxa	%g0, [%o0 + %o3] ASI_DMMU_DEMAP
	stxa	%g0, [%o0 + %o3] ASI_IMMU_DEMAP
	membar	#Sync
	brnz,pt	%o3, 1b				! walk offsets down to zero
	 sub	%o3, %o4, %o3
2:	sethi	%hi(KERNBASE), %o3
	flush	%o3
	retl
	 nop
	nop
107
__spitfire_flush_tlb_mm_slow:
	/* Slow path of __flush_tlb_mm: the target ctx is not currently
	 * installed as the secondary context, so install it, demap both
	 * TLBs, then restore the previous secondary ctx.
	 * %o0 = ctx, %o1 = SECONDARY_CONTEXT,
	 * %g2 = saved secondary ctx, %g3 = 0x50 (demap-context op)
	 */
	rdpr	%pstate, %g1
	wrpr	%g1, PSTATE_IE, %pstate		! toggle interrupts off
	stxa	%o0, [%o1] ASI_DMMU		! install target ctx
	stxa	%g0, [%g3] ASI_DMMU_DEMAP
	stxa	%g0, [%g3] ASI_IMMU_DEMAP
	flush	%g6
	stxa	%g2, [%o1] ASI_DMMU		! restore previous ctx
	sethi	%hi(KERNBASE), %o1
	flush	%o1
	retl
	 wrpr	%g1, 0, %pstate			! restore interrupt state
120
/*
 * The following code flushes one page_size worth.
 */
	.section .kprobes.text, "ax"
	.align	32
	.globl	__flush_icache_page
__flush_icache_page:	/* %o0 = phys_page */
	/* Flush one page out of the I-cache, walking it in 32-byte
	 * steps through its kernel linear-map (PAGE_OFFSET) alias.
	 */
	srlx	%o0, PAGE_SHIFT, %o0
	sethi	%uhi(PAGE_OFFSET), %g1
	sllx	%o0, PAGE_SHIFT, %o0		! page-align the phys address
	sethi	%hi(PAGE_SIZE), %g2
	sllx	%g1, 32, %g1
	add	%o0, %g1, %o0			! phys -> linear-map virtual
1:	subcc	%g2, 32, %g2
	bne,pt	%icc, 1b
	 flush	%o0 + %g2			! flush one line (delay slot)
	retl
	 nop
139
#ifdef DCACHE_ALIASING_POSSIBLE

#if (PAGE_SHIFT != 13)
#error only page shift of 13 is supported by dcache flush
#endif

#define DTAG_MASK 0x3

	/* This routine is Spitfire specific so the hardcoded
	 * D-cache size and line-size are OK.
	 */
	.align	64
	.globl	__flush_dcache_page
__flush_dcache_page:	/* %o0=kaddr, %o1=flush_icache */
	/* Walk every line of the 16K direct-mapped D-cache and
	 * invalidate any line whose tag matches this page's physical
	 * address; optionally tail-call the I-cache flush as well.
	 */
	sethi	%uhi(PAGE_OFFSET), %g1
	sllx	%g1, 32, %g1
	sub	%o0, %g1, %o0			! physical address
	srlx	%o0, 11, %o0			! make D-cache TAG
	sethi	%hi(1 << 14), %o2		! D-cache size
	sub	%o2, (1 << 5), %o2		! D-cache line size
1:	ldxa	[%o2] ASI_DCACHE_TAG, %o3	! load D-cache TAG
	andcc	%o3, DTAG_MASK, %g0		! Valid?
	be,pn	%xcc, 2f			! Nope, branch
	 andn	%o3, DTAG_MASK, %o3		! Clear valid bits
	cmp	%o3, %o0			! TAG match?
	bne,pt	%xcc, 2f			! Nope, branch
	 nop
	stxa	%g0, [%o2] ASI_DCACHE_TAG	! Invalidate TAG
	membar	#Sync
2:	brnz,pt	%o2, 1b				! more lines to scan?
	 sub	%o2, (1 << 5), %o2		! D-cache line size

	/* The I-cache does not snoop local stores so we
	 * better flush that too when necessary.
	 */
	brnz,pt	%o1, __flush_icache_page	! tail-call when requested;
	 sllx	%o0, 11, %o0			! delay slot rebuilds the address arg
	retl
	 nop

#endif /* DCACHE_ALIASING_POSSIBLE */

	.previous
183
	/* Cheetah specific versions, patched at boot time. */
__cheetah_flush_tlb_mm:	/* 19 insns */
	/* Flush all TLB entries for ctx %o0.  Cheetah cannot perform
	 * I-MMU demaps from the secondary context, so run at TL=1
	 * (translations then use the nucleus ctx) and temporarily
	 * borrow PRIMARY_CONTEXT instead.
	 */
	rdpr	%pstate, %g7
	andn	%g7, PSTATE_IE, %g2
	wrpr	%g2, 0x0, %pstate		! disable interrupts
	wrpr	%g0, 1, %tl			! raise to trap level 1
	mov	PRIMARY_CONTEXT, %o2
	mov	0x40, %g3			! demap-context op, primary ctx
	ldxa	[%o2] ASI_DMMU, %g2		! save current primary ctx
	srlx	%g2, CTX_PGSZ1_NUC_SHIFT, %o1
	sllx	%o1, CTX_PGSZ1_NUC_SHIFT, %o1
	or	%o0, %o1, %o0	/* Preserve nucleus page size fields */
	stxa	%o0, [%o2] ASI_DMMU		! install target ctx as primary
	stxa	%g0, [%g3] ASI_DMMU_DEMAP
	stxa	%g0, [%g3] ASI_IMMU_DEMAP
	stxa	%g2, [%o2] ASI_DMMU		! restore primary ctx
	sethi	%hi(KERNBASE), %o2
	flush	%o2
	wrpr	%g0, 0, %tl			! back to trap level 0
	retl
	 wrpr	%g7, 0x0, %pstate		! restore interrupt state
205
__cheetah_flush_tlb_pending:	/* 27 insns */
	/* Demap a batch of user virtual addresses (Cheetah version).
	 * As in __cheetah_flush_tlb_mm, run at TL=1 with the target
	 * ctx borrowed into PRIMARY_CONTEXT, since Cheetah cannot do
	 * I-MMU demaps from the secondary context.  Low bit of each
	 * vaddr entry requests an I-TLB demap as well.
	 * %o0 = context, %o1 = nr, %o2 = vaddrs[]
	 */
	rdpr	%pstate, %g7
	sllx	%o1, 3, %o1			! nr -> byte offset into vaddrs[]
	andn	%g7, PSTATE_IE, %g2
	wrpr	%g2, 0x0, %pstate		! disable interrupts
	wrpr	%g0, 1, %tl			! raise to trap level 1
	mov	PRIMARY_CONTEXT, %o4
	ldxa	[%o4] ASI_DMMU, %g2		! save current primary ctx
	srlx	%g2, CTX_PGSZ1_NUC_SHIFT, %o3
	sllx	%o3, CTX_PGSZ1_NUC_SHIFT, %o3
	or	%o0, %o3, %o0	/* Preserve nucleus page size fields */
	stxa	%o0, [%o4] ASI_DMMU		! install target ctx as primary
1:	sub	%o1, (1 << 3), %o1
	ldx	[%o2 + %o1], %o3
	andcc	%o3, 1, %g0			! I-TLB demap requested?
	be,pn	%icc, 2f
	 andn	%o3, 1, %o3
	stxa	%g0, [%o3] ASI_IMMU_DEMAP
2:	stxa	%g0, [%o3] ASI_DMMU_DEMAP
	membar	#Sync
	brnz,pt	%o1, 1b				! more entries?
	 nop
	stxa	%g2, [%o4] ASI_DMMU		! restore primary ctx
	sethi	%hi(KERNBASE), %o4
	flush	%o4
	wrpr	%g0, 0, %tl			! back to trap level 0
	retl
	 wrpr	%g7, 0x0, %pstate		! restore interrupt state
235
#ifdef DCACHE_ALIASING_POSSIBLE
__cheetah_flush_dcache_page: /* 11 insns */
	/* Invalidate a page from the D-cache by physical address,
	 * 32 bytes (one line) at a time.
	 * %o0 = kernel linear-map address of the page.
	 */
	sethi	%uhi(PAGE_OFFSET), %g1
	sllx	%g1, 32, %g1
	sub	%o0, %g1, %o0			! linear-map -> physical
	sethi	%hi(PAGE_SIZE), %o4
1:	subcc	%o4, (1 << 5), %o4		! step one 32-byte line
	stxa	%g0, [%o0 + %o4] ASI_DCACHE_INVALIDATE
	membar	#Sync
	bne,pt	%icc, 1b
	 nop
	retl	/* I-cache flush never needed on Cheetah, see callers. */
	 nop
#endif /* DCACHE_ALIASING_POSSIBLE */
250
	/* Hypervisor specific versions, patched at boot time. */
__hypervisor_tlb_tl0_error:
	/* Error path for the TL0 hypervisor TLB ops below: report the
	 * failure via hypervisor_tlbop_error().
	 * %o0 = hypervisor error code, %o1 = op/trap number.
	 */
	save	%sp, -192, %sp
	mov	%i0, %o0
	call	hypervisor_tlbop_error
	 mov	%i1, %o1
	ret
	 restore
259
__hypervisor_flush_tlb_mm: /* 10 insns */
	/* sun4v: demap every mapping of context %o0 via the
	 * HV_FAST_MMU_DEMAP_CTX hypervisor service.
	 */
	mov	%o0, %o2	/* ARG2: mmu context */
	mov	0, %o0		/* ARG0: CPU lists unimplemented */
	mov	0, %o1		/* ARG1: CPU lists unimplemented */
	mov	HV_MMU_ALL, %o3	/* ARG3: flags */
	mov	HV_FAST_MMU_DEMAP_CTX, %o5
	ta	HV_FAST_TRAP
	brnz,pn	%o0, __hypervisor_tlb_tl0_error	! nonzero %o0 = HV error
	 mov	HV_FAST_MMU_DEMAP_CTX, %o1	! op number for the report
	retl
	 nop
271
__hypervisor_flush_tlb_pending: /* 16 insns */
	/* sun4v: unmap each vaddr in the batch from context %o0 via
	 * HV_MMU_UNMAP_ADDR_TRAP.  The per-entry IMMU bit is stripped
	 * by the page-align shifts; the hypervisor call flushes both
	 * MMUs (HV_MMU_ALL) regardless.
	 * %o0 = context, %o1 = nr, %o2 = vaddrs[]
	 */
	sllx	%o1, 3, %g1		! nr -> byte offset into vaddrs[]
	mov	%o2, %g2		! stash array base
	mov	%o0, %g3		! stash context
1:	sub	%g1, (1 << 3), %g1
	ldx	[%g2 + %g1], %o0 /* ARG0: vaddr + IMMU-bit */
	mov	%g3, %o1	/* ARG1: mmu context */
	mov	HV_MMU_ALL, %o2	/* ARG2: flags */
	srlx	%o0, PAGE_SHIFT, %o0
	sllx	%o0, PAGE_SHIFT, %o0	! page-align, dropping the IMMU bit
	ta	HV_MMU_UNMAP_ADDR_TRAP
	brnz,pn	%o0, __hypervisor_tlb_tl0_error	! nonzero %o0 = HV error
	 mov	HV_MMU_UNMAP_ADDR_TRAP, %o1	! op number for the report
	brnz,pt	%g1, 1b			! more entries?
	 nop
	retl
	 nop
290
__hypervisor_flush_tlb_kernel_range: /* 16 insns */
	/* sun4v: unmap each kernel page in [start, end) via
	 * HV_MMU_UNMAP_ADDR_TRAP with context 0.
	 * %o0=start, %o1=end -- both assumed page aligned.
	 */
	cmp	%o0, %o1
	be,pn	%xcc, 2f		! empty range, nothing to do
	 sethi	%hi(PAGE_SIZE), %g3
	mov	%o0, %g1		! stash start
	sub	%o1, %g1, %g2
	sub	%g2, %g3, %g2		! %g2 = offset of last page
1:	add	%g1, %g2, %o0	/* ARG0: virtual address */
	mov	0, %o1		/* ARG1: mmu context */
	mov	HV_MMU_ALL, %o2	/* ARG2: flags */
	ta	HV_MMU_UNMAP_ADDR_TRAP
	brnz,pn	%o0, __hypervisor_tlb_tl0_error	! nonzero %o0 = HV error
	 mov	HV_MMU_UNMAP_ADDR_TRAP, %o1	! op number for the report
	brnz,pt	%g2, 1b			! walk offsets down to zero
	 sub	%g2, %g3, %g2
2:	retl
	 nop
309
#ifdef DCACHE_ALIASING_POSSIBLE
	/* XXX Niagara and friends have an 8K cache, so no aliasing is
	 * XXX possible, but nothing explicit in the Hypervisor API
	 * XXX guarantees this.
	 */
__hypervisor_flush_dcache_page:	/* 2 insns */
	/* Deliberate no-op: see the aliasing note above. */
	retl
	 nop
#endif
319
tlb_patch_one:
	/* Copy %o2 instruction words from %o1 to %o0, flushing the
	 * I-cache for each word written.  Used to install the
	 * CPU-specific flush routines over the generic entry points.
	 * %o0 = dest, %o1 = src, %o2 = number of instructions
	 */
1:	lduw	[%o1], %g1
	stw	%g1, [%o0]
	flush	%o0			! keep the I-cache coherent
	subcc	%o2, 1, %o2
	add	%o1, 4, %o1
	bne,pt	%icc, 1b
	 add	%o0, 4, %o0
	retl
	 nop
330
	.globl	cheetah_patch_cachetlbops
cheetah_patch_cachetlbops:
	/* Install the Cheetah TLB/cache flush variants over the
	 * generic entry points.  Each instruction count passed to
	 * tlb_patch_one must match the "N insns" annotation on the
	 * corresponding source routine above.
	 */
	save	%sp, -128, %sp

	sethi	%hi(__flush_tlb_mm), %o0
	or	%o0, %lo(__flush_tlb_mm), %o0
	sethi	%hi(__cheetah_flush_tlb_mm), %o1
	or	%o1, %lo(__cheetah_flush_tlb_mm), %o1
	call	tlb_patch_one
	 mov	19, %o2

	sethi	%hi(__flush_tlb_pending), %o0
	or	%o0, %lo(__flush_tlb_pending), %o0
	sethi	%hi(__cheetah_flush_tlb_pending), %o1
	or	%o1, %lo(__cheetah_flush_tlb_pending), %o1
	call	tlb_patch_one
	 mov	27, %o2

#ifdef DCACHE_ALIASING_POSSIBLE
	sethi	%hi(__flush_dcache_page), %o0
	or	%o0, %lo(__flush_dcache_page), %o0
	sethi	%hi(__cheetah_flush_dcache_page), %o1
	or	%o1, %lo(__cheetah_flush_dcache_page), %o1
	call	tlb_patch_one
	 mov	11, %o2
#endif /* DCACHE_ALIASING_POSSIBLE */

	ret
	 restore
360
#ifdef CONFIG_SMP
	/* These are all called by the slaves of a cross call, at
	 * trap level 1, with interrupts fully disabled.
	 *
	 * Register usage:
	 * %g5	mm->context	(all tlb flushes)
	 * %g1	address arg 1	(tlb page and range flushes)
	 * %g7	address arg 2	(tlb range flush only)
	 *
	 * %g6	scratch 1
	 * %g2	scratch 2
	 * %g3	scratch 3
	 * %g4	scratch 4
	 */
	.align	32
	.globl	xcall_flush_tlb_mm
xcall_flush_tlb_mm:	/* 21 insns */
	/* Cross-call slave: demap all of context %g5 (spitfire/cheetah
	 * version; boot-patched on sun4v).  Trailing nops pad the slot
	 * for the larger patched variant.
	 */
	mov	PRIMARY_CONTEXT, %g2
	ldxa	[%g2] ASI_DMMU, %g3		! save current primary ctx
	srlx	%g3, CTX_PGSZ1_NUC_SHIFT, %g4
	sllx	%g4, CTX_PGSZ1_NUC_SHIFT, %g4
	or	%g5, %g4, %g5	/* Preserve nucleus page size fields */
	stxa	%g5, [%g2] ASI_DMMU		! install target ctx
	mov	0x40, %g4			! demap-context op, primary ctx
	stxa	%g0, [%g4] ASI_DMMU_DEMAP
	stxa	%g0, [%g4] ASI_IMMU_DEMAP
	stxa	%g3, [%g2] ASI_DMMU		! restore primary ctx
	retry
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
399
	.globl	xcall_flush_tlb_pending
xcall_flush_tlb_pending:	/* 21 insns */
	/* Cross-call slave: demap a batch of user vaddrs from context
	 * %g5 (spitfire/cheetah version; boot-patched on sun4v).  Low
	 * bit of each vaddr entry requests an I-TLB demap as well.
	 * %g5=context, %g1=nr, %g7=vaddrs[]
	 */
	sllx	%g1, 3, %g1			! nr -> byte offset into vaddrs[]
	mov	PRIMARY_CONTEXT, %g4
	ldxa	[%g4] ASI_DMMU, %g2		! save current primary ctx
	srlx	%g2, CTX_PGSZ1_NUC_SHIFT, %g4
	sllx	%g4, CTX_PGSZ1_NUC_SHIFT, %g4
	or	%g5, %g4, %g5	/* preserve nucleus page size fields */
	mov	PRIMARY_CONTEXT, %g4
	stxa	%g5, [%g4] ASI_DMMU		! install target ctx
1:	sub	%g1, (1 << 3), %g1
	ldx	[%g7 + %g1], %g5
	andcc	%g5, 0x1, %g0			! I-TLB demap requested?
	be,pn	%icc, 2f
	 andn	%g5, 0x1, %g5
	stxa	%g0, [%g5] ASI_IMMU_DEMAP
2:	stxa	%g0, [%g5] ASI_DMMU_DEMAP
	membar	#Sync
	brnz,pt	%g1, 1b				! more entries?
	 nop
	stxa	%g2, [%g4] ASI_DMMU		! restore primary ctx
	retry
	nop
425
	.globl	xcall_flush_tlb_kernel_range
xcall_flush_tlb_kernel_range:	/* 25 insns */
	/* Cross-call slave: demap kernel pages in [%g1, %g7] from the
	 * nucleus context (generic version; boot-patched on sun4v).
	 */
	sethi	%hi(PAGE_SIZE - 1), %g2
	or	%g2, %lo(PAGE_SIZE - 1), %g2
	andn	%g1, %g2, %g1			! page-align start
	andn	%g7, %g2, %g7			! page-align end
	sub	%g7, %g1, %g3
	add	%g2, 1, %g2			! %g2 = PAGE_SIZE
	sub	%g3, %g2, %g3			! %g3 = offset of last page
	or	%g1, 0x20, %g1			! Nucleus
1:	stxa	%g0, [%g1 + %g3] ASI_DMMU_DEMAP
	stxa	%g0, [%g1 + %g3] ASI_IMMU_DEMAP
	membar	#Sync
	brnz,pt	%g3, 1b				! walk offsets down to zero
	 sub	%g3, %g2, %g3
	retry
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
453
	/* This runs in a very controlled environment, so we do
	 * not need to worry about BH races etc.
	 */
	.globl	xcall_sync_tick
xcall_sync_tick:

	/* On pre-sun4v the wrpr switches to the interrupt/alternate
	 * globals; the sun4v_2insn_patch section NOPs both insns out
	 * at boot on hypervisor systems.
	 */
661:	rdpr	%pstate, %g2
	wrpr	%g2, PSTATE_IG | PSTATE_AG, %pstate
	.section	.sun4v_2insn_patch, "ax"
	.word	661b
	nop
	nop
	.previous

	rdpr	%pil, %g2
	wrpr	%g0, PIL_NORMAL_MAX, %pil
	sethi	%hi(109f), %g7			! return point for etrap_irq
	b,pt	%xcc, etrap_irq
109:	 or	%g7, %lo(109b), %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call	trace_hardirqs_off
	 nop
#endif
	call	smp_synchronize_tick_client
	 nop
	b	rtrap_xcall
	 ldx	[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
481
482    .globl xcall_fetch_glob_regs
483xcall_fetch_glob_regs:
484    sethi %hi(global_reg_snapshot), %g1
485    or %g1, %lo(global_reg_snapshot), %g1
486    __GET_CPUID(%g2)
487    sllx %g2, 6, %g3
488    add %g1, %g3, %g1
489    rdpr %tstate, %g7
490    stx %g7, [%g1 + GR_SNAP_TSTATE]
491    rdpr %tpc, %g7
492    stx %g7, [%g1 + GR_SNAP_TPC]
493    rdpr %tnpc, %g7
494    stx %g7, [%g1 + GR_SNAP_TNPC]
495    stx %o7, [%g1 + GR_SNAP_O7]
496    stx %i7, [%g1 + GR_SNAP_I7]
497    /* Don't try this at home kids... */
498    rdpr %cwp, %g2
499    sub %g2, 1, %g7
500    wrpr %g7, %cwp
501    mov %i7, %g7
502    wrpr %g2, %cwp
503    stx %g7, [%g1 + GR_SNAP_RPC]
504    sethi %hi(trap_block), %g7
505    or %g7, %lo(trap_block), %g7
506    sllx %g2, TRAP_BLOCK_SZ_SHIFT, %g2
507    add %g7, %g2, %g7
508    ldx [%g7 + TRAP_PER_CPU_THREAD], %g3
509    stx %g3, [%g1 + GR_SNAP_THREAD]
510    retry
511
#ifdef DCACHE_ALIASING_POSSIBLE
	.align	32
	.globl	xcall_flush_dcache_page_cheetah
xcall_flush_dcache_page_cheetah: /* %g1 == physical page address */
	/* Cross-call slave: invalidate every 32-byte D-cache line of
	 * this page by physical address.
	 */
	sethi	%hi(PAGE_SIZE), %g3
1:	subcc	%g3, (1 << 5), %g3		! step one 32-byte line
	stxa	%g0, [%g1 + %g3] ASI_DCACHE_INVALIDATE
	membar	#Sync
	bne,pt	%icc, 1b
	 nop
	retry
	nop
#endif /* DCACHE_ALIASING_POSSIBLE */
525
	.globl	xcall_flush_dcache_page_spitfire
xcall_flush_dcache_page_spitfire: /* %g1 == physical page address
				     %g7 == kernel page virtual address
				     %g5 == (page->mapping != NULL)  */
#ifdef DCACHE_ALIASING_POSSIBLE
	/* Cross-call slave: walk the 16K direct-mapped D-cache and
	 * invalidate any line whose tag matches this physical page,
	 * then flush the page from the I-cache when it is mapped.
	 */
	srlx	%g1, (13 - 2), %g1		! Form tag comparitor
	sethi	%hi(L1DCACHE_SIZE), %g3		! D$ size == 16K
	sub	%g3, (1 << 5), %g3		! D$ linesize == 32
1:	ldxa	[%g3] ASI_DCACHE_TAG, %g2
	andcc	%g2, 0x3, %g0			! line valid?
	be,pn	%xcc, 2f
	 andn	%g2, 0x3, %g2			! clear valid bits
	cmp	%g2, %g1			! tag match?
	bne,pt	%xcc, 2f
	 nop
	stxa	%g0, [%g3] ASI_DCACHE_TAG	! invalidate the line
	membar	#Sync
2:	cmp	%g3, 0				! more lines to scan?
	bne,pt	%xcc, 1b
	 sub	%g3, (1 << 5), %g3

	brz,pn	%g5, 2f				! unmapped -> skip I$ flush
#endif /* DCACHE_ALIASING_POSSIBLE */
	 sethi	%hi(PAGE_SIZE), %g3

1:	flush	%g7				! flush I$ one line at a time
	subcc	%g3, (1 << 5), %g3
	bne,pt	%icc, 1b
	 add	%g7, (1 << 5), %g7

2:	retry
	nop
	nop
560
	/* %g5: error
	 * %g6: tlb op
	 */
__hypervisor_tlb_xcall_error:
	/* Error path for the hypervisor cross-call TLB ops below:
	 * build a trap frame via etrap, then report the failed op via
	 * hypervisor_tlbop_error_xcall.
	 */
	mov	%g5, %g4			! error code
	mov	%g6, %g5			! tlb op number
	ba,pt	%xcc, etrap
	 rd	%pc, %g7			! etrap returns here
	mov	%l4, %o0	! NOTE(review): assumes etrap leaves %g4/%g5
	call	hypervisor_tlbop_error_xcall	! in %l4/%l5 -- confirm in etrap
	 mov	%l5, %o1
	ba,a,pt	%xcc, rtrap
573
	.globl	__hypervisor_xcall_flush_tlb_mm
__hypervisor_xcall_flush_tlb_mm: /* 21 insns */
	/* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */
	/* sun4v cross-call slave: demap all of context %g5 via the
	 * hypervisor.  The interrupted code's %o registers are
	 * preserved in globals across the fast trap.
	 */
	mov	%o0, %g2			! save interrupted %o regs
	mov	%o1, %g3
	mov	%o2, %g4
	mov	%o3, %g1
	mov	%o5, %g7
	clr	%o0		/* ARG0: CPU lists unimplemented */
	clr	%o1		/* ARG1: CPU lists unimplemented */
	mov	%g5, %o2	/* ARG2: mmu context */
	mov	HV_MMU_ALL, %o3	/* ARG3: flags */
	mov	HV_FAST_MMU_DEMAP_CTX, %o5
	ta	HV_FAST_TRAP
	mov	HV_FAST_MMU_DEMAP_CTX, %g6	! op number for error report
	brnz,pn	%o0, __hypervisor_tlb_xcall_error	! nonzero = HV error
	 mov	%o0, %g5			! error code for error report
	mov	%g2, %o0			! restore interrupted %o regs
	mov	%g3, %o1
	mov	%g4, %o2
	mov	%g1, %o3
	mov	%g7, %o5
	membar	#Sync
	retry
598
	.globl	__hypervisor_xcall_flush_tlb_pending
__hypervisor_xcall_flush_tlb_pending: /* 21 insns */
	/* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4,g6=scratch */
	/* sun4v cross-call slave: unmap each vaddr in the batch from
	 * context %g5 via HV_MMU_UNMAP_ADDR_TRAP.  The interrupted
	 * code's %o0-%o2 are preserved in globals across the traps.
	 */
	sllx	%g1, 3, %g1			! nr -> byte offset into vaddrs[]
	mov	%o0, %g2			! save interrupted %o regs
	mov	%o1, %g3
	mov	%o2, %g4
1:	sub	%g1, (1 << 3), %g1
	ldx	[%g7 + %g1], %o0 /* ARG0: virtual address */
	mov	%g5, %o1	/* ARG1: mmu context */
	mov	HV_MMU_ALL, %o2	/* ARG2: flags */
	srlx	%o0, PAGE_SHIFT, %o0
	sllx	%o0, PAGE_SHIFT, %o0		! page-align, dropping IMMU bit
	ta	HV_MMU_UNMAP_ADDR_TRAP
	mov	HV_MMU_UNMAP_ADDR_TRAP, %g6	! op number for error report
	brnz,a,pn %o0, __hypervisor_tlb_xcall_error	! nonzero = HV error
	 mov	%o0, %g5			! error code (annulled if ok)
	brnz,pt	%g1, 1b				! more entries?
	 nop
	mov	%g2, %o0			! restore interrupted %o regs
	mov	%g3, %o1
	mov	%g4, %o2
	membar	#Sync
	retry
623
	.globl	__hypervisor_xcall_flush_tlb_kernel_range
__hypervisor_xcall_flush_tlb_kernel_range: /* 25 insns */
	/* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch */
	/* sun4v cross-call slave: unmap each kernel page in
	 * [start, end] via HV_MMU_UNMAP_ADDR_TRAP with context 0.
	 * %g7 is dead once the range length is computed, so it is
	 * reused to preserve the interrupted code's %o2.
	 */
	sethi	%hi(PAGE_SIZE - 1), %g2
	or	%g2, %lo(PAGE_SIZE - 1), %g2
	andn	%g1, %g2, %g1			! page-align start
	andn	%g7, %g2, %g7			! page-align end
	sub	%g7, %g1, %g3
	add	%g2, 1, %g2			! %g2 = PAGE_SIZE (briefly)
	sub	%g3, %g2, %g3			! %g3 = offset of last page
	mov	%o0, %g2			! save interrupted %o regs
	mov	%o1, %g4
	mov	%o2, %g7
1:	add	%g1, %g3, %o0	/* ARG0: virtual address */
	mov	0, %o1		/* ARG1: mmu context */
	mov	HV_MMU_ALL, %o2	/* ARG2: flags */
	ta	HV_MMU_UNMAP_ADDR_TRAP
	mov	HV_MMU_UNMAP_ADDR_TRAP, %g6	! op number for error report
	brnz,pn	%o0, __hypervisor_tlb_xcall_error	! nonzero = HV error
	 mov	%o0, %g5			! error code for error report
	sethi	%hi(PAGE_SIZE), %o2
	brnz,pt	%g3, 1b				! walk offsets down to zero
	 sub	%g3, %o2, %g3
	mov	%g2, %o0			! restore interrupted %o regs
	mov	%g4, %o1
	mov	%g7, %o2
	membar	#Sync
	retry
652
	/* These just get rescheduled to PIL vectors. */
	/* Each cross-call stub below posts a software interrupt at its
	 * PIL and returns; the real work runs in that PIL's handler.
	 */
	.globl	xcall_call_function
xcall_call_function:
	wr	%g0, (1 << PIL_SMP_CALL_FUNC), %set_softint
	retry

	.globl	xcall_call_function_single
xcall_call_function_single:
	wr	%g0, (1 << PIL_SMP_CALL_FUNC_SNGL), %set_softint
	retry

	.globl	xcall_receive_signal
xcall_receive_signal:
	wr	%g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint
	retry

	.globl	xcall_capture
xcall_capture:
	wr	%g0, (1 << PIL_SMP_CAPTURE), %set_softint
	retry

	.globl	xcall_new_mmu_context_version
xcall_new_mmu_context_version:
	wr	%g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint
	retry

#ifdef CONFIG_KGDB
	.globl	xcall_kgdb_capture
xcall_kgdb_capture:
	wr	%g0, (1 << PIL_KGDB_CAPTURE), %set_softint
	retry
#endif

#endif /* CONFIG_SMP */
687
688
	.globl	hypervisor_patch_cachetlbops
hypervisor_patch_cachetlbops:
	/* Install the sun4v hypervisor TLB/cache flush variants over
	 * the generic entry points (and, on SMP, over the cross-call
	 * handlers).  Each instruction count passed to tlb_patch_one
	 * must match the "N insns" annotation on the source routine
	 * and fit within the destination slot.
	 */
	save	%sp, -128, %sp

	sethi	%hi(__flush_tlb_mm), %o0
	or	%o0, %lo(__flush_tlb_mm), %o0
	sethi	%hi(__hypervisor_flush_tlb_mm), %o1
	or	%o1, %lo(__hypervisor_flush_tlb_mm), %o1
	call	tlb_patch_one
	 mov	10, %o2

	sethi	%hi(__flush_tlb_pending), %o0
	or	%o0, %lo(__flush_tlb_pending), %o0
	sethi	%hi(__hypervisor_flush_tlb_pending), %o1
	or	%o1, %lo(__hypervisor_flush_tlb_pending), %o1
	call	tlb_patch_one
	 mov	16, %o2

	sethi	%hi(__flush_tlb_kernel_range), %o0
	or	%o0, %lo(__flush_tlb_kernel_range), %o0
	sethi	%hi(__hypervisor_flush_tlb_kernel_range), %o1
	or	%o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1
	call	tlb_patch_one
	 mov	16, %o2

#ifdef DCACHE_ALIASING_POSSIBLE
	sethi	%hi(__flush_dcache_page), %o0
	or	%o0, %lo(__flush_dcache_page), %o0
	sethi	%hi(__hypervisor_flush_dcache_page), %o1
	or	%o1, %lo(__hypervisor_flush_dcache_page), %o1
	call	tlb_patch_one
	 mov	2, %o2
#endif /* DCACHE_ALIASING_POSSIBLE */

#ifdef CONFIG_SMP
	sethi	%hi(xcall_flush_tlb_mm), %o0
	or	%o0, %lo(xcall_flush_tlb_mm), %o0
	sethi	%hi(__hypervisor_xcall_flush_tlb_mm), %o1
	or	%o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1
	call	tlb_patch_one
	 mov	21, %o2

	sethi	%hi(xcall_flush_tlb_pending), %o0
	or	%o0, %lo(xcall_flush_tlb_pending), %o0
	sethi	%hi(__hypervisor_xcall_flush_tlb_pending), %o1
	or	%o1, %lo(__hypervisor_xcall_flush_tlb_pending), %o1
	call	tlb_patch_one
	 mov	21, %o2

	sethi	%hi(xcall_flush_tlb_kernel_range), %o0
	or	%o0, %lo(xcall_flush_tlb_kernel_range), %o0
	sethi	%hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1
	or	%o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1
	call	tlb_patch_one
	 mov	25, %o2
#endif /* CONFIG_SMP */

	ret
	 restore
748
