
/*
 * arch/ia64/kvm/vmm_ivt.S
 *
 * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
 *      Stephane Eranian <eranian@hpl.hp.com>
 *      David Mosberger <davidm@hpl.hp.com>
 * Copyright (C) 2000, 2002-2003 Intel Co
 *      Asit Mallick <asit.k.mallick@intel.com>
 *      Suresh Siddha <suresh.b.siddha@intel.com>
 *      Kenneth Chen <kenneth.w.chen@intel.com>
 *      Fenghua Yu <fenghua.yu@intel.com>
 *
 * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP
 * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now
 *          uses virtual PT.
 *
 * 07/6/20 Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
 *         Supporting Intel virtualization architecture
 *
 */

/*
 * This file defines the interruption vector table used by the CPU.
 * It does not include one entry per possible cause of interruption.
 *
 * The first 20 entries of the table contain 64 bundles each while the
 * remaining 48 entries contain only 16 bundles each.
 *
 * The 64 bundles are used to allow inlining the whole handler for
 * critical interruptions like TLB misses.
 *
 * For each entry, the comment is as follows:
 *
 *     // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
 *        entry offset ----/     /         /              /  /
 *        entry number ---------/         /              /  /
 *        size of the entry -------------/              /  /
 *        vector name -------------------------------/  /
 *        interruptions triggering this vector ----------/
 *
 * The table is 32KB in size and must be aligned on a 32KB boundary.
 * (The CPU ignores the 15 lower bits of the address.)
 *
 * Table is based upon EAS2.6 (Oct 1999)
 */

#include <asm/asmmacro.h>
#include <asm/cache.h>
#include <asm/pgtable.h>

#include "asm-offsets.h"
#include "vcpu.h"
#include "kvm_minstate.h"
#include "vti.h"

#if 1
# define PSR_DEFAULT_BITS psr.ac
#else
# define PSR_DEFAULT_BITS 0
#endif
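/*
 * Note: the "#if 1" above merely selects whether psr.ac (alignment check)
 * is among the PSR bits the state-save macros set by default; switching
 * to the #else branch would leave alignment checking off.
 */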

#define KVM_FAULT(n) \
    kvm_fault_##n:; \
    mov r19=n;; \
    br.sptk.many kvm_vmm_panic; \
    ;;

#define KVM_REFLECT(n) \
    mov r31=pr; \
    mov r19=n; /* prepare to save predicates */ \
    mov r29=cr.ipsr; \
    ;; \
    tbit.z p6,p7=r29,IA64_PSR_VM_BIT; \
(p7) br.sptk.many kvm_dispatch_reflection; \
    br.sptk.many kvm_vmm_panic;

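/*
 * KVM_FAULT(n) is the catch-all: it records the vector number in r19 and
 * panics the VMM. KVM_REFLECT(n) tests cr.ipsr.vm: an interruption taken
 * while the guest was running (vm=1, p7) is reflected back into the guest
 * via kvm_dispatch_reflection; a fault in VMM context (vm=0) indicates a
 * VMM bug and falls through to kvm_vmm_panic below.
 */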
GLOBAL_ENTRY(kvm_vmm_panic)
    KVM_SAVE_MIN_WITH_COVER_R19
    alloc r14=ar.pfs,0,0,1,0
    mov out0=r15
    adds r3=8,r2 // set up second base pointer
    ;;
    ssm psr.ic
    ;;
    srlz.i // guarantee that interruption collection is on
    ;;
    (p15) ssm psr.i // restore psr.i
    addl r14=@gprel(ia64_leave_hypervisor),gp
    ;;
    KVM_SAVE_REST
    mov rp=r14
    ;;
    br.call.sptk.many b6=vmm_panic_handler;
END(kvm_vmm_panic)

    .section .text..ivt,"ax"

    .align 32768 // align on 32KB boundary
    .global kvm_ia64_ivt
kvm_ia64_ivt:
///////////////////////////////////////////////////////////////
// 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
ENTRY(kvm_vhpt_miss)
    KVM_FAULT(0)
END(kvm_vhpt_miss)

    .org kvm_ia64_ivt+0x400
////////////////////////////////////////////////////////////////
// 0x0400 Entry 1 (size 64 bundles) ITLB (21)
ENTRY(kvm_itlb_miss)
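    /*
     * A miss taken while the guest runs (ipsr.vm=1) is dispatched to the
     * C handler; a miss in VMM context (ipsr.vm=0, p6) is assumed to be
     * against the identity-mapped VMM area and is fixed up inline by the
     * alt handler below.
     */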
    mov r31 = pr
    mov r29=cr.ipsr;
    ;;
    tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
(p6) br.sptk kvm_alt_itlb_miss
    mov r19 = 1
    br.sptk kvm_itlb_miss_dispatch
    KVM_FAULT(1);
END(kvm_itlb_miss)

    .org kvm_ia64_ivt+0x0800
//////////////////////////////////////////////////////////////////
// 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
ENTRY(kvm_dtlb_miss)
    mov r31 = pr
    mov r29=cr.ipsr;
    ;;
    tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
(p6) br.sptk kvm_alt_dtlb_miss
    br.sptk kvm_dtlb_miss_dispatch
END(kvm_dtlb_miss)

    .org kvm_ia64_ivt+0x0c00
////////////////////////////////////////////////////////////////////
// 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
ENTRY(kvm_alt_itlb_miss)
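    /*
     * Build a PTE that identity-maps the faulting address: mask cr.ifa
     * down to IA64_MAX_PHYS_BITS and a 4KB frame, OR in the PAGE_KERNEL
     * attribute bits, program cr.itir with the granule size, and insert
     * the translation with itc.i (kvm_alt_dtlb_miss below is identical,
     * but inserts with itc.d).
     */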
    mov r16=cr.ifa // get address that caused the TLB miss
    ;;
    movl r17=PAGE_KERNEL
    mov r24=cr.ipsr
    movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
    ;;
    and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
    ;;
    or r19=r17,r19 // insert PTE control bits into r19
    ;;
    movl r20=IA64_GRANULE_SHIFT<<2
    ;;
    mov cr.itir=r20
    ;;
    itc.i r19 // insert the TLB entry
    mov pr=r31,-1
    rfi
END(kvm_alt_itlb_miss)

    .org kvm_ia64_ivt+0x1000
/////////////////////////////////////////////////////////////////////
// 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
ENTRY(kvm_alt_dtlb_miss)
    mov r16=cr.ifa // get address that caused the TLB miss
    ;;
    movl r17=PAGE_KERNEL
    movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
    mov r24=cr.ipsr
    ;;
    and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
    ;;
    or r19=r19,r17 // insert PTE control bits into r19
    ;;
    movl r20=IA64_GRANULE_SHIFT<<2
    ;;
    mov cr.itir=r20
    ;;
    itc.d r19 // insert the TLB entry
    mov pr=r31,-1
    rfi
END(kvm_alt_dtlb_miss)

    .org kvm_ia64_ivt+0x1400
//////////////////////////////////////////////////////////////////////
// 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
ENTRY(kvm_nested_dtlb_miss)
    KVM_FAULT(5)
END(kvm_nested_dtlb_miss)

    .org kvm_ia64_ivt+0x1800
/////////////////////////////////////////////////////////////////////
// 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
ENTRY(kvm_ikey_miss)
    KVM_REFLECT(6)
END(kvm_ikey_miss)

    .org kvm_ia64_ivt+0x1c00
/////////////////////////////////////////////////////////////////////
// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
ENTRY(kvm_dkey_miss)
    KVM_REFLECT(7)
END(kvm_dkey_miss)

    .org kvm_ia64_ivt+0x2000
////////////////////////////////////////////////////////////////////
// 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
ENTRY(kvm_dirty_bit)
    KVM_REFLECT(8)
END(kvm_dirty_bit)

    .org kvm_ia64_ivt+0x2400
////////////////////////////////////////////////////////////////////
// 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
ENTRY(kvm_iaccess_bit)
    KVM_REFLECT(9)
END(kvm_iaccess_bit)

    .org kvm_ia64_ivt+0x2800
///////////////////////////////////////////////////////////////////
// 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
ENTRY(kvm_daccess_bit)
    KVM_REFLECT(10)
END(kvm_daccess_bit)

    .org kvm_ia64_ivt+0x2c00
/////////////////////////////////////////////////////////////////
// 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
ENTRY(kvm_break_fault)
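    /*
     * The guest's break immediate arrives in cr.iim; this is the entry
     * through which guest break instructions (e.g. hypercalls) reach
     * kvm_ia64_handle_break, which receives cr.ifa, a pt_regs pointer,
     * cr.isr, and cr.iim.
     */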
    mov r31=pr
    mov r19=11
    mov r29=cr.ipsr
    ;;
    KVM_SAVE_MIN_WITH_COVER_R19
    ;;
    alloc r14=ar.pfs,0,0,4,0 //(must be first in insn group!)
    mov out0=cr.ifa
    mov out2=cr.isr // FIXME: pity to make this slow access twice
    mov out3=cr.iim // FIXME: pity to make this slow access twice
    adds r3=8,r2 // set up second base pointer
    ;;
    ssm psr.ic
    ;;
    srlz.i // guarantee that interruption collection is on
    ;;
    (p15) ssm psr.i // restore psr.i
    addl r14=@gprel(ia64_leave_hypervisor),gp
    ;;
    KVM_SAVE_REST
    mov rp=r14
    ;;
    adds out1=16,sp
    br.call.sptk.many b6=kvm_ia64_handle_break
    ;;
END(kvm_break_fault)

    .org kvm_ia64_ivt+0x3000
/////////////////////////////////////////////////////////////////
// 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
ENTRY(kvm_interrupt)
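    /*
     * An interrupt taken while the guest runs (ipsr.vm=1, p7) goes to
     * kvm_dispatch_interrupt. Otherwise the VMM itself was interrupted:
     * the interrupted state is saved inline into a pt_regs frame on the
     * VMM stack (an open-coded variant of KVM_SAVE_MIN/KVM_SAVE_REST),
     * the register bank is switched, and kvm_ia64_handle_irq is called
     * with the return path set to ia64_leave_nested.
     */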
    mov r31=pr // prepare to save predicates
    mov r19=12
    mov r29=cr.ipsr
    ;;
    tbit.z p6,p7=r29,IA64_PSR_VM_BIT
    tbit.z p0,p15=r29,IA64_PSR_I_BIT
    ;;
(p7) br.sptk kvm_dispatch_interrupt
    ;;
    mov r27=ar.rsc /* M */
    mov r20=r1 /* A */
    mov r25=ar.unat /* M */
    mov r26=ar.pfs /* I */
    mov r28=cr.iip /* M */
    cover /* B (or nothing) */
    ;;
    mov r1=sp
    ;;
    invala /* M */
    mov r30=cr.ifs
    ;;
    addl r1=-VMM_PT_REGS_SIZE,r1
    ;;
    adds r17=2*L1_CACHE_BYTES,r1 /* really: biggest cache-line size */
    adds r16=PT(CR_IPSR),r1
    ;;
    lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES
    st8 [r16]=r29 /* save cr.ipsr */
    ;;
    lfetch.fault.excl.nt1 [r17]
    mov r29=b0
    ;;
    adds r16=PT(R8),r1 /* initialize first base pointer */
    adds r17=PT(R9),r1 /* initialize second base pointer */
    mov r18=r0 /* make sure r18 isn't NaT */
    ;;
.mem.offset 0,0; st8.spill [r16]=r8,16
.mem.offset 8,0; st8.spill [r17]=r9,16
    ;;
.mem.offset 0,0; st8.spill [r16]=r10,24
.mem.offset 8,0; st8.spill [r17]=r11,24
    ;;
    st8 [r16]=r28,16 /* save cr.iip */
    st8 [r17]=r30,16 /* save cr.ifs */
    mov r8=ar.fpsr /* M */
    mov r9=ar.csd
    mov r10=ar.ssd
    movl r11=FPSR_DEFAULT /* L-unit */
    ;;
    st8 [r16]=r25,16 /* save ar.unat */
    st8 [r17]=r26,16 /* save ar.pfs */
    shl r18=r18,16 /* compute ar.rsc to be used for "loadrs" */
    ;;
    st8 [r16]=r27,16 /* save ar.rsc */
    adds r17=16,r17 /* skip over ar_rnat field */
    ;;
    st8 [r17]=r31,16 /* save predicates */
    adds r16=16,r16 /* skip over ar_bspstore field */
    ;;
    st8 [r16]=r29,16 /* save b0 */
    st8 [r17]=r18,16 /* save ar.rsc value for "loadrs" */
    ;;
.mem.offset 0,0; st8.spill [r16]=r20,16 /* save original r1 */
.mem.offset 8,0; st8.spill [r17]=r12,16
    adds r12=-16,r1
    /* switch to kernel memory stack (with 16 bytes of scratch) */
    ;;
.mem.offset 0,0; st8.spill [r16]=r13,16
.mem.offset 8,0; st8.spill [r17]=r8,16 /* save ar.fpsr */
    ;;
.mem.offset 0,0; st8.spill [r16]=r15,16
.mem.offset 8,0; st8.spill [r17]=r14,16
    dep r14=-1,r0,60,4
    ;;
.mem.offset 0,0; st8.spill [r16]=r2,16
.mem.offset 8,0; st8.spill [r17]=r3,16
    adds r2=VMM_PT_REGS_R16_OFFSET,r1
    adds r14=VMM_VCPU_GP_OFFSET,r13
    ;;
    mov r8=ar.ccv
    ld8 r14=[r14]
    ;;
    mov r1=r14 /* establish kernel global pointer */
    ;;
    bsw.1
    ;;
    alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group
    mov out0=r13
    ;;
    ssm psr.ic
    ;;
    srlz.i
    ;;
    //(p15) ssm psr.i
    adds r3=8,r2 // set up second base pointer for SAVE_REST
    srlz.i // ensure everybody knows psr.ic is back on
    ;;
.mem.offset 0,0; st8.spill [r2]=r16,16
.mem.offset 8,0; st8.spill [r3]=r17,16
    ;;
.mem.offset 0,0; st8.spill [r2]=r18,16
.mem.offset 8,0; st8.spill [r3]=r19,16
    ;;
.mem.offset 0,0; st8.spill [r2]=r20,16
.mem.offset 8,0; st8.spill [r3]=r21,16
    mov r18=b6
    ;;
.mem.offset 0,0; st8.spill [r2]=r22,16
.mem.offset 8,0; st8.spill [r3]=r23,16
    mov r19=b7
    ;;
.mem.offset 0,0; st8.spill [r2]=r24,16
.mem.offset 8,0; st8.spill [r3]=r25,16
    ;;
.mem.offset 0,0; st8.spill [r2]=r26,16
.mem.offset 8,0; st8.spill [r3]=r27,16
    ;;
.mem.offset 0,0; st8.spill [r2]=r28,16
.mem.offset 8,0; st8.spill [r3]=r29,16
    ;;
.mem.offset 0,0; st8.spill [r2]=r30,16
.mem.offset 8,0; st8.spill [r3]=r31,32
    ;;
    mov ar.fpsr=r11 /* M-unit */
    st8 [r2]=r8,8 /* ar.ccv */
    adds r24=PT(B6)-PT(F7),r3
    ;;
    stf.spill [r2]=f6,32
    stf.spill [r3]=f7,32
    ;;
    stf.spill [r2]=f8,32
    stf.spill [r3]=f9,32
    ;;
    stf.spill [r2]=f10
    stf.spill [r3]=f11
    adds r25=PT(B7)-PT(F11),r3
    ;;
    st8 [r24]=r18,16 /* b6 */
    st8 [r25]=r19,16 /* b7 */
    ;;
    st8 [r24]=r9 /* ar.csd */
    st8 [r25]=r10 /* ar.ssd */
    ;;
    srlz.d // make sure we see the effect of cr.ivr
    addl r14=@gprel(ia64_leave_nested),gp
    ;;
    mov rp=r14
    br.call.sptk.many b6=kvm_ia64_handle_irq
    ;;
END(kvm_interrupt)

    .global kvm_dispatch_vexirq
    .org kvm_ia64_ivt+0x3400
//////////////////////////////////////////////////////////////////////
// 0x3400 Entry 13 (size 64 bundles) Reserved
ENTRY(kvm_virtual_exirq)
    mov r31=pr
    mov r19=13
    mov r30=r0
    ;;
kvm_dispatch_vexirq:
    cmp.eq p6,p0=1,r30
    ;;
(p6) add r29=VMM_VCPU_SAVED_GP_OFFSET,r21
    ;;
(p6) ld8 r1=[r29]
    ;;
    KVM_SAVE_MIN_WITH_COVER_R19
    alloc r14=ar.pfs,0,0,1,0
    mov out0=r13

    ssm psr.ic
    ;;
    srlz.i // guarantee that interruption collection is on
    ;;
    (p15) ssm psr.i // restore psr.i
    adds r3=8,r2 // set up second base pointer
    ;;
    KVM_SAVE_REST
    addl r14=@gprel(ia64_leave_hypervisor),gp
    ;;
    mov rp=r14
    br.call.sptk.many b6=kvm_vexirq
END(kvm_virtual_exirq)

    .org kvm_ia64_ivt+0x3800
/////////////////////////////////////////////////////////////////////
// 0x3800 Entry 14 (size 64 bundles) Reserved
    KVM_FAULT(14)
    // this code segment is from 2.6.16.13

    .org kvm_ia64_ivt+0x3c00
///////////////////////////////////////////////////////////////////////
// 0x3c00 Entry 15 (size 64 bundles) Reserved
    KVM_FAULT(15)

    .org kvm_ia64_ivt+0x4000
///////////////////////////////////////////////////////////////////////
// 0x4000 Entry 16 (size 64 bundles) Reserved
    KVM_FAULT(16)

    .org kvm_ia64_ivt+0x4400
//////////////////////////////////////////////////////////////////////
// 0x4400 Entry 17 (size 64 bundles) Reserved
    KVM_FAULT(17)

    .org kvm_ia64_ivt+0x4800
//////////////////////////////////////////////////////////////////////
// 0x4800 Entry 18 (size 64 bundles) Reserved
    KVM_FAULT(18)

    .org kvm_ia64_ivt+0x4c00
//////////////////////////////////////////////////////////////////////
// 0x4c00 Entry 19 (size 64 bundles) Reserved
    KVM_FAULT(19)

    .org kvm_ia64_ivt+0x5000
//////////////////////////////////////////////////////////////////////
// 0x5000 Entry 20 (size 16 bundles) Page Not Present
ENTRY(kvm_page_not_present)
    KVM_REFLECT(20)
END(kvm_page_not_present)

    .org kvm_ia64_ivt+0x5100
///////////////////////////////////////////////////////////////////////
// 0x5100 Entry 21 (size 16 bundles) Key Permission vector
ENTRY(kvm_key_permission)
    KVM_REFLECT(21)
END(kvm_key_permission)

    .org kvm_ia64_ivt+0x5200
//////////////////////////////////////////////////////////////////////
// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
ENTRY(kvm_iaccess_rights)
    KVM_REFLECT(22)
END(kvm_iaccess_rights)

    .org kvm_ia64_ivt+0x5300
//////////////////////////////////////////////////////////////////////
// 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
ENTRY(kvm_daccess_rights)
    KVM_REFLECT(23)
END(kvm_daccess_rights)

    .org kvm_ia64_ivt+0x5400
/////////////////////////////////////////////////////////////////////
// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
ENTRY(kvm_general_exception)
    KVM_REFLECT(24)
    KVM_FAULT(24)
END(kvm_general_exception)

    .org kvm_ia64_ivt+0x5500
//////////////////////////////////////////////////////////////////////
// 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
ENTRY(kvm_disabled_fp_reg)
    KVM_REFLECT(25)
END(kvm_disabled_fp_reg)

    .org kvm_ia64_ivt+0x5600
////////////////////////////////////////////////////////////////////
// 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
ENTRY(kvm_nat_consumption)
    KVM_REFLECT(26)
END(kvm_nat_consumption)

    .org kvm_ia64_ivt+0x5700
/////////////////////////////////////////////////////////////////////
// 0x5700 Entry 27 (size 16 bundles) Speculation (40)
ENTRY(kvm_speculation_vector)
    KVM_REFLECT(27)
END(kvm_speculation_vector)

    .org kvm_ia64_ivt+0x5800
/////////////////////////////////////////////////////////////////////
// 0x5800 Entry 28 (size 16 bundles) Reserved
    KVM_FAULT(28)

    .org kvm_ia64_ivt+0x5900
///////////////////////////////////////////////////////////////////
// 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
ENTRY(kvm_debug_vector)
    KVM_FAULT(29)
END(kvm_debug_vector)

    .org kvm_ia64_ivt+0x5a00
///////////////////////////////////////////////////////////////
// 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
ENTRY(kvm_unaligned_access)
    KVM_REFLECT(30)
END(kvm_unaligned_access)

    .org kvm_ia64_ivt+0x5b00
//////////////////////////////////////////////////////////////////////
// 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
ENTRY(kvm_unsupported_data_reference)
    KVM_REFLECT(31)
END(kvm_unsupported_data_reference)

    .org kvm_ia64_ivt+0x5c00
////////////////////////////////////////////////////////////////////
// 0x5c00 Entry 32 (size 16 bundles) Floating Point FAULT (65)
ENTRY(kvm_floating_point_fault)
    KVM_REFLECT(32)
END(kvm_floating_point_fault)

    .org kvm_ia64_ivt+0x5d00
/////////////////////////////////////////////////////////////////////
// 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
ENTRY(kvm_floating_point_trap)
    KVM_REFLECT(33)
END(kvm_floating_point_trap)

    .org kvm_ia64_ivt+0x5e00
//////////////////////////////////////////////////////////////////////
// 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
ENTRY(kvm_lower_privilege_trap)
    KVM_REFLECT(34)
END(kvm_lower_privilege_trap)

    .org kvm_ia64_ivt+0x5f00
//////////////////////////////////////////////////////////////////////
// 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
ENTRY(kvm_taken_branch_trap)
    KVM_REFLECT(35)
END(kvm_taken_branch_trap)

    .org kvm_ia64_ivt+0x6000
////////////////////////////////////////////////////////////////////
// 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
ENTRY(kvm_single_step_trap)
    KVM_REFLECT(36)
END(kvm_single_step_trap)
    .global kvm_virtualization_fault_back
    .org kvm_ia64_ivt+0x6100
/////////////////////////////////////////////////////////////////////
// 0x6100 Entry 37 (size 16 bundles) Virtualization Fault
ENTRY(kvm_virtualization_fault)
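    /*
     * On entry r24 is expected to hold the virtualization-fault cause
     * and r25 the faulting opcode (handoff state set up before this
     * point). Frequent privileged operations get hand-coded fast paths
     * (kvm_asm_*); everything else falls back to
     * kvm_dispatch_virtualization_fault for emulation in C.
     */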
    mov r31=pr
    adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
    ;;
    st8 [r16] = r1
    adds r17 = VMM_VCPU_GP_OFFSET, r21
    ;;
    ld8 r1 = [r17]
    cmp.eq p6,p0=EVENT_MOV_FROM_AR,r24
    cmp.eq p7,p0=EVENT_MOV_FROM_RR,r24
    cmp.eq p8,p0=EVENT_MOV_TO_RR,r24
    cmp.eq p9,p0=EVENT_RSM,r24
    cmp.eq p10,p0=EVENT_SSM,r24
    cmp.eq p11,p0=EVENT_MOV_TO_PSR,r24
    cmp.eq p12,p0=EVENT_THASH,r24
(p6) br.dptk.many kvm_asm_mov_from_ar
(p7) br.dptk.many kvm_asm_mov_from_rr
(p8) br.dptk.many kvm_asm_mov_to_rr
(p9) br.dptk.many kvm_asm_rsm
(p10) br.dptk.many kvm_asm_ssm
(p11) br.dptk.many kvm_asm_mov_to_psr
(p12) br.dptk.many kvm_asm_thash
    ;;
kvm_virtualization_fault_back:
    adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
    ;;
    ld8 r1 = [r16]
    ;;
    mov r19=37
    adds r16 = VMM_VCPU_CAUSE_OFFSET,r21
    adds r17 = VMM_VCPU_OPCODE_OFFSET,r21
    ;;
    st8 [r16] = r24
    st8 [r17] = r25
    ;;
    cmp.ne p6,p0=EVENT_RFI, r24
(p6) br.sptk kvm_dispatch_virtualization_fault
    ;;
    adds r18=VMM_VPD_BASE_OFFSET,r21
    ;;
    ld8 r18=[r18]
    ;;
    adds r18=VMM_VPD_VIFS_OFFSET,r18
    ;;
    ld8 r18=[r18]
    ;;
    tbit.z p6,p0=r18,63
(p6) br.sptk kvm_dispatch_virtualization_fault
    ;;
    // if vifs.v=1, discard the current register frame
    alloc r18=ar.pfs,0,0,0,0
    br.sptk kvm_dispatch_virtualization_fault
END(kvm_virtualization_fault)

    .org kvm_ia64_ivt+0x6200
//////////////////////////////////////////////////////////////
// 0x6200 Entry 38 (size 16 bundles) Reserved
    KVM_FAULT(38)

    .org kvm_ia64_ivt+0x6300
/////////////////////////////////////////////////////////////////
// 0x6300 Entry 39 (size 16 bundles) Reserved
    KVM_FAULT(39)

    .org kvm_ia64_ivt+0x6400
/////////////////////////////////////////////////////////////////
// 0x6400 Entry 40 (size 16 bundles) Reserved
    KVM_FAULT(40)

    .org kvm_ia64_ivt+0x6500
//////////////////////////////////////////////////////////////////
// 0x6500 Entry 41 (size 16 bundles) Reserved
    KVM_FAULT(41)

    .org kvm_ia64_ivt+0x6600
//////////////////////////////////////////////////////////////////
// 0x6600 Entry 42 (size 16 bundles) Reserved
    KVM_FAULT(42)

    .org kvm_ia64_ivt+0x6700
//////////////////////////////////////////////////////////////////
// 0x6700 Entry 43 (size 16 bundles) Reserved
    KVM_FAULT(43)

    .org kvm_ia64_ivt+0x6800
//////////////////////////////////////////////////////////////////
// 0x6800 Entry 44 (size 16 bundles) Reserved
    KVM_FAULT(44)

    .org kvm_ia64_ivt+0x6900
///////////////////////////////////////////////////////////////////
// 0x6900 Entry 45 (size 16 bundles) IA-32 Exception
// (17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
ENTRY(kvm_ia32_exception)
    KVM_FAULT(45)
END(kvm_ia32_exception)

    .org kvm_ia64_ivt+0x6a00
////////////////////////////////////////////////////////////////////
// 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71)
ENTRY(kvm_ia32_intercept)
    KVM_FAULT(47)
END(kvm_ia32_intercept)

    .org kvm_ia64_ivt+0x6c00
/////////////////////////////////////////////////////////////////////
// 0x6c00 Entry 48 (size 16 bundles) Reserved
    KVM_FAULT(48)

    .org kvm_ia64_ivt+0x6d00
//////////////////////////////////////////////////////////////////////
// 0x6d00 Entry 49 (size 16 bundles) Reserved
    KVM_FAULT(49)

    .org kvm_ia64_ivt+0x6e00
//////////////////////////////////////////////////////////////////////
// 0x6e00 Entry 50 (size 16 bundles) Reserved
    KVM_FAULT(50)

    .org kvm_ia64_ivt+0x6f00
/////////////////////////////////////////////////////////////////////
// 0x6f00 Entry 51 (size 16 bundles) Reserved
    KVM_FAULT(52)

    .org kvm_ia64_ivt+0x7100
////////////////////////////////////////////////////////////////////
// 0x7100 Entry 53 (size 16 bundles) Reserved
    KVM_FAULT(53)

    .org kvm_ia64_ivt+0x7200
/////////////////////////////////////////////////////////////////////
// 0x7200 Entry 54 (size 16 bundles) Reserved
    KVM_FAULT(54)

    .org kvm_ia64_ivt+0x7300
////////////////////////////////////////////////////////////////////
// 0x7300 Entry 55 (size 16 bundles) Reserved
    KVM_FAULT(55)

    .org kvm_ia64_ivt+0x7400
////////////////////////////////////////////////////////////////////
// 0x7400 Entry 56 (size 16 bundles) Reserved
    KVM_FAULT(56)

    .org kvm_ia64_ivt+0x7500
/////////////////////////////////////////////////////////////////////
// 0x7500 Entry 57 (size 16 bundles) Reserved
    KVM_FAULT(57)

    .org kvm_ia64_ivt+0x7600
/////////////////////////////////////////////////////////////////////
// 0x7600 Entry 58 (size 16 bundles) Reserved
    KVM_FAULT(58)

    .org kvm_ia64_ivt+0x7700
////////////////////////////////////////////////////////////////////
// 0x7700 Entry 59 (size 16 bundles) Reserved
    KVM_FAULT(59)

    .org kvm_ia64_ivt+0x7800
////////////////////////////////////////////////////////////////////
// 0x7800 Entry 60 (size 16 bundles) Reserved
    KVM_FAULT(60)

    .org kvm_ia64_ivt+0x7900
/////////////////////////////////////////////////////////////////////
// 0x7900 Entry 61 (size 16 bundles) Reserved
    KVM_FAULT(61)

    .org kvm_ia64_ivt+0x7a00
/////////////////////////////////////////////////////////////////////
// 0x7a00 Entry 62 (size 16 bundles) Reserved
    KVM_FAULT(62)

    .org kvm_ia64_ivt+0x7b00
/////////////////////////////////////////////////////////////////////
// 0x7b00 Entry 63 (size 16 bundles) Reserved
    KVM_FAULT(63)

    .org kvm_ia64_ivt+0x7c00
////////////////////////////////////////////////////////////////////
// 0x7c00 Entry 64 (size 16 bundles) Reserved
    KVM_FAULT(64)

    .org kvm_ia64_ivt+0x7d00
/////////////////////////////////////////////////////////////////////
// 0x7d00 Entry 65 (size 16 bundles) Reserved
    KVM_FAULT(65)

    .org kvm_ia64_ivt+0x7e00
/////////////////////////////////////////////////////////////////////
// 0x7e00 Entry 66 (size 16 bundles) Reserved
    KVM_FAULT(66)

    .org kvm_ia64_ivt+0x7f00
////////////////////////////////////////////////////////////////////
// 0x7f00 Entry 67 (size 16 bundles) Reserved
    KVM_FAULT(67)

    .org kvm_ia64_ivt+0x8000
// There is no particular reason for this code to be here, other than that
// there happens to be space here that would go unused otherwise. If this
// fault ever gets "unreserved", simply move the following code to a more
// suitable spot...

ENTRY(kvm_dtlb_miss_dispatch)
    mov r19 = 2
    KVM_SAVE_MIN_WITH_COVER_R19
    alloc r14=ar.pfs,0,0,3,0
    mov out0=cr.ifa
    mov out1=r15
    adds r3=8,r2 // set up second base pointer
    ;;
    ssm psr.ic
    ;;
    srlz.i // guarantee that interruption collection is on
    ;;
    (p15) ssm psr.i // restore psr.i
    addl r14=@gprel(ia64_leave_hypervisor_prepare),gp
    ;;
    KVM_SAVE_REST
    KVM_SAVE_EXTRA
    mov rp=r14
    ;;
    adds out2=16,r12
    br.call.sptk.many b6=kvm_page_fault
END(kvm_dtlb_miss_dispatch)

ENTRY(kvm_itlb_miss_dispatch)

    KVM_SAVE_MIN_WITH_COVER_R19
    alloc r14=ar.pfs,0,0,3,0
    mov out0=cr.ifa
    mov out1=r15
    adds r3=8,r2 // set up second base pointer
    ;;
    ssm psr.ic
    ;;
    srlz.i // guarantee that interruption collection is on
    ;;
    (p15) ssm psr.i // restore psr.i
    addl r14=@gprel(ia64_leave_hypervisor),gp
    ;;
    KVM_SAVE_REST
    mov rp=r14
    ;;
    adds out2=16,r12
    br.call.sptk.many b6=kvm_page_fault
END(kvm_itlb_miss_dispatch)

ENTRY(kvm_dispatch_reflection)
/*
 * Input:
 * psr.ic: off
 * r19: intr type (offset into ivt, see ia64_int.h)
 * r31: contains saved predicates (pr)
 */
    KVM_SAVE_MIN_WITH_COVER_R19
    alloc r14=ar.pfs,0,0,5,0
    mov out0=cr.ifa
    mov out1=cr.isr
    mov out2=cr.iim
    mov out3=r15
    adds r3=8,r2 // set up second base pointer
    ;;
    ssm psr.ic
    ;;
    srlz.i // guarantee that interruption collection is on
    ;;
    (p15) ssm psr.i // restore psr.i
    addl r14=@gprel(ia64_leave_hypervisor),gp
    ;;
    KVM_SAVE_REST
    mov rp=r14
    ;;
    adds out4=16,r12
    br.call.sptk.many b6=reflect_interruption
END(kvm_dispatch_reflection)

ENTRY(kvm_dispatch_virtualization_fault)
    adds r16 = VMM_VCPU_CAUSE_OFFSET,r21
    adds r17 = VMM_VCPU_OPCODE_OFFSET,r21
    ;;
    st8 [r16] = r24
    st8 [r17] = r25
    ;;
    KVM_SAVE_MIN_WITH_COVER_R19
    ;;
    alloc r14=ar.pfs,0,0,2,0 // (must be first in insn group!)
    mov out0=r13 //vcpu
    adds r3=8,r2 // set up second base pointer
    ;;
    ssm psr.ic
    ;;
    srlz.i // guarantee that interruption collection is on
    ;;
    (p15) ssm psr.i // restore psr.i
    addl r14=@gprel(ia64_leave_hypervisor_prepare),gp
    ;;
    KVM_SAVE_REST
    KVM_SAVE_EXTRA
    mov rp=r14
    ;;
    adds out1=16,sp //regs
    br.call.sptk.many b6=kvm_emulate
END(kvm_dispatch_virtualization_fault)


ENTRY(kvm_dispatch_interrupt)
    KVM_SAVE_MIN_WITH_COVER_R19 // uses r31; defines r2 and r3
    ;;
    alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group
    adds r3=8,r2 // set up second base pointer for SAVE_REST
    ;;
    ssm psr.ic
    ;;
    srlz.i
    ;;
    (p15) ssm psr.i
    addl r14=@gprel(ia64_leave_hypervisor),gp
    ;;
    KVM_SAVE_REST
    mov rp=r14
    ;;
    mov out0=r13 // pass the vcpu pointer (r13) as the argument
    br.call.sptk.many b6=kvm_ia64_handle_irq
END(kvm_dispatch_interrupt)

GLOBAL_ENTRY(ia64_leave_nested)
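/*
 * Return path for interrupts handled entirely inside the VMM: unwinds the
 * pt_regs frame hand-built by kvm_interrupt above and returns with rfi.
 */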
    rsm psr.i
    ;;
    adds r21=PT(PR)+16,r12
    ;;
    lfetch [r21],PT(CR_IPSR)-PT(PR)
    adds r2=PT(B6)+16,r12
    adds r3=PT(R16)+16,r12
    ;;
    lfetch [r21]
    ld8 r28=[r2],8 // load b6
    adds r29=PT(R24)+16,r12

    ld8.fill r16=[r3]
    adds r3=PT(AR_CSD)-PT(R16),r3
    adds r30=PT(AR_CCV)+16,r12
    ;;
    ld8.fill r24=[r29]
    ld8 r15=[r30] // load ar.ccv
    ;;
    ld8 r29=[r2],16 // load b7
    ld8 r30=[r3],16 // load ar.csd
    ;;
    ld8 r31=[r2],16 // load ar.ssd
    ld8.fill r8=[r3],16
    ;;
    ld8.fill r9=[r2],16
    ld8.fill r10=[r3],PT(R17)-PT(R10)
    ;;
    ld8.fill r11=[r2],PT(R18)-PT(R11)
    ld8.fill r17=[r3],16
    ;;
    ld8.fill r18=[r2],16
    ld8.fill r19=[r3],16
    ;;
    ld8.fill r20=[r2],16
    ld8.fill r21=[r3],16
    mov ar.csd=r30
    mov ar.ssd=r31
    ;;
    rsm psr.i | psr.ic
    // initiate turning off of interrupt and interruption collection
    invala // invalidate ALAT
    ;;
    srlz.i
    ;;
    ld8.fill r22=[r2],24
    ld8.fill r23=[r3],24
    mov b6=r28
    ;;
    ld8.fill r25=[r2],16
    ld8.fill r26=[r3],16
    mov b7=r29
    ;;
    ld8.fill r27=[r2],16
    ld8.fill r28=[r3],16
    ;;
    ld8.fill r29=[r2],16
    ld8.fill r30=[r3],24
    ;;
    ld8.fill r31=[r2],PT(F9)-PT(R31)
    adds r3=PT(F10)-PT(F6),r3
    ;;
    ldf.fill f9=[r2],PT(F6)-PT(F9)
    ldf.fill f10=[r3],PT(F8)-PT(F10)
    ;;
    ldf.fill f6=[r2],PT(F7)-PT(F6)
    ;;
    ldf.fill f7=[r2],PT(F11)-PT(F7)
    ldf.fill f8=[r3],32
    ;;
    srlz.i // ensure interruption collection is off
    mov ar.ccv=r15
    ;;
    bsw.0 // switch back to bank 0 (no stop bit required beforehand...)
    ;;
    ldf.fill f11=[r2]
// mov r18=r13
// mov r21=r13
    adds r16=PT(CR_IPSR)+16,r12
    adds r17=PT(CR_IIP)+16,r12
    ;;
    ld8 r29=[r16],16 // load cr.ipsr
    ld8 r28=[r17],16 // load cr.iip
    ;;
    ld8 r30=[r16],16 // load cr.ifs
    ld8 r25=[r17],16 // load ar.unat
    ;;
    ld8 r26=[r16],16 // load ar.pfs
    ld8 r27=[r17],16 // load ar.rsc
    cmp.eq p9,p0=r0,r0
    // set p9 to indicate that we should restore cr.ifs
    ;;
    ld8 r24=[r16],16 // load ar.rnat (may be garbage)
    ld8 r23=[r17],16 // load ar.bspstore (may be garbage)
    ;;
    ld8 r31=[r16],16 // load predicates
    ld8 r22=[r17],16 // load b0
    ;;
    ld8 r19=[r16],16 // load ar.rsc value for "loadrs"
    ld8.fill r1=[r17],16 // load r1
    ;;
    ld8.fill r12=[r16],16
    ld8.fill r13=[r17],16
    ;;
    ld8 r20=[r16],16 // ar.fpsr
    ld8.fill r15=[r17],16
    ;;
    ld8.fill r14=[r16],16
    ld8.fill r2=[r17]
    ;;
    ld8.fill r3=[r16]
    ;;
    mov r16=ar.bsp // get existing backing store pointer
    ;;
    mov b0=r22
    mov ar.pfs=r26
    mov cr.ifs=r30
    mov cr.ipsr=r29
    mov ar.fpsr=r20
    mov cr.iip=r28
    ;;
    mov ar.rsc=r27
    mov ar.unat=r25
    mov pr=r31,-1
    rfi
END(ia64_leave_nested)

GLOBAL_ENTRY(ia64_leave_hypervisor_prepare)
/*
 * work.need_resched etc. mustn't get changed by this CPU before it
 * returns to user- or fsys-mode, hence we disable interrupts early on:
 */
    adds r2 = PT(R4)+16,r12
    adds r3 = PT(R5)+16,r12
    adds r8 = PT(EML_UNAT)+16,r12
    ;;
    ld8 r8 = [r8]
    ;;
    mov ar.unat=r8
    ;;
    ld8.fill r4=[r2],16 //load r4
    ld8.fill r5=[r3],16 //load r5
    ;;
    ld8.fill r6=[r2] //load r6
    ld8.fill r7=[r3] //load r7
    ;;
END(ia64_leave_hypervisor_prepare)
//fall through
GLOBAL_ENTRY(ia64_leave_hypervisor)
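    /*
     * Restores the guest state saved in pt_regs, scrubs the stale RSE
     * partition (see kvm_dont_preserve_current_frame below), and
     * re-enters the guest through kvm_vps_sync_write / ia64_vmm_entry.
     */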
    rsm psr.i
    ;;
    br.call.sptk.many b0=leave_hypervisor_tail
    ;;
    adds r20=PT(PR)+16,r12
    adds r8=PT(EML_UNAT)+16,r12
    ;;
    ld8 r8=[r8]
    ;;
    mov ar.unat=r8
    ;;
    lfetch [r20],PT(CR_IPSR)-PT(PR)
    adds r2 = PT(B6)+16,r12
    adds r3 = PT(B7)+16,r12
    ;;
    lfetch [r20]
    ;;
    ld8 r24=[r2],16 /* B6 */
    ld8 r25=[r3],16 /* B7 */
    ;;
    ld8 r26=[r2],16 /* ar_csd */
    ld8 r27=[r3],16 /* ar_ssd */
    mov b6 = r24
    ;;
    ld8.fill r8=[r2],16
    ld8.fill r9=[r3],16
    mov b7 = r25
    ;;
    mov ar.csd = r26
    mov ar.ssd = r27
    ;;
    ld8.fill r10=[r2],PT(R15)-PT(R10)
    ld8.fill r11=[r3],PT(R14)-PT(R11)
    ;;
    ld8.fill r15=[r2],PT(R16)-PT(R15)
    ld8.fill r14=[r3],PT(R17)-PT(R14)
    ;;
    ld8.fill r16=[r2],16
    ld8.fill r17=[r3],16
    ;;
    ld8.fill r18=[r2],16
    ld8.fill r19=[r3],16
    ;;
    ld8.fill r20=[r2],16
    ld8.fill r21=[r3],16
    ;;
    ld8.fill r22=[r2],16
    ld8.fill r23=[r3],16
    ;;
    ld8.fill r24=[r2],16
    ld8.fill r25=[r3],16
    ;;
    ld8.fill r26=[r2],16
    ld8.fill r27=[r3],16
    ;;
    ld8.fill r28=[r2],16
    ld8.fill r29=[r3],16
    ;;
    ld8.fill r30=[r2],PT(F6)-PT(R30)
    ld8.fill r31=[r3],PT(F7)-PT(R31)
    ;;
    rsm psr.i | psr.ic
    // initiate turning off of interrupt and interruption collection
    invala // invalidate ALAT
    ;;
    srlz.i // ensure interruption collection is off
    ;;
    bsw.0
    ;;
    adds r16 = PT(CR_IPSR)+16,r12
    adds r17 = PT(CR_IIP)+16,r12
    mov r21=r13 // get current
    ;;
    ld8 r31=[r16],16 // load cr.ipsr
    ld8 r30=[r17],16 // load cr.iip
    ;;
    ld8 r29=[r16],16 // load cr.ifs
    ld8 r28=[r17],16 // load ar.unat
    ;;
    ld8 r27=[r16],16 // load ar.pfs
    ld8 r26=[r17],16 // load ar.rsc
    ;;
    ld8 r25=[r16],16 // load ar.rnat
    ld8 r24=[r17],16 // load ar.bspstore
    ;;
    ld8 r23=[r16],16 // load predicates
    ld8 r22=[r17],16 // load b0
    ;;
    ld8 r20=[r16],16 // load ar.rsc value for "loadrs"
    ld8.fill r1=[r17],16 //load r1
    ;;
    ld8.fill r12=[r16],16 //load r12
    ld8.fill r13=[r17],PT(R2)-PT(R13) //load r13
    ;;
    ld8 r19=[r16],PT(R3)-PT(AR_FPSR) //load ar_fpsr
    ld8.fill r2=[r17],PT(AR_CCV)-PT(R2) //load r2
    ;;
    ld8.fill r3=[r16] //load r3
    ld8 r18=[r17] //load ar_ccv
    ;;
    mov ar.fpsr=r19
    mov ar.ccv=r18
    shr.u r18=r20,16
    ;;
kvm_rbs_switch:
    mov r19=96

kvm_dont_preserve_current_frame:
/*
 * To prevent leaking bits between the hypervisor and guest domain,
 * we must clear the stacked registers in the "invalid" partition here
 * (5 registers/cycle on McKinley).
 */
# define pRecurse p6
# define pReturn p7
# define Nregs 14

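/*
 * kvm_rse_clear_invalid zeroes the stacked registers Nregs at a time:
 * each activation allocates a fresh frame, zeroes its locals, and
 * recurses while more than Nregs*8 bytes remain to clear; the recursion
 * count carried in in1/out1 then drives the matching chain of br.ret.
 */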
    alloc loc0=ar.pfs,2,Nregs-2,2,0
    shr.u loc1=r18,9 // RNaTslots <= floor(dirtySize / (64*8))
    sub r19=r19,r18 // r19 = (physStackedSize + 8) - dirtySize
    ;;
    mov ar.rsc=r20 // load ar.rsc to be used for "loadrs"
    shladd in0=loc1,3,r19
    mov in1=0
    ;;
    TEXT_ALIGN(32)
kvm_rse_clear_invalid:
    alloc loc0=ar.pfs,2,Nregs-2,2,0
    cmp.lt pRecurse,p0=Nregs*8,in0
    // if more than Nregs regs left to clear, (re)curse
    add out0=-Nregs*8,in0
    add out1=1,in1 // increment recursion count
    mov loc1=0
    mov loc2=0
    ;;
    mov loc3=0
    mov loc4=0
    mov loc5=0
    mov loc6=0
    mov loc7=0
(pRecurse) br.call.dptk.few b0=kvm_rse_clear_invalid
    ;;
    mov loc8=0
    mov loc9=0
    cmp.ne pReturn,p0=r0,in1
    // if recursion count != 0, we need to do a br.ret
    mov loc10=0
    mov loc11=0
(pReturn) br.ret.dptk.many b0

# undef pRecurse
# undef pReturn

// loadrs has already been shifted
    alloc r16=ar.pfs,0,0,0,0 // drop current register frame
    ;;
    loadrs
    ;;
    mov ar.bspstore=r24
    ;;
    mov ar.unat=r28
    mov ar.rnat=r25
    mov ar.rsc=r26
    ;;
    mov cr.ipsr=r31
    mov cr.iip=r30
    mov cr.ifs=r29
    mov ar.pfs=r27
    adds r18=VMM_VPD_BASE_OFFSET,r21
    ;;
    ld8 r18=[r18] //vpd
    adds r17=VMM_VCPU_ISR_OFFSET,r21
    ;;
    ld8 r17=[r17]
    adds r19=VMM_VPD_VPSR_OFFSET,r18
    ;;
    ld8 r19=[r19] //vpsr
    mov r25=r18
    adds r16=VMM_VCPU_GP_OFFSET,r21
    ;;
    ld8 r16=[r16] // load gp; used below to compute r24
    movl r24=@gprel(ia64_vmm_entry) // calculate return address
    ;;
    add r24=r24,r16
    ;;
    br.sptk.many kvm_vps_sync_write // call the service
    ;;
END(ia64_leave_hypervisor)
// fall through
GLOBAL_ENTRY(ia64_vmm_entry)
/*
 * must be at bank 0
 * parameter:
 * r17:cr.isr
 * r18:vpd
 * r19:vpsr
 * r22:b0
 * r23:predicate
 */
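    /*
     * vpsr.ic selects the resume path: if the guest's interruption
     * collection is on (p1), resume through kvm_vps_resume_normal;
     * otherwise (p2) the guest is presumably inside its own interruption
     * handler, so resume through kvm_vps_resume_handler.
     */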
    mov r24=r22
    mov r25=r18
    tbit.nz p1,p2 = r19,IA64_PSR_IC_BIT // p1=vpsr.ic
(p1) br.cond.sptk.few kvm_vps_resume_normal
(p2) br.cond.sptk.many kvm_vps_resume_handler
    ;;
END(ia64_vmm_entry)

/*
 * extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2,
 *                          u64 arg3, u64 arg4, u64 arg5,
 *                          u64 arg6, u64 arg7);
 *
 * XXX: The currently defined services use only 4 args at the max. The
 * rest are not consumed.
 */
GLOBAL_ENTRY(ia64_call_vsa)
    .regstk 4,4,0,0

rpsave = loc0
pfssave = loc1
psrsave = loc2
entry = loc3
hostret = r24

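/*
 * A VSA service is branched to rather than called: psr.i and psr.ic are
 * turned off first, the host return address is passed in r24 (hostret),
 * and the service hands its result back in r31, which this stub copies
 * to r8 for the C caller.
 */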
    alloc pfssave=ar.pfs,4,4,0,0
    mov rpsave=rp
    adds entry=VMM_VCPU_VSA_BASE_OFFSET, r13
    ;;
    ld8 entry=[entry]
1:  mov hostret=ip
    mov r25=in1 // copy arguments
    mov r26=in2
    mov r27=in3
    mov psrsave=psr
    ;;
    tbit.nz p6,p0=psrsave,14 // IA64_PSR_I
    tbit.nz p7,p0=psrsave,13 // IA64_PSR_IC
    ;;
    add hostret=2f-1b,hostret // calculate return address
    add entry=entry,in0
    ;;
    rsm psr.i | psr.ic
    ;;
    srlz.i
    mov b6=entry
    br.cond.sptk b6 // call the service
2:
// Architectural sequence for enabling interrupts if necessary
(p7) ssm psr.ic
    ;;
(p7) srlz.i
    ;;
(p6) ssm psr.i
    ;;
    mov rp=rpsave
    mov ar.pfs=pfssave
    mov r8=r31
    ;;
    srlz.d
    br.ret.sptk rp

END(ia64_call_vsa)

#define INIT_BSPSTORE ((4<<30)-(12<<20)-0x100)

GLOBAL_ENTRY(vmm_reset_entry)
    // set up ipsr, iip, vpd.vpsr, dcr
    // For IPSR: it/dt/rt=1, i/ic=1, si=1, vm/bn=1
    // For DCR: all bits 0
    bsw.0
    ;;
    mov r21=r13
    adds r14=-VMM_PT_REGS_SIZE, r12
    ;;
    movl r6=0x501008826000 // IPSR dt/rt/it:1;i/ic:1, si:1, vm/bn:1
    movl r10=0x8000000000000000
    adds r16=PT(CR_IIP), r14
    adds r20=PT(R1), r14
    ;;
    rsm psr.ic | psr.i
    ;;
    srlz.i
    ;;
    mov ar.rsc=0
    ;;
    flushrs
    ;;
    mov ar.bspstore=0
    // clear BSPSTORE
    ;;
    mov cr.ipsr=r6
    mov cr.ifs=r10
    ld8 r4=[r16] // Set init iip for first run.
    ld8 r1=[r20]
    ;;
    mov cr.iip=r4
    adds r16=VMM_VPD_BASE_OFFSET,r13
    ;;
    ld8 r18=[r16]
    ;;
    adds r19=VMM_VPD_VPSR_OFFSET,r18
    ;;
    ld8 r19=[r19]
    mov r17=r0
    mov r22=r0
    mov r23=r0
    br.cond.sptk ia64_vmm_entry
    br.ret.sptk b0
END(vmm_reset_entry)
