22
.globl _start
_start:
        mrs     x0, mpidr_el1           // check core id, only one core is used
        mov     x1, #0xc1000000         // mask of the non-affinity mpidr bits (31, 30, 24)
        bic     x0, x0, x1              // keep only the affinity fields
        cbz     x0, master              // core 0 carries on at master
        b       hang                    // every other core parks forever

master:
        ldr     x0, =0x1000000
        mov     sp, x0                  // set el2 sp
        bl      el2_main

hang:
        b       hang                    // spin: parked cores never return
18-
18+
.globl get_current_el
get_current_el:
        // unsigned long get_current_el(void)
        // Out: x0 = current exception level (0..3).
        // Fix: dropped the dead "mov x1, #2" — x1 was written but never read.
        mrs     x0, currentel           // currentel bits [3:2] hold the EL
        lsr     x0, x0, #2              // move the EL field down to bits [1:0]
        ret
25-
25+
2626.globl el1_mmu_activate
2727el1_mmu_activate:
2828 ldr x0 , = 0x04cc
2929 msr mair_el1 , x0
3030 isb
3131
32- ldr x1 , = 0x02000000
32+ ldr x1 , = 0x01000000
3333 msr ttbr0_el1 , x1
3434 isb
3535
3636 mrs x2 , tcr_el1
3737 ldr x3 , = 0x70040ffbf
3838 bic x2 , x2 , x3
3939
40- ldr x3 , = 0x200803f18
40+ ldr x3 , = 0x2bf183f18
4141 orr x2 , x2 , x3
4242 msr tcr_el1 , x2
4343 isb
4444
4545 mrs x3 , sctlr_el1
4646 ldr x4 , = 0x80000
4747 bic x3 , x3 , x4
48-
48+
4949 ldr x4 , = 0x1005
5050 orr x3 , x3 , x4
5151 msr sctlr_el1 , x3
@@ -54,45 +54,68 @@ el1_mmu_activate:
5454
.globl jump_to_el1
jump_to_el1:
        // Drop from el2 to el1, then continue at main.
        mrs     x0, currentel           // check if already in el1
        cmp     x0, #4                  // currentel == 0b0100 -> el1
        beq     .Lin_el1

        ldr     x0, =0xf00000
        msr     sp_el1, x0              // init the stack of el1

        // disable coprocessor traps
        mov     x0, #0x33ff
        msr     cptr_el2, x0            // disable coprocessor traps to el2
        msr     hstr_el2, xzr           // disable coprocessor traps to el2
        mov     x0, #3 << 20
        msr     cpacr_el1, x0           // enable fp/simd at el1

        // initialize hcr_el2
        mov     x0, #(1 << 31)
        msr     hcr_el2, x0             // set el1 to 64 bit
        mov     x0, #0x0800
        movk    x0, #0x30d0, lsl #16
        msr     sctlr_el1, x0

        // return to the el1_sp1 mode from el2
        mov     x0, #0x5
        msr     spsr_el2, x0            // el1_sp1 with DAIF = 0
        adr     x0, .Lin_el1
        msr     elr_el2, x0
        eret

.Lin_el1:
        mrs     x0, sctlr_el1
        orr     x0, x0, #(1 << 12)
        msr     sctlr_el1, x0           // enable instruction cache

        ldr     x0, =vectors
        msr     vbar_el1, x0            // init exception vector table

        b       main
90-
94+
.globl jump_to_el0
jump_to_el0:
        // Drop from el1 to el0 and continue at the entry point passed in x0.
        mov     x1, x0                  // stash the el0 entry point
        mrs     x0, currentel           // check if in el2
        cmp     x0, #8                  // currentel == 0b1000 -> el2
        beq     .Lstay                  // refuse to drop straight from el2

        ldr     x0, =0xe00000
        msr     sp_el0, x0              // init the stack of el0

        // return to el0 from el1
        mov     x0, #0x0
        msr     spsr_el1, x0            // el0 with DAIF = 0
        msr     elr_el1, x1             // resume at the requested entry point

        eret

.Lstay:
        ret
114+
91115.globl tlb_invalidate
92116tlb_invalidate:
93117 dsb ishst // ensure write has completed
94- ldr x0 , [ x0 ] // load va from x0
95- tlbi vmalle1 // invalidate tlb by va , all asid , el1.
118+ tlbi vmalle1is // invalidate tlb , all asid , el1.
96119 dsb ish // ensure completion of tlb invalidation
97120 isb // synchronize context and ensure that no instructions
98121 // are fetched using the old translation
@@ -111,3 +134,23 @@ get32:
.globl dummy
dummy:
        // Intentionally does nothing; returns immediately.
        ret
137+
.globl load_ttbr1_context
load_ttbr1_context:
        // void load_ttbr1_context(uint64_t ttbr)
        // In:  x0 = new ttbr1_el1 value (upper-half translation table base).
        // Fix: added isb — a context synchronization event is required after
        // the msr before subsequent instructions are guaranteed to translate
        // with the new table base.
        msr     ttbr1_el1, x0
        isb
        ret
142+
.globl load_ttbr0_context
load_ttbr0_context:
        // void load_ttbr0_context(uint64_t ttbr)
        // In:  x0 = new ttbr0_el1 value (lower-half translation table base).
        // Fix: added isb — a context synchronization event is required after
        // the msr before subsequent instructions are guaranteed to translate
        // with the new table base.
        msr     ttbr0_el1, x0
        isb
        ret
147+
.globl get_sctlr_el1
get_sctlr_el1:
        // uint64_t get_sctlr_el1(void) — returns sctlr_el1 in x0.
        mrs     x0, sctlr_el1
        ret
152+
.globl get_esr
get_esr:
        // uint64_t get_esr(void) — returns the el1 exception syndrome
        // register (esr_el1) in x0.
        mrs     x0, esr_el1
        ret