@@ -119,23 +119,26 @@ void __init mmu_mapin_immr(void)
119
119
PAGE_KERNEL_NCG , MMU_PAGE_512K , true);
120
120
}
121
121
122
- static void mmu_mapin_ram_chunk (unsigned long offset , unsigned long top ,
123
- pgprot_t prot , bool new )
122
+ static int mmu_mapin_ram_chunk (unsigned long offset , unsigned long top ,
123
+ pgprot_t prot , bool new )
124
124
{
125
125
unsigned long v = PAGE_OFFSET + offset ;
126
126
unsigned long p = offset ;
127
+ int err = 0 ;
127
128
128
129
WARN_ON (!IS_ALIGNED (offset , SZ_512K ) || !IS_ALIGNED (top , SZ_512K ));
129
130
130
- for (; p < ALIGN (p , SZ_8M ) && p < top ; p += SZ_512K , v += SZ_512K )
131
- __early_map_kernel_hugepage (v , p , prot , MMU_PAGE_512K , new );
132
- for (; p < ALIGN_DOWN (top , SZ_8M ) && p < top ; p += SZ_8M , v += SZ_8M )
133
- __early_map_kernel_hugepage (v , p , prot , MMU_PAGE_8M , new );
134
- for (; p < ALIGN_DOWN (top , SZ_512K ) && p < top ; p += SZ_512K , v += SZ_512K )
135
- __early_map_kernel_hugepage (v , p , prot , MMU_PAGE_512K , new );
131
+ for (; p < ALIGN (p , SZ_8M ) && p < top && ! err ; p += SZ_512K , v += SZ_512K )
132
+ err = __early_map_kernel_hugepage (v , p , prot , MMU_PAGE_512K , new );
133
+ for (; p < ALIGN_DOWN (top , SZ_8M ) && p < top && ! err ; p += SZ_8M , v += SZ_8M )
134
+ err = __early_map_kernel_hugepage (v , p , prot , MMU_PAGE_8M , new );
135
+ for (; p < ALIGN_DOWN (top , SZ_512K ) && p < top && ! err ; p += SZ_512K , v += SZ_512K )
136
+ err = __early_map_kernel_hugepage (v , p , prot , MMU_PAGE_512K , new );
136
137
137
138
if (!new )
138
139
flush_tlb_kernel_range (PAGE_OFFSET + v , PAGE_OFFSET + top );
140
+
141
+ return err ;
139
142
}
140
143
141
144
unsigned long __init mmu_mapin_ram (unsigned long base , unsigned long top )
@@ -166,27 +169,33 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
166
169
return top ;
167
170
}
168
171
169
- void mmu_mark_initmem_nx (void )
172
+ int mmu_mark_initmem_nx (void )
170
173
{
171
174
unsigned long etext8 = ALIGN (__pa (_etext ), SZ_8M );
172
175
unsigned long sinittext = __pa (_sinittext );
173
176
unsigned long boundary = strict_kernel_rwx_enabled () ? sinittext : etext8 ;
174
177
unsigned long einittext8 = ALIGN (__pa (_einittext ), SZ_8M );
178
+ int err = 0 ;
175
179
176
180
if (!debug_pagealloc_enabled_or_kfence ())
177
- mmu_mapin_ram_chunk (boundary , einittext8 , PAGE_KERNEL , false);
181
+ err = mmu_mapin_ram_chunk (boundary , einittext8 , PAGE_KERNEL , false);
178
182
179
183
mmu_pin_tlb (block_mapped_ram , false);
184
+
185
+ return err ;
180
186
}
181
187
182
188
#ifdef CONFIG_STRICT_KERNEL_RWX
/*
 * Remap the kernel region [0, _sinittext) with PAGE_KERNEL_ROX
 * (read-only, executable) and, when CONFIG_PIN_TLB_DATA is set,
 * re-pin the data TLB entries.
 *
 * Returns 0 on success, or the error from mmu_mapin_ram_chunk().
 */
int mmu_mark_rodata_ro(void)
{
	unsigned long sinittext = __pa(_sinittext);
	int err;

	err = mmu_mapin_ram_chunk(0, sinittext, PAGE_KERNEL_ROX, false);
	if (IS_ENABLED(CONFIG_PIN_TLB_DATA))
		mmu_pin_tlb(block_mapped_ram, true);

	return err;
}
#endif
192
201
0 commit comments