@@ -81,6 +81,21 @@ static int vdso64_mremap(const struct vm_special_mapping *sm, struct vm_area_str
81
81
return vdso_mremap (sm , new_vma , & vdso64_end - & vdso64_start );
82
82
}
83
83
84
+ static void vdso_close (const struct vm_special_mapping * sm , struct vm_area_struct * vma )
85
+ {
86
+ struct mm_struct * mm = vma -> vm_mm ;
87
+
88
+ /*
89
+ * close() is called for munmap() but also for mremap(). In the mremap()
90
+ * case the vdso pointer has already been updated by the mremap() hook
91
+ * above, so it must not be set to NULL here.
92
+ */
93
+ if (vma -> vm_start != (unsigned long )mm -> context .vdso )
94
+ return ;
95
+
96
+ mm -> context .vdso = NULL ;
97
+ }
98
+
84
99
static vm_fault_t vvar_fault (const struct vm_special_mapping * sm ,
85
100
struct vm_area_struct * vma , struct vm_fault * vmf );
86
101
@@ -92,11 +107,13 @@ static struct vm_special_mapping vvar_spec __ro_after_init = {
92
107
static struct vm_special_mapping vdso32_spec __ro_after_init = {
93
108
.name = "[vdso]" ,
94
109
.mremap = vdso32_mremap ,
110
+ .close = vdso_close ,
95
111
};
96
112
97
113
/* Special-mapping descriptor for the 64-bit vDSO image. */
static struct vm_special_mapping vdso64_spec __ro_after_init = {
	.name	= "[vdso]",
	.close	= vdso_close,
	.mremap	= vdso64_mremap,
};
101
118
102
119
#ifdef CONFIG_TIME_NS
@@ -197,13 +214,6 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int
197
214
/* Add required alignment. */
198
215
vdso_base = ALIGN (vdso_base , VDSO_ALIGNMENT );
199
216
200
- /*
201
- * Put vDSO base into mm struct. We need to do this before calling
202
- * install_special_mapping or the perf counter mmap tracking code
203
- * will fail to recognise it as a vDSO.
204
- */
205
- mm -> context .vdso = (void __user * )vdso_base + vvar_size ;
206
-
207
217
vma = _install_special_mapping (mm , vdso_base , vvar_size ,
208
218
VM_READ | VM_MAYREAD | VM_IO |
209
219
VM_DONTDUMP | VM_PFNMAP , & vvar_spec );
@@ -223,10 +233,15 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int
223
233
vma = _install_special_mapping (mm , vdso_base + vvar_size , vdso_size ,
224
234
VM_READ | VM_EXEC | VM_MAYREAD |
225
235
VM_MAYWRITE | VM_MAYEXEC , vdso_spec );
226
- if (IS_ERR (vma ))
236
+ if (IS_ERR (vma )) {
227
237
do_munmap (mm , vdso_base , vvar_size , NULL );
238
+ return PTR_ERR (vma );
239
+ }
240
+
241
+ // Now that the mappings are in place, set the mm VDSO pointer
242
+ mm -> context .vdso = (void __user * )vdso_base + vvar_size ;
228
243
229
- return PTR_ERR_OR_ZERO ( vma ) ;
244
+ return 0 ;
230
245
}
231
246
232
247
int arch_setup_additional_pages (struct linux_binprm * bprm , int uses_interp )
@@ -240,8 +255,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
240
255
return - EINTR ;
241
256
242
257
rc = __arch_setup_additional_pages (bprm , uses_interp );
243
- if (rc )
244
- mm -> context .vdso = NULL ;
245
258
246
259
mmap_write_unlock (mm );
247
260
return rc ;
0 commit comments