@@ -529,6 +529,12 @@ SYSCTL_INT(_vm_pmap, OID_AUTO, pcid_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
 int invpcid_works = 0;
 SYSCTL_INT(_vm_pmap, OID_AUTO, invpcid_works, CTLFLAG_RD, &invpcid_works, 0,
     "Is the invpcid instruction available ?");
+int pmap_pcid_invlpg_workaround = 0;
+SYSCTL_INT(_vm_pmap, OID_AUTO, pcid_invlpg_workaround,
+    CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
+    &pmap_pcid_invlpg_workaround, 0,
+    "Enable small core PCID/INVLPG workaround");
+int pmap_pcid_invlpg_workaround_uena = 1;
 
 int __read_frequently pti = 0;
 SYSCTL_INT(_vm_pmap, OID_AUTO, pti, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
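Two globals back the new knob: pmap_pcid_invlpg_workaround is the effective setting exported by the sysctl above, while pmap_pcid_invlpg_workaround_uena (default 1) records the administrator's preference, fetched from the tunable in pmap_init() below. How the effective flag is derived from the preference is outside this excerpt; a minimal sketch of the presumed logic, where cpu_is_hybrid() is a hypothetical stand-in for whatever affected-CPU detection the kernel actually performs:

	/*
	 * Hypothetical sketch, not part of this diff: enable the
	 * workaround only when the user allows it, INVPCID is available
	 * (it is the fallback instruction), and the CPU is one of the
	 * affected parts.  cpu_is_hybrid() is a made-up predicate used
	 * purely for illustration.
	 */
	if (pmap_pcid_invlpg_workaround_uena && invpcid_works &&
	    cpu_is_hybrid())
		pmap_pcid_invlpg_workaround = 1;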
@@ -2560,6 +2566,9 @@ pmap_init(void)
 		    VM_PAGE_TO_PHYS(m);
 		}
 	}
+
+	TUNABLE_INT_FETCH("vm.pmap.pcid_invlpg_workaround",
+	    &pmap_pcid_invlpg_workaround_uena);
 }
 
 SYSCTL_UINT(_vm_pmap, OID_AUTO, large_map_pml4_entries,
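Note the CTLFLAG_NOFETCH on the sysctl declaration above: the tunable is not pulled in automatically when the OID is registered, so the explicit TUNABLE_INT_FETCH() here does it once pmap_init() runs, and it deliberately targets the _uena preference variable rather than the exported effective flag. An administrator wanting to opt out of the workaround would therefore set the tunable at boot, e.g. in loader.conf(5):

	vm.pmap.pcid_invlpg_workaround=0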
@@ -2791,7 +2800,7 @@ pmap_update_pde_invalidate(pmap_t pmap, vm_offset_t va, pd_entry_t newpde)
 
 	if ((newpde & PG_PS) == 0)
 		/* Demotion: flush a specific 2MB page mapping. */
-		invlpg(va);
+		pmap_invlpg(pmap, va);
 	else if ((newpde & PG_G) == 0)
 		/*
 		 * Promotion: flush every 4KB page mapping from the TLB
@@ -3130,7 +3139,7 @@ pmap_invalidate_page_curcpu_cb(pmap_t pmap, vm_offset_t va,
     vm_offset_t addr2 __unused)
 {
 	if (pmap == kernel_pmap) {
-		invlpg(va);
+		pmap_invlpg(kernel_pmap, va);
 	} else if (pmap == PCPU_GET(curpmap)) {
 		invlpg(va);
 		pmap_invalidate_page_cb(pmap, va);
@@ -3221,8 +3230,14 @@ pmap_invalidate_range_curcpu_cb(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 	vm_offset_t addr;
 
 	if (pmap == kernel_pmap) {
-		for (addr = sva; addr < eva; addr += PAGE_SIZE)
-			invlpg(addr);
+		if (PCPU_GET(pcid_invlpg_workaround)) {
+			struct invpcid_descr d = { 0 };
+
+			invpcid(&d, INVPCID_CTXGLOB);
+		} else {
+			for (addr = sva; addr < eva; addr += PAGE_SIZE)
+				invlpg(addr);
+		}
 	} else if (pmap == PCPU_GET(curpmap)) {
 		for (addr = sva; addr < eva; addr += PAGE_SIZE)
 			invlpg(addr);
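All the kernel_pmap call sites in this change go through the new pmap_invlpg() helper, whose definition is not part of the excerpt; the PCPU_GET(pcid_invlpg_workaround) test above likewise implies a matching per-CPU field added elsewhere. Judging from the open-coded variant in pmap_invalidate_range_curcpu_cb() just shown, a plausible sketch of the helper:

	/*
	 * Sketch inferred from the pattern above; the real definition is
	 * outside this diff.  On affected small cores, INVLPG may fail to
	 * flush a global (PG_G) translation while PCID is enabled, so
	 * kernel-space invalidations fall back to a full global flush
	 * with INVPCID.
	 */
	static __inline void
	pmap_invlpg(pmap_t pmap, vm_offset_t va)
	{
		if (pmap == kernel_pmap && PCPU_GET(pcid_invlpg_workaround)) {
			struct invpcid_descr d = { 0 };

			invpcid(&d, INVPCID_CTXGLOB);
		} else {
			invlpg(va);
		}
	}

The trade-off is deliberate: INVPCID_CTXGLOB invalidates every TLB entry, globals included, which is far more expensive than a single INVLPG, but it is the conservative choice when the one-page flush cannot be trusted.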
@@ -3760,7 +3775,7 @@ pmap_flush_cache_phys_range(vm_paddr_t spa, vm_paddr_t epa, vm_memattr_t mattr)
 	for (; spa < epa; spa += PAGE_SIZE) {
 		sched_pin();
 		pte_store(pte, spa | pte_bits);
-		invlpg(vaddr);
+		pmap_invlpg(kernel_pmap, vaddr);
 		/* XXXKIB atomic inside flush_cache_range are excessive */
 		pmap_flush_cache_range(vaddr, vaddr + PAGE_SIZE);
 		sched_unpin();
@@ -7668,7 +7683,7 @@ pmap_kenter_temporary(vm_paddr_t pa, int i)
 
 	va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE);
 	pmap_kenter(va, pa);
-	invlpg(va);
+	pmap_invlpg(kernel_pmap, va);
 	return ((void *)crashdumpmap);
 }
 
@@ -10371,7 +10386,7 @@ pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
 			    page[i]->md.pat_mode, 0);
 			pte_store(pte, paddr | X86_PG_RW | X86_PG_V |
 			    cache_bits);
-			invlpg(vaddr[i]);
+			pmap_invlpg(kernel_pmap, vaddr[i]);
 		}
 	}
 }
@@ -10420,7 +10435,14 @@ pmap_quick_remove_page(vm_offset_t addr)
 	if (addr != qframe)
 		return;
 	pte_store(vtopte(qframe), 0);
+
+	/*
+	 * Since qframe is exclusively mapped by
+	 * pmap_quick_enter_page() and that function doesn't set PG_G,
+	 * we can use INVLPG here.
+	 */
 	invlpg(qframe);
+
 	mtx_unlock_spin(&qframe_mtx);
 }
 
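The comment added here documents the one call site that deliberately keeps the bare instruction: the erratum being worked around concerns global (PG_G) translations, and the qframe PTE is installed without PG_G, so a plain INVLPG remains sufficient. For reference, the qframe store in pmap_quick_enter_page() (not part of this diff) has roughly this shape, with no X86_PG_G in the bit mask:

	/*
	 * Approximate shape of the qframe mapping made by
	 * pmap_quick_enter_page(), reproduced here only for
	 * illustration; paddr is the physical address of the page being
	 * mapped and m its vm_page.  Note the absence of X86_PG_G.
	 */
	pte_store(vtopte(qframe), paddr | X86_PG_RW | X86_PG_V |
	    X86_PG_A | X86_PG_M |
	    pmap_cache_bits(kernel_pmap, m->md.pat_mode, 0));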