@@ -174,6 +174,13 @@ static inline int pud_young(pud_t pud)
 	return pud_flags(pud) & _PAGE_ACCESSED;
 }
 
+static inline bool pud_shstk(pud_t pud)
+{
+	return cpu_feature_enabled(X86_FEATURE_SHSTK) &&
+	       (pud_flags(pud) & (_PAGE_RW | _PAGE_DIRTY | _PAGE_PSE)) ==
+	       (_PAGE_DIRTY | _PAGE_PSE);
+}
+
 static inline int pte_write(pte_t pte)
 {
 	/*
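Note: pud_shstk() mirrors pte_shstk() and pmd_shstk(). On CPUs with X86_FEATURE_SHSTK, hardware encodes shadow stack memory as Write=0,Dirty=1, a combination ordinary mappings never carry; the extra _PAGE_PSE test restricts the check to 1G leaf entries. An illustrative truth table (not part of this commit):

	/* With X86_FEATURE_SHSTK enabled:
	 *
	 *   RW  DIRTY  PSE   pud_shstk()
	 *    1    1     1    false  (ordinary writable 1G page)
	 *    0    0     1    false  (clean read-only 1G page)
	 *    0    1     1    true   (shadow stack 1G page)
	 */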
@@ -780,6 +787,12 @@ static inline pmd_t pmd_mkinvalid(pmd_t pmd)
 		      __pgprot(pmd_flags(pmd) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
 }
 
+static inline pud_t pud_mkinvalid(pud_t pud)
+{
+	return pfn_pud(pud_pfn(pud),
+		       __pgprot(pud_flags(pud) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
+}
+
 static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask);
 
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
@@ -827,14 +840,8 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
 	pmd_result = __pmd(val);
 
 	/*
-	 * To avoid creating Write=0,Dirty=1 PMDs, pte_modify() needs to avoid:
-	 * 1. Marking Write=0 PMDs Dirty=1
-	 * 2. Marking Dirty=1 PMDs Write=0
-	 *
-	 * The first case cannot happen because the _PAGE_CHG_MASK will filter
-	 * out any Dirty bit passed in newprot. Handle the second case by
-	 * going through the mksaveddirty exercise. Only do this if the old
-	 * value was Write=1 to avoid doing this on Shadow Stack PTEs.
+	 * Avoid creating shadow stack PMD by accident. See comment in
+	 * pte_modify().
 	 */
 	if (oldval & _PAGE_RW)
 		pmd_result = pmd_mksaveddirty(pmd_result);
@@ -844,6 +851,29 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
 	return pmd_result;
 }
 
+static inline pud_t pud_modify(pud_t pud, pgprot_t newprot)
+{
+	pudval_t val = pud_val(pud), oldval = val;
+	pud_t pud_result;
+
+	val &= _HPAGE_CHG_MASK;
+	val |= check_pgprot(newprot) & ~_HPAGE_CHG_MASK;
+	val = flip_protnone_guard(oldval, val, PHYSICAL_PUD_PAGE_MASK);
+
+	pud_result = __pud(val);
+
+	/*
+	 * Avoid creating shadow stack PUD by accident. See comment in
+	 * pte_modify().
+	 */
+	if (oldval & _PAGE_RW)
+		pud_result = pud_mksaveddirty(pud_result);
+	else
+		pud_result = pud_clear_saveddirty(pud_result);
+
+	return pud_result;
+}
+
 /*
  * mprotect needs to preserve PAT and encryption bits when updating
  * vm_page_prot
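Note: pud_modify() follows pmd_modify() line for line. A sketch of how an mprotect-style caller would be expected to combine these helpers, loosely modeled on how change_huge_pmd() uses the PMD equivalents; the function name is hypothetical, and locking and TLB-flush details are elided:

	static void change_huge_pud_sketch(struct vm_area_struct *vma, pud_t *pudp,
					   unsigned long addr, pgprot_t newprot)
	{
		pud_t oldpud, entry;

		/* Clear the entry first (pudp_invalidate() is declared later
		 * in this diff) so concurrent hardware walks never observe a
		 * half-updated huge PUD. */
		oldpud = pudp_invalidate(vma, addr, pudp);
		/* Apply the new protection. When Write is being cleared on a
		 * dirty entry, pud_mksaveddirty() parks Dirty in the software
		 * SavedDirty bit, so the transient Write=0,Dirty=1 (shadow
		 * stack) encoding is never created by accident. */
		entry = pud_modify(oldpud, newprot);
		set_pud_at(vma->vm_mm, addr, pudp, entry);
	}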
@@ -1078,8 +1108,7 @@ static inline pmd_t *pud_pgtable(pud_t pud)
 #define pud_leaf pud_leaf
 static inline bool pud_leaf(pud_t pud)
 {
-	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
-		(_PAGE_PSE | _PAGE_PRESENT);
+	return pud_val(pud) & _PAGE_PSE;
 }
 
 static inline int pud_bad(pud_t pud)
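Note: dropping the _PAGE_PRESENT half of the test is deliberate: pud_mkinvalid()/pudp_invalidate() clear _PAGE_PRESENT but keep _PAGE_PSE, and such an entry must still report as a leaf so walkers do not descend into it, matching what pmd_leaf() already does. An illustrative invariant (not from this commit):

	/* An invalidated huge PUD keeps _PAGE_PSE, so it stays a leaf. */
	WARN_ON(pud_leaf(pud) && !pud_leaf(pud_mkinvalid(pud)));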
@@ -1383,10 +1412,28 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
 }
 #endif
 
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+static inline pud_t pudp_establish(struct vm_area_struct *vma,
+		unsigned long address, pud_t *pudp, pud_t pud)
+{
+	page_table_check_pud_set(vma->vm_mm, pudp, pud);
+	if (IS_ENABLED(CONFIG_SMP)) {
+		return xchg(pudp, pud);
+	} else {
+		pud_t old = *pudp;
+		WRITE_ONCE(*pudp, pud);
+		return old;
+	}
+}
+#endif
+
 #define __HAVE_ARCH_PMDP_INVALIDATE_AD
 extern pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma,
 				unsigned long address, pmd_t *pmdp);
 
+pud_t pudp_invalidate(struct vm_area_struct *vma, unsigned long address,
+		      pud_t *pudp);
+
 /*
  * Page table pages are page-aligned. The lower half of the top
  * level is used for userspace and the top half for the kernel.
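Note: pudp_invalidate() is only declared here; its definition belongs in arch/x86/mm/pgtable.c. A plausible implementation mirroring pmdp_invalidate() (a sketch under that assumption, not the code from this commit):

	pud_t pudp_invalidate(struct vm_area_struct *vma, unsigned long address,
			      pud_t *pudp)
	{
		pud_t old;

		VM_WARN_ON_ONCE(!pud_present(*pudp));
		/* Swap in a non-present copy of the entry... */
		old = pudp_establish(vma, address, pudp, pud_mkinvalid(*pudp));
		/* ...then flush the 1G range it mapped. */
		flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
		return old;
	}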
@@ -1668,6 +1715,9 @@ void arch_check_zapped_pte(struct vm_area_struct *vma, pte_t pte);
 #define arch_check_zapped_pmd arch_check_zapped_pmd
 void arch_check_zapped_pmd(struct vm_area_struct *vma, pmd_t pmd);
 
+#define arch_check_zapped_pud arch_check_zapped_pud
+void arch_check_zapped_pud(struct vm_area_struct *vma, pud_t pud);
+
 #ifdef CONFIG_XEN_PV
 #define arch_has_hw_nonleaf_pmd_young arch_has_hw_nonleaf_pmd_young
 static inline bool arch_has_hw_nonleaf_pmd_young(void)
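Note: as with the pte/pmd variants, arch_check_zapped_pud() is defined out of line in arch/x86/mm/pgtable.c. By analogy with arch_check_zapped_pte(), it would sanity-check that a shadow stack PUD is never zapped from a non-shadow-stack VMA; a sketch under that assumption:

	void arch_check_zapped_pud(struct vm_area_struct *vma, pud_t pud)
	{
		/* See note in arch_check_zapped_pte() */
		VM_WARN_ON_ONCE(!(vma->vm_flags & VM_SHADOW_STACK) &&
				pud_shstk(pud));
	}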