@@ -552,12 +552,10 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
 {
 	int ret = 0;
-	kvm_pfn_t pfn = 0;
 	kvm_pte_t *ptep, changed, new;
 	gfn_t gfn = gpa >> PAGE_SHIFT;
 	struct kvm *kvm = vcpu->kvm;
 	struct kvm_memory_slot *slot;
-	struct page *page;
 
 	spin_lock(&kvm->mmu_lock);
 
@@ -570,8 +568,6 @@ static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
 
 	/* Track access to pages marked old */
 	new = kvm_pte_mkyoung(*ptep);
-	/* call kvm_set_pfn_accessed() after unlock */
-
 	if (write && !kvm_pte_dirty(new)) {
 		if (!kvm_pte_write(new)) {
 			ret = -EFAULT;
@@ -595,26 +591,14 @@ static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool writ
 	}
 
 	changed = new ^ (*ptep);
-	if (changed) {
+	if (changed)
 		kvm_set_pte(ptep, new);
-		pfn = kvm_pte_pfn(new);
-		page = kvm_pfn_to_refcounted_page(pfn);
-		if (page)
-			get_page(page);
-	}
+
 	spin_unlock(&kvm->mmu_lock);
 
-	if (changed) {
-		if (kvm_pte_young(changed))
-			kvm_set_pfn_accessed(pfn);
+	if (kvm_pte_dirty(changed))
+		mark_page_dirty(kvm, gfn);
 
-		if (kvm_pte_dirty(changed)) {
-			mark_page_dirty(kvm, gfn);
-			kvm_set_pfn_dirty(pfn);
-		}
-		if (page)
-			put_page(page);
-	}
 	return ret;
 out:
 	spin_unlock(&kvm->mmu_lock);
@@ -796,6 +780,7 @@ static int kvm_map_page(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
 	struct kvm *kvm = vcpu->kvm;
 	struct kvm_memory_slot *memslot;
 	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
+	struct page *page;
 
 	/* Try the fast path to handle old / clean pages */
 	srcu_idx = srcu_read_lock(&kvm->srcu);
@@ -823,7 +808,7 @@ static int kvm_map_page(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
 	mmu_seq = kvm->mmu_invalidate_seq;
 	/*
 	 * Ensure the read of mmu_invalidate_seq isn't reordered with PTE reads in
-	 * gfn_to_pfn_prot() (which calls get_user_pages()), so that we don't
+	 * kvm_faultin_pfn() (which calls get_user_pages()), so that we don't
 	 * risk the page we get a reference to getting unmapped before we have a
 	 * chance to grab the mmu_lock without mmu_invalidate_retry() noticing.
 	 *
@@ -835,7 +820,7 @@ static int kvm_map_page(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
 	smp_rmb();
 
 	/* Slow path - ask KVM core whether we can access this GPA */
-	pfn = gfn_to_pfn_prot(kvm, gfn, write, &writeable);
+	pfn = kvm_faultin_pfn(vcpu, gfn, write, &writeable, &page);
 	if (is_error_noslot_pfn(pfn)) {
 		err = -EFAULT;
 		goto out;
@@ -847,10 +832,10 @@ static int kvm_map_page(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
 		/*
 		 * This can happen when mappings are changed asynchronously, but
 		 * also synchronously if a COW is triggered by
-		 * gfn_to_pfn_prot().
+		 * kvm_faultin_pfn().
 		 */
 		spin_unlock(&kvm->mmu_lock);
-		kvm_release_pfn_clean(pfn);
+		kvm_release_page_unused(page);
 		if (retry_no > 100) {
 			retry_no = 0;
 			schedule();
@@ -915,14 +900,13 @@ static int kvm_map_page(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
 	else
 		++kvm->stat.pages;
 	kvm_set_pte(ptep, new_pte);
+
+	kvm_release_faultin_page(kvm, page, false, writeable);
 	spin_unlock(&kvm->mmu_lock);
 
-	if (prot_bits & _PAGE_DIRTY) {
+	if (prot_bits & _PAGE_DIRTY)
 		mark_page_dirty_in_slot(kvm, memslot, gfn);
-		kvm_set_pfn_dirty(pfn);
-	}
 
-	kvm_release_pfn_clean(pfn);
 out:
 	srcu_read_unlock(&kvm->srcu, srcu_idx);
 	return err;
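
For context, the conversion above follows the generic kvm_faultin_pfn()/kvm_release_faultin_page() pattern: the page reference taken at fault-in time is dropped, and the page is marked accessed/dirty on the primary MMU, while mmu_lock is still held. The following is a minimal sketch of that pattern, not the actual arch code; faultin_sketch() and its trimmed error handling are illustrative only.

/*
 * Illustrative sketch of the kvm_faultin_pfn() fault-in/release
 * pattern adopted by this patch; not part of the patch itself.
 */
static int faultin_sketch(struct kvm_vcpu *vcpu, gfn_t gfn, bool write)
{
	struct kvm *kvm = vcpu->kvm;
	struct page *page;
	unsigned long mmu_seq;
	bool writable;
	kvm_pfn_t pfn;

	mmu_seq = kvm->mmu_invalidate_seq;
	/* Order the seq read before the PTE reads in kvm_faultin_pfn(). */
	smp_rmb();

	/* Resolves gfn to a pfn and takes a reference on the backing page. */
	pfn = kvm_faultin_pfn(vcpu, gfn, write, &writable, &page);
	if (is_error_noslot_pfn(pfn))
		return -EFAULT;

	spin_lock(&kvm->mmu_lock);
	if (mmu_invalidate_retry(kvm, mmu_seq)) {
		/* Raced with an invalidation: don't map, drop the page. */
		spin_unlock(&kvm->mmu_lock);
		kvm_release_page_unused(page);
		return -EAGAIN;	/* caller retries the fault */
	}

	/* ... build and install the stage-2 PTE for pfn here ... */

	/*
	 * Mark the page accessed (and dirty, if mapped writable) and drop
	 * the fault-in reference before releasing mmu_lock, exactly as the
	 * diff does after kvm_set_pte().
	 */
	kvm_release_faultin_page(kvm, page, false, writable);
	spin_unlock(&kvm->mmu_lock);

	return 0;
}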