@@ -400,7 +400,10 @@ static inline unsigned long map_pcr_to_cap(unsigned long pcr)
 		cap = H_GUEST_CAP_POWER9;
 		break;
 	case PCR_ARCH_31:
-		cap = H_GUEST_CAP_POWER10;
+		if (cpu_has_feature(CPU_FTR_P11_PVR))
+			cap = H_GUEST_CAP_POWER11;
+		else
+			cap = H_GUEST_CAP_POWER10;
 		break;
 	default:
 		break;
@@ -415,7 +418,7 @@ static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
 
 	/* We can (emulate) our own architecture version and anything older */
-	if (cpu_has_feature(CPU_FTR_ARCH_31))
+	if (cpu_has_feature(CPU_FTR_P11_PVR) || cpu_has_feature(CPU_FTR_ARCH_31))
 		host_pcr_bit = PCR_ARCH_31;
 	else if (cpu_has_feature(CPU_FTR_ARCH_300))
 		host_pcr_bit = PCR_ARCH_300;
@@ -2060,36 +2063,9 @@ static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
 		fallthrough; /* go to facility unavailable handler */
 #endif
 
-	case BOOK3S_INTERRUPT_H_FAC_UNAVAIL: {
-		u64 cause = vcpu->arch.hfscr >> 56;
-
-		/*
-		 * Only pass HFU interrupts to the L1 if the facility is
-		 * permitted but disabled by the L1's HFSCR, otherwise
-		 * the interrupt does not make sense to the L1 so turn
-		 * it into a HEAI.
-		 */
-		if (!(vcpu->arch.hfscr_permitted & (1UL << cause)) ||
-		    (vcpu->arch.nested_hfscr & (1UL << cause))) {
-			ppc_inst_t pinst;
-			vcpu->arch.trap = BOOK3S_INTERRUPT_H_EMUL_ASSIST;
-
-			/*
-			 * If the fetch failed, return to guest and
-			 * try executing it again.
-			 */
-			r = kvmppc_get_last_inst(vcpu, INST_GENERIC, &pinst);
-			vcpu->arch.emul_inst = ppc_inst_val(pinst);
-			if (r != EMULATE_DONE)
-				r = RESUME_GUEST;
-			else
-				r = RESUME_HOST;
-		} else {
-			r = RESUME_HOST;
-		}
-
+	case BOOK3S_INTERRUPT_H_FAC_UNAVAIL:
+		r = RESUME_HOST;
 		break;
-	}
 
 	case BOOK3S_INTERRUPT_HV_RM_HARD:
 		vcpu->arch.trap = 0;
@@ -4154,7 +4130,7 @@ void kvmhv_set_l2_counters_status(int cpu, bool status)
 		lppaca_of(cpu).l2_counters_enable = 0;
 }
 
-int kmvhv_counters_tracepoint_regfunc(void)
+int kvmhv_counters_tracepoint_regfunc(void)
 {
 	int cpu;
 
@@ -4164,7 +4140,7 @@ int kmvhv_counters_tracepoint_regfunc(void)
 	return 0;
 }
 
-void kmvhv_counters_tracepoint_unregfunc(void)
+void kvmhv_counters_tracepoint_unregfunc(void)
 {
 	int cpu;
 
@@ -4309,6 +4285,15 @@ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, uns
 	}
 	hvregs.hdec_expiry = time_limit;
 
+	/*
+	 * hvregs has the doorbell status, so zero it here which
+	 * enables us to receive doorbells when H_ENTER_NESTED is
+	 * in progress for this vCPU
+	 */
+
+	if (vcpu->arch.doorbell_request)
+		vcpu->arch.doorbell_request = 0;
+
 	/*
 	 * When setting DEC, we must always deal with irq_work_raise
 	 * via NMI vs setting DEC. The problem occurs right as we
@@ -4912,7 +4897,6 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
 			lpcr &= ~LPCR_MER;
 		}
 	} else if (vcpu->arch.pending_exceptions ||
-		   vcpu->arch.doorbell_request ||
 		   xive_interrupt_pending(vcpu)) {
 		vcpu->arch.ret = RESUME_HOST;
 		goto out;