Load host MSRs automatically · intel/haxm@f800982 (original) (raw)
`@@ -1052,27 +1052,50 @@ static void load_host_msr(struct vcpu_t *vcpu)
`
1052
1052
`int i;
`
1053
1053
`struct hstate *hstate = &get_cpu_data(vcpu->cpu_id)->hstate;
`
1054
1054
`bool em64t_support = cpu_has_feature(X86_FEATURE_EM64T);
`
``
1055
`+
uint32_t count = 0;
`
1055
1056
``
1056
``
`-
for (i = 0; i < NR_HMSR; i++) {
`
``
1057
`+
// Load below MSR values manually on VM exits.
`
``
1058
+
``
1059
`+
// * IA32_STAR, IA32_LSTAR and IA32_SF_MASK
`
``
1060
`+
// Host will crash immediately on automatic load. See IA SDM Vol. 3C
`
``
1061
`+
// 31.10.4.3 (Handling the SYSCALL and SYSRET Instructions).
`
``
1062
`+
// * IA32_EFER and IA32_CSTAR
`
``
1063
`+
// See the same section as above.
`
``
1064
`+
// * IA32_KERNEL_GS_BASE
`
``
1065
`+
// See IA SDM Vol. 3C 31.10.4.4 (Handling the SWAPGS Instruction).
`
``
1066
`+
for (i = 0; i < NR_HMSR; ++i) {
`
1057
1067
`if (em64t_support || !is_emt64_msr(hstate->hmsr[i].entry)) {
`
1058
1068
`ia32_wrmsr(hstate->hmsr[i].entry, hstate->hmsr[i].value);
`
1059
1069
` }
`
1060
1070
` }
`
1061
1071
``
``
1072
`+
// * IA32_TSC_AUX
`
``
1073
`+
// A BSOD will occur on the host after automatic loading for a while, sometimes
`
``
1074
`+
// even after the VM is shut down.
`
1062
1075
`if (cpu_has_feature(X86_FEATURE_RDTSCP)) {
`
1063
1076
`ia32_wrmsr(IA32_TSC_AUX, hstate->tsc_aux);
`
1064
1077
` }
`
1065
1078
``
1066
1079
`if (!hax->apm_version)
`
1067
1080
`return;
`
1068
1081
``
``
1082
`+
// Load below MSR values automatically on VM exits.
`
``
1083
+
``
1084
`+
// TODO: Trap IA32_PERFEVTSELx MSRs and
`
``
1085
`+
// automatically load below host values only when IA32_PERFEVTSELx MSRs are
`
``
1086
`+
// changed during the guest runtime.
`
1069
1087
`// APM v1: restore IA32_PMCx and IA32_PERFEVTSELx
`
1070
``
`-
for (i = 0; i < (int)hax->apm_general_count; i++) {
`
1071
``
`-
uint32_t msr = (uint32_t)(IA32_PMC0 + i);
`
1072
``
`-
ia32_wrmsr(msr, hstate->apm_pmc_msrs[i]);
`
1073
``
`-
msr = (uint32_t)(IA32_PERFEVTSEL0 + i);
`
1074
``
`-
ia32_wrmsr(msr, hstate->apm_pes_msrs[i]);
`
``
1088
`+
for (i = 0; i < (int)hax->apm_general_count; ++i) {
`
``
1089
`+
hstate->hmsr_autoload[count].index = (uint32_t)(IA32_PMC0 + i);
`
``
1090
`+
hstate->hmsr_autoload[count++].data = hstate->apm_pmc_msrs[i];
`
``
1091
`+
}
`
``
1092
+
``
1093
`+
for (i = 0; i < (int)hax->apm_general_count; ++i) {
`
``
1094
`+
hstate->hmsr_autoload[count].index = (uint32_t)(IA32_PERFEVTSEL0 + i);
`
``
1095
`+
hstate->hmsr_autoload[count++].data = hstate->apm_pes_msrs[i];
`
1075
1096
` }
`
``
1097
+
``
1098
`+
vmwrite(vcpu, VMX_EXIT_MSR_LOAD_COUNT, count);
`
1076
1099
`}
`
1077
1100
``
1078
1101
`static inline bool is_host_debug_enabled(struct vcpu_t *vcpu)
`
`@@ -1513,7 +1536,8 @@ static void fill_common_vmcs(struct vcpu_t *vcpu)
`
1513
1536
`vmwrite(vcpu, VMX_EXIT_MSR_STORE_ADDRESS, 0);
`
1514
1537
``
1515
1538
`vmwrite(vcpu, VMX_EXIT_MSR_LOAD_COUNT, 0);
`
1516
``
`-
vmwrite(vcpu, VMX_EXIT_MSR_LOAD_ADDRESS, 0);
`
``
1539
`+
vmwrite(vcpu, VMX_EXIT_MSR_LOAD_ADDRESS,
`
``
1540
`+
(uint64_t)hax_pa(cpu_data->hstate.hmsr_autoload));
`
1517
1541
``
1518
1542
`vmwrite(vcpu, VMX_ENTRY_INTERRUPT_INFO, 0);
`
1519
1543
`// vmwrite(NULL, VMX_ENTRY_EXCEPTION_ERROR_CODE, 0);
`