Commit 50f6c7d

Merge tag 'x86-urgent-2020-08-15' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Ingo Molnar:
 "Misc fixes and small updates all around the place:

   - Fix mitigation state sysfs output

   - Fix an FPU xstate/xsave code assumption bug triggered by
     Architectural LBR support

   - Fix Lightning Mountain SoC TSC frequency enumeration bug

   - Fix kexec debug output

   - Fix kexec memory range assumption bug

   - Fix a boundary condition in the crash kernel code

   - Optimize purgatory.ro generation a bit

   - Enable ACRN guests to use X2APIC mode

   - Reduce a __text_poke() IRQs-off critical section for the benefit
     of PREEMPT_RT"

* tag 'x86-urgent-2020-08-15' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/alternatives: Acquire pte lock with interrupts enabled
  x86/bugs/multihit: Fix mitigation reporting when VMX is not in use
  x86/fpu/xstate: Fix an xstate size check warning with architectural LBRs
  x86/purgatory: Don't generate debug info for purgatory.ro
  x86/tsr: Fix tsc frequency enumeration bug on Lightning Mountain SoC
  kexec_file: Correctly output debugging information for the PT_LOAD ELF header
  kexec: Improve & fix crash_exclude_mem_range() to handle overlapping ranges
  x86/crash: Correct the address boundary of function parameters
  x86/acrn: Remove redundant chars from ACRN signature
  x86/acrn: Allow ACRN guest to use X2APIC mode
2 parents: 1195d58 + a6d996c (commit 50f6c7d)

9 files changed: +88 -32 lines

Documentation/admin-guide/hw-vuln/multihit.rst (+4)

@@ -80,6 +80,10 @@ The possible values in this file are:
       - The processor is not vulnerable.
     * - KVM: Mitigation: Split huge pages
       - Software changes mitigate this issue.
+    * - KVM: Mitigation: VMX unsupported
+      - KVM is not vulnerable because Virtual Machine Extensions (VMX) is not supported.
+    * - KVM: Mitigation: VMX disabled
+      - KVM is not vulnerable because Virtual Machine Extensions (VMX) is disabled.
     * - KVM: Vulnerable
       - The processor is vulnerable, but no mitigation enabled
 

arch/x86/kernel/alternative.c (+3 -3)

@@ -875,8 +875,6 @@ static void *__text_poke(void *addr, const void *opcode, size_t len)
          */
         BUG_ON(!pages[0] || (cross_page_boundary && !pages[1]));
 
-        local_irq_save(flags);
-
         /*
          * Map the page without the global bit, as TLB flushing is done with
          * flush_tlb_mm_range(), which is intended for non-global PTEs.
@@ -893,6 +891,8 @@ static void *__text_poke(void *addr, const void *opcode, size_t len)
          */
         VM_BUG_ON(!ptep);
 
+        local_irq_save(flags);
+
         pte = mk_pte(pages[0], pgprot);
         set_pte_at(poking_mm, poking_addr, ptep, pte);
 
@@ -942,8 +942,8 @@ static void *__text_poke(void *addr, const void *opcode, size_t len)
          */
         BUG_ON(memcmp(addr, opcode, len));
 
-        pte_unmap_unlock(ptep, ptl);
         local_irq_restore(flags);
+        pte_unmap_unlock(ptep, ptl);
         return addr;
 }
 

arch/x86/kernel/cpu/acrn.c (+4 -8)

@@ -11,14 +11,15 @@
 
 #include <linux/interrupt.h>
 #include <asm/apic.h>
+#include <asm/cpufeatures.h>
 #include <asm/desc.h>
 #include <asm/hypervisor.h>
 #include <asm/idtentry.h>
 #include <asm/irq_regs.h>
 
-static uint32_t __init acrn_detect(void)
+static u32 __init acrn_detect(void)
 {
-        return hypervisor_cpuid_base("ACRNACRNACRN\0\0", 0);
+        return hypervisor_cpuid_base("ACRNACRNACRN", 0);
 }
 
 static void __init acrn_init_platform(void)
@@ -29,12 +30,7 @@ static void __init acrn_init_platform(void)
 
 static bool acrn_x2apic_available(void)
 {
-        /*
-         * x2apic is not supported for now. Future enablement will have to check
-         * X86_FEATURE_X2APIC to determine whether x2apic is supported in the
-         * guest.
-         */
-        return false;
+        return boot_cpu_has(X86_FEATURE_X2APIC);
 }
 
 static void (*acrn_intr_handler)(void);
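
A note on the signature change: hypervisor_cpuid_base() only ever looks at the 12 signature bytes that CPUID returns in EBX/ECX/EDX, so the two explicit NULs in the old literal were dead weight on top of the implicit string terminator. A standalone userspace sketch of that 12-byte view (pack_signature() and the register array are invented for illustration, not kernel code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Pack a hypervisor signature into the three 32-bit registers (EBX, ECX,
 * EDX) the way CPUID leaf 0x40000000 reports it: only 12 bytes exist. */
static void pack_signature(const char *sig, uint32_t regs[3])
{
        memcpy(regs, sig, 12);
}

int main(void)
{
        uint32_t old_sig[3], new_sig[3];

        pack_signature("ACRNACRNACRN\0\0", old_sig); /* old literal, redundant NULs */
        pack_signature("ACRNACRNACRN", new_sig);     /* new literal, exactly 12 chars */

        printf("signatures %s\n",
               memcmp(old_sig, new_sig, sizeof(old_sig)) == 0 ? "match" : "differ");
        return 0;
}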

arch/x86/kernel/cpu/bugs.c (+7 -1)

@@ -31,6 +31,7 @@
 #include <asm/intel-family.h>
 #include <asm/e820/api.h>
 #include <asm/hypervisor.h>
+#include <asm/tlbflush.h>
 
 #include "cpu.h"
 
@@ -1549,7 +1550,12 @@ static ssize_t l1tf_show_state(char *buf)
 
 static ssize_t itlb_multihit_show_state(char *buf)
 {
-        if (itlb_multihit_kvm_mitigation)
+        if (!boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) ||
+            !boot_cpu_has(X86_FEATURE_VMX))
+                return sprintf(buf, "KVM: Mitigation: VMX unsupported\n");
+        else if (!(cr4_read_shadow() & X86_CR4_VMXE))
+                return sprintf(buf, "KVM: Mitigation: VMX disabled\n");
+        else if (itlb_multihit_kvm_mitigation)
                 return sprintf(buf, "KVM: Mitigation: Split huge pages\n");
         else
                 return sprintf(buf, "KVM: Vulnerable\n");
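
The ordering of the new checks is what makes the sysfs string meaningful: report the most specific state first (no VMX capability at all, then VMX present but disabled in CR4), and only then fall back to the existing mitigated/vulnerable pair. A minimal standalone sketch of that ladder, with plain booleans standing in for the cpufeature and CR4-shadow tests (the helper and its parameters are hypothetical, not kernel API):

#include <stdbool.h>
#include <stdio.h>

/* vmx_supported ~ boot_cpu_has(X86_FEATURE_VMX) and friends,
 * vmx_enabled   ~ the CR4.VMXE shadow bit,
 * kvm_mitigation ~ itlb_multihit_kvm_mitigation. */
static const char *itlb_multihit_state(bool vmx_supported, bool vmx_enabled,
                                       bool kvm_mitigation)
{
        if (!vmx_supported)
                return "KVM: Mitigation: VMX unsupported";
        if (!vmx_enabled)
                return "KVM: Mitigation: VMX disabled";
        if (kvm_mitigation)
                return "KVM: Mitigation: Split huge pages";
        return "KVM: Vulnerable";
}

int main(void)
{
        printf("%s\n", itlb_multihit_state(false, false, false));
        printf("%s\n", itlb_multihit_state(true, false, false));
        printf("%s\n", itlb_multihit_state(true, true, true));
        printf("%s\n", itlb_multihit_state(true, true, false));
        return 0;
}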

arch/x86/kernel/crash.c (+1 -1)

@@ -230,7 +230,7 @@ static int elf_header_exclude_ranges(struct crash_mem *cmem)
         int ret = 0;
 
         /* Exclude the low 1M because it is always reserved */
-        ret = crash_exclude_mem_range(cmem, 0, 1<<20);
+        ret = crash_exclude_mem_range(cmem, 0, (1<<20)-1);
         if (ret)
                 return ret;
 
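
crash_exclude_mem_range() takes an inclusive end address (range sizes elsewhere in the kexec code are computed as mend - mstart + 1), so the low 1M ends at 0xFFFFF, not 0x100000. A trivial standalone check of that arithmetic:

#include <stdio.h>

int main(void)
{
        unsigned long long mstart = 0;
        unsigned long long mend_old = 1ULL << 20;       /* 0x100000 */
        unsigned long long mend_new = (1ULL << 20) - 1; /* 0x0fffff */

        /* With inclusive ends, only the corrected value covers exactly 1 MiB. */
        printf("old end %#llx -> %llu bytes\n", mend_old, mend_old - mstart + 1);
        printf("new end %#llx -> %llu bytes\n", mend_new, mend_new - mstart + 1);
        return 0;
}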

arch/x86/kernel/fpu/xstate.c (+32 -1)

@@ -611,6 +611,10 @@ static void check_xstate_against_struct(int nr)
  * This essentially double-checks what the cpu told us about
  * how large the XSAVE buffer needs to be. We are recalculating
  * it to be safe.
+ *
+ * Dynamic XSAVE features allocate their own buffers and are not
+ * covered by these checks. Only the size of the buffer for task->fpu
+ * is checked here.
  */
 static void do_extra_xstate_size_checks(void)
 {
@@ -673,6 +677,33 @@ static unsigned int __init get_xsaves_size(void)
         return ebx;
 }
 
+/*
+ * Get the total size of the enabled xstates without the dynamic supervisor
+ * features.
+ */
+static unsigned int __init get_xsaves_size_no_dynamic(void)
+{
+        u64 mask = xfeatures_mask_dynamic();
+        unsigned int size;
+
+        if (!mask)
+                return get_xsaves_size();
+
+        /* Disable dynamic features. */
+        wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor());
+
+        /*
+         * Ask the hardware what size is required of the buffer.
+         * This is the size required for the task->fpu buffer.
+         */
+        size = get_xsaves_size();
+
+        /* Re-enable dynamic features so XSAVES will work on them again. */
+        wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor() | mask);
+
+        return size;
+}
+
 static unsigned int __init get_xsave_size(void)
 {
         unsigned int eax, ebx, ecx, edx;
@@ -710,7 +741,7 @@ static int __init init_xstate_size(void)
         xsave_size = get_xsave_size();
 
         if (boot_cpu_has(X86_FEATURE_XSAVES))
-                possible_xstate_size = get_xsaves_size();
+                possible_xstate_size = get_xsaves_size_no_dynamic();
         else
                 possible_xstate_size = xsave_size;
 
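
The new helper brackets the hardware size query with two MSR_IA32_XSS writes so the reported size excludes dynamic supervisor states (such as Arch LBR), which get their own buffers rather than living in task->fpu. That cannot be reproduced in userspace, but the shape of the logic can be sketched with a simulated size query; every mask, size and name below is invented for illustration:

#include <stdint.h>
#include <stdio.h>

#define FAKE_MASK_SUPERVISOR 0x0600ULL  /* ordinary supervisor states */
#define FAKE_MASK_DYNAMIC    0x8000ULL  /* a dynamic state, e.g. Arch LBR */

static uint64_t fake_xss;  /* stands in for MSR_IA32_XSS */

/* Pretend CPUID(0xD, 1).EBX: the size follows whatever is enabled. */
static unsigned int fake_xsaves_size(void)
{
        unsigned int size = 576;  /* legacy area + header */

        if (fake_xss & FAKE_MASK_SUPERVISOR)
                size += 128;      /* made-up per-state sizes */
        if (fake_xss & FAKE_MASK_DYNAMIC)
                size += 800;
        return size;
}

static unsigned int fake_xsaves_size_no_dynamic(void)
{
        unsigned int size;

        /* Disable dynamic features, query the size, then re-enable them. */
        fake_xss &= ~FAKE_MASK_DYNAMIC;
        size = fake_xsaves_size();
        fake_xss |= FAKE_MASK_DYNAMIC;
        return size;
}

int main(void)
{
        fake_xss = FAKE_MASK_SUPERVISOR | FAKE_MASK_DYNAMIC;
        printf("size with dynamic states:     %u\n", fake_xsaves_size());
        printf("size for task->fpu (no dyn.): %u\n", fake_xsaves_size_no_dynamic());
        return 0;
}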

arch/x86/kernel/tsc_msr.c (+7 -2)

@@ -134,10 +134,15 @@ static const struct freq_desc freq_desc_ann = {
         .mask = 0x0f,
 };
 
-/* 24 MHz crystal? : 24 * 13 / 4 = 78 MHz */
+/*
+ * 24 MHz crystal? : 24 * 13 / 4 = 78 MHz
+ * Frequency step for Lightning Mountain SoC is fixed to 78 MHz,
+ * so all the frequency entries are 78000.
+ */
 static const struct freq_desc freq_desc_lgm = {
         .use_msr_plat = true,
-        .freqs = { 78000, 78000, 78000, 78000, 78000, 78000, 78000, 78000 },
+        .freqs = { 78000, 78000, 78000, 78000, 78000, 78000, 78000, 78000,
+                   78000, 78000, 78000, 78000, 78000, 78000, 78000, 78000 },
         .mask = 0x0f,
 };
 
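
The reason the table needs 16 entries is the lookup: the frequency index is taken from an MSR field and masked with 0x0f, so any value 0-15 is a legal index, and an 8-entry array could be read out of bounds. A standalone sketch of that lookup (the MSR field value here is invented; on the real SoC every slot decodes to the same 78000 kHz):

#include <stdio.h>

#define LGM_FREQ_MASK 0x0f  /* same mask as freq_desc_lgm above */

/* 16 entries, one per possible masked index. */
static const unsigned int lgm_freqs_khz[16] = {
        78000, 78000, 78000, 78000, 78000, 78000, 78000, 78000,
        78000, 78000, 78000, 78000, 78000, 78000, 78000, 78000,
};

int main(void)
{
        unsigned int msr_field = 0x0b;  /* hypothetical value above 7 */
        unsigned int idx = msr_field & LGM_FREQ_MASK;

        /* With only 8 table entries this index would run past the array. */
        printf("index %u -> %u kHz\n", idx, lgm_freqs_khz[idx]);
        return 0;
}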

arch/x86/purgatory/Makefile (+4 -1)

@@ -32,7 +32,7 @@ KCOV_INSTRUMENT := n
 # make up the standalone purgatory.ro
 
 PURGATORY_CFLAGS_REMOVE := -mcmodel=kernel
-PURGATORY_CFLAGS := -mcmodel=large -ffreestanding -fno-zero-initialized-in-bss
+PURGATORY_CFLAGS := -mcmodel=large -ffreestanding -fno-zero-initialized-in-bss -g0
 PURGATORY_CFLAGS += $(DISABLE_STACKLEAK_PLUGIN) -DDISABLE_BRANCH_PROFILING
 PURGATORY_CFLAGS += -fno-stack-protector
 
@@ -64,6 +64,9 @@ CFLAGS_sha256.o += $(PURGATORY_CFLAGS)
 CFLAGS_REMOVE_string.o += $(PURGATORY_CFLAGS_REMOVE)
 CFLAGS_string.o += $(PURGATORY_CFLAGS)
 
+AFLAGS_REMOVE_setup-x86_$(BITS).o += -Wa,-gdwarf-2
+AFLAGS_REMOVE_entry64.o += -Wa,-gdwarf-2
+
 $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
         $(call if_changed,ld)
 

kernel/kexec_file.c (+26 -15)

@@ -1169,24 +1169,26 @@ int crash_exclude_mem_range(struct crash_mem *mem,
                 unsigned long long mstart, unsigned long long mend)
 {
         int i, j;
-        unsigned long long start, end;
+        unsigned long long start, end, p_start, p_end;
         struct crash_mem_range temp_range = {0, 0};
 
         for (i = 0; i < mem->nr_ranges; i++) {
                 start = mem->ranges[i].start;
                 end = mem->ranges[i].end;
+                p_start = mstart;
+                p_end = mend;
 
                 if (mstart > end || mend < start)
                         continue;
 
                 /* Truncate any area outside of range */
                 if (mstart < start)
-                        mstart = start;
+                        p_start = start;
                 if (mend > end)
-                        mend = end;
+                        p_end = end;
 
                 /* Found completely overlapping range */
-                if (mstart == start && mend == end) {
+                if (p_start == start && p_end == end) {
                         mem->ranges[i].start = 0;
                         mem->ranges[i].end = 0;
                         if (i < mem->nr_ranges - 1) {
@@ -1197,20 +1199,29 @@ int crash_exclude_mem_range(struct crash_mem *mem,
                                         mem->ranges[j].end =
                                                 mem->ranges[j+1].end;
                                 }
+
+                                /*
+                                 * Continue to check if there are another overlapping ranges
+                                 * from the current position because of shifting the above
+                                 * mem ranges.
+                                 */
+                                i--;
+                                mem->nr_ranges--;
+                                continue;
                         }
                         mem->nr_ranges--;
                         return 0;
                 }
 
-                if (mstart > start && mend < end) {
+                if (p_start > start && p_end < end) {
                         /* Split original range */
-                        mem->ranges[i].end = mstart - 1;
-                        temp_range.start = mend + 1;
+                        mem->ranges[i].end = p_start - 1;
+                        temp_range.start = p_end + 1;
                         temp_range.end = end;
-                } else if (mstart != start)
-                        mem->ranges[i].end = mstart - 1;
+                } else if (p_start != start)
+                        mem->ranges[i].end = p_start - 1;
                 else
-                        mem->ranges[i].start = mend + 1;
+                        mem->ranges[i].start = p_end + 1;
                 break;
         }
 
@@ -1247,15 +1258,15 @@ int crash_prepare_elf64_headers(struct crash_mem *mem, int kernel_map,
         unsigned long long notes_addr;
         unsigned long mstart, mend;
 
-        /* extra phdr for vmcoreinfo elf note */
+        /* extra phdr for vmcoreinfo ELF note */
         nr_phdr = nr_cpus + 1;
         nr_phdr += mem->nr_ranges;
 
         /*
          * kexec-tools creates an extra PT_LOAD phdr for kernel text mapping
          * area (for example, ffffffff80000000 - ffffffffa0000000 on x86_64).
          * I think this is required by tools like gdb. So same physical
-         * memory will be mapped in two elf headers. One will contain kernel
+         * memory will be mapped in two ELF headers. One will contain kernel
          * text virtual addresses and other will have __va(physical) addresses.
          */
 
@@ -1282,7 +1293,7 @@ int crash_prepare_elf64_headers(struct crash_mem *mem, int kernel_map,
         ehdr->e_ehsize = sizeof(Elf64_Ehdr);
         ehdr->e_phentsize = sizeof(Elf64_Phdr);
 
-        /* Prepare one phdr of type PT_NOTE for each present cpu */
+        /* Prepare one phdr of type PT_NOTE for each present CPU */
         for_each_present_cpu(cpu) {
                 phdr->p_type = PT_NOTE;
                 notes_addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpu));
@@ -1324,10 +1335,10 @@ int crash_prepare_elf64_headers(struct crash_mem *mem, int kernel_map,
                 phdr->p_filesz = phdr->p_memsz = mend - mstart + 1;
                 phdr->p_align = 0;
                 ehdr->e_phnum++;
-                phdr++;
-                pr_debug("Crash PT_LOAD elf header. phdr=%p vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d p_offset=0x%llx\n",
+                pr_debug("Crash PT_LOAD ELF header. phdr=%p vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d p_offset=0x%llx\n",
                          phdr, phdr->p_vaddr, phdr->p_paddr, phdr->p_filesz,
                          ehdr->e_phnum, phdr->p_offset);
+                phdr++;
         }
 
         *addr = buf;
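
Because the ranges carry inclusive end addresses and one exclusion may now span several of them, the behaviour is easy to exercise in userspace. Below is a simplified, standalone re-implementation of the fixed loop (not the kernel code itself: the structures are trimmed and a split-off tail is simply appended) plus a small test where a single exclusion removes one range entirely and truncates the next; before this change the first matching range consumed the mstart/mend arguments, so the second range was left untouched.

#include <stdio.h>

#define MAX_RANGES 8

struct range { unsigned long long start, end; };  /* inclusive ends */
struct mem   { int nr_ranges; struct range ranges[MAX_RANGES]; };

static int exclude_mem_range(struct mem *mem,
                             unsigned long long mstart, unsigned long long mend)
{
        unsigned long long start, end, p_start, p_end;
        struct range temp = { 0, 0 };
        int i, j;

        for (i = 0; i < mem->nr_ranges; i++) {
                start = mem->ranges[i].start;
                end = mem->ranges[i].end;
                p_start = mstart;
                p_end = mend;

                if (mstart > end || mend < start)
                        continue;

                /* Truncate the exclusion to this range */
                if (mstart < start)
                        p_start = start;
                if (mend > end)
                        p_end = end;

                if (p_start == start && p_end == end) {
                        /* Whole range gone: shift the rest down and
                         * re-examine the current slot. */
                        for (j = i; j < mem->nr_ranges - 1; j++)
                                mem->ranges[j] = mem->ranges[j + 1];
                        mem->nr_ranges--;
                        i--;
                        continue;
                }

                if (p_start > start && p_end < end) {
                        /* Exclusion splits the range in two */
                        mem->ranges[i].end = p_start - 1;
                        temp.start = p_end + 1;
                        temp.end = end;
                } else if (p_start != start) {
                        mem->ranges[i].end = p_start - 1;
                } else {
                        mem->ranges[i].start = p_end + 1;
                }
                break;
        }

        if (temp.end) {
                if (mem->nr_ranges == MAX_RANGES)
                        return -1;
                mem->ranges[mem->nr_ranges++] = temp;
        }
        return 0;
}

int main(void)
{
        struct mem m = {
                .nr_ranges = 3,
                .ranges = {
                        { 0x0000, 0x3fff },
                        { 0x4000, 0x7fff },
                        { 0x8000, 0xbfff },
                },
        };
        int i;

        /* Covers all of the first range and part of the second one. */
        exclude_mem_range(&m, 0x0000, 0x5fff);

        for (i = 0; i < m.nr_ranges; i++)
                printf("[%#llx - %#llx]\n", m.ranges[i].start, m.ranges[i].end);
        return 0;
}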
