Commit 4422913

Author: Fox Snowpatch (committed)
1 parent 4db4221 commit 4422913

File tree: 3 files changed, 22 insertions(+), 0 deletions(-)

  arch/powerpc/include/asm/interrupt.h   +10
  arch/powerpc/include/asm/percpu.h      +10
  arch/powerpc/kernel/setup_64.c          +2

arch/powerpc/include/asm/interrupt.h (+10)

@@ -336,6 +336,14 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
 	if (IS_ENABLED(CONFIG_KASAN))
 		return;
 
+	/*
+	 * Likewise, do not use it in real mode if percpu first chunk is not
+	 * embedded. With CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK enabled there
+	 * are chances where percpu allocation can come from vmalloc area.
+	 */
+	if (percpu_first_chunk_is_paged)
+		return;
+
 	/* Otherwise, it should be safe to call it */
 	nmi_enter();
 }
@@ -351,6 +359,8 @@ static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
 		// no nmi_exit for a pseries hash guest taking a real mode exception
 	} else if (IS_ENABLED(CONFIG_KASAN)) {
 		// no nmi_exit for KASAN in real mode
+	} else if (percpu_first_chunk_is_paged) {
+		// no nmi_exit if percpu first chunk is not embedded
 	} else {
 		nmi_exit();
 	}
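
The guard above keeps nmi_enter()/nmi_exit() away from per-CPU data that may sit in the vmalloc area while the MMU is off. As a rough sketch of the same pattern (not part of this commit; the function and per-CPU variable names below are hypothetical), any other real-mode path that wants to touch per-CPU data could test the new flag first:

#include <linux/percpu.h>
#include <asm/percpu.h>

/*
 * Hypothetical sketch, not from this commit: a real-mode helper that only
 * touches per-CPU data when the first chunk is embedded (and therefore not
 * in the vmalloc area, which is unreachable with the MMU off).
 */
static DEFINE_PER_CPU(unsigned long, hypothetical_rm_events);

static void hypothetical_real_mode_account(void)
{
	if (percpu_first_chunk_is_paged)
		return;		/* per-CPU access could fault in real mode */

	this_cpu_inc(hypothetical_rm_events);
}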

arch/powerpc/include/asm/percpu.h (+10)

@@ -15,6 +15,16 @@
 #endif /* CONFIG_SMP */
 #endif /* __powerpc64__ */
 
+#if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK) && defined(CONFIG_SMP)
+#include <linux/jump_label.h>
+DECLARE_STATIC_KEY_FALSE(__percpu_first_chunk_is_paged);
+
+#define percpu_first_chunk_is_paged	\
+	(static_key_enabled(&__percpu_first_chunk_is_paged.key))
+#else
+#define percpu_first_chunk_is_paged	false
+#endif /* CONFIG_PPC64 && CONFIG_SMP */
+
 #include <asm-generic/percpu.h>
 
 #include <asm/paca.h>
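
The new macro relies on the kernel's jump-label (static key) API: a key declared here with DECLARE_STATIC_KEY_FALSE is defined elsewhere with DEFINE_STATIC_KEY_FALSE (see setup_64.c below), can be flipped once at boot, and read cheaply afterwards. A minimal, generic sketch of that API follows; the key and function names are made up for this example and are not part of the commit:

#include <linux/jump_label.h>

/* Example only: the key defaults to false until explicitly enabled. */
DEFINE_STATIC_KEY_FALSE(example_key);

static void example_enable_once(void)
{
	static_key_enable(&example_key.key);	/* flip the key at run time */
}

static bool example_is_enabled(void)
{
	/*
	 * static_key_enabled() reads the key's current value;
	 * static_branch_unlikely(&example_key) would instead compile to a
	 * patched jump/no-op at the call site.
	 */
	return static_key_enabled(&example_key.key);
}

Because the key defaults to false, configurations that never enable it (the #else branch above) keep the existing behaviour.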

arch/powerpc/kernel/setup_64.c (+2)

@@ -834,6 +834,7 @@ static __init int pcpu_cpu_to_node(int cpu)
 
 unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
 EXPORT_SYMBOL(__per_cpu_offset);
+DEFINE_STATIC_KEY_FALSE(__percpu_first_chunk_is_paged);
 
 void __init setup_per_cpu_areas(void)
 {
@@ -876,6 +877,7 @@ void __init setup_per_cpu_areas(void)
 	if (rc < 0)
 		panic("cannot initialize percpu area (err=%d)", rc);
 
+	static_key_enable(&__percpu_first_chunk_is_paged.key);
 	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
 	for_each_possible_cpu(cpu) {
 		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
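
For context on the "percpu allocation can come from vmalloc area" comment in interrupt.h: when the page-based first chunk is used, per-CPU addresses are mapped through the vmalloc region rather than the linear map, so they are only reachable with the MMU on. A hypothetical helper, purely for illustration and not part of this commit, would spell that check out as:

#include <linux/mm.h>
#include <linux/percpu.h>

/*
 * Hypothetical helper, for illustration only: reports whether a per-CPU
 * variable's address for @cpu falls in the vmalloc area, i.e. whether it
 * is reachable only with the MMU enabled.
 */
static bool hypothetical_percpu_in_vmalloc(void __percpu *ptr, int cpu)
{
	return is_vmalloc_addr(per_cpu_ptr(ptr, cpu));
}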
