
Commit ad0be02

Automatic merge of 'next' into merge (2024-11-04 17:04)

2 parents: f09cf1c + bee08a9

11 files changed: +345 -148 lines


arch/powerpc/Kconfig (+2 -2)

@@ -275,8 +275,8 @@ config PPC
 	select HAVE_RSEQ
 	select HAVE_SETUP_PER_CPU_AREA		if PPC64
 	select HAVE_SOFTIRQ_ON_OWN_STACK
-	select HAVE_STACKPROTECTOR		if PPC32 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r2)
-	select HAVE_STACKPROTECTOR		if PPC64 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r13)
+	select HAVE_STACKPROTECTOR		if PPC32 && $(cc-option,$(m32-flag) -mstack-protector-guard=tls -mstack-protector-guard-reg=r2 -mstack-protector-guard-offset=0)
+	select HAVE_STACKPROTECTOR		if PPC64 && $(cc-option,$(m64-flag) -mstack-protector-guard=tls -mstack-protector-guard-reg=r13 -mstack-protector-guard-offset=0)
 	select HAVE_STATIC_CALL			if PPC32
 	select HAVE_SYSCALL_TRACEPOINTS
 	select HAVE_VIRT_CPU_ACCOUNTING
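Side note (not part of the commit): -mstack-protector-guard=tls with a guard register and offset tells the compiler to fetch the stack canary from a fixed offset off that register (r2 on PPC32, r13 on PPC64) instead of from the global __stack_chk_guard. The Kconfig probe only needs to confirm the compiler accepts the full flag set for the right word size; the real offset is filled in later by stack_protector_prepare. A rough C rendering of the check the compiler inserts, with guard_reg and GUARD_OFFSET as illustrative stand-ins:

/*
 * Illustrative sketch only -- the real check is emitted by the compiler.
 * guard_reg stands in for the r2/r13 base pointer and GUARD_OFFSET for the
 * value passed via -mstack-protector-guard-offset.
 */
#include <string.h>

extern char *guard_reg;              /* stand-in for the guard base register */
extern void __stack_chk_fail(void);  /* called when the canary is clobbered */

#define GUARD_OFFSET 0               /* offset probed for in the Kconfig test */

void demo(char *dst, const char *src, size_t n)
{
	/* prologue: copy the canary from <guard reg> + <guard offset> onto the stack */
	unsigned long canary = *(unsigned long *)(guard_reg + GUARD_OFFSET);
	char buf[64];

	memcpy(buf, src, n < sizeof(buf) ? n : sizeof(buf));
	memcpy(dst, buf, sizeof(buf));

	/* epilogue: if the on-stack copy no longer matches, the frame was overwritten */
	if (canary != *(unsigned long *)(guard_reg + GUARD_OFFSET))
		__stack_chk_fail();
}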

arch/powerpc/Makefile (+4 -9)

@@ -100,13 +100,6 @@ KBUILD_AFLAGS += -m$(BITS)
 KBUILD_LDFLAGS	+= -m elf$(BITS)$(LDEMULATION)
 endif
 
-cflags-$(CONFIG_STACKPROTECTOR)	+= -mstack-protector-guard=tls
-ifdef CONFIG_PPC64
-cflags-$(CONFIG_STACKPROTECTOR) += -mstack-protector-guard-reg=r13
-else
-cflags-$(CONFIG_STACKPROTECTOR) += -mstack-protector-guard-reg=r2
-endif
-
 LDFLAGS_vmlinux-y := -Bstatic
 LDFLAGS_vmlinux-$(CONFIG_RELOCATABLE) := -pie
 LDFLAGS_vmlinux-$(CONFIG_RELOCATABLE) += -z notext

@@ -402,9 +395,11 @@ prepare: stack_protector_prepare
 PHONY += stack_protector_prepare
 stack_protector_prepare: prepare0
 ifdef CONFIG_PPC64
-	$(eval KBUILD_CFLAGS += -mstack-protector-guard-offset=$(shell awk '{if ($$2 == "PACA_CANARY") print $$3;}' include/generated/asm-offsets.h))
+	$(eval KBUILD_CFLAGS += -mstack-protector-guard=tls -mstack-protector-guard-reg=r13 \
+		-mstack-protector-guard-offset=$(shell awk '{if ($$2 == "PACA_CANARY") print $$3;}' include/generated/asm-offsets.h))
 else
-	$(eval KBUILD_CFLAGS += -mstack-protector-guard-offset=$(shell awk '{if ($$2 == "TASK_CANARY") print $$3;}' include/generated/asm-offsets.h))
+	$(eval KBUILD_CFLAGS += -mstack-protector-guard=tls -mstack-protector-guard-reg=r2 \
+		-mstack-protector-guard-offset=$(shell awk '{if ($$2 == "TASK_CANARY") print $$3;}' include/generated/asm-offsets.h))
 endif
 endif
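For reference (mechanism, not part of this diff): the awk expression picks the third field of a line in include/generated/asm-offsets.h, a header kbuild generates from the OFFSET()/DEFINE() entries in arch/powerpc/kernel/asm-offsets.c. A sketch of how such entries are typically written; the exact guards and member names here are assumptions rather than quotes from the tree:

/* Sketch of the asm-offsets pattern behind PACA_CANARY/TASK_CANARY;
 * guards and member names are assumed, not copied from asm-offsets.c. */
#include <linux/kbuild.h>	/* OFFSET() helper */
#include <linux/sched.h>	/* struct task_struct */
#ifdef CONFIG_PPC64
#include <asm/paca.h>		/* struct paca_struct */
#endif

int main(void)
{
#ifdef CONFIG_STACKPROTECTOR
	/* each entry becomes "#define TASK_CANARY <byte offset> ..." in the header */
	OFFSET(TASK_CANARY, task_struct, stack_canary);
#ifdef CONFIG_PPC64
	OFFSET(PACA_CANARY, paca_struct, canary);
#endif
#endif
	return 0;
}

In the generated header the second field is the macro name and the third its numeric value, which is exactly what the $(shell awk ...) call feeds into -mstack-protector-guard-offset.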

arch/powerpc/include/asm/fadump.h (+7)

@@ -34,4 +34,11 @@ extern int early_init_dt_scan_fw_dump(unsigned long node, const char *uname,
 				      int depth, void *data);
 extern int fadump_reserve_mem(void);
 #endif
+
+#if defined(CONFIG_FA_DUMP) && defined(CONFIG_CMA)
+void fadump_cma_init(void);
+#else
+static inline void fadump_cma_init(void) { }
+#endif
+
 #endif /* _ASM_POWERPC_FADUMP_H */
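Aside (not from the diff): the empty static inline fallback is what lets the new call in setup_arch() further down stay free of #ifdef; when CONFIG_FA_DUMP or CONFIG_CMA is off, the call compiles away. A minimal illustration of the pattern, with caller_example() as a made-up caller:

/* Pattern illustration only; the declarations mirror the header above,
 * caller_example() is hypothetical. */
#if defined(CONFIG_FA_DUMP) && defined(CONFIG_CMA)
void fadump_cma_init(void);			/* real implementation in fadump.c */
#else
static inline void fadump_cma_init(void) { }	/* no-op, optimised out at call sites */
#endif

void caller_example(void)
{
	fadump_cma_init();	/* no #ifdef needed at the call site */
}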

arch/powerpc/include/asm/kfence.h (+6 -2)

@@ -15,7 +15,7 @@
 #define ARCH_FUNC_PREFIX "."
 #endif
 
-#ifdef CONFIG_KFENCE
+extern bool kfence_early_init;
 extern bool kfence_disabled;
 
 static inline void disable_kfence(void)

@@ -27,7 +27,11 @@ static inline bool arch_kfence_init_pool(void)
 {
 	return !kfence_disabled;
 }
-#endif
+
+static inline bool kfence_early_init_enabled(void)
+{
+	return IS_ENABLED(CONFIG_KFENCE) && kfence_early_init;
+}
 
 #ifdef CONFIG_PPC64
 static inline bool kfence_protect_page(unsigned long addr, bool protect)
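One plausible consumer shape (hypothetical, not taken from this commit): because kfence_early_init_enabled() folds in the IS_ENABLED(CONFIG_KFENCE) check, arch code can test it without wrapping the call in its own #ifdef, for example when deciding whether the linear map must stay at page granularity:

/* Hypothetical caller, to show the helper's intent; the function name and
 * the DEBUG_PAGEALLOC condition are illustrative, not from this commit. */
#include <linux/types.h>
#include <asm/kfence.h>

static bool want_small_page_linear_map(void)
{
	/*
	 * If KFENCE wants its pool mapped at boot, the surrounding linear
	 * map must use small pages so individual pages can be protected.
	 */
	return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) || kfence_early_init_enabled();
}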

arch/powerpc/kernel/fadump.c (+28 -27)

@@ -78,26 +78,38 @@ static struct cma *fadump_cma;
  * But for some reason even if it fails we still have the memory reservation
  * with us and we can still continue doing fadump.
  */
-static int __init fadump_cma_init(void)
+void __init fadump_cma_init(void)
 {
-	unsigned long long base, size;
+	unsigned long long base, size, end;
 	int rc;
 
-	if (!fw_dump.fadump_enabled)
-		return 0;
-
+	if (!fw_dump.fadump_supported || !fw_dump.fadump_enabled ||
+	    fw_dump.dump_active)
+		return;
 	/*
 	 * Do not use CMA if user has provided fadump=nocma kernel parameter.
-	 * Return 1 to continue with fadump old behaviour.
 	 */
-	if (fw_dump.nocma)
-		return 1;
+	if (fw_dump.nocma || !fw_dump.boot_memory_size)
+		return;
 
+	/*
+	 * [base, end) should be reserved during early init in
+	 * fadump_reserve_mem(). No need to check this here as
+	 * cma_init_reserved_mem() already checks for overlap.
+	 * Here we give the aligned chunk of this reserved memory to CMA.
+	 */
 	base = fw_dump.reserve_dump_area_start;
 	size = fw_dump.boot_memory_size;
+	end = base + size;
 
-	if (!size)
-		return 0;
+	base = ALIGN(base, CMA_MIN_ALIGNMENT_BYTES);
+	end = ALIGN_DOWN(end, CMA_MIN_ALIGNMENT_BYTES);
+	size = end - base;
+
+	if (end <= base) {
+		pr_warn("%s: Too less memory to give to CMA\n", __func__);
+		return;
+	}
 
 	rc = cma_init_reserved_mem(base, size, 0, "fadump_cma", &fadump_cma);
 	if (rc) {

@@ -108,7 +120,7 @@ static int __init fadump_cma_init(void)
 		 * blocked from production system usage. Hence return 1,
 		 * so that we can continue with fadump.
 		 */
-		return 1;
+		return;
 	}
 
 	/*

@@ -120,15 +132,13 @@ static int __init fadump_cma_init(void)
 	/*
 	 * So we now have successfully initialized cma area for fadump.
 	 */
-	pr_info("Initialized 0x%lx bytes cma area at %ldMB from 0x%lx "
+	pr_info("Initialized [0x%llx, %luMB] cma area from [0x%lx, %luMB] "
 		"bytes of memory reserved for firmware-assisted dump\n",
-		cma_get_size(fadump_cma),
-		(unsigned long)cma_get_base(fadump_cma) >> 20,
-		fw_dump.reserve_dump_area_size);
-	return 1;
+		cma_get_base(fadump_cma), cma_get_size(fadump_cma) >> 20,
+		fw_dump.reserve_dump_area_start,
+		fw_dump.boot_memory_size >> 20);
+	return;
 }
-#else
-static int __init fadump_cma_init(void) { return 1; }
 #endif /* CONFIG_CMA */
 
 /*

@@ -558,13 +568,6 @@ int __init fadump_reserve_mem(void)
 	if (!fw_dump.dump_active) {
 		fw_dump.boot_memory_size =
 			PAGE_ALIGN(fadump_calculate_reserve_size());
-#ifdef CONFIG_CMA
-		if (!fw_dump.nocma) {
-			fw_dump.boot_memory_size =
-				ALIGN(fw_dump.boot_memory_size,
-					CMA_MIN_ALIGNMENT_BYTES);
-		}
-#endif
 
 		bootmem_min = fw_dump.ops->fadump_get_bootmem_min();
 		if (fw_dump.boot_memory_size < bootmem_min) {

@@ -637,8 +640,6 @@ int __init fadump_reserve_mem(void)
 
 		pr_info("Reserved %lldMB of memory at %#016llx (System RAM: %lldMB)\n",
 			(size >> 20), base, (memblock_phys_mem_size() >> 20));
-
-		ret = fadump_cma_init();
 	}
 
 	return ret;
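A standalone sketch of the trimming now done in fadump_cma_init(): the reserved range [base, base + size) is shrunk inward to CMA_MIN_ALIGNMENT_BYTES boundaries before being handed to cma_init_reserved_mem(), and a remainder that vanishes is rejected. The 16 MiB alignment and the addresses below are example values only:

/* Standalone demonstration of the alignment arithmetic above; the 16 MiB
 * value stands in for CMA_MIN_ALIGNMENT_BYTES and is illustrative. */
#include <stdio.h>

#define EXAMPLE_CMA_ALIGN   (16ULL << 20)                /* stand-in for CMA_MIN_ALIGNMENT_BYTES */
#define ALIGN_UP(x, a)      (((x) + (a) - 1) & ~((a) - 1))
#define ALIGN_DOWN(x, a)    ((x) & ~((a) - 1))

int main(void)
{
	unsigned long long base = 0x2001000ULL;          /* unaligned reservation start */
	unsigned long long size = 0x4000000ULL;          /* 64 MiB of boot memory */
	unsigned long long end  = base + size;

	base = ALIGN_UP(base, EXAMPLE_CMA_ALIGN);         /* round start up */
	end  = ALIGN_DOWN(end, EXAMPLE_CMA_ALIGN);        /* round end down */

	if (end <= base)
		printf("too little memory left to give to CMA\n");
	else
		printf("CMA gets [%#llx, %#llx), %llu MiB\n",
		       base, end, (end - base) >> 20);
	return 0;
}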

arch/powerpc/kernel/setup-common.c (+4 -2)

@@ -997,9 +997,11 @@ void __init setup_arch(char **cmdline_p)
 	initmem_init();
 
 	/*
-	 * Reserve large chunks of memory for use by CMA for KVM and hugetlb. These must
-	 * be called after initmem_init(), so that pageblock_order is initialised.
+	 * Reserve large chunks of memory for use by CMA for fadump, KVM and
+	 * hugetlb. These must be called after initmem_init(), so that
+	 * pageblock_order is initialised.
 	 */
+	fadump_cma_init();
 	kvm_cma_reserve();
 	gigantic_hugetlb_cma_reserve();
 
