Commit 050e9baa authored by Linus Torvalds

Kbuild: rename CC_STACKPROTECTOR[_STRONG] config variables

The changes to automatically test for working stack protector compiler
support in the Kconfig files removed the special STACKPROTECTOR_AUTO
option that picked the strongest stack protector that the compiler
supported.

That was all a nice cleanup - it makes no sense to have the AUTO case
now that the Kconfig phase can just determine the compiler support
directly.

HOWEVER.

It also meant that doing "make oldconfig" would now _disable_ the strong
stackprotector if you had AUTO enabled, because in a legacy config file,
the sane stack protector configuration would look like

  CONFIG_HAVE_CC_STACKPROTECTOR=y
  # CONFIG_CC_STACKPROTECTOR_NONE is not set
  # CONFIG_CC_STACKPROTECTOR_REGULAR is not set
  # CONFIG_CC_STACKPROTECTOR_STRONG is not set
  CONFIG_CC_STACKPROTECTOR_AUTO=y

and when you ran this through "make oldconfig" with the Kbuild changes,
it would ask you about the regular CONFIG_CC_STACKPROTECTOR (that had
been renamed from CONFIG_CC_STA...
parent be779f03
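
For illustration, here is roughly what the same selection looks like once the
symbols are renamed (the exact .config lines are an assumption; the symbol
names come from the diff below):

  CONFIG_CC_HAS_STACKPROTECTOR_NONE=y
  CONFIG_STACKPROTECTOR=y
  CONFIG_STACKPROTECTOR_STRONG=y

Because STACKPROTECTOR and STACKPROTECTOR_STRONG are brand-new symbols,
"make oldconfig" treats them as fresh questions (and STACKPROTECTOR_STRONG
carries a "default y", per the arch Kconfig hunk below) rather than silently
inheriting the stale per-variant answers from the AUTO era.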
@@ -480,7 +480,7 @@
 There are several features that need compiler support. The recommended way
 to describe the dependency on the compiler feature is to use "depends on"
 followed by a test macro.
-config CC_STACKPROTECTOR
+config STACKPROTECTOR
         bool "Stack Protector buffer overflow detection"
         depends on $(cc-option,-fstack-protector)
         ...
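As background for the test-macro idiom above (a sketch of the semantics, not
the patch's text): $(cc-option,<flag>) evaluates to true when the compiler
accepts <flag>. Conceptually it behaves like this shell probe; the real
definition lives in scripts/Kconfig.include and may differ in detail:

  # hypothetical shell equivalent of $(cc-option,-fstack-protector)
  $CC -Werror -fstack-protector -S -x c /dev/null -o /dev/null && echo y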
@@ -156,7 +156,7 @@
 The classic stack buffer overflow involves writing past the expected end
 of a variable stored on the stack, ultimately writing a controlled value
 to the stack frame's stored return address. The most widely used defense
 is the presence of a stack canary between the stack variables and the
-return address (``CONFIG_CC_STACKPROTECTOR``), which is verified just before
+return address (``CONFIG_STACKPROTECTOR``), which is verified just before
 the function returns. Other defenses include things like shadow stacks.
 Stack depth overflow
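To make the canary mechanism concrete, here is a conceptual C model of what
-fstack-protector instrumentation does; the compiler emits this inline, and
only the two extern symbols are real runtime names (they appear throughout
this patch):

  #include <string.h>

  extern unsigned long __stack_chk_guard;   /* global guard value */
  extern void __stack_chk_fail(void);       /* mismatch handler; never returns */

  void copy_name(const char *src)
  {
          unsigned long canary = __stack_chk_guard;  /* prologue: park guard near the locals */
          char buf[64];

          strcpy(buf, src);                  /* a linear overflow clobbers the canary first */

          if (canary != __stack_chk_guard)   /* epilogue: verify before returning */
                  __stack_chk_fail();
  }

Plain -fstack-protector only instruments functions with sufficiently large
character buffers; -fstack-protector-strong widens the heuristic to things
like address-taken locals and arrays of any type.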
@@ -687,8 +687,8 @@
 KBUILD_CFLAGS += $(call cc-option,-Wframe-larger-than=${CONFIG_FRAME_WARN})
 endif
 
 stackp-flags-$(CONFIG_CC_HAS_STACKPROTECTOR_NONE) := -fno-stack-protector
-stackp-flags-$(CONFIG_CC_STACKPROTECTOR) := -fstack-protector
-stackp-flags-$(CONFIG_CC_STACKPROTECTOR_STRONG) := -fstack-protector-strong
+stackp-flags-$(CONFIG_STACKPROTECTOR) := -fstack-protector
+stackp-flags-$(CONFIG_STACKPROTECTOR_STRONG) := -fstack-protector-strong
 
 KBUILD_CFLAGS += $(stackp-flags-y)
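How this fragment resolves (my reading of the lines above): each $(CONFIG_...)
expands to "y" when the option is set and to nothing otherwise, so only
enabled options assign to stackp-flags-y, and with plain := the last
assignment wins. With both protector options enabled the sequence reduces to:

  stackp-flags-y := -fno-stack-protector        # CC_HAS_STACKPROTECTOR_NONE=y
  stackp-flags-y := -fstack-protector           # STACKPROTECTOR=y, overwrites
  stackp-flags-y := -fstack-protector-strong    # STACKPROTECTOR_STRONG=y, wins
  KBUILD_CFLAGS += $(stackp-flags-y)

so the strongest selected flag reaches KBUILD_CFLAGS, and -fno-stack-protector
is passed when no protector is chosen but the compiler might enable one by
default.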
@@ -558,7 +558,7 @@ config HAVE_CC_STACKPROTECTOR
 config CC_HAS_STACKPROTECTOR_NONE
         def_bool $(cc-option,-fno-stack-protector)
 
-config CC_STACKPROTECTOR
+config STACKPROTECTOR
         bool "Stack Protector buffer overflow detection"
         depends on HAVE_CC_STACKPROTECTOR
         depends on $(cc-option,-fstack-protector)
@@ -582,9 +582,9 @@ config CC_STACKPROTECTOR
           about 3% of all kernel functions, which increases kernel code size
           by about 0.3%.
 
-config CC_STACKPROTECTOR_STRONG
+config STACKPROTECTOR_STRONG
         bool "Strong Stack Protector"
-        depends on CC_STACKPROTECTOR
+        depends on STACKPROTECTOR
         depends on $(cc-option,-fstack-protector-strong)
         default y
         help
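An observation on the hunks above (mine, not the commit's): the rename
deliberately leaves the capability symbols alone. HAVE_CC_STACKPROTECTOR (the
architecture opt-in) and CC_HAS_STACKPROTECTOR_NONE (probed because some
distribution compilers turn the stack protector on by default, so disabling it
reliably requires an explicit -fno-stack-protector) keep their compiler-flavored
names; only the user-visible choices become STACKPROTECTOR and
STACKPROTECTOR_STRONG, which is precisely what makes "make oldconfig" ask
about them afresh.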
@@ -61,7 +61,7 @@
 int main(void)
 {
         DEFINE(TSK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
         DEFINE(TSK_STACK_CANARY, offsetof(struct task_struct, stack_canary));
 #endif
         BLANK();
@@ -791,7 +791,7 @@ ENTRY(__switch_to)
         ldr     r6, [r2, #TI_CPU_DOMAIN]
 #endif
         switch_tls r1, r4, r5, r3, r7
-#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
         ldr     r7, [r2, #TI_TASK]
         ldr     r8, =__stack_chk_guard
         .if (TSK_STACK_CANARY > IMM12_MASK)
@@ -807,7 +807,7 @@ ENTRY(__switch_to)
         ldr     r0, =thread_notify_head
         mov     r1, #THREAD_NOTIFY_SWITCH
         bl      atomic_notifier_call_chain
-#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
         str     r7, [r8]
 #endif
 THUMB(  mov     ip, r4 )
@@ -39,7 +39,7 @@
 #include <asm/tls.h>
 #include <asm/vdso.h>
 
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
 #include <linux/stackprotector.h>
 unsigned long __stack_chk_guard __read_mostly;
 EXPORT_SYMBOL(__stack_chk_guard);
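The pattern in this and the similar hunks below, as I read it: on
architectures where the compiler references a single global guard, each arch
defines __stack_chk_guard once in its process.c and exports it so that
instrumented code in modules can link; the value itself is seeded at boot via
boot_init_stack_canary() from <linux/stackprotector.h>.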
@@ -59,7 +59,7 @@
 #include <asm/processor.h>
 #include <asm/stacktrace.h>
 
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
 #include <linux/stackprotector.h>
 unsigned long __stack_chk_guard __read_mostly;
 EXPORT_SYMBOL(__stack_chk_guard);
@@ -83,7 +83,7 @@ void output_task_defines(void)
         OFFSET(TASK_FLAGS, task_struct, flags);
         OFFSET(TASK_MM, task_struct, mm);
         OFFSET(TASK_PID, task_struct, pid);
-#if defined(CONFIG_CC_STACKPROTECTOR)
+#if defined(CONFIG_STACKPROTECTOR)
         OFFSET(TASK_STACK_CANARY, task_struct, stack_canary);
 #endif
         DEFINE(TASK_STRUCT_SIZE, sizeof(struct task_struct));
@@ -61,7 +61,7 @@
 #endif
 3:
 
-#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
         PTR_LA  t8, __stack_chk_guard
         LONG_L  t9, TASK_STACK_CANARY(a1)
         LONG_S  t9, 0(t8)
@@ -180,7 +180,7 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
         return 0;
 }
 
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
 #include <linux/stackprotector.h>
 unsigned long __stack_chk_guard __read_mostly;
 EXPORT_SYMBOL(__stack_chk_guard);
@@ -36,7 +36,7 @@ LEAF(resume)
         cpu_save_nonscratch a0
         sw      ra, THREAD_REG31(a0)
 
-#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
         PTR_LA  t8, __stack_chk_guard
         LONG_L  t9, TASK_STACK_CANARY(a1)
         LONG_S  t9, 0(t8)
@@ -31,7 +31,7 @@
         cpu_save_nonscratch a0
         LONG_S  ra, THREAD_REG31(a0)
 
-#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
         PTR_LA  t8, __stack_chk_guard
         LONG_L  t9, TASK_STACK_CANARY(a1)
         LONG_S  t9, 0(t8)
@@ -12,7 +12,7 @@
 struct kmem_cache *task_xstate_cachep = NULL;
 unsigned int xstate_size;
 
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
 unsigned long __stack_chk_guard __read_mostly;
 EXPORT_SYMBOL(__stack_chk_guard);
 #endif
@@ -177,7 +177,7 @@
 __switch_to(struct task_struct *prev, struct task_struct *next)
 {
         struct thread_struct *next_t = &next->thread;
 
-#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
         __stack_chk_guard = next->stack_canary;
 #endif
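The ARM, MIPS, and SH context-switch hunks above all implement the same move;
the SH version shows it directly in C:

  #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
          /* UP only: point the single global canary at the incoming task's value */
          __stack_chk_guard = next->stack_canary;
  #endif

The !defined(CONFIG_SMP) guard is the interesting part: with one global guard,
rewriting it during a switch on one CPU would break the epilogue checks of
tasks running concurrently on other CPUs, so SMP kernels on these
architectures stick with a single boot-time canary for all tasks.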
@@ -239,7 +239,7 @@ ENTRY(__switch_to_asm)
         movl    %esp, TASK_threadsp(%eax)
         movl    TASK_threadsp(%edx), %esp
 
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
         movl    TASK_stack_canary(%edx), %ebx
         movl    %ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset
 #endif
@@ -357,7 +357,7 @@ ENTRY(__switch_to_asm)
         movq    %rsp, TASK_threadsp(%rdi)
         movq    TASK_threadsp(%rsi), %rsp
 
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
         movq    TASK_stack_canary(%rsi), %rbx
         movq    %rbx, PER_CPU_VAR(irq_stack_union)+stack_canary_offset
 #endif
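x86 is the exception to that SMP caveat (my gloss, not the commit's): the
guard lives in per-CPU storage, stack_canary on 32-bit and a fixed slot inside
irq_stack_union on 64-bit, and compiler-emitted checks read it through the
%gs segment, so __switch_to_asm can retarget it on every switch even on SMP.
A 64-bit check conceptually loads:

  movq    %gs:40, %rax    /* 40 = offset of stack_canary inside irq_stack_union */

where 40 is the offset gcc hardcodes for the kernel's x86-64 stack protector.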
@@ -412,7 +412,7 @@ extern asmlinkage void ignore_sysret(void);
 void save_fsgs_for_kvm(void);
 #endif
 #else   /* X86_64 */
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
 /*
  * Make sure stack canary segment base is cached-aligned:
  *   "For Intel Atom processors, avoid non zero segment base address
@@ -146,7 +146,7 @@
 # define __KERNEL_PERCPU 0
 #endif
 
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
 # define __KERNEL_STACK_CANARY (GDT_ENTRY_STACK_CANARY*8)
 #else
 # define __KERNEL_STACK_CANARY 0
@@ -34,7 +34,7 @@
 #ifndef _ASM_STACKPROTECTOR_H
 #define _ASM_STACKPROTECTOR_H 1
 
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
 #include <asm/tsc.h>
 #include <asm/processor.h>
@@ -105,7 +105,7 @@ static inline void load_stack_canary_segment(void)
 #endif
 }
 
-#else   /* CC_STACKPROTECTOR */
+#else   /* STACKPROTECTOR */
 
 #define GDT_STACK_CANARY_INIT
@@ -121,5 +121,5 @@ static inline void load_stack_canary_segment(void)
 #endif
 }
 
-#endif  /* CC_STACKPROTECTOR */
+#endif  /* STACKPROTECTOR */
 #endif  /* _ASM_STACKPROTECTOR_H */