Diffstat (limited to 'arch/Kconfig')
-rw-r--r--	arch/Kconfig	436
1 file changed, 314 insertions, 122 deletions
diff --git a/arch/Kconfig b/arch/Kconfig
index 761d169ba7fd..59dee290d94b 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -11,31 +11,44 @@ source "arch/$(SRCARCH)/Kconfig"
menu "General architecture-dependent options"
-config CRASH_CORE
+config ARCH_HAS_SUBPAGE_FAULTS
bool
+ help
+ Select if the architecture can check permissions at sub-page
+ granularity (e.g. arm64 MTE). The probe_user_*() functions
+ must be implemented.
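A rough sketch of the contract this help text hints at (illustrative only, not part of the patch): with ARCH_HAS_SUBPAGE_FAULTS=y the architecture supplies a sub-page probe such as probe_subpage_writeable(), while the generic fallback reports nothing; mte_check_range() below is a hypothetical stand-in for the arch-specific check.

    #ifdef CONFIG_ARCH_HAS_SUBPAGE_FAULTS
    /* Arch-provided: return how many bytes fail the sub-page (e.g. MTE tag)
     * check; 0 means the whole range is writeable. */
    static inline size_t probe_subpage_writeable(char __user *uaddr, size_t size)
    {
            return mte_check_range(uaddr, size);    /* hypothetical helper */
    }
    #else
    /* Generic fallback: no sub-page granularity, nothing to report. */
    static inline size_t probe_subpage_writeable(char __user *uaddr, size_t size)
    {
            return 0;
    }
    #endif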
-config KEXEC_CORE
- select CRASH_CORE
+config HOTPLUG_SMT
bool
-config KEXEC_ELF
+config SMT_NUM_THREADS_DYNAMIC
bool
-config HAVE_IMA_KEXEC
+# Selected by HOTPLUG_CORE_SYNC_DEAD or HOTPLUG_CORE_SYNC_FULL
+config HOTPLUG_CORE_SYNC
bool
-config ARCH_HAS_SUBPAGE_FAULTS
+# Basic CPU dead synchronization selected by architecture
+config HOTPLUG_CORE_SYNC_DEAD
bool
- help
- Select if the architecture can check permissions at sub-page
- granularity (e.g. arm64 MTE). The probe_user_*() functions
- must be implemented.
+ select HOTPLUG_CORE_SYNC
-config HOTPLUG_SMT
+# Full CPU synchronization with alive state selected by architecture
+config HOTPLUG_CORE_SYNC_FULL
bool
+ select HOTPLUG_CORE_SYNC_DEAD if HOTPLUG_CPU
+ select HOTPLUG_CORE_SYNC
+
+config HOTPLUG_SPLIT_STARTUP
+ bool
+ select HOTPLUG_CORE_SYNC_FULL
+
+config HOTPLUG_PARALLEL
+ bool
+ select HOTPLUG_SPLIT_STARTUP
config GENERIC_ENTRY
- bool
+ bool
config KPROBES
bool "Kprobes"
@@ -53,29 +66,28 @@ config KPROBES
config JUMP_LABEL
bool "Optimize very unlikely/likely branches"
depends on HAVE_ARCH_JUMP_LABEL
- depends on CC_HAS_ASM_GOTO
select OBJTOOL if HAVE_JUMP_LABEL_HACK
help
- This option enables a transparent branch optimization that
- makes certain almost-always-true or almost-always-false branch
- conditions even cheaper to execute within the kernel.
+ This option enables a transparent branch optimization that
+ makes certain almost-always-true or almost-always-false branch
+ conditions even cheaper to execute within the kernel.
- Certain performance-sensitive kernel code, such as trace points,
- scheduler functionality, networking code and KVM have such
- branches and include support for this optimization technique.
+ Certain performance-sensitive kernel code, such as trace points,
+ scheduler functionality, networking code and KVM have such
+ branches and include support for this optimization technique.
- If it is detected that the compiler has support for "asm goto",
- the kernel will compile such branches with just a nop
- instruction. When the condition flag is toggled to true, the
- nop will be converted to a jump instruction to execute the
- conditional block of instructions.
+ If it is detected that the compiler has support for "asm goto",
+ the kernel will compile such branches with just a nop
+ instruction. When the condition flag is toggled to true, the
+ nop will be converted to a jump instruction to execute the
+ conditional block of instructions.
- This technique lowers overhead and stress on the branch prediction
- of the processor and generally makes the kernel faster. The update
- of the condition is slower, but those are always very rare.
+ This technique lowers overhead and stress on the branch prediction
+ of the processor and generally makes the kernel faster. The update
+ of the condition is slower, but those are always very rare.
- ( On 32-bit x86, the necessary options added to the compiler
- flags may increase the size of the kernel slightly. )
+ ( On 32-bit x86, the necessary options added to the compiler
+ flags may increase the size of the kernel slightly. )
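For reference, the in-kernel interface backed by this optimization is the static key API; a minimal usage sketch (the key and do_rare_work() are invented for illustration):

    #include <linux/jump_label.h>

    void do_rare_work(void);                        /* hypothetical slow path */

    static DEFINE_STATIC_KEY_FALSE(my_feature_key);

    void hot_path(void)
    {
            /* Compiled as a NOP until the key is enabled. */
            if (static_branch_unlikely(&my_feature_key))
                    do_rare_work();
    }

    void enable_my_feature(void)
    {
            /* Patches the NOP into a jump at runtime. */
            static_branch_enable(&my_feature_key);
    }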
config STATIC_KEYS_SELFTEST
bool "Static key selftest"
@@ -99,9 +111,9 @@ config KPROBES_ON_FTRACE
depends on KPROBES && HAVE_KPROBES_ON_FTRACE
depends on DYNAMIC_FTRACE_WITH_REGS
help
- If function tracer is enabled and the arch supports full
- passing of pt_regs to function tracing, then kprobes can
- optimize on top of function tracing.
+ If function tracer is enabled and the arch supports full
+ passing of pt_regs to function tracing, then kprobes can
+ optimize on top of function tracing.
config UPROBES
def_bool n
@@ -155,21 +167,21 @@ config HAVE_EFFICIENT_UNALIGNED_ACCESS
config ARCH_USE_BUILTIN_BSWAP
bool
help
- Modern versions of GCC (since 4.4) have builtin functions
- for handling byte-swapping. Using these, instead of the old
- inline assembler that the architecture code provides in the
- __arch_bswapXX() macros, allows the compiler to see what's
- happening and offers more opportunity for optimisation. In
- particular, the compiler will be able to combine the byteswap
- with a nearby load or store and use load-and-swap or
- store-and-swap instructions if the architecture has them. It
- should almost *never* result in code which is worse than the
- hand-coded assembler in <asm/swab.h>. But just in case it
- does, the use of the builtins is optional.
+ Modern versions of GCC (since 4.4) have builtin functions
+ for handling byte-swapping. Using these, instead of the old
+ inline assembler that the architecture code provides in the
+ __arch_bswapXX() macros, allows the compiler to see what's
+ happening and offers more opportunity for optimisation. In
+ particular, the compiler will be able to combine the byteswap
+ with a nearby load or store and use load-and-swap or
+ store-and-swap instructions if the architecture has them. It
+ should almost *never* result in code which is worse than the
+ hand-coded assembler in <asm/swab.h>. But just in case it
+ does, the use of the builtins is optional.
- Any architecture with load-and-swap or store-and-swap
- instructions should set this. And it shouldn't hurt to set it
- on architectures that don't have such instructions.
+ Any architecture with load-and-swap or store-and-swap
+ instructions should set this. And it shouldn't hurt to set it
+ on architectures that don't have such instructions.
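A small illustration of the difference described above (a sketch, not from the patch): with the builtin, the compiler is free to fuse the byteswap with the adjacent load.

    #include <linux/swab.h>

    u32 load_be32(const void *p)
    {
            u32 raw = *(const u32 *)p;

            /* With ARCH_USE_BUILTIN_BSWAP, swab32() expands to
             * __builtin_bswap32(), letting the compiler combine the swap
             * with the load above into a load-and-swap instruction. */
            return swab32(raw);
    }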
config KRETPROBES
def_bool y
@@ -286,17 +298,16 @@ config ARCH_HAS_DMA_SET_UNCACHED
config ARCH_HAS_DMA_CLEAR_UNCACHED
bool
-# Select if arch init_task must go in the __init_task_data section
-config ARCH_TASK_STRUCT_ON_STACK
+config ARCH_HAS_CPU_FINALIZE_INIT
bool
-# Select if arch has its private alloc_task_struct() function
-config ARCH_TASK_STRUCT_ALLOCATOR
+# The architecture has a per-task state that includes the mm's PASID
+config ARCH_HAS_CPU_PASID
bool
+ select IOMMU_MM_DATA
config HAVE_ARCH_THREAD_STRUCT_WHITELIST
bool
- depends on !ARCH_TASK_STRUCT_ALLOCATOR
help
An architecture should select this to provide hardened usercopy
knowledge about what region of the thread_struct should be
@@ -305,10 +316,6 @@ config HAVE_ARCH_THREAD_STRUCT_WHITELIST
should be implemented. Without this, the entire thread_struct
field in task_struct will be left whitelisted.
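The hook this symbol advertises looks roughly like the sketch below; the fpu member is only an assumed example of a usercopy-visible region of thread_struct.

    #include <linux/stddef.h>

    static inline void arch_thread_struct_whitelist(unsigned long *offset,
                                                    unsigned long *size)
    {
            /* Report the byte range of thread_struct that hardened
             * usercopy may legitimately copy to/from user space. */
            *offset = offsetof(struct thread_struct, fpu);  /* assumed field */
            *size   = sizeof_field(struct thread_struct, fpu);
    }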
-# Select if arch has its private alloc_thread_stack() function
-config ARCH_THREAD_STACK_ALLOCATOR
- bool
-
# Select if arch wants to size task_struct dynamically via arch_task_struct_size:
config ARCH_WANTS_DYNAMIC_TASK_STRUCT
bool
@@ -356,6 +363,12 @@ config HAVE_RSEQ
This symbol should be selected by an architecture if it
supports an implementation of restartable sequences.
+config HAVE_RUST
+ bool
+ help
+ This symbol should be selected by an architecture if it
+ supports Rust.
+
config HAVE_FUNCTION_ARG_ACCESS_API
bool
help
@@ -395,20 +408,14 @@ config HAVE_HARDLOCKUP_DETECTOR_PERF
The arch chooses to use the generic perf-NMI-based hardlockup
detector. Must define HAVE_PERF_EVENTS_NMI.
-config HAVE_NMI_WATCHDOG
- depends on HAVE_NMI
- bool
- help
- The arch provides a low level NMI watchdog. It provides
- asm/nmi.h, and defines its own arch_touch_nmi_watchdog().
-
config HAVE_HARDLOCKUP_DETECTOR_ARCH
bool
- select HAVE_NMI_WATCHDOG
help
- The arch chooses to provide its own hardlockup detector, which is
- a superset of the HAVE_NMI_WATCHDOG. It also conforms to config
- interfaces and parameters provided by hardlockup detector subsystem.
+ The arch provides its own hardlockup detector implementation instead
+ of the generic ones.
+
+	  It uses the same command line parameters and sysctl interface as
+	  the generic hardlockup detectors.
config HAVE_PERF_REGS
bool
@@ -460,9 +467,44 @@ config ARCH_WANT_IRQS_OFF_ACTIVATE_MM
irqs disabled over activate_mm. Architectures that do IPI based TLB
shootdowns should enable this.
+# Use normal mm refcounting for MMU_LAZY_TLB kernel thread references.
+# MMU_LAZY_TLB_REFCOUNT=n can improve the scalability of context switching
+# to/from kernel threads when the same mm is running on a lot of CPUs (a large
+# multi-threaded application), by reducing contention on the mm refcount.
+#
+# This can be disabled if the architecture ensures no CPUs are using an mm as a
+# "lazy tlb" beyond its final refcount (i.e., by the time __mmdrop frees the mm
+# or its kernel page tables). This could be arranged by arch_exit_mmap(), or
+# final exit(2) TLB flush, for example.
+#
+# To implement this, an arch *must*:
+# Ensure the _lazy_tlb variants of mmgrab/mmdrop are used when manipulating
+# the lazy tlb reference of a kthread's ->active_mm (non-arch code has been
+# converted already).
+config MMU_LAZY_TLB_REFCOUNT
+ def_bool y
+ depends on !MMU_LAZY_TLB_SHOOTDOWN
+
+# This option allows MMU_LAZY_TLB_REFCOUNT=n. It ensures no CPUs are using an
+# mm as a lazy tlb beyond its last reference count, by shooting down these
+# users before the mm is deallocated. __mmdrop() first IPIs all CPUs that may
+# be using the mm as a lazy tlb, so that they may switch themselves to using
+# init_mm for their active mm. mm_cpumask(mm) is used to determine which CPUs
+# may be using mm as a lazy tlb mm.
+#
+# To implement this, an arch *must*:
+# - At the time of the final mmdrop of the mm, ensure mm_cpumask(mm) contains
+# at least all possible CPUs in which the mm is lazy.
+# - It must meet the requirements for MMU_LAZY_TLB_REFCOUNT=n (see above).
+config MMU_LAZY_TLB_SHOOTDOWN
+ bool
+
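The _lazy_tlb helpers referred to above behave roughly as sketched here (an approximation of the include/linux/sched/mm.h pattern): with MMU_LAZY_TLB_REFCOUNT=n they become no-ops, and the shootdown scheme must instead ensure no CPU still uses the mm lazily at final mmdrop.

    static inline void mmgrab_lazy_tlb(struct mm_struct *mm)
    {
            if (IS_ENABLED(CONFIG_MMU_LAZY_TLB_REFCOUNT))
                    mmgrab(mm);     /* pin the mm for a kthread's ->active_mm */
    }

    static inline void mmdrop_lazy_tlb(struct mm_struct *mm)
    {
            if (IS_ENABLED(CONFIG_MMU_LAZY_TLB_REFCOUNT))
                    mmdrop(mm);     /* release the lazy tlb reference */
    }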
config ARCH_HAVE_NMI_SAFE_CMPXCHG
bool
+config ARCH_HAS_NMI_SAFE_THIS_CPU_OPS
+ bool
+
config HAVE_ALIGNED_STRUCT_PAGE
bool
help
@@ -630,7 +672,8 @@ config ARCH_SUPPORTS_SHADOW_CALL_STACK
config SHADOW_CALL_STACK
bool "Shadow Call Stack"
depends on ARCH_SUPPORTS_SHADOW_CALL_STACK
- depends on DYNAMIC_FTRACE_WITH_REGS || !FUNCTION_GRAPH_TRACER
+ depends on DYNAMIC_FTRACE_WITH_ARGS || DYNAMIC_FTRACE_WITH_REGS || !FUNCTION_GRAPH_TRACER
+ depends on MMU
help
This option enables the compiler's Shadow Call Stack, which
uses a shadow stack to protect function return addresses from
@@ -646,6 +689,13 @@ config SHADOW_CALL_STACK
reading and writing arbitrary memory may be able to locate them
and hijack control flow by modifying the stacks.
+config DYNAMIC_SCS
+ bool
+ help
+ Set by the arch code if it relies on code patching to insert the
+ shadow call stack push and pop instructions rather than on the
+ compiler.
+
config LTO
bool
help
@@ -678,7 +728,9 @@ config HAS_LTO_CLANG
depends on $(success,$(AR) --help | head -n 1 | grep -qi llvm)
depends on ARCH_SUPPORTS_LTO_CLANG
depends on !FTRACE_MCOUNT_USE_RECORDMCOUNT
- depends on !KASAN || KASAN_HW_TAGS
+ # https://github.com/ClangBuiltLinux/linux/issues/1721
+ depends on (!KASAN || KASAN_HW_TAGS || CLANG_VERSION >= 170000) || !DEBUG_INFO
+ depends on (!KCOV || CLANG_VERSION >= 170000) || !DEBUG_INFO
depends on !GCOV_KERNEL
help
The compiler and Kconfig options support building with Clang's
@@ -705,13 +757,13 @@ config LTO_CLANG_FULL
depends on !COMPILE_TEST
select LTO_CLANG
help
- This option enables Clang's full Link Time Optimization (LTO), which
- allows the compiler to optimize the kernel globally. If you enable
- this option, the compiler generates LLVM bitcode instead of ELF
- object files, and the actual compilation from bitcode happens at
- the LTO link step, which may take several minutes depending on the
- kernel configuration. More information can be found from LLVM's
- documentation:
+ This option enables Clang's full Link Time Optimization (LTO), which
+ allows the compiler to optimize the kernel globally. If you enable
+ this option, the compiler generates LLVM bitcode instead of ELF
+ object files, and the actual compilation from bitcode happens at
+ the LTO link step, which may take several minutes depending on the
+ kernel configuration. More information can be found from LLVM's
+ documentation:
https://llvm.org/docs/LinkTimeOptimization.html
@@ -739,11 +791,13 @@ config ARCH_SUPPORTS_CFI_CLANG
An architecture should select this option if it can support Clang's
Control-Flow Integrity (CFI) checking.
+config ARCH_USES_CFI_TRAPS
+ bool
+
config CFI_CLANG
bool "Use Clang's Control Flow Integrity (CFI)"
- depends on LTO_CLANG && ARCH_SUPPORTS_CFI_CLANG
- depends on CLANG_VERSION >= 140000
- select KALLSYMS
+ depends on ARCH_SUPPORTS_CFI_CLANG
+ depends on $(cc-option,-fsanitize=kcfi)
help
This option enables Clang’s forward-edge Control Flow Integrity
(CFI) checking, where the compiler injects a runtime check to each
@@ -755,16 +809,6 @@ config CFI_CLANG
https://clang.llvm.org/docs/ControlFlowIntegrity.html
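As a contrived illustration of what the injected check catches (nothing here is from the patch): an indirect call through a pointer whose prototype disagrees with the callee fails the type-hash comparison and traps.

    static int add(int a, int b)
    {
            return a + b;
    }

    void cfi_demo(void)
    {
            int (*ok)(int, int) = add;
            long (*bad)(void)   = (long (*)(void))add;  /* mismatched type */

            ok(1, 2);       /* type hashes match: the call proceeds */
            bad();          /* hashes differ: the kCFI check traps first */
    }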
-config CFI_CLANG_SHADOW
- bool "Use CFI shadow to speed up cross-module checks"
- default y
- depends on CFI_CLANG && MODULES
- help
- If you select this option, the kernel builds a fast look-up table of
- CFI check functions in loaded modules to reduce performance overhead.
-
- If unsure, say Y.
-
config CFI_PERMISSIVE
bool "Use CFI in permissive mode"
depends on CFI_CLANG
@@ -880,6 +924,14 @@ config HAVE_ARCH_HUGE_VMALLOC
config ARCH_WANT_HUGE_PMD_SHARE
bool
+# Archs that want to use pmd_mkwrite on kernel memory need it defined even
+# if there are no userspace memory management features that use it
+config ARCH_WANT_KERNEL_PMD_MKWRITE
+ bool
+
+config ARCH_WANT_PMD_MKWRITE
+ def_bool TRANSPARENT_HUGEPAGE || ARCH_WANT_KERNEL_PMD_MKWRITE
+
config HAVE_ARCH_SOFT_DIRTY
bool
@@ -924,6 +976,9 @@ config HAVE_SOFTIRQ_ON_OWN_STACK
Architecture provides a function to run __do_softirq() on a
separate stack.
+config SOFTIRQ_ON_OWN_STACK
+ def_bool HAVE_SOFTIRQ_ON_OWN_STACK && !PREEMPT_RT
+
config ALTERNATE_USER_ADDRESS_SPACE
bool
help
@@ -1023,18 +1078,107 @@ config HAVE_ARCH_COMPAT_MMAP_BASES
and vice-versa 32-bit applications to call 64-bit mmap().
Required for applications doing different bitness syscalls.
+config HAVE_PAGE_SIZE_4KB
+ bool
+
+config HAVE_PAGE_SIZE_8KB
+ bool
+
+config HAVE_PAGE_SIZE_16KB
+ bool
+
+config HAVE_PAGE_SIZE_32KB
+ bool
+
+config HAVE_PAGE_SIZE_64KB
+ bool
+
+config HAVE_PAGE_SIZE_256KB
+ bool
+
+choice
+ prompt "MMU page size"
+
+config PAGE_SIZE_4KB
+ bool "4KiB pages"
+ depends on HAVE_PAGE_SIZE_4KB
+ help
+	  This option selects the standard 4KiB Linux page size, which is the
+	  only available option on many architectures. Using a 4KiB page size
+	  minimizes memory consumption and is therefore recommended for
+	  low-memory systems.
+	  Some software written for x86 systems makes incorrect assumptions
+	  about the page size and only runs with 4KiB pages.
+
+config PAGE_SIZE_8KB
+ bool "8KiB pages"
+ depends on HAVE_PAGE_SIZE_8KB
+ help
+ This option is the only supported page size on a few older
+ processors, and can be slightly faster than 4KiB pages.
+
+config PAGE_SIZE_16KB
+ bool "16KiB pages"
+ depends on HAVE_PAGE_SIZE_16KB
+ help
+ This option is usually a good compromise between memory
+ consumption and performance for typical desktop and server
+	  workloads, often saving a level of page table lookups compared to
+	  4KiB pages, as well as reducing TLB pressure and the overhead of
+	  per-page operations in the kernel, at the expense of a larger
+ page cache.
+
+config PAGE_SIZE_32KB
+ bool "32KiB pages"
+ depends on HAVE_PAGE_SIZE_32KB
+ help
+	  Using a 32KiB page size results in a slightly faster kernel at the
+	  price of higher memory consumption compared to 16KiB pages. This
+	  option is available only on cnMIPS cores. Note that you will need
+	  a suitable Linux distribution in order to support this page size.
+
+config PAGE_SIZE_64KB
+ bool "64KiB pages"
+ depends on HAVE_PAGE_SIZE_64KB
+ help
+	  Using a 64KiB page size results in a slightly faster kernel at the
+	  price of much higher memory consumption compared to 4KiB or 16KiB
+	  pages.
+ This is not suitable for general-purpose workloads but the
+ better performance may be worth the cost for certain types of
+ supercomputing or database applications that work mostly with
+ large in-memory data rather than small files.
+
+config PAGE_SIZE_256KB
+ bool "256KiB pages"
+ depends on HAVE_PAGE_SIZE_256KB
+ help
+ 256KiB pages have little practical value due to their extreme
+ memory usage. The kernel will only be able to run applications
+ that have been compiled with '-zmax-page-size' set to 256KiB
+ (the default is 64KiB or 4KiB on most architectures).
+
+endchoice
+
config PAGE_SIZE_LESS_THAN_64KB
def_bool y
- depends on !ARM64_64K_PAGES
- depends on !IA64_PAGE_SIZE_64KB
depends on !PAGE_SIZE_64KB
- depends on !PARISC_PAGE_SIZE_64KB
depends on PAGE_SIZE_LESS_THAN_256KB
config PAGE_SIZE_LESS_THAN_256KB
def_bool y
depends on !PAGE_SIZE_256KB
+config PAGE_SHIFT
+ int
+ default 12 if PAGE_SIZE_4KB
+ default 13 if PAGE_SIZE_8KB
+ default 14 if PAGE_SIZE_16KB
+ default 15 if PAGE_SIZE_32KB
+ default 16 if PAGE_SIZE_64KB
+ default 18 if PAGE_SIZE_256KB
+
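The selected shift feeds the usual page-size derivation, roughly as below (a sketch of the common asm/page.h pattern, not code from this patch; _AC comes from uapi/linux/const.h):

    #define PAGE_SHIFT      CONFIG_PAGE_SHIFT
    #define PAGE_SIZE       (_AC(1, UL) << PAGE_SHIFT)  /* 12 -> 4096, 16 -> 65536 */
    #define PAGE_MASK       (~(PAGE_SIZE - 1))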
# This allows to use a set of generic functions to determine mmap base
# address by giving priority to top-down scheme only if the process
# is not in legacy mode (compat task, unlimited stack size or
@@ -1146,13 +1290,6 @@ config COMPAT_32BIT_TIME
config ARCH_NO_PREEMPT
bool
-config ARCH_EPHEMERAL_INODES
- def_bool n
- help
- An arch should select this symbol if it doesn't keep track of inode
- instances on its own, but instead relies on something else (e.g. the
- host kernel for an UML kernel).
-
config ARCH_SUPPORTS_RT
bool
@@ -1320,9 +1457,9 @@ config ARCH_HAS_CC_PLATFORM
bool
config HAVE_SPARSE_SYSCALL_NR
- bool
- help
- An architecture should select this if its syscall numbering is sparse
+ bool
+ help
+ An architecture should select this if its syscall numbering is sparse
to save space. For example, MIPS architecture has a syscall array with
entries at 4000, 5000 and 6000 locations. This option turns on syscall
related optimizations for a given architecture.
@@ -1346,35 +1483,35 @@ config HAVE_PREEMPT_DYNAMIC_CALL
depends on HAVE_STATIC_CALL
select HAVE_PREEMPT_DYNAMIC
help
- An architecture should select this if it can handle the preemption
- model being selected at boot time using static calls.
+ An architecture should select this if it can handle the preemption
+ model being selected at boot time using static calls.
- Where an architecture selects HAVE_STATIC_CALL_INLINE, any call to a
- preemption function will be patched directly.
+ Where an architecture selects HAVE_STATIC_CALL_INLINE, any call to a
+ preemption function will be patched directly.
- Where an architecture does not select HAVE_STATIC_CALL_INLINE, any
- call to a preemption function will go through a trampoline, and the
- trampoline will be patched.
+ Where an architecture does not select HAVE_STATIC_CALL_INLINE, any
+ call to a preemption function will go through a trampoline, and the
+ trampoline will be patched.
- It is strongly advised to support inline static call to avoid any
- overhead.
+ It is strongly advised to support inline static call to avoid any
+ overhead.
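The static call mechanism described above can be sketched like this (the hook name and callees are invented for illustration; the real preemption wiring lives in the scheduler):

    #include <linux/static_call.h>

    static void preempt_model_none(void) { }
    static void preempt_model_full(void) { /* ... */ }

    DEFINE_STATIC_CALL(preempt_hook, preempt_model_none);  /* illustrative */

    void maybe_preempt(void)
    {
            /* Direct call; the callee is patched in at boot. */
            static_call(preempt_hook)();
    }

    void pick_full_preemption(void)
    {
            static_call_update(preempt_hook, preempt_model_full);
    }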
config HAVE_PREEMPT_DYNAMIC_KEY
bool
- depends on HAVE_ARCH_JUMP_LABEL && CC_HAS_ASM_GOTO
+ depends on HAVE_ARCH_JUMP_LABEL
select HAVE_PREEMPT_DYNAMIC
help
- An architecture should select this if it can handle the preemption
- model being selected at boot time using static keys.
+ An architecture should select this if it can handle the preemption
+ model being selected at boot time using static keys.
- Each preemption function will be given an early return based on a
- static key. This should have slightly lower overhead than non-inline
- static calls, as this effectively inlines each trampoline into the
- start of its callee. This may avoid redundant work, and may
- integrate better with CFI schemes.
+ Each preemption function will be given an early return based on a
+ static key. This should have slightly lower overhead than non-inline
+ static calls, as this effectively inlines each trampoline into the
+ start of its callee. This may avoid redundant work, and may
+ integrate better with CFI schemes.
- This will have greater overhead than using inline static calls as
- the call to the preemption function cannot be entirely elided.
+ This will have greater overhead than using inline static calls as
+ the call to the preemption function cannot be entirely elided.
config ARCH_WANT_LD_ORPHAN_WARN
bool
@@ -1397,8 +1534,8 @@ config ARCH_SUPPORTS_PAGE_TABLE_CHECK
config ARCH_SPLIT_ARG64
bool
help
- If a 32-bit architecture requires 64-bit arguments to be split into
- pairs of 32-bit arguments, select this option.
+ If a 32-bit architecture requires 64-bit arguments to be split into
+ pairs of 32-bit arguments, select this option.
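A hedged sketch of the convention this option describes (the half ordering is architecture- and endian-specific, and all names below are illustrative):

    /* A 64-bit syscall argument arrives as two 32-bit register halves and
     * is reassembled in the wrapper before reaching the real handler. */
    static inline u64 merge_u64(u32 lo, u32 hi)
    {
            return ((u64)hi << 32) | lo;
    }

    long do_demo(u64 offset);                       /* hypothetical handler */

    long sys_demo_wrapper(u32 offset_lo, u32 offset_hi)
    {
            return do_demo(merge_u64(offset_lo, offset_hi));
    }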
config ARCH_HAS_ELFCORE_COMPAT
bool
@@ -1406,6 +1543,9 @@ config ARCH_HAS_ELFCORE_COMPAT
config ARCH_HAS_PARANOID_L1D_FLUSH
bool
+config ARCH_HAVE_TRACE_MMIO_ACCESS
+ bool
+
config DYNAMIC_SIGFRAME
bool
@@ -1413,8 +1553,60 @@ config DYNAMIC_SIGFRAME
config HAVE_ARCH_NODE_DEV_GROUP
bool
+config ARCH_HAS_HW_PTE_YOUNG
+ bool
+ help
+ Architectures that select this option are capable of setting the
+ accessed bit in PTE entries when using them as part of linear address
+	  translations. Architectures that require a runtime check should
+	  select this option and override arch_has_hw_pte_young().
+
+config ARCH_HAS_NONLEAF_PMD_YOUNG
+ bool
+ help
+ Architectures that select this option are capable of setting the
+ accessed bit in non-leaf PMD entries when using them as part of linear
+ address translations. Page table walkers that clear the accessed bit
+ may use this capability to reduce their search space.
+
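The runtime override mentioned in the help text follows the usual pgtable.h define-and-override pattern, roughly as below; cpu_has_hw_af() is an assumed feature query, not a real symbol name.

    #define arch_has_hw_pte_young arch_has_hw_pte_young
    static inline bool arch_has_hw_pte_young(void)
    {
            /* True when this CPU sets the accessed bit in hardware. */
            return cpu_has_hw_af();         /* assumed per-CPU feature check */
    }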
source "kernel/gcov/Kconfig"
source "scripts/gcc-plugins/Kconfig"
+config FUNCTION_ALIGNMENT_4B
+ bool
+
+config FUNCTION_ALIGNMENT_8B
+ bool
+
+config FUNCTION_ALIGNMENT_16B
+ bool
+
+config FUNCTION_ALIGNMENT_32B
+ bool
+
+config FUNCTION_ALIGNMENT_64B
+ bool
+
+config FUNCTION_ALIGNMENT
+ int
+ default 64 if FUNCTION_ALIGNMENT_64B
+ default 32 if FUNCTION_ALIGNMENT_32B
+ default 16 if FUNCTION_ALIGNMENT_16B
+ default 8 if FUNCTION_ALIGNMENT_8B
+ default 4 if FUNCTION_ALIGNMENT_4B
+ default 0
+
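The resulting integer is consumed as a byte alignment, along the lines of the sketch below (an approximation of the __function_aligned pattern, not code from this patch):

    #if CONFIG_FUNCTION_ALIGNMENT > 0
    #define __function_aligned      __aligned(CONFIG_FUNCTION_ALIGNMENT)
    #else
    #define __function_aligned
    #endif

    /* The entry point starts on a CONFIG_FUNCTION_ALIGNMENT-byte boundary. */
    void __function_aligned my_hot_function(void)
    {
    }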
+config CC_HAS_MIN_FUNCTION_ALIGNMENT
+ # Detect availability of the GCC option -fmin-function-alignment which
+ # guarantees minimal alignment for all functions, unlike
+ # -falign-functions which the compiler ignores for cold functions.
+ def_bool $(cc-option, -fmin-function-alignment=8)
+
+config CC_HAS_SANE_FUNCTION_ALIGNMENT
+	# Set if function alignment is reliably guaranteed: either the compiler
+	# supports -fmin-function-alignment, or it is Clang, which always applies
+	# -falign-functions strictly, even to cold functions.
+ def_bool CC_HAS_MIN_FUNCTION_ALIGNMENT || CC_IS_CLANG
+
endmenu