Diffstat (limited to 'include/asm-generic'):

 include/asm-generic/atomic-long.h     |  19
 include/asm-generic/barrier.h         |  27
 include/asm-generic/compat.h          |   3
 include/asm-generic/dma-mapping.h     |   9
 include/asm-generic/export.h          |  34
 include/asm-generic/io.h              | 165
 include/asm-generic/kvm_para.h        |   5
 include/asm-generic/pci.h             |   8
 include/asm-generic/qspinlock.h       |   4
 include/asm-generic/qspinlock_types.h |  32
 include/asm-generic/vmlinux.lds.h     | 336
 11 files changed, 418 insertions(+), 224 deletions(-)
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
index 34a028a7bcc5..87d14476edc2 100644
--- a/include/asm-generic/atomic-long.h
+++ b/include/asm-generic/atomic-long.h
@@ -25,6 +25,7 @@ typedef atomic64_t atomic_long_t;
#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
#define ATOMIC_LONG_PFX(x) atomic64 ## x
+#define ATOMIC_LONG_TYPE s64
#else
@@ -32,6 +33,7 @@ typedef atomic_t atomic_long_t;
#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
#define ATOMIC_LONG_PFX(x) atomic ## x
+#define ATOMIC_LONG_TYPE int
#endif
@@ -90,6 +92,21 @@ ATOMIC_LONG_ADD_SUB_OP(sub, _release)
#define atomic_long_cmpxchg(l, old, new) \
(ATOMIC_LONG_PFX(_cmpxchg)((ATOMIC_LONG_PFX(_t) *)(l), (old), (new)))
+
+#define atomic_long_try_cmpxchg_relaxed(l, old, new) \
+ (ATOMIC_LONG_PFX(_try_cmpxchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(l), \
+ (ATOMIC_LONG_TYPE *)(old), (ATOMIC_LONG_TYPE)(new)))
+#define atomic_long_try_cmpxchg_acquire(l, old, new) \
+ (ATOMIC_LONG_PFX(_try_cmpxchg_acquire)((ATOMIC_LONG_PFX(_t) *)(l), \
+ (ATOMIC_LONG_TYPE *)(old), (ATOMIC_LONG_TYPE)(new)))
+#define atomic_long_try_cmpxchg_release(l, old, new) \
+ (ATOMIC_LONG_PFX(_try_cmpxchg_release)((ATOMIC_LONG_PFX(_t) *)(l), \
+ (ATOMIC_LONG_TYPE *)(old), (ATOMIC_LONG_TYPE)(new)))
+#define atomic_long_try_cmpxchg(l, old, new) \
+ (ATOMIC_LONG_PFX(_try_cmpxchg)((ATOMIC_LONG_PFX(_t) *)(l), \
+ (ATOMIC_LONG_TYPE *)(old), (ATOMIC_LONG_TYPE)(new)))
+
+
#define atomic_long_xchg_relaxed(v, new) \
(ATOMIC_LONG_PFX(_xchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
#define atomic_long_xchg_acquire(v, new) \
@@ -244,6 +261,8 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
#define atomic_long_inc_not_zero(l) \
ATOMIC_LONG_PFX(_inc_not_zero)((ATOMIC_LONG_PFX(_t) *)(l))
+#define atomic_long_cond_read_relaxed(v, c) \
+ ATOMIC_LONG_PFX(_cond_read_relaxed)((ATOMIC_LONG_PFX(_t) *)(v), (c))
#define atomic_long_cond_read_acquire(v, c) \
ATOMIC_LONG_PFX(_cond_read_acquire)((ATOMIC_LONG_PFX(_t) *)(v), (c))
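For reference, the new wrappers follow the usual try_cmpxchg contract: they return true on success and, on failure, write the observed value back through @old, so callers can loop without re-reading. A minimal sketch of the idiom (a hypothetical reference-count grab, not part of this patch):

/* Sketch only: loop until the increment lands or the count hits zero. */
static bool get_ref(atomic_long_t *refs)
{
	long old = atomic_long_read(refs);

	do {
		if (old == 0)	/* already released */
			return false;
	} while (!atomic_long_try_cmpxchg(refs, &old, old + 1));

	return true;
}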
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index 29458bbb2fa0..2cafdbb9ae4c 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -221,18 +221,17 @@ do { \
#endif
/**
- * smp_cond_load_acquire() - (Spin) wait for cond with ACQUIRE ordering
+ * smp_cond_load_relaxed() - (Spin) wait for cond with no ordering guarantees
* @ptr: pointer to the variable to wait on
* @cond: boolean expression to wait for
*
- * Equivalent to using smp_load_acquire() on the condition variable but employs
- * the control dependency of the wait to reduce the barrier on many platforms.
+ * Equivalent to using READ_ONCE() on the condition variable.
*
* Due to C lacking lambda expressions we load the value of *ptr into a
* pre-named variable @VAL to be used in @cond.
*/
-#ifndef smp_cond_load_acquire
-#define smp_cond_load_acquire(ptr, cond_expr) ({ \
+#ifndef smp_cond_load_relaxed
+#define smp_cond_load_relaxed(ptr, cond_expr) ({ \
typeof(ptr) __PTR = (ptr); \
typeof(*ptr) VAL; \
for (;;) { \
@@ -241,10 +240,26 @@ do { \
break; \
cpu_relax(); \
} \
- smp_acquire__after_ctrl_dep(); \
VAL; \
})
#endif
+/**
+ * smp_cond_load_acquire() - (Spin) wait for cond with ACQUIRE ordering
+ * @ptr: pointer to the variable to wait on
+ * @cond: boolean expression to wait for
+ *
+ * Equivalent to using smp_load_acquire() on the condition variable but employs
+ * the control dependency of the wait to reduce the barrier on many platforms.
+ */
+#ifndef smp_cond_load_acquire
+#define smp_cond_load_acquire(ptr, cond_expr) ({ \
+ typeof(*ptr) _val; \
+ _val = smp_cond_load_relaxed(ptr, cond_expr); \
+ smp_acquire__after_ctrl_dep(); \
+ _val; \
+})
+#endif
+
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_GENERIC_BARRIER_H */
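Taken together, the split lets callers pick the cheapest ordering: smp_cond_load_relaxed() for pure busy-waiting, smp_cond_load_acquire() when the waiter's subsequent reads must not be reordered before the observed store. A hedged usage sketch (the flag protocol is illustrative):

/* Sketch: consumer side of a publish/subscribe handoff. */
static void wait_for_data(int *ready)
{
	/* spins with cpu_relax(); the acquire barrier is issued only on exit */
	smp_cond_load_acquire(ready, VAL != 0);
	/* reads of the published data are now ordered after seeing the flag */
}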
diff --git a/include/asm-generic/compat.h b/include/asm-generic/compat.h
new file mode 100644
index 000000000000..28819451b6d1
--- /dev/null
+++ b/include/asm-generic/compat.h
@@ -0,0 +1,3 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* This is an empty stub for 32-bit-only architectures */
diff --git a/include/asm-generic/dma-mapping.h b/include/asm-generic/dma-mapping.h
index 880a292d792f..ad2868263867 100644
--- a/include/asm-generic/dma-mapping.h
+++ b/include/asm-generic/dma-mapping.h
@@ -4,7 +4,16 @@
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
+ /*
+ * Use the non-coherent ops if available. If an architecture wants a
+ * more fine-grained selection of operations it will have to implement
+ * get_arch_dma_ops itself or use the per-device dma_ops.
+ */
+#ifdef CONFIG_DMA_NONCOHERENT_OPS
+ return &dma_noncoherent_ops;
+#else
return &dma_direct_ops;
+#endif
}
#endif /* _ASM_GENERIC_DMA_MAPPING_H */
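An architecture that needs a finer-grained choice than this all-or-nothing #ifdef supplies its own get_arch_dma_ops() in asm/dma-mapping.h (or sets per-device dma_ops), roughly like the sketch below; the bus predicate is hypothetical:

/* Hypothetical arch override, for illustration only. */
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
	if (arch_bus_is_coherent(bus))	/* hypothetical helper */
		return &dma_direct_ops;
	return &dma_noncoherent_ops;
}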
diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h
index 719db1968d81..68efb950a918 100644
--- a/include/asm-generic/export.h
+++ b/include/asm-generic/export.h
@@ -19,42 +19,32 @@
#define KCRC_ALIGN 4
#endif
-#ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX
-#define KSYM(name) _##name
-#else
-#define KSYM(name) name
-#endif
-
/*
* note on .section use: @progbits vs %progbits nastiness doesn't matter,
* since we immediately emit into those sections anyway.
*/
.macro ___EXPORT_SYMBOL name,val,sec
#ifdef CONFIG_MODULES
- .globl KSYM(__ksymtab_\name)
+ .globl __ksymtab_\name
.section ___ksymtab\sec+\name,"a"
.balign KSYM_ALIGN
-KSYM(__ksymtab_\name):
- __put \val, KSYM(__kstrtab_\name)
+__ksymtab_\name:
+ __put \val, __kstrtab_\name
.previous
.section __ksymtab_strings,"a"
-KSYM(__kstrtab_\name):
-#ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX
- .asciz "_\name"
-#else
+__kstrtab_\name:
.asciz "\name"
-#endif
.previous
#ifdef CONFIG_MODVERSIONS
.section ___kcrctab\sec+\name,"a"
.balign KCRC_ALIGN
-KSYM(__kcrctab_\name):
+__kcrctab_\name:
#if defined(CONFIG_MODULE_REL_CRCS)
- .long KSYM(__crc_\name) - .
+ .long __crc_\name - .
#else
- .long KSYM(__crc_\name)
+ .long __crc_\name
#endif
- .weak KSYM(__crc_\name)
+ .weak __crc_\name
.previous
#endif
#endif
@@ -84,12 +74,12 @@ KSYM(__kcrctab_\name):
#endif
#define EXPORT_SYMBOL(name) \
- __EXPORT_SYMBOL(name, KSYM_FUNC(KSYM(name)),)
+ __EXPORT_SYMBOL(name, KSYM_FUNC(name),)
#define EXPORT_SYMBOL_GPL(name) \
- __EXPORT_SYMBOL(name, KSYM_FUNC(KSYM(name)), _gpl)
+ __EXPORT_SYMBOL(name, KSYM_FUNC(name), _gpl)
#define EXPORT_DATA_SYMBOL(name) \
- __EXPORT_SYMBOL(name, KSYM(name),)
+ __EXPORT_SYMBOL(name, name,)
#define EXPORT_DATA_SYMBOL_GPL(name) \
- __EXPORT_SYMBOL(name, KSYM(name),_gpl)
+ __EXPORT_SYMBOL(name, name,_gpl)
#endif
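These macros are consumed from assembly sources; with the prefix handling gone they emit the symbol name verbatim. A rough usage fragment from a hypothetical arch .S file, reached through the asm/export.h wrapper (the function body and mnemonics are illustrative):

#include <asm/export.h>

ENTRY(memcpy)
	/* ... assembly implementation ... */
	ret
ENDPROC(memcpy)
EXPORT_SYMBOL(memcpy)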
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 8de9cc251286..66d1d45fa2e1 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -25,6 +25,50 @@
#define mmiowb() do {} while (0)
#endif
+#ifndef __io_br
+#define __io_br() barrier()
+#endif
+
+/* prevent prefetching of coherent DMA data ahead of a dma-complete */
+#ifndef __io_ar
+#ifdef rmb
+#define __io_ar() rmb()
+#else
+#define __io_ar() barrier()
+#endif
+#endif
+
+/* flush writes to coherent DMA data before possibly triggering a DMA read */
+#ifndef __io_bw
+#ifdef wmb
+#define __io_bw() wmb()
+#else
+#define __io_bw() barrier()
+#endif
+#endif
+
+/* serialize device access against a spin_unlock, usually handled there. */
+#ifndef __io_aw
+#define __io_aw() barrier()
+#endif
+
+#ifndef __io_pbw
+#define __io_pbw() __io_bw()
+#endif
+
+#ifndef __io_paw
+#define __io_paw() __io_aw()
+#endif
+
+#ifndef __io_pbr
+#define __io_pbr() __io_br()
+#endif
+
+#ifndef __io_par
+#define __io_par() __io_ar()
+#endif
+
+
/*
* __raw_{read,write}{b,w,l,q}() access memory in native endianness.
*
@@ -110,7 +154,12 @@ static inline void __raw_writeq(u64 value, volatile void __iomem *addr)
#define readb readb
static inline u8 readb(const volatile void __iomem *addr)
{
- return __raw_readb(addr);
+ u8 val;
+
+ __io_br();
+ val = __raw_readb(addr);
+ __io_ar();
+ return val;
}
#endif
@@ -118,7 +167,12 @@ static inline u8 readb(const volatile void __iomem *addr)
#define readw readw
static inline u16 readw(const volatile void __iomem *addr)
{
- return __le16_to_cpu(__raw_readw(addr));
+ u16 val;
+
+ __io_br();
+ val = __le16_to_cpu(__raw_readw(addr));
+ __io_ar();
+ return val;
}
#endif
@@ -126,7 +180,12 @@ static inline u16 readw(const volatile void __iomem *addr)
#define readl readl
static inline u32 readl(const volatile void __iomem *addr)
{
- return __le32_to_cpu(__raw_readl(addr));
+ u32 val;
+
+ __io_br();
+ val = __le32_to_cpu(__raw_readl(addr));
+ __io_ar();
+ return val;
}
#endif
@@ -135,7 +194,12 @@ static inline u32 readl(const volatile void __iomem *addr)
#define readq readq
static inline u64 readq(const volatile void __iomem *addr)
{
- return __le64_to_cpu(__raw_readq(addr));
+ u64 val;
+
+ __io_br();
+ val = __le64_to_cpu(__raw_readq(addr));
+ __io_ar();
+ return val;
}
#endif
#endif /* CONFIG_64BIT */
@@ -144,7 +208,9 @@ static inline u64 readq(const volatile void __iomem *addr)
#define writeb writeb
static inline void writeb(u8 value, volatile void __iomem *addr)
{
+ __io_bw();
__raw_writeb(value, addr);
+ __io_aw();
}
#endif
@@ -152,7 +218,9 @@ static inline void writeb(u8 value, volatile void __iomem *addr)
#define writew writew
static inline void writew(u16 value, volatile void __iomem *addr)
{
+ __io_bw();
__raw_writew(cpu_to_le16(value), addr);
+ __io_aw();
}
#endif
@@ -160,7 +228,9 @@ static inline void writew(u16 value, volatile void __iomem *addr)
#define writel writel
static inline void writel(u32 value, volatile void __iomem *addr)
{
+ __io_bw();
__raw_writel(__cpu_to_le32(value), addr);
+ __io_aw();
}
#endif
@@ -169,7 +239,9 @@ static inline void writel(u32 value, volatile void __iomem *addr)
#define writeq writeq
static inline void writeq(u64 value, volatile void __iomem *addr)
{
+ __io_bw();
__raw_writeq(__cpu_to_le64(value), addr);
+ __io_aw();
}
#endif
#endif /* CONFIG_64BIT */
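The accessors above pick up whatever an architecture defines for the __io_*() hooks before including this header; the defaults fall back to rmb()/wmb() or plain compiler barriers. A hedged sketch of such an override, loosely modeled on the RISC-V approach of fencing MMIO (the fence operands are illustrative):

/* Sketch only: must be defined before asm-generic/io.h is included
 * so the #ifndef defaults above are skipped. */
#define __io_br()	do {} while (0)
#define __io_ar()	__asm__ __volatile__ ("fence i,r" : : : "memory")
#define __io_bw()	__asm__ __volatile__ ("fence w,o" : : : "memory")
#define __io_aw()	do {} while (0)

#include <asm-generic/io.h>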
@@ -180,35 +252,67 @@ static inline void writeq(u64 value, volatile void __iomem *addr)
* accesses.
*/
#ifndef readb_relaxed
-#define readb_relaxed readb
+#define readb_relaxed readb_relaxed
+static inline u8 readb_relaxed(const volatile void __iomem *addr)
+{
+ return __raw_readb(addr);
+}
#endif
#ifndef readw_relaxed
-#define readw_relaxed readw
+#define readw_relaxed readw_relaxed
+static inline u16 readw_relaxed(const volatile void __iomem *addr)
+{
+ return __le16_to_cpu(__raw_readw(addr));
+}
#endif
#ifndef readl_relaxed
-#define readl_relaxed readl
+#define readl_relaxed readl_relaxed
+static inline u32 readl_relaxed(const volatile void __iomem *addr)
+{
+ return __le32_to_cpu(__raw_readl(addr));
+}
#endif
#if defined(readq) && !defined(readq_relaxed)
-#define readq_relaxed readq
+#define readq_relaxed readq_relaxed
+static inline u64 readq_relaxed(const volatile void __iomem *addr)
+{
+ return __le64_to_cpu(__raw_readq(addr));
+}
#endif
#ifndef writeb_relaxed
-#define writeb_relaxed writeb
+#define writeb_relaxed writeb_relaxed
+static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
+{
+ __raw_writeb(value, addr);
+}
#endif
#ifndef writew_relaxed
-#define writew_relaxed writew
+#define writew_relaxed writew_relaxed
+static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
+{
+ __raw_writew(cpu_to_le16(value), addr);
+}
#endif
#ifndef writel_relaxed
-#define writel_relaxed writel
+#define writel_relaxed writel_relaxed
+static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
+{
+ __raw_writel(__cpu_to_le32(value), addr);
+}
#endif
#if defined(writeq) && !defined(writeq_relaxed)
-#define writeq_relaxed writeq
+#define writeq_relaxed writeq_relaxed
+static inline void writeq_relaxed(u64 value, volatile void __iomem *addr)
+{
+ __raw_writeq(__cpu_to_le64(value), addr);
+}
#endif
/*
@@ -351,6 +455,8 @@ static inline void writesq(volatile void __iomem *addr, const void *buffer,
#define IO_SPACE_LIMIT 0xffff
#endif
+#include <linux/logic_pio.h>
+
/*
* {in,out}{b,w,l}() access little endian I/O. {in,out}{b,w,l}_p() can be
* implemented on hardware that needs an additional delay for I/O accesses to
@@ -361,7 +467,12 @@ static inline void writesq(volatile void __iomem *addr, const void *buffer,
#define inb inb
static inline u8 inb(unsigned long addr)
{
- return readb(PCI_IOBASE + addr);
+ u8 val;
+
+ __io_pbr();
+ val = __raw_readb(PCI_IOBASE + addr);
+ __io_par();
+ return val;
}
#endif
@@ -369,7 +480,12 @@ static inline u8 inb(unsigned long addr)
#define inw inw
static inline u16 inw(unsigned long addr)
{
- return readw(PCI_IOBASE + addr);
+ u16 val;
+
+ __io_pbr();
+ val = __le16_to_cpu(__raw_readw(PCI_IOBASE + addr));
+ __io_par();
+ return val;
}
#endif
@@ -377,7 +493,12 @@ static inline u16 inw(unsigned long addr)
#define inl inl
static inline u32 inl(unsigned long addr)
{
- return readl(PCI_IOBASE + addr);
+ u32 val;
+
+ __io_pbr();
+ val = __le32_to_cpu(__raw_readl(PCI_IOBASE + addr));
+ __io_par();
+ return val;
}
#endif
@@ -385,7 +506,9 @@ static inline u32 inl(unsigned long addr)
#define outb outb
static inline void outb(u8 value, unsigned long addr)
{
- writeb(value, PCI_IOBASE + addr);
+ __io_pbw();
+ __raw_writeb(value, PCI_IOBASE + addr);
+ __io_paw();
}
#endif
@@ -393,7 +516,9 @@ static inline void outb(u8 value, unsigned long addr)
#define outw outw
static inline void outw(u16 value, unsigned long addr)
{
- writew(value, PCI_IOBASE + addr);
+ __io_pbw();
+ __raw_writew(cpu_to_le16(value), PCI_IOBASE + addr);
+ __io_paw();
}
#endif
@@ -401,7 +526,9 @@ static inline void outw(u16 value, unsigned long addr)
#define outl outl
static inline void outl(u32 value, unsigned long addr)
{
- writel(value, PCI_IOBASE + addr);
+ __io_pbw();
+ __raw_writel(cpu_to_le32(value), PCI_IOBASE + addr);
+ __io_paw();
}
#endif
@@ -899,7 +1026,7 @@ static inline void __iomem *ioremap_wt(phys_addr_t offset, size_t size)
#define ioport_map ioport_map
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
- return PCI_IOBASE + (port & IO_SPACE_LIMIT);
+ return PCI_IOBASE + (port & MMIO_UPPER_LIMIT);
}
#endif
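For driver code the net effect is that readl()/writel() are now ordered against coherent DMA memory through the hooks above, while the _relaxed variants remain bare __raw accesses plus byte swapping. A hedged sketch of the resulting idiom (register offsets and descriptor layout are hypothetical):

struct desc { __le32 flags; };		/* hypothetical DMA descriptor */
#define DESC_READY	1
#define DEV_DB		0x00		/* hypothetical doorbell register */
#define DEV_STATUS	0x04		/* hypothetical status register */

static void dev_kick(void __iomem *regs, struct desc *d)
{
	d->flags = cpu_to_le32(DESC_READY);
	/* __io_bw() in writel() orders the descriptor write first */
	writel(1, regs + DEV_DB);
}

static u32 dev_poll(void __iomem *regs)
{
	/* no DMA ordering needed, so the cheaper relaxed read suffices */
	return readl_relaxed(regs + DEV_STATUS);
}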
diff --git a/include/asm-generic/kvm_para.h b/include/asm-generic/kvm_para.h
index 18c6abe81fbd..728e5c5706c4 100644
--- a/include/asm-generic/kvm_para.h
+++ b/include/asm-generic/kvm_para.h
@@ -19,6 +19,11 @@ static inline unsigned int kvm_arch_para_features(void)
return 0;
}
+static inline unsigned int kvm_arch_para_hints(void)
+{
+ return 0;
+}
+
static inline bool kvm_para_available(void)
{
return false;
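kvm_arch_para_hints() parallels kvm_arch_para_features() but reports hint bits rather than feature bits; on architectures using this stub it always returns 0. Guest code queries it roughly as follows (KVM_HINTS_REALTIME is the x86 hint bit, shown purely for illustration):

/* Sketch: bias a locking decision on a hypervisor hint. */
static bool vcpus_dedicated(void)
{
	return kvm_para_available() &&
	       (kvm_arch_para_hints() & (1 << KVM_HINTS_REALTIME));
}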
diff --git a/include/asm-generic/pci.h b/include/asm-generic/pci.h
index 830d7659289b..6bb3cd3d695a 100644
--- a/include/asm-generic/pci.h
+++ b/include/asm-generic/pci.h
@@ -14,12 +14,4 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
}
#endif /* HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ */
-/*
- * By default, assume that no iommu is in use and that the PCI
- * space is mapped to address physical 0.
- */
-#ifndef PCI_DMA_BUS_IS_PHYS
-#define PCI_DMA_BUS_IS_PHYS (1)
-#endif
-
#endif /* _ASM_GENERIC_PCI_H */
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index b37b4ad7eb94..9cc457597ddf 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -26,7 +26,6 @@
* @lock: Pointer to queued spinlock structure
* Return: 1 if it is locked, 0 otherwise
*/
-#ifndef queued_spin_is_locked
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
/*
@@ -35,7 +34,6 @@ static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
*/
return atomic_read(&lock->val);
}
-#endif
/**
* queued_spin_value_unlocked - is the spinlock structure unlocked?
@@ -100,7 +98,7 @@ static __always_inline void queued_spin_unlock(struct qspinlock *lock)
/*
* unlock() needs release semantics:
*/
- (void)atomic_sub_return_release(_Q_LOCKED_VAL, &lock->val);
+ smp_store_release(&lock->locked, 0);
}
#endif
diff --git a/include/asm-generic/qspinlock_types.h b/include/asm-generic/qspinlock_types.h
index 034acd0c4956..0763f065b975 100644
--- a/include/asm-generic/qspinlock_types.h
+++ b/include/asm-generic/qspinlock_types.h
@@ -29,13 +29,41 @@
#endif
typedef struct qspinlock {
- atomic_t val;
+ union {
+ atomic_t val;
+
+ /*
+ * By using the whole 2nd least significant byte for the
+ * pending bit, we can allow better optimization of the lock
+ * acquisition for the pending bit holder.
+ */
+#ifdef __LITTLE_ENDIAN
+ struct {
+ u8 locked;
+ u8 pending;
+ };
+ struct {
+ u16 locked_pending;
+ u16 tail;
+ };
+#else
+ struct {
+ u16 tail;
+ u16 locked_pending;
+ };
+ struct {
+ u8 reserved[2];
+ u8 pending;
+ u8 locked;
+ };
+#endif
+ };
} arch_spinlock_t;
/*
* Initializer
*/
-#define __ARCH_SPIN_LOCK_UNLOCKED { ATOMIC_INIT(0) }
+#define __ARCH_SPIN_LOCK_UNLOCKED { .val = ATOMIC_INIT(0) }
/*
* Bitfields in the atomic value:
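The union is only correct if its sub-fields overlay the same bytes that the atomic bitfield encoding assumes: locked in val bits 0-7, pending in 8-15, tail in 16-31. A self-contained userspace sketch of that correspondence, assuming a little-endian build:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct qsl {
	union {
		uint32_t val;
		struct { uint8_t locked; uint8_t pending; };
		struct { uint16_t locked_pending; uint16_t tail; };
	};
};

int main(void)
{
	struct qsl l = { .val = 0 };

	l.locked = 1;			/* val bits 0-7  */
	l.pending = 1;			/* val bits 8-15 */
	assert(l.val == 0x0101);
	assert(offsetof(struct qsl, tail) == 2);
	return 0;
}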
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 278841c75b97..e373e2e10f6a 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -64,15 +64,24 @@
* generates .data.identifier sections, which need to be pulled in with
* .data. We don't want to pull in .data..other sections, which Linux
* has defined. Same for text and bss.
+ *
+ * RODATA_MAIN is not used because existing code already defines .rodata.x
+ * sections to be brought in with rodata.
*/
#ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
#define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
#define DATA_MAIN .data .data.[0-9a-zA-Z_]*
+#define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]*
+#define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]*
#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]*
+#define SBSS_MAIN .sbss .sbss.[0-9a-zA-Z_]*
#else
#define TEXT_MAIN .text
#define DATA_MAIN .data
+#define SDATA_MAIN .sdata
+#define RODATA_MAIN .rodata
#define BSS_MAIN .bss
+#define SBSS_MAIN .sbss
#endif
/*
@@ -104,66 +113,66 @@
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
#define MCOUNT_REC() . = ALIGN(8); \
- VMLINUX_SYMBOL(__start_mcount_loc) = .; \
- *(__mcount_loc) \
- VMLINUX_SYMBOL(__stop_mcount_loc) = .;
+ __start_mcount_loc = .; \
+ KEEP(*(__mcount_loc)) \
+ __stop_mcount_loc = .;
#else
#define MCOUNT_REC()
#endif
#ifdef CONFIG_TRACE_BRANCH_PROFILING
-#define LIKELY_PROFILE() VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
- *(_ftrace_annotated_branch) \
- VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
+#define LIKELY_PROFILE() __start_annotated_branch_profile = .; \
+ KEEP(*(_ftrace_annotated_branch)) \
+ __stop_annotated_branch_profile = .;
#else
#define LIKELY_PROFILE()
#endif
#ifdef CONFIG_PROFILE_ALL_BRANCHES
-#define BRANCH_PROFILE() VMLINUX_SYMBOL(__start_branch_profile) = .; \
- *(_ftrace_branch) \
- VMLINUX_SYMBOL(__stop_branch_profile) = .;
+#define BRANCH_PROFILE() __start_branch_profile = .; \
+ KEEP(*(_ftrace_branch)) \
+ __stop_branch_profile = .;
#else
#define BRANCH_PROFILE()
#endif
#ifdef CONFIG_KPROBES
#define KPROBE_BLACKLIST() . = ALIGN(8); \
- VMLINUX_SYMBOL(__start_kprobe_blacklist) = .; \
+ __start_kprobe_blacklist = .; \
KEEP(*(_kprobe_blacklist)) \
- VMLINUX_SYMBOL(__stop_kprobe_blacklist) = .;
+ __stop_kprobe_blacklist = .;
#else
#define KPROBE_BLACKLIST()
#endif
#ifdef CONFIG_FUNCTION_ERROR_INJECTION
#define ERROR_INJECT_WHITELIST() STRUCT_ALIGN(); \
- VMLINUX_SYMBOL(__start_error_injection_whitelist) = .;\
+ __start_error_injection_whitelist = .; \
KEEP(*(_error_injection_whitelist)) \
- VMLINUX_SYMBOL(__stop_error_injection_whitelist) = .;
+ __stop_error_injection_whitelist = .;
#else
#define ERROR_INJECT_WHITELIST()
#endif
#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS() . = ALIGN(8); \
- VMLINUX_SYMBOL(__start_ftrace_events) = .; \
+ __start_ftrace_events = .; \
KEEP(*(_ftrace_events)) \
- VMLINUX_SYMBOL(__stop_ftrace_events) = .; \
- VMLINUX_SYMBOL(__start_ftrace_eval_maps) = .; \
+ __stop_ftrace_events = .; \
+ __start_ftrace_eval_maps = .; \
KEEP(*(_ftrace_eval_map)) \
- VMLINUX_SYMBOL(__stop_ftrace_eval_maps) = .;
+ __stop_ftrace_eval_maps = .;
#else
#define FTRACE_EVENTS()
#endif
#ifdef CONFIG_TRACING
-#define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
+#define TRACE_PRINTKS() __start___trace_bprintk_fmt = .; \
KEEP(*(__trace_printk_fmt)) /* Trace_printk fmt' pointer */ \
- VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
-#define TRACEPOINT_STR() VMLINUX_SYMBOL(__start___tracepoint_str) = .; \
+ __stop___trace_bprintk_fmt = .;
+#define TRACEPOINT_STR() __start___tracepoint_str = .; \
KEEP(*(__tracepoint_str)) /* Trace_printk fmt' pointer */ \
- VMLINUX_SYMBOL(__stop___tracepoint_str) = .;
+ __stop___tracepoint_str = .;
#else
#define TRACE_PRINTKS()
#define TRACEPOINT_STR()
@@ -171,27 +180,27 @@
#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS() . = ALIGN(8); \
- VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
+ __start_syscalls_metadata = .; \
KEEP(*(__syscalls_metadata)) \
- VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
+ __stop_syscalls_metadata = .;
#else
#define TRACE_SYSCALLS()
#endif
#ifdef CONFIG_BPF_EVENTS
#define BPF_RAW_TP() STRUCT_ALIGN(); \
- VMLINUX_SYMBOL(__start__bpf_raw_tp) = .; \
+ __start__bpf_raw_tp = .; \
KEEP(*(__bpf_raw_tp_map)) \
- VMLINUX_SYMBOL(__stop__bpf_raw_tp) = .;
+ __stop__bpf_raw_tp = .;
#else
#define BPF_RAW_TP()
#endif
#ifdef CONFIG_SERIAL_EARLYCON
-#define EARLYCON_TABLE() STRUCT_ALIGN(); \
- VMLINUX_SYMBOL(__earlycon_table) = .; \
+#define EARLYCON_TABLE() . = ALIGN(8); \
+ __earlycon_table = .; \
KEEP(*(__earlycon_table)) \
- VMLINUX_SYMBOL(__earlycon_table_end) = .;
+ __earlycon_table_end = .;
#else
#define EARLYCON_TABLE()
#endif
@@ -202,7 +211,7 @@
#define _OF_TABLE_0(name)
#define _OF_TABLE_1(name) \
. = ALIGN(8); \
- VMLINUX_SYMBOL(__##name##_of_table) = .; \
+ __##name##_of_table = .; \
KEEP(*(__##name##_of_table)) \
KEEP(*(__##name##_of_table_end))
@@ -217,18 +226,18 @@
#ifdef CONFIG_ACPI
#define ACPI_PROBE_TABLE(name) \
. = ALIGN(8); \
- VMLINUX_SYMBOL(__##name##_acpi_probe_table) = .; \
+ __##name##_acpi_probe_table = .; \
KEEP(*(__##name##_acpi_probe_table)) \
- VMLINUX_SYMBOL(__##name##_acpi_probe_table_end) = .;
+ __##name##_acpi_probe_table_end = .;
#else
#define ACPI_PROBE_TABLE(name)
#endif
#define KERNEL_DTB() \
STRUCT_ALIGN(); \
- VMLINUX_SYMBOL(__dtb_start) = .; \
+ __dtb_start = .; \
KEEP(*(.dtb.init.rodata)) \
- VMLINUX_SYMBOL(__dtb_end) = .;
+ __dtb_end = .;
/*
* .data section
@@ -238,23 +247,23 @@
*(DATA_MAIN) \
*(.ref.data) \
*(.data..shared_aligned) /* percpu related */ \
- MEM_KEEP(init.data) \
- MEM_KEEP(exit.data) \
+ MEM_KEEP(init.data*) \
+ MEM_KEEP(exit.data*) \
*(.data.unlikely) \
- VMLINUX_SYMBOL(__start_once) = .; \
+ __start_once = .; \
*(.data.once) \
- VMLINUX_SYMBOL(__end_once) = .; \
+ __end_once = .; \
STRUCT_ALIGN(); \
*(__tracepoints) \
/* implement dynamic printk debug */ \
. = ALIGN(8); \
- VMLINUX_SYMBOL(__start___jump_table) = .; \
+ __start___jump_table = .; \
KEEP(*(__jump_table)) \
- VMLINUX_SYMBOL(__stop___jump_table) = .; \
+ __stop___jump_table = .; \
. = ALIGN(8); \
- VMLINUX_SYMBOL(__start___verbose) = .; \
+ __start___verbose = .; \
KEEP(*(__verbose)) \
- VMLINUX_SYMBOL(__stop___verbose) = .; \
+ __stop___verbose = .; \
LIKELY_PROFILE() \
BRANCH_PROFILE() \
TRACE_PRINTKS() \
@@ -266,10 +275,10 @@
*/
#define NOSAVE_DATA \
. = ALIGN(PAGE_SIZE); \
- VMLINUX_SYMBOL(__nosave_begin) = .; \
+ __nosave_begin = .; \
*(.data..nosave) \
. = ALIGN(PAGE_SIZE); \
- VMLINUX_SYMBOL(__nosave_end) = .;
+ __nosave_end = .;
#define PAGE_ALIGNED_DATA(page_align) \
. = ALIGN(page_align); \
@@ -286,13 +295,13 @@
#define INIT_TASK_DATA(align) \
. = ALIGN(align); \
- VMLINUX_SYMBOL(__start_init_task) = .; \
- VMLINUX_SYMBOL(init_thread_union) = .; \
- VMLINUX_SYMBOL(init_stack) = .; \
- *(.data..init_task) \
- *(.data..init_thread_info) \
- . = VMLINUX_SYMBOL(__start_init_task) + THREAD_SIZE; \
- VMLINUX_SYMBOL(__end_init_task) = .;
+ __start_init_task = .; \
+ init_thread_union = .; \
+ init_stack = .; \
+ KEEP(*(.data..init_task)) \
+ KEEP(*(.data..init_thread_info)) \
+ . = __start_init_task + THREAD_SIZE; \
+ __end_init_task = .;
/*
* Allow architectures to handle ro_after_init data on their
@@ -300,9 +309,9 @@
*/
#ifndef RO_AFTER_INIT_DATA
#define RO_AFTER_INIT_DATA \
- VMLINUX_SYMBOL(__start_ro_after_init) = .; \
+ __start_ro_after_init = .; \
*(.data..ro_after_init) \
- VMLINUX_SYMBOL(__end_ro_after_init) = .;
+ __end_ro_after_init = .;
#endif
/*
@@ -311,14 +320,14 @@
#define RO_DATA_SECTION(align) \
. = ALIGN((align)); \
.rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start_rodata) = .; \
+ __start_rodata = .; \
*(.rodata) *(.rodata.*) \
RO_AFTER_INIT_DATA /* Read only after init */ \
KEEP(*(__vermagic)) /* Kernel version magic */ \
. = ALIGN(8); \
- VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
+ __start___tracepoints_ptrs = .; \
KEEP(*(__tracepoints_ptrs)) /* Tracepoints: pointer array */ \
- VMLINUX_SYMBOL(__stop___tracepoints_ptrs) = .; \
+ __stop___tracepoints_ptrs = .; \
*(__tracepoints_strings)/* Tracepoints: strings */ \
} \
\
@@ -328,109 +337,109 @@
\
/* PCI quirks */ \
.pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
+ __start_pci_fixups_early = .; \
KEEP(*(.pci_fixup_early)) \
- VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \
- VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \
+ __end_pci_fixups_early = .; \
+ __start_pci_fixups_header = .; \
KEEP(*(.pci_fixup_header)) \
- VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \
- VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \
+ __end_pci_fixups_header = .; \
+ __start_pci_fixups_final = .; \
KEEP(*(.pci_fixup_final)) \
- VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \
- VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \
+ __end_pci_fixups_final = .; \
+ __start_pci_fixups_enable = .; \
KEEP(*(.pci_fixup_enable)) \
- VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \
- VMLINUX_SYMBOL(__start_pci_fixups_resume) = .; \
+ __end_pci_fixups_enable = .; \
+ __start_pci_fixups_resume = .; \
KEEP(*(.pci_fixup_resume)) \
- VMLINUX_SYMBOL(__end_pci_fixups_resume) = .; \
- VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \
+ __end_pci_fixups_resume = .; \
+ __start_pci_fixups_resume_early = .; \
KEEP(*(.pci_fixup_resume_early)) \
- VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \
- VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .; \
+ __end_pci_fixups_resume_early = .; \
+ __start_pci_fixups_suspend = .; \
KEEP(*(.pci_fixup_suspend)) \
- VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .; \
- VMLINUX_SYMBOL(__start_pci_fixups_suspend_late) = .; \
+ __end_pci_fixups_suspend = .; \
+ __start_pci_fixups_suspend_late = .; \
KEEP(*(.pci_fixup_suspend_late)) \
- VMLINUX_SYMBOL(__end_pci_fixups_suspend_late) = .; \
+ __end_pci_fixups_suspend_late = .; \
} \
\
/* Built-in firmware blobs */ \
.builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start_builtin_fw) = .; \
+ __start_builtin_fw = .; \
KEEP(*(.builtin_fw)) \
- VMLINUX_SYMBOL(__end_builtin_fw) = .; \
+ __end_builtin_fw = .; \
} \
\
TRACEDATA \
\
/* Kernel symbol table: Normal symbols */ \
__ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start___ksymtab) = .; \
+ __start___ksymtab = .; \
KEEP(*(SORT(___ksymtab+*))) \
- VMLINUX_SYMBOL(__stop___ksymtab) = .; \
+ __stop___ksymtab = .; \
} \
\
/* Kernel symbol table: GPL-only symbols */ \
__ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
+ __start___ksymtab_gpl = .; \
KEEP(*(SORT(___ksymtab_gpl+*))) \
- VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
+ __stop___ksymtab_gpl = .; \
} \
\
/* Kernel symbol table: Normal unused symbols */ \
__ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \
+ __start___ksymtab_unused = .; \
KEEP(*(SORT(___ksymtab_unused+*))) \
- VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \
+ __stop___ksymtab_unused = .; \
} \
\
/* Kernel symbol table: GPL-only unused symbols */ \
__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
+ __start___ksymtab_unused_gpl = .; \
KEEP(*(SORT(___ksymtab_unused_gpl+*))) \
- VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
+ __stop___ksymtab_unused_gpl = .; \
} \
\
/* Kernel symbol table: GPL-future-only symbols */ \
__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
+ __start___ksymtab_gpl_future = .; \
KEEP(*(SORT(___ksymtab_gpl_future+*))) \
- VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
+ __stop___ksymtab_gpl_future = .; \
} \
\
/* Kernel symbol table: Normal symbols */ \
__kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start___kcrctab) = .; \
+ __start___kcrctab = .; \
KEEP(*(SORT(___kcrctab+*))) \
- VMLINUX_SYMBOL(__stop___kcrctab) = .; \
+ __stop___kcrctab = .; \
} \
\
/* Kernel symbol table: GPL-only symbols */ \
__kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \
+ __start___kcrctab_gpl = .; \
KEEP(*(SORT(___kcrctab_gpl+*))) \
- VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
+ __stop___kcrctab_gpl = .; \
} \
\
/* Kernel symbol table: Normal unused symbols */ \
__kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start___kcrctab_unused) = .; \
+ __start___kcrctab_unused = .; \
KEEP(*(SORT(___kcrctab_unused+*))) \
- VMLINUX_SYMBOL(__stop___kcrctab_unused) = .; \
+ __stop___kcrctab_unused = .; \
} \
\
/* Kernel symbol table: GPL-only unused symbols */ \
__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
+ __start___kcrctab_unused_gpl = .; \
KEEP(*(SORT(___kcrctab_unused_gpl+*))) \
- VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
+ __stop___kcrctab_unused_gpl = .; \
} \
\
/* Kernel symbol table: GPL-future-only symbols */ \
__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
+ __start___kcrctab_gpl_future = .; \
KEEP(*(SORT(___kcrctab_gpl_future+*))) \
- VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
+ __stop___kcrctab_gpl_future = .; \
} \
\
/* Kernel symbol table: strings */ \
@@ -447,18 +456,18 @@
\
/* Built-in module parameters. */ \
__param : AT(ADDR(__param) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start___param) = .; \
+ __start___param = .; \
KEEP(*(__param)) \
- VMLINUX_SYMBOL(__stop___param) = .; \
+ __stop___param = .; \
} \
\
/* Built-in module versions. */ \
__modver : AT(ADDR(__modver) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start___modver) = .; \
+ __start___modver = .; \
KEEP(*(__modver)) \
- VMLINUX_SYMBOL(__stop___modver) = .; \
+ __stop___modver = .; \
. = ALIGN((align)); \
- VMLINUX_SYMBOL(__end_rodata) = .; \
+ __end_rodata = .; \
} \
. = ALIGN((align));
@@ -469,9 +478,9 @@
#define SECURITY_INIT \
.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__security_initcall_start) = .; \
+ __security_initcall_start = .; \
KEEP(*(.security_initcall.init)) \
- VMLINUX_SYMBOL(__security_initcall_end) = .; \
+ __security_initcall_end = .; \
}
/*
@@ -487,58 +496,58 @@
*(.text.hot TEXT_MAIN .text.fixup .text.unlikely) \
*(.text..refcount) \
*(.ref.text) \
- MEM_KEEP(init.text) \
- MEM_KEEP(exit.text) \
+ MEM_KEEP(init.text*) \
+ MEM_KEEP(exit.text*) \
/* sched.text is aligned to function alignment to ensure we have the same
 * address even at the second ld pass when generating System.map */
#define SCHED_TEXT \
ALIGN_FUNCTION(); \
- VMLINUX_SYMBOL(__sched_text_start) = .; \
+ __sched_text_start = .; \
*(.sched.text) \
- VMLINUX_SYMBOL(__sched_text_end) = .;
+ __sched_text_end = .;
/* spinlock.text is aligned to function alignment to ensure we have the same
 * address even at the second ld pass when generating System.map */
#define LOCK_TEXT \
ALIGN_FUNCTION(); \
- VMLINUX_SYMBOL(__lock_text_start) = .; \
+ __lock_text_start = .; \
*(.spinlock.text) \
- VMLINUX_SYMBOL(__lock_text_end) = .;
+ __lock_text_end = .;
#define CPUIDLE_TEXT \
ALIGN_FUNCTION(); \
- VMLINUX_SYMBOL(__cpuidle_text_start) = .; \
+ __cpuidle_text_start = .; \
*(.cpuidle.text) \
- VMLINUX_SYMBOL(__cpuidle_text_end) = .;
+ __cpuidle_text_end = .;
#define KPROBES_TEXT \
ALIGN_FUNCTION(); \
- VMLINUX_SYMBOL(__kprobes_text_start) = .; \
+ __kprobes_text_start = .; \
*(.kprobes.text) \
- VMLINUX_SYMBOL(__kprobes_text_end) = .;
+ __kprobes_text_end = .;
#define ENTRY_TEXT \
ALIGN_FUNCTION(); \
- VMLINUX_SYMBOL(__entry_text_start) = .; \
+ __entry_text_start = .; \
*(.entry.text) \
- VMLINUX_SYMBOL(__entry_text_end) = .;
+ __entry_text_end = .;
#define IRQENTRY_TEXT \
ALIGN_FUNCTION(); \
- VMLINUX_SYMBOL(__irqentry_text_start) = .; \
+ __irqentry_text_start = .; \
*(.irqentry.text) \
- VMLINUX_SYMBOL(__irqentry_text_end) = .;
+ __irqentry_text_end = .;
#define SOFTIRQENTRY_TEXT \
ALIGN_FUNCTION(); \
- VMLINUX_SYMBOL(__softirqentry_text_start) = .; \
+ __softirqentry_text_start = .; \
*(.softirqentry.text) \
- VMLINUX_SYMBOL(__softirqentry_text_end) = .;
+ __softirqentry_text_end = .;
/* Section used for early init (in .S files) */
-#define HEAD_TEXT *(.head.text)
+#define HEAD_TEXT KEEP(*(.head.text))
#define HEAD_TEXT_SECTION \
.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) { \
@@ -551,9 +560,9 @@
#define EXCEPTION_TABLE(align) \
. = ALIGN(align); \
__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start___ex_table) = .; \
+ __start___ex_table = .; \
KEEP(*(__ex_table)) \
- VMLINUX_SYMBOL(__stop___ex_table) = .; \
+ __stop___ex_table = .; \
}
/*
@@ -567,11 +576,11 @@
#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS() . = ALIGN(8); \
- VMLINUX_SYMBOL(__ctors_start) = .; \
+ __ctors_start = .; \
KEEP(*(.ctors)) \
KEEP(*(SORT(.init_array.*))) \
KEEP(*(.init_array)) \
- VMLINUX_SYMBOL(__ctors_end) = .;
+ __ctors_end = .;
#else
#define KERNEL_CTORS()
#endif
@@ -579,11 +588,11 @@
/* init and exit section handling */
#define INIT_DATA \
KEEP(*(SORT(___kentry+*))) \
- *(.init.data) \
- MEM_DISCARD(init.data) \
+ *(.init.data init.data.*) \
+ MEM_DISCARD(init.data*) \
KERNEL_CTORS() \
MCOUNT_REC() \
- *(.init.rodata) \
+ *(.init.rodata .init.rodata.*) \
FTRACE_EVENTS() \
TRACE_SYSCALLS() \
KPROBE_BLACKLIST() \
@@ -602,16 +611,16 @@
EARLYCON_TABLE()
#define INIT_TEXT \
- *(.init.text) \
+ *(.init.text .init.text.*) \
*(.text.startup) \
- MEM_DISCARD(init.text)
+ MEM_DISCARD(init.text*)
#define EXIT_DATA \
- *(.exit.data) \
+ *(.exit.data .exit.data.*) \
*(.fini_array) \
*(.dtors) \
- MEM_DISCARD(exit.data) \
- MEM_DISCARD(exit.rodata)
+ MEM_DISCARD(exit.data*) \
+ MEM_DISCARD(exit.rodata*)
#define EXIT_TEXT \
*(.exit.text) \
@@ -629,7 +638,7 @@
. = ALIGN(sbss_align); \
.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \
*(.dynsbss) \
- *(.sbss) \
+ *(SBSS_MAIN) \
*(.scommon) \
}
@@ -706,9 +715,9 @@
#define BUG_TABLE \
. = ALIGN(8); \
__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start___bug_table) = .; \
+ __start___bug_table = .; \
KEEP(*(__bug_table)) \
- VMLINUX_SYMBOL(__stop___bug_table) = .; \
+ __stop___bug_table = .; \
}
#else
#define BUG_TABLE
@@ -718,22 +727,22 @@
#define ORC_UNWIND_TABLE \
. = ALIGN(4); \
.orc_unwind_ip : AT(ADDR(.orc_unwind_ip) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start_orc_unwind_ip) = .; \
+ __start_orc_unwind_ip = .; \
KEEP(*(.orc_unwind_ip)) \
- VMLINUX_SYMBOL(__stop_orc_unwind_ip) = .; \
+ __stop_orc_unwind_ip = .; \
} \
. = ALIGN(6); \
.orc_unwind : AT(ADDR(.orc_unwind) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start_orc_unwind) = .; \
+ __start_orc_unwind = .; \
KEEP(*(.orc_unwind)) \
- VMLINUX_SYMBOL(__stop_orc_unwind) = .; \
+ __stop_orc_unwind = .; \
} \
. = ALIGN(4); \
.orc_lookup : AT(ADDR(.orc_lookup) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(orc_lookup) = .; \
+ orc_lookup = .; \
. += (((SIZEOF(.text) + LOOKUP_BLOCK_SIZE - 1) / \
LOOKUP_BLOCK_SIZE) + 1) * 4; \
- VMLINUX_SYMBOL(orc_lookup_end) = .; \
+ orc_lookup_end = .; \
}
#else
#define ORC_UNWIND_TABLE
@@ -743,9 +752,9 @@
#define TRACEDATA \
. = ALIGN(4); \
.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__tracedata_start) = .; \
+ __tracedata_start = .; \
KEEP(*(.tracedata)) \
- VMLINUX_SYMBOL(__tracedata_end) = .; \
+ __tracedata_end = .; \
}
#else
#define TRACEDATA
@@ -753,24 +762,24 @@
#define NOTES \
.notes : AT(ADDR(.notes) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start_notes) = .; \
- *(.note.*) \
- VMLINUX_SYMBOL(__stop_notes) = .; \
+ __start_notes = .; \
+ KEEP(*(.note.*)) \
+ __stop_notes = .; \
}
#define INIT_SETUP(initsetup_align) \
. = ALIGN(initsetup_align); \
- VMLINUX_SYMBOL(__setup_start) = .; \
+ __setup_start = .; \
KEEP(*(.init.setup)) \
- VMLINUX_SYMBOL(__setup_end) = .;
+ __setup_end = .;
#define INIT_CALLS_LEVEL(level) \
- VMLINUX_SYMBOL(__initcall##level##_start) = .; \
+ __initcall##level##_start = .; \
KEEP(*(.initcall##level##.init)) \
KEEP(*(.initcall##level##s.init)) \
#define INIT_CALLS \
- VMLINUX_SYMBOL(__initcall_start) = .; \
+ __initcall_start = .; \
KEEP(*(.initcallearly.init)) \
INIT_CALLS_LEVEL(0) \
INIT_CALLS_LEVEL(1) \
@@ -781,22 +790,22 @@
INIT_CALLS_LEVEL(rootfs) \
INIT_CALLS_LEVEL(6) \
INIT_CALLS_LEVEL(7) \
- VMLINUX_SYMBOL(__initcall_end) = .;
+ __initcall_end = .;
#define CON_INITCALL \
- VMLINUX_SYMBOL(__con_initcall_start) = .; \
+ __con_initcall_start = .; \
KEEP(*(.con_initcall.init)) \
- VMLINUX_SYMBOL(__con_initcall_end) = .;
+ __con_initcall_end = .;
#define SECURITY_INITCALL \
- VMLINUX_SYMBOL(__security_initcall_start) = .; \
+ __security_initcall_start = .; \
KEEP(*(.security_initcall.init)) \
- VMLINUX_SYMBOL(__security_initcall_end) = .;
+ __security_initcall_end = .;
#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS \
. = ALIGN(4); \
- VMLINUX_SYMBOL(__initramfs_start) = .; \
+ __initramfs_start = .; \
KEEP(*(.init.ramfs)) \
. = ALIGN(8); \
KEEP(*(.init.ramfs.info))
@@ -851,7 +860,7 @@
* sharing between subsections for different purposes.
*/
#define PERCPU_INPUT(cacheline) \
- VMLINUX_SYMBOL(__per_cpu_start) = .; \
+ __per_cpu_start = .; \
*(.data..percpu..first) \
. = ALIGN(PAGE_SIZE); \
*(.data..percpu..page_aligned) \
@@ -861,7 +870,7 @@
*(.data..percpu) \
*(.data..percpu..shared_aligned) \
PERCPU_DECRYPTED_SECTION \
- VMLINUX_SYMBOL(__per_cpu_end) = .;
+ __per_cpu_end = .;
/**
* PERCPU_VADDR - define output section for percpu area
@@ -888,12 +897,11 @@
* address, use PERCPU_SECTION.
*/
#define PERCPU_VADDR(cacheline, vaddr, phdr) \
- VMLINUX_SYMBOL(__per_cpu_load) = .; \
- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
- - LOAD_OFFSET) { \
+ __per_cpu_load = .; \
+ .data..percpu vaddr : AT(__per_cpu_load - LOAD_OFFSET) { \
PERCPU_INPUT(cacheline) \
} phdr \
- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
+ . = __per_cpu_load + SIZEOF(.data..percpu);
/**
* PERCPU_SECTION - define output section for percpu area, simple version
@@ -910,7 +918,7 @@
#define PERCPU_SECTION(cacheline) \
. = ALIGN(PAGE_SIZE); \
.data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__per_cpu_load) = .; \
+ __per_cpu_load = .; \
PERCPU_INPUT(cacheline) \
}
@@ -949,9 +957,9 @@
#define INIT_TEXT_SECTION(inittext_align) \
. = ALIGN(inittext_align); \
.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(_sinittext) = .; \
+ _sinittext = .; \
INIT_TEXT \
- VMLINUX_SYMBOL(_einittext) = .; \
+ _einittext = .; \
}
#define INIT_DATA_SECTION(initsetup_align) \
@@ -966,8 +974,8 @@
#define BSS_SECTION(sbss_align, bss_align, stop_align) \
. = ALIGN(sbss_align); \
- VMLINUX_SYMBOL(__bss_start) = .; \
+ __bss_start = .; \
SBSS(sbss_align) \
BSS(bss_align) \
. = ALIGN(stop_align); \
- VMLINUX_SYMBOL(__bss_stop) = .;
+ __bss_stop = .;
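All of the __start_*/__stop_* pairs above are consumed from C as extern array bounds, which is also why the KEEP() annotations matter: with CONFIG_LD_DEAD_CODE_DATA_ELIMINATION enabled the linker would otherwise garbage-collect sections that are referenced only through such tables. A hedged sketch of the consuming side, using the __param table as the example (the walker is hypothetical):

/* Bound to __start___param/__stop___param emitted by RO_DATA_SECTION(). */
extern const struct kernel_param __start___param[], __stop___param[];

static void walk_params(void)
{
	const struct kernel_param *p;

	for (p = __start___param; p < __stop___param; p++)
		handle_param(p);	/* hypothetical consumer */
}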