Diffstat (limited to 'arch/parisc')
87 files changed, 909 insertions, 920 deletions
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index daafeb20f993..fcc5973f7519 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -10,18 +10,20 @@ config PARISC select ARCH_WANT_FRAME_POINTERS select ARCH_HAS_CPU_CACHE_ALIASING select ARCH_HAS_DMA_ALLOC if PA11 + select ARCH_HAS_DMA_OPS select ARCH_HAS_ELF_RANDOMIZE select ARCH_HAS_STRICT_KERNEL_RWX select ARCH_HAS_STRICT_MODULE_RWX select ARCH_HAS_UBSAN select ARCH_HAS_PTE_SPECIAL select ARCH_NO_SG_CHAIN + select ARCH_SPLIT_ARG64 if !64BIT select ARCH_SUPPORTS_HUGETLBFS if PA20 select ARCH_SUPPORTS_MEMORY_FAILURE select ARCH_STACKWALK + select ARCH_HAS_CACHE_LINE_SIZE select ARCH_HAS_DEBUG_VM_PGTABLE select HAVE_RELIABLE_STACKTRACE - select DMA_OPS select RTC_CLASS select RTC_DRV_GENERIC select INIT_ALL_POSSIBLE @@ -45,6 +47,7 @@ config PARISC select GENERIC_CPU_DEVICES if !SMP select GENERIC_LIB_DEVMEM_IS_ALLOWED select SYSCTL_ARCH_UNALIGN_ALLOW + select SYSCTL_ARCH_UNALIGN_NO_WARN select SYSCTL_EXCEPTION_TRACE select HAVE_MOD_ARCH_SPECIFIC select MODULES_USE_ELF_RELA @@ -57,8 +60,8 @@ config PARISC select HAVE_ARCH_MMAP_RND_BITS select HAVE_ARCH_AUDITSYSCALL select HAVE_ARCH_HASH - select HAVE_ARCH_JUMP_LABEL - select HAVE_ARCH_JUMP_LABEL_RELATIVE + # select HAVE_ARCH_JUMP_LABEL + # select HAVE_ARCH_JUMP_LABEL_RELATIVE select HAVE_ARCH_KFENCE select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_TRACEHOOK @@ -69,7 +72,7 @@ config PARISC select GENERIC_SCHED_CLOCK select GENERIC_IRQ_MIGRATION if SMP select HAVE_UNSTABLE_SCHED_CLOCK if SMP - select LEGACY_TIMER_TICK + select GENERIC_CLOCKEVENTS select CPU_NO_EFFICIENT_FFS select THREAD_INFO_IN_TASK select NEED_DMA_MAP_STATE @@ -85,6 +88,7 @@ config PARISC select HAVE_SOFTIRQ_ON_OWN_STACK if IRQSTACKS select TRACE_IRQFLAGS_SUPPORT select HAVE_FUNCTION_DESCRIPTORS if 64BIT + select PCI_MSI_ARCH_FALLBACKS if PCI_MSI help The PA-RISC microprocessor is designed by Hewlett-Packard and used diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile index 316f84f1d15c..21b8166a6883 100644 --- a/arch/parisc/Makefile +++ b/arch/parisc/Makefile @@ -119,7 +119,7 @@ export LIBGCC libs-y += arch/parisc/lib/ $(LIBGCC) -drivers-y += arch/parisc/video/ +drivers-$(CONFIG_VIDEO) += arch/parisc/video/ boot := arch/parisc/boot diff --git a/arch/parisc/boot/compressed/Makefile b/arch/parisc/boot/compressed/Makefile index a294a1b58ee7..17c42d718eb3 100644 --- a/arch/parisc/boot/compressed/Makefile +++ b/arch/parisc/boot/compressed/Makefile @@ -5,10 +5,6 @@ # create a compressed self-extracting vmlinux image from the original vmlinux # -KCOV_INSTRUMENT := n -GCOV_PROFILE := n -UBSAN_SANITIZE := n - OBJECTS := head.o real2.o firmware.o misc.o piggy.o targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4 @@ -22,6 +18,7 @@ KBUILD_CFLAGS += -fno-PIE -mno-space-regs -mdisable-fpregs -Os ifndef CONFIG_64BIT KBUILD_CFLAGS += -mfast-indirect-calls endif +KBUILD_CFLAGS += -std=gnu11 LDFLAGS_vmlinux := -X -e startup --as-needed -T $(obj)/vmlinux: $(obj)/vmlinux.lds $(addprefix $(obj)/, $(OBJECTS)) $(LIBGCC) FORCE diff --git a/arch/parisc/boot/compressed/misc.c b/arch/parisc/boot/compressed/misc.c index d389359e22ac..9c83bd06ef15 100644 --- a/arch/parisc/boot/compressed/misc.c +++ b/arch/parisc/boot/compressed/misc.c @@ -6,7 +6,7 @@ #include <linux/uaccess.h> #include <linux/elf.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <asm/page.h> #include "sizes.h" diff --git 
a/arch/parisc/configs/generic-32bit_defconfig b/arch/parisc/configs/generic-32bit_defconfig index ee4febb30386..94928d114d4c 100644 --- a/arch/parisc/configs/generic-32bit_defconfig +++ b/arch/parisc/configs/generic-32bit_defconfig @@ -131,12 +131,12 @@ CONFIG_PPDEV=m CONFIG_I2C=y CONFIG_HWMON=m CONFIG_DRM=m -CONFIG_DRM_DP_CEC=y -# CONFIG_DRM_I2C_CH7006 is not set -# CONFIG_DRM_I2C_SIL164 is not set +CONFIG_DRM_DISPLAY_DP_AUX_CEC=y CONFIG_DRM_RADEON=m CONFIG_DRM_NOUVEAU=m # CONFIG_DRM_NOUVEAU_BACKLIGHT is not set +# CONFIG_DRM_NOUVEAU_CH7006 is not set +# CONFIG_DRM_NOUVEAU_SIL164 is not set CONFIG_DRM_VGEM=m CONFIG_DRM_UDL=m CONFIG_DRM_MGAG200=m @@ -251,7 +251,7 @@ CONFIG_CIFS=m CONFIG_CIFS_XATTR=y CONFIG_CIFS_POSIX=y # CONFIG_CIFS_DEBUG is not set -CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_BENCHMARK=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAST5=m CONFIG_CRYPTO_CAST6=m @@ -264,8 +264,6 @@ CONFIG_CRYPTO_MICHAEL_MIC=m CONFIG_CRYPTO_SHA1=y CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_DEFLATE=y -CONFIG_CRC_CCITT=m -CONFIG_CRC_T10DIF=y CONFIG_FONTS=y CONFIG_PRINTK_TIME=y CONFIG_MAGIC_SYSRQ=y diff --git a/arch/parisc/configs/generic-64bit_defconfig b/arch/parisc/configs/generic-64bit_defconfig index 19a804860ed5..d8cd7f858b2a 100644 --- a/arch/parisc/configs/generic-64bit_defconfig +++ b/arch/parisc/configs/generic-64bit_defconfig @@ -193,11 +193,11 @@ CONFIG_MEDIA_SUPPORT=m CONFIG_AGP=y CONFIG_AGP_PARISC=y CONFIG_DRM=y -# CONFIG_DRM_I2C_CH7006 is not set -# CONFIG_DRM_I2C_SIL164 is not set CONFIG_DRM_RADEON=y CONFIG_DRM_NOUVEAU=m # CONFIG_DRM_NOUVEAU_BACKLIGHT is not set +# CONFIG_DRM_NOUVEAU_CH7006 is not set +# CONFIG_DRM_NOUVEAU_SIL164 is not set CONFIG_DRM_MGAG200=m CONFIG_FB=y CONFIG_FB_PM2=m @@ -268,7 +268,6 @@ CONFIG_PROC_KCORE=y CONFIG_TMPFS=y CONFIG_TMPFS_XATTR=y CONFIG_CONFIGFS_FS=y -CONFIG_SYSV_FS=y CONFIG_NFS_FS=m CONFIG_NFS_V4=m CONFIG_NFS_V4_1=y @@ -284,7 +283,6 @@ CONFIG_NLS_ASCII=m CONFIG_NLS_ISO8859_1=m CONFIG_NLS_ISO8859_2=m CONFIG_NLS_UTF8=m -CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_FCRYPT=m CONFIG_CRYPTO_ECB=m CONFIG_CRYPTO_PCBC=m @@ -293,8 +291,6 @@ CONFIG_CRYPTO_MD5=y CONFIG_CRYPTO_MICHAEL_MIC=m CONFIG_CRYPTO_DEFLATE=m # CONFIG_CRYPTO_HW is not set -CONFIG_CRC_CCITT=m -CONFIG_LIBCRC32C=y CONFIG_PRINTK_TIME=y CONFIG_DEBUG_KERNEL=y CONFIG_STRIP_ASM_SYMS=y diff --git a/arch/parisc/include/asm/alternative.h b/arch/parisc/include/asm/alternative.h index 1eb488f25b83..1601ae4b888d 100644 --- a/arch/parisc/include/asm/alternative.h +++ b/arch/parisc/include/asm/alternative.h @@ -13,7 +13,7 @@ #define INSN_PxTLB 0x02 /* modify pdtlb, pitlb */ #define INSN_NOP 0x08000240 /* nop */ -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/init.h> #include <linux/types.h> @@ -61,6 +61,6 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end, .word (new_instr_ptr - .) ! 
\ .previous -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_PARISC_ALTERNATIVE_H */ diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h index 000a28e1c5e8..c20261604f09 100644 --- a/arch/parisc/include/asm/assembly.h +++ b/arch/parisc/include/asm/assembly.h @@ -53,7 +53,7 @@ #define SR_TEMP2 2 #define SR_USER 3 -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ #ifdef CONFIG_64BIT #define LDREG ldd @@ -582,5 +582,5 @@ .previous -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif diff --git a/arch/parisc/include/asm/barrier.h b/arch/parisc/include/asm/barrier.h index c705decf2bed..519b1903c5ed 100644 --- a/arch/parisc/include/asm/barrier.h +++ b/arch/parisc/include/asm/barrier.h @@ -4,7 +4,7 @@ #include <asm/alternative.h> -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ /* The synchronize caches instruction executes as a nop on systems in which all memory references are performed in order. */ @@ -93,5 +93,5 @@ do { \ }) #include <asm-generic/barrier.h> -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ #endif /* __ASM_BARRIER_H */ diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h index 2a60d7a72f1f..3f8d3be6ef24 100644 --- a/arch/parisc/include/asm/cache.h +++ b/arch/parisc/include/asm/cache.h @@ -16,11 +16,20 @@ #define L1_CACHE_BYTES 16 #define L1_CACHE_SHIFT 4 -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #define SMP_CACHE_BYTES L1_CACHE_BYTES -#define ARCH_DMA_MINALIGN L1_CACHE_BYTES +#ifdef CONFIG_PA20 +#define ARCH_DMA_MINALIGN 128 +#else +#define ARCH_DMA_MINALIGN 32 +#endif +#define ARCH_KMALLOC_MINALIGN 16 /* ldcw requires 16-byte alignment */ + +#define arch_slab_minalign() ((unsigned)dcache_stride) +#define cache_line_size() dcache_stride +#define dma_get_cache_alignment cache_line_size #define __read_mostly __section(".data..read_mostly") @@ -57,7 +66,7 @@ void parisc_setup_cache_timing(void); ALTERNATIVE(ALT_COND_NO_IOC_FDC, INSN_NOP) :::"memory") #define asm_syncdma() asm volatile("syncdma" :::"memory") -#endif /* ! __ASSEMBLY__ */ +#endif /* ! 
__ASSEMBLER__ */ /* Classes of processor wrt: disabling space register hashing */ diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h index ba4c05bc24d6..8394718870e1 100644 --- a/arch/parisc/include/asm/cacheflush.h +++ b/arch/parisc/include/asm/cacheflush.h @@ -31,18 +31,17 @@ void flush_cache_all_local(void); void flush_cache_all(void); void flush_cache_mm(struct mm_struct *mm); -void flush_kernel_dcache_page_addr(const void *addr); - #define flush_kernel_dcache_range(start,size) \ flush_kernel_dcache_range_asm((start), (start)+(size)); +/* The only way to flush a vmap range is to flush whole cache */ #define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1 void flush_kernel_vmap_range(void *vaddr, int size); void invalidate_kernel_vmap_range(void *vaddr, int size); -#define flush_cache_vmap(start, end) flush_cache_all() +void flush_cache_vmap(unsigned long start, unsigned long end); #define flush_cache_vmap_early(start, end) do { } while (0) -#define flush_cache_vunmap(start, end) flush_cache_all() +void flush_cache_vunmap(unsigned long start, unsigned long end); void flush_dcache_folio(struct folio *folio); #define flush_dcache_folio flush_dcache_folio @@ -77,17 +76,11 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); -/* defined in pacache.S exported in cache.c used by flush_anon_page */ -void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr); - #define ARCH_HAS_FLUSH_ANON_PAGE void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr); #define ARCH_HAS_FLUSH_ON_KUNMAP -static inline void kunmap_flush_on_unmap(const void *addr) -{ - flush_kernel_dcache_page_addr(addr); -} +void kunmap_flush_on_unmap(const void *addr); #endif /* _PARISC_CACHEFLUSH_H */ diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h index c1d776bb16b4..bf0a0f1189eb 100644 --- a/arch/parisc/include/asm/cmpxchg.h +++ b/arch/parisc/include/asm/cmpxchg.h @@ -56,26 +56,24 @@ __arch_xchg(unsigned long x, volatile void *ptr, int size) /* bug catcher for when unsupported size is used - won't link */ extern void __cmpxchg_called_with_bad_pointer(void); -/* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */ -extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old, - unsigned int new_); -extern u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new_); +/* __cmpxchg_u... defined in arch/parisc/lib/bitops.c */ extern u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new_); +extern u16 __cmpxchg_u16(volatile u16 *ptr, u16 old, u16 new_); +extern u32 __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_); +extern u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new_); /* don't worry...optimizer will get rid of most of this */ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size) { - switch (size) { + return #ifdef CONFIG_64BIT - case 8: return __cmpxchg_u64((u64 *)ptr, old, new_); + size == 8 ? __cmpxchg_u64(ptr, old, new_) : #endif - case 4: return __cmpxchg_u32((unsigned int *)ptr, - (unsigned int)old, (unsigned int)new_); - case 1: return __cmpxchg_u8((u8 *)ptr, old & 0xff, new_ & 0xff); - } - __cmpxchg_called_with_bad_pointer(); - return old; + size == 4 ? __cmpxchg_u32(ptr, old, new_) : + size == 2 ? __cmpxchg_u16(ptr, old, new_) : + size == 1 ? 
__cmpxchg_u8(ptr, old, new_) : + (__cmpxchg_called_with_bad_pointer(), old); } #define arch_cmpxchg(ptr, o, n) \ diff --git a/arch/parisc/include/asm/current.h b/arch/parisc/include/asm/current.h index dc7aea07c3f3..2814529a4c28 100644 --- a/arch/parisc/include/asm/current.h +++ b/arch/parisc/include/asm/current.h @@ -2,7 +2,7 @@ #ifndef _ASM_PARISC_CURRENT_H #define _ASM_PARISC_CURRENT_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ struct task_struct; static __always_inline struct task_struct *get_current(void) @@ -16,6 +16,6 @@ static __always_inline struct task_struct *get_current(void) #define current get_current() -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* _ASM_PARISC_CURRENT_H */ diff --git a/arch/parisc/include/asm/dwarf.h b/arch/parisc/include/asm/dwarf.h index f4512db86a19..526f4a79262c 100644 --- a/arch/parisc/include/asm/dwarf.h +++ b/arch/parisc/include/asm/dwarf.h @@ -6,7 +6,7 @@ #ifndef _ASM_PARISC_DWARF_H #define _ASM_PARISC_DWARF_H -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ #define CFI_STARTPROC .cfi_startproc #define CFI_ENDPROC .cfi_endproc @@ -15,6 +15,6 @@ #define CFI_REL_OFFSET .cfi_rel_offset #define CFI_UNDEFINED .cfi_undefined -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* _ASM_PARISC_DWARF_H */ diff --git a/arch/parisc/include/asm/fb.h b/arch/parisc/include/asm/fb.h deleted file mode 100644 index 658a8a7dc531..000000000000 --- a/arch/parisc/include/asm/fb.h +++ /dev/null @@ -1,14 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_FB_H_ -#define _ASM_FB_H_ - -struct fb_info; - -#if defined(CONFIG_STI_CORE) -int fb_is_primary_device(struct fb_info *info); -#define fb_is_primary_device fb_is_primary_device -#endif - -#include <asm-generic/fb.h> - -#endif /* _ASM_FB_H_ */ diff --git a/arch/parisc/include/asm/fixmap.h b/arch/parisc/include/asm/fixmap.h index 5cd80ce1163a..9cafa449c4a7 100644 --- a/arch/parisc/include/asm/fixmap.h +++ b/arch/parisc/include/asm/fixmap.h @@ -39,7 +39,7 @@ #define KERNEL_MAP_START (GATEWAY_PAGE_SIZE) #define KERNEL_MAP_END (FIXMAP_START) -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ enum fixed_addresses { @@ -59,6 +59,6 @@ extern void *parisc_vmalloc_start; void set_fixmap(enum fixed_addresses idx, phys_addr_t phys); void clear_fixmap(enum fixed_addresses idx); -#endif /*__ASSEMBLY__*/ +#endif /*__ASSEMBLER__*/ #endif /*_ASM_FIXMAP_H*/ diff --git a/arch/parisc/include/asm/ftrace.h b/arch/parisc/include/asm/ftrace.h index f1cc1ee3a647..8b89d2b642eb 100644 --- a/arch/parisc/include/asm/ftrace.h +++ b/arch/parisc/include/asm/ftrace.h @@ -2,7 +2,7 @@ #ifndef _ASM_PARISC_FTRACE_H #define _ASM_PARISC_FTRACE_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ extern void mcount(void); #define MCOUNT_ADDR ((unsigned long)mcount) @@ -29,6 +29,6 @@ unsigned long ftrace_call_adjust(unsigned long addr); #define ftrace_return_address(n) return_address(n) -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* _ASM_PARISC_FTRACE_H */ diff --git a/arch/parisc/include/asm/hugetlb.h b/arch/parisc/include/asm/hugetlb.h index 72daacc472a0..21e9ace17739 100644 --- a/arch/parisc/include/asm/hugetlb.h +++ b/arch/parisc/include/asm/hugetlb.h @@ -10,22 +10,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, - pte_t *ptep); - -/* - * If the arch doesn't supply something else, assume that hugepage - * size aligned regions are ok without further preparation. 
- */ -#define __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE -static inline int prepare_hugepage_range(struct file *file, - unsigned long addr, unsigned long len) -{ - if (len & ~HPAGE_MASK) - return -EINVAL; - if (addr & ~HPAGE_MASK) - return -EINVAL; - return 0; -} + pte_t *ptep, unsigned long sz); #define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, diff --git a/arch/parisc/include/asm/io.h b/arch/parisc/include/asm/io.h index a63190af2f05..f01ad3ad60b5 100644 --- a/arch/parisc/include/asm/io.h +++ b/arch/parisc/include/asm/io.h @@ -131,17 +131,10 @@ static inline void gsc_writeq(unsigned long long val, unsigned long addr) _PAGE_ACCESSED | _PAGE_NO_CACHE) #define ioremap_wc(addr, size) \ - ioremap_prot((addr), (size), _PAGE_IOREMAP) + ioremap_prot((addr), (size), __pgprot(_PAGE_IOREMAP)) #define pci_iounmap pci_iounmap -void memset_io(volatile void __iomem *addr, unsigned char val, int count); -void memcpy_fromio(void *dst, const volatile void __iomem *src, int count); -void memcpy_toio(volatile void __iomem *dst, const void *src, int count); -#define memset_io memset_io -#define memcpy_fromio memcpy_fromio -#define memcpy_toio memcpy_toio - /* Port-space IO */ #define inb_p inb @@ -231,36 +224,54 @@ extern void outsl (unsigned long port, const void *src, unsigned long count); #define F_EXTEND(x) ((unsigned long)((x) | (0xffffffff00000000ULL))) #ifdef CONFIG_64BIT -#define ioread64 ioread64 -#define ioread64be ioread64be -#define iowrite64 iowrite64 -#define iowrite64be iowrite64be extern u64 ioread64(const void __iomem *addr); extern u64 ioread64be(const void __iomem *addr); +#define ioread64 ioread64 +#define ioread64be ioread64be + extern void iowrite64(u64 val, void __iomem *addr); extern void iowrite64be(u64 val, void __iomem *addr); +#define iowrite64 iowrite64 +#define iowrite64be iowrite64be #endif -#include <asm-generic/iomap.h> -/* - * These get provided from <asm-generic/iomap.h> since parisc does not - * select GENERIC_IOMAP. 
- */ +extern void __iomem *ioport_map(unsigned long port, unsigned int nr); +extern void ioport_unmap(void __iomem *); #define ioport_map ioport_map #define ioport_unmap ioport_unmap + +extern unsigned int ioread8(const void __iomem *); +extern unsigned int ioread16(const void __iomem *); +extern unsigned int ioread16be(const void __iomem *); +extern unsigned int ioread32(const void __iomem *); +extern unsigned int ioread32be(const void __iomem *); #define ioread8 ioread8 #define ioread16 ioread16 #define ioread32 ioread32 #define ioread16be ioread16be #define ioread32be ioread32be + +extern void iowrite8(u8, void __iomem *); +extern void iowrite16(u16, void __iomem *); +extern void iowrite16be(u16, void __iomem *); +extern void iowrite32(u32, void __iomem *); +extern void iowrite32be(u32, void __iomem *); #define iowrite8 iowrite8 #define iowrite16 iowrite16 #define iowrite32 iowrite32 #define iowrite16be iowrite16be #define iowrite32be iowrite32be + +extern void ioread8_rep(const void __iomem *port, void *buf, unsigned long count); +extern void ioread16_rep(const void __iomem *port, void *buf, unsigned long count); +extern void ioread32_rep(const void __iomem *port, void *buf, unsigned long count); #define ioread8_rep ioread8_rep #define ioread16_rep ioread16_rep #define ioread32_rep ioread32_rep + +extern void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count); +extern void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count); +extern void iowrite32_rep(void __iomem *port, const void *buf, unsigned long count); #define iowrite8_rep iowrite8_rep #define iowrite16_rep iowrite16_rep #define iowrite32_rep iowrite32_rep diff --git a/arch/parisc/include/asm/jump_label.h b/arch/parisc/include/asm/jump_label.h index 317ebc5edc9f..f325ae3c622f 100644 --- a/arch/parisc/include/asm/jump_label.h +++ b/arch/parisc/include/asm/jump_label.h @@ -2,7 +2,7 @@ #ifndef _ASM_PARISC_JUMP_LABEL_H #define _ASM_PARISC_JUMP_LABEL_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/types.h> #include <linux/stringify.h> @@ -44,5 +44,5 @@ l_yes: return true; } -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif diff --git a/arch/parisc/include/asm/kexec.h b/arch/parisc/include/asm/kexec.h index 87e174006995..bf31e2d50df9 100644 --- a/arch/parisc/include/asm/kexec.h +++ b/arch/parisc/include/asm/kexec.h @@ -14,7 +14,7 @@ #define KEXEC_ARCH KEXEC_ARCH_PARISC #define ARCH_HAS_KIMAGE_ARCH -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ struct kimage_arch { unsigned long initrd_start; @@ -28,6 +28,6 @@ static inline void crash_setup_regs(struct pt_regs *newregs, /* Dummy implementation for now */ } -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* _ASM_PARISC_KEXEC_H */ diff --git a/arch/parisc/include/asm/kgdb.h b/arch/parisc/include/asm/kgdb.h index 317cd434bee3..9ece98bc6d9d 100644 --- a/arch/parisc/include/asm/kgdb.h +++ b/arch/parisc/include/asm/kgdb.h @@ -21,7 +21,7 @@ #define CACHE_FLUSH_IS_SAFE 1 -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ static inline void arch_kgdb_breakpoint(void) { diff --git a/arch/parisc/include/asm/linkage.h b/arch/parisc/include/asm/linkage.h index cd6fe4febead..d4cad492b971 100644 --- a/arch/parisc/include/asm/linkage.h +++ b/arch/parisc/include/asm/linkage.h @@ -15,7 +15,7 @@ */ #define ASM_NL ! 
-#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ #define ENTRY(name) \ ALIGN !\ @@ -35,6 +35,6 @@ name: ASM_NL\ .procend ASM_NL\ ENDPROC(name) -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_PARISC_LINKAGE_H */ diff --git a/arch/parisc/include/asm/mman.h b/arch/parisc/include/asm/mman.h new file mode 100644 index 000000000000..663f587dc789 --- /dev/null +++ b/arch/parisc/include/asm/mman.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_MMAN_H__ +#define __ASM_MMAN_H__ + +#include <linux/fs.h> +#include <uapi/asm/mman.h> + +/* PARISC cannot allow mdwe as it needs writable stacks */ +static inline bool arch_memory_deny_write_exec_supported(void) +{ + return false; +} +#define arch_memory_deny_write_exec_supported arch_memory_deny_write_exec_supported + +static inline unsigned long arch_calc_vm_flag_bits(struct file *file, unsigned long flags) +{ + /* + * The stack on parisc grows upwards, so if userspace requests memory + * for a stack, mark it with VM_GROWSUP so that the stack expansion in + * the fault handler will work. + */ + if (flags & MAP_STACK) + return VM_GROWSUP; + + return 0; +} +#define arch_calc_vm_flag_bits(file, flags) arch_calc_vm_flag_bits(file, flags) + +#endif /* __ASM_MMAN_H__ */ diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h index ad4e15d12ed1..8f4e51071ea1 100644 --- a/arch/parisc/include/asm/page.h +++ b/arch/parisc/include/asm/page.h @@ -4,12 +4,11 @@ #include <linux/const.h> -#define PAGE_SHIFT CONFIG_PAGE_SHIFT -#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) -#define PAGE_MASK (~(PAGE_SIZE-1)) +#include <vdso/page.h> +#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <asm/types.h> #include <asm/cache.h> @@ -94,7 +93,7 @@ typedef struct __physmem_range { extern physmem_range_t pmem_ranges[]; extern int npmem_ranges; -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ /* WARNING: The definitions below must match exactly to sizeof(pte_t) * etc @@ -140,7 +139,7 @@ extern int npmem_ranges; #define KERNEL_BINARY_TEXT_START (__PAGE_OFFSET + 0x100000) /* These macros don't work for 64-bit C code -- don't allow in C at all */ -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ # define PA(x) ((x)-__PAGE_OFFSET) # define VA(x) ((x)+__PAGE_OFFSET) #endif @@ -167,7 +166,6 @@ extern int npmem_ranges; #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) -#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) #include <asm-generic/memory_model.h> diff --git a/arch/parisc/include/asm/parisc-device.h b/arch/parisc/include/asm/parisc-device.h index 7ddd7f433367..9e74cef4d774 100644 --- a/arch/parisc/include/asm/parisc-device.h +++ b/arch/parisc/include/asm/parisc-device.h @@ -41,7 +41,7 @@ struct parisc_driver { #define to_parisc_device(d) container_of(d, struct parisc_device, dev) -#define to_parisc_driver(d) container_of(d, struct parisc_driver, drv) +#define to_parisc_driver(d) container_of_const(d, struct parisc_driver, drv) #define parisc_parent(d) to_parisc_device(d->dev.parent) static inline const char *parisc_pathname(struct parisc_device *d) diff --git a/arch/parisc/include/asm/pdc.h b/arch/parisc/include/asm/pdc.h index 5d2d9737e579..6080a1516b34 100644 --- a/arch/parisc/include/asm/pdc.h +++ b/arch/parisc/include/asm/pdc.h @@ -4,7 +4,7 @@ #include <uapi/asm/pdc.h> -#if !defined(__ASSEMBLY__) +#if !defined(__ASSEMBLER__) extern int parisc_narrow_firmware; @@ -109,5 +109,5 @@ 
static inline char * os_id_to_string(u16 os_id) { } } -#endif /* !defined(__ASSEMBLY__) */ +#endif /* !defined(__ASSEMBLER__) */ #endif /* _PARISC_PDC_H */ diff --git a/arch/parisc/include/asm/pdcpat.h b/arch/parisc/include/asm/pdcpat.h index 8f160375b865..84ac81b1adde 100644 --- a/arch/parisc/include/asm/pdcpat.h +++ b/arch/parisc/include/asm/pdcpat.h @@ -210,7 +210,7 @@ #define PDC_PAT_SYSTEM_INFO 76L /* PDC_PAT_SYSTEM_INFO uses the same options as PDC_SYSTEM_INFO function. */ -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/types.h> #ifdef CONFIG_64BIT @@ -389,6 +389,6 @@ extern int pdc_pat_mem_get_dimm_phys_location( struct pdc_pat_mem_phys_mem_location *pret, unsigned long phys_addr); -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* ! __PARISC_PATPDC_H */ diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h index e3e142b1c5c5..3b84ee93edaa 100644 --- a/arch/parisc/include/asm/pgalloc.h +++ b/arch/parisc/include/asm/pgalloc.h @@ -11,27 +11,12 @@ #include <asm/cache.h> #define __HAVE_ARCH_PMD_ALLOC_ONE -#define __HAVE_ARCH_PMD_FREE -#define __HAVE_ARCH_PGD_FREE #include <asm-generic/pgalloc.h> /* Allocate the top level pgd (page directory) */ static inline pgd_t *pgd_alloc(struct mm_struct *mm) { - pgd_t *pgd; - - pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_TABLE_ORDER); - if (unlikely(pgd == NULL)) - return NULL; - - memset(pgd, 0, PAGE_SIZE << PGD_TABLE_ORDER); - - return pgd; -} - -static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) -{ - free_pages((unsigned long)pgd, PGD_TABLE_ORDER); + return __pgd_alloc(mm, PGD_TABLE_ORDER); } #if CONFIG_PGTABLE_LEVELS == 3 @@ -46,17 +31,19 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) { - pmd_t *pmd; - - pmd = (pmd_t *)__get_free_pages(GFP_PGTABLE_KERNEL, PMD_TABLE_ORDER); - if (likely(pmd)) - memset ((void *)pmd, 0, PAGE_SIZE << PMD_TABLE_ORDER); - return pmd; -} + struct ptdesc *ptdesc; + gfp_t gfp = GFP_PGTABLE_USER; -static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) -{ - free_pages((unsigned long)pmd, PMD_TABLE_ORDER); + if (mm == &init_mm) + gfp = GFP_PGTABLE_KERNEL; + ptdesc = pagetable_alloc(gfp, PMD_TABLE_ORDER); + if (!ptdesc) + return NULL; + if (!pagetable_pmd_ctor(mm, ptdesc)) { + pagetable_free(ptdesc); + return NULL; + } + return ptdesc_address(ptdesc); } #endif diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h index 974accac05cd..1a86a4370b29 100644 --- a/arch/parisc/include/asm/pgtable.h +++ b/arch/parisc/include/asm/pgtable.h @@ -12,7 +12,7 @@ #include <asm/fixmap.h> -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ /* * we simulate an x86-style page table for the linux mm code */ @@ -73,7 +73,7 @@ extern void __update_cache(pte_t pte); mb(); \ } while(0) -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ #define pte_ERROR(e) \ printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e)) @@ -226,7 +226,7 @@ extern void __update_cache(pte_t pte); #define PxD_FLAG_SHIFT (4) #define PxD_VALUE_SHIFT (PFN_PTE_SHIFT-PxD_FLAG_SHIFT) -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_USER) #define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE) @@ -338,10 +338,6 @@ static inline pte_t pte_mkspecial(pte_t pte) { pte_val(pte) |= _PAGE_SPECIAL; re #endif -/* - * Conversion functions: convert a page and protection to a page entry, - * 
and a page entry and page directory to the page they refer to. - */ #define __mk_pte(addr,pgprot) \ ({ \ pte_t __pte; \ @@ -351,8 +347,6 @@ static inline pte_t pte_mkspecial(pte_t pte) { pte_val(pte) |= _PAGE_SPECIAL; re __pte; \ }) -#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) - static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) { pte_t pte; @@ -431,7 +425,7 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr, #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) -static inline int pte_swp_exclusive(pte_t pte) +static inline bool pte_swp_exclusive(pte_t pte) { return pte_val(pte) & _PAGE_SWP_EXCLUSIVE; } @@ -448,14 +442,17 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte) return pte; } +static inline pte_t ptep_get(pte_t *ptep) +{ + return READ_ONCE(*ptep); +} +#define ptep_get ptep_get + static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { pte_t pte; - if (!pte_young(*ptep)) - return 0; - - pte = *ptep; + pte = ptep_get(ptep); if (!pte_young(pte)) { return 0; } @@ -463,17 +460,10 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned return 1; } -struct mm_struct; -static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) -{ - pte_t old_pte; - - old_pte = *ptep; - set_pte(ptep, __pte(0)); - - return old_pte; -} +int ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep); +pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep); +struct mm_struct; static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { set_pte(ptep, pte_wrprotect(*ptep)); @@ -481,7 +471,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, #define pte_same(A,B) (pte_val(A) == pte_val(B)) -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ /* TLB page size encoding - see table 3-1 in parisc20.pdf */ @@ -511,7 +501,8 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG -#define __HAVE_ARCH_PTEP_GET_AND_CLEAR +#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH +#define __HAVE_ARCH_PTEP_CLEAR_FLUSH #define __HAVE_ARCH_PTEP_SET_WRPROTECT #define __HAVE_ARCH_PTE_SAME diff --git a/arch/parisc/include/asm/prefetch.h b/arch/parisc/include/asm/prefetch.h index 6e63f720024d..748eefb27c68 100644 --- a/arch/parisc/include/asm/prefetch.h +++ b/arch/parisc/include/asm/prefetch.h @@ -16,7 +16,7 @@ #ifndef __ASM_PARISC_PREFETCH_H #define __ASM_PARISC_PREFETCH_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #ifdef CONFIG_PREFETCH #define ARCH_HAS_PREFETCH @@ -40,6 +40,6 @@ static inline void prefetchw(const void *addr) #endif /* CONFIG_PA20 */ #endif /* CONFIG_PREFETCH */ -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_PARISC_PROCESSOR_H */ diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h index 982aca20f56f..4c14bde39aac 100644 --- a/arch/parisc/include/asm/processor.h +++ b/arch/parisc/include/asm/processor.h @@ -9,7 +9,7 @@ #ifndef __ASM_PARISC_PROCESSOR_H #define __ASM_PARISC_PROCESSOR_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/threads.h> #include <linux/irqreturn.h> @@ -20,7 +20,7 @@ #include <asm/ptrace.h> #include <asm/types.h> #include <asm/percpu.h> -#endif /* __ASSEMBLY__ */ 
+#endif /* __ASSEMBLER__ */ #define HAVE_ARCH_PICK_MMAP_LAYOUT @@ -45,7 +45,7 @@ #define STACK_TOP TASK_SIZE #define STACK_TOP_MAX DEFAULT_TASK_SIZE -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ struct rlimit; unsigned long mmap_upper_limit(struct rlimit *rlim_stack); @@ -298,7 +298,7 @@ extern unsigned int toc_handler_csum; extern void do_cpu_irq_mask(struct pt_regs *); extern irqreturn_t timer_interrupt(int, void *); extern irqreturn_t ipi_interrupt(int, void *); -extern void start_cpu_itimer(void); +extern void parisc_clockevent_init(void); extern void handle_interruption(int, struct pt_regs *); /* called from assembly code: */ @@ -325,6 +325,6 @@ extern void sba_directed_lmmio(struct parisc_device *, struct resource *); extern void lba_set_iregs(struct parisc_device *lba, u32 ibase, u32 imask); extern void ccio_cujo20_fixup(struct parisc_device *dev, u32 iovp); -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_PARISC_PROCESSOR_H */ diff --git a/arch/parisc/include/asm/psw.h b/arch/parisc/include/asm/psw.h index 46921ffcc407..9140e1ab7e63 100644 --- a/arch/parisc/include/asm/psw.h +++ b/arch/parisc/include/asm/psw.h @@ -60,7 +60,7 @@ #define USER_PSW_MASK (WIDE_PSW | PSW_T | PSW_N | PSW_X | PSW_B | PSW_V | PSW_CB) #define USER_PSW (PSW_C | PSW_Q | PSW_P | PSW_D | PSW_I) -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ /* The program status word as bitfields. */ struct pa_psw { @@ -99,6 +99,6 @@ struct pa_psw { #define pa_psw(task) ((struct pa_psw *) ((char *) (task) + TASK_PT_PSW)) #endif -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ #endif diff --git a/arch/parisc/include/asm/signal.h b/arch/parisc/include/asm/signal.h index 715c96ba2ec8..85c3d7409bbc 100644 --- a/arch/parisc/include/asm/signal.h +++ b/arch/parisc/include/asm/signal.h @@ -4,24 +4,12 @@ #include <uapi/asm/signal.h> -#define _NSIG 64 -/* bits-per-word, where word apparently means 'long' not 'int' */ -#define _NSIG_BPW BITS_PER_LONG -#define _NSIG_WORDS (_NSIG / _NSIG_BPW) - -# ifndef __ASSEMBLY__ +# ifndef __ASSEMBLER__ /* Most things should be clean enough to redefine this at will, if care is taken to make libc match. 
*/ -typedef unsigned long old_sigset_t; /* at least 32 bits */ - -typedef struct { - /* next_signal() assumes this is a long - no choice */ - unsigned long sig[_NSIG_WORDS]; -} sigset_t; - #include <asm/sigcontext.h> -#endif /* !__ASSEMBLY */ +#endif /* !__ASSEMBLER__ */ #endif /* _ASM_PARISC_SIGNAL_H */ diff --git a/arch/parisc/include/asm/smp.h b/arch/parisc/include/asm/smp.h index 94d1f21ce99a..0cf1c3a2696a 100644 --- a/arch/parisc/include/asm/smp.h +++ b/arch/parisc/include/asm/smp.h @@ -12,7 +12,7 @@ extern int init_per_cpu(int cpuid); #define PDC_OS_BOOT_RENDEZVOUS 0x10 #define PDC_OS_BOOT_RENDEZVOUS_HI 0x28 -#ifndef ASSEMBLY +#ifndef __ASSEMBLER__ #include <linux/bitops.h> #include <linux/threads.h> /* for NR_CPUS */ #include <linux/cpumask.h> @@ -34,7 +34,7 @@ extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); #define raw_smp_processor_id() (current_thread_info()->cpu) -#endif /* !ASSEMBLY */ +#endif /* !__ASSEMBLER__ */ #else /* CONFIG_SMP */ diff --git a/arch/parisc/include/asm/spinlock_types.h b/arch/parisc/include/asm/spinlock_types.h index 7b986b09dba8..8e6889bc23cc 100644 --- a/arch/parisc/include/asm/spinlock_types.h +++ b/arch/parisc/include/asm/spinlock_types.h @@ -6,7 +6,7 @@ #define SPINLOCK_BREAK_INSN 0x0000c006 /* break 6,6 */ -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ typedef struct { volatile unsigned int lock[4]; @@ -26,7 +26,7 @@ typedef struct { volatile unsigned int counter; } arch_rwlock_t; -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #define __ARCH_RW_LOCK_UNLOCKED__ 0x01000000 #define __ARCH_RW_LOCK_UNLOCKED { .lock_mutex = __ARCH_SPIN_LOCK_UNLOCKED, \ diff --git a/arch/parisc/include/asm/syscall.h b/arch/parisc/include/asm/syscall.h index 00b127a5e09b..c11222798ab2 100644 --- a/arch/parisc/include/asm/syscall.h +++ b/arch/parisc/include/asm/syscall.h @@ -17,6 +17,13 @@ static inline long syscall_get_nr(struct task_struct *tsk, return regs->gr[20]; } +static inline void syscall_set_nr(struct task_struct *tsk, + struct pt_regs *regs, + int nr) +{ + regs->gr[20] = nr; +} + static inline void syscall_get_arguments(struct task_struct *tsk, struct pt_regs *regs, unsigned long *args) @@ -29,6 +36,18 @@ static inline void syscall_get_arguments(struct task_struct *tsk, args[0] = regs->gr[26]; } +static inline void syscall_set_arguments(struct task_struct *tsk, + struct pt_regs *regs, + unsigned long *args) +{ + regs->gr[21] = args[5]; + regs->gr[22] = args[4]; + regs->gr[23] = args[3]; + regs->gr[24] = args[2]; + regs->gr[25] = args[1]; + regs->gr[26] = args[0]; +} + static inline long syscall_get_error(struct task_struct *task, struct pt_regs *regs) { diff --git a/arch/parisc/include/asm/patch.h b/arch/parisc/include/asm/text-patching.h index 400d84c6e504..400d84c6e504 100644 --- a/arch/parisc/include/asm/patch.h +++ b/arch/parisc/include/asm/text-patching.h diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h index 1a58795f785c..b283738bb6da 100644 --- a/arch/parisc/include/asm/thread_info.h +++ b/arch/parisc/include/asm/thread_info.h @@ -2,7 +2,7 @@ #ifndef _ASM_PARISC_THREAD_INFO_H #define _ASM_PARISC_THREAD_INFO_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <asm/processor.h> #include <asm/special_insns.h> @@ -20,7 +20,7 @@ struct thread_info { .preempt_count = INIT_PREEMPT_COUNT, \ } -#endif /* !__ASSEMBLY */ +#endif /* !__ASSEMBLER__ */ /* thread information allocation */ diff --git a/arch/parisc/include/asm/traps.h b/arch/parisc/include/asm/traps.h index 
0ccdb738a9a3..10c8fb68e404 100644 --- a/arch/parisc/include/asm/traps.h +++ b/arch/parisc/include/asm/traps.h @@ -4,7 +4,7 @@ #define PARISC_ITLB_TRAP 6 /* defined by architecture. Do not change. */ -#if !defined(__ASSEMBLY__) +#if !defined(__ASSEMBLER__) struct pt_regs; /* traps.c */ diff --git a/arch/parisc/include/asm/unaligned.h b/arch/parisc/include/asm/unaligned.h deleted file mode 100644 index c0621295100d..000000000000 --- a/arch/parisc/include/asm/unaligned.h +++ /dev/null @@ -1,11 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_PARISC_UNALIGNED_H -#define _ASM_PARISC_UNALIGNED_H - -#include <asm-generic/unaligned.h> - -struct pt_regs; -void handle_unaligned(struct pt_regs *regs); -int check_unaligned(struct pt_regs *regs); - -#endif /* _ASM_PARISC_UNALIGNED_H */ diff --git a/arch/parisc/include/asm/unistd.h b/arch/parisc/include/asm/unistd.h index e38f9a90ac15..3e46c6ea9df6 100644 --- a/arch/parisc/include/asm/unistd.h +++ b/arch/parisc/include/asm/unistd.h @@ -6,7 +6,7 @@ #define __NR_Linux_syscalls __NR_syscalls -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #define SYS_ify(syscall_name) __NR_##syscall_name @@ -20,7 +20,7 @@ * sysdeps/unix/sysv/linux/hppa/sysdep.h */ -#ifdef PIC +#ifndef DONT_USE_PIC /* WARNING: CANNOT BE USED IN A NOP! */ # define K_STW_ASM_PIC " copy %%r19, %%r4\n" # define K_LDW_ASM_PIC " copy %%r4, %%r19\n" @@ -43,7 +43,7 @@ across the syscall. */ #define K_CALL_CLOB_REGS "%r1", "%r2", K_USING_GR4 \ - "%r20", "%r29", "%r31" + "%r20", "%r29", "%r31" #undef K_INLINE_SYSCALL #define K_INLINE_SYSCALL(name, nr, args...) ({ \ @@ -58,7 +58,7 @@ " ldi %1, %%r20\n" \ K_LDW_ASM_PIC \ : "=r" (__res) \ - : "i" (SYS_ify(name)) K_ASM_ARGS_##nr \ + : "i" (name) K_ASM_ARGS_##nr \ : "memory", K_CALL_CLOB_REGS K_CLOB_ARGS_##nr \ ); \ __sys_res = (long)__res; \ @@ -104,42 +104,18 @@ #define K_CLOB_ARGS_1 K_CLOB_ARGS_2, "%r25" #define K_CLOB_ARGS_0 K_CLOB_ARGS_1, "%r26" -#define _syscall0(type,name) \ -type name(void) \ -{ \ - return K_INLINE_SYSCALL(name, 0); \ -} - -#define _syscall1(type,name,type1,arg1) \ -type name(type1 arg1) \ -{ \ - return K_INLINE_SYSCALL(name, 1, arg1); \ -} - -#define _syscall2(type,name,type1,arg1,type2,arg2) \ -type name(type1 arg1, type2 arg2) \ -{ \ - return K_INLINE_SYSCALL(name, 2, arg1, arg2); \ -} - -#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \ -type name(type1 arg1, type2 arg2, type3 arg3) \ -{ \ - return K_INLINE_SYSCALL(name, 3, arg1, arg2, arg3); \ -} - -#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ -type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \ -{ \ - return K_INLINE_SYSCALL(name, 4, arg1, arg2, arg3, arg4); \ -} - -/* select takes 5 arguments */ -#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \ -type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \ -{ \ - return K_INLINE_SYSCALL(name, 5, arg1, arg2, arg3, arg4, arg5); \ -} +#define syscall0(name) \ + K_INLINE_SYSCALL(name, 0) +#define syscall1(name, arg1) \ + K_INLINE_SYSCALL(name, 1, arg1) +#define syscall2(name, arg1, arg2) \ + K_INLINE_SYSCALL(name, 2, arg1, arg2) +#define syscall3(name, arg1, arg2, arg3) \ + K_INLINE_SYSCALL(name, 3, arg1, arg2, arg3) +#define syscall4(name, arg1, arg2, arg3, arg4) \ + K_INLINE_SYSCALL(name, 4, arg1, arg2, arg3, arg4) +#define syscall5(name, arg1, arg2, arg3, arg4, arg5) \ + K_INLINE_SYSCALL(name, 5, arg1, arg2, arg3, arg4, arg5) #define __ARCH_WANT_NEW_STAT #define __ARCH_WANT_STAT64 @@ -160,7 +136,6 @@ type name(type1 
arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \ #define __ARCH_WANT_SYS_FORK #define __ARCH_WANT_SYS_VFORK #define __ARCH_WANT_SYS_CLONE -#define __ARCH_WANT_SYS_CLONE3 #define __ARCH_WANT_COMPAT_SYS_SENDFILE #define __ARCH_WANT_COMPAT_STAT @@ -169,7 +144,7 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \ #define __ARCH_WANT_SYS_UTIME #endif -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #undef STR diff --git a/arch/parisc/include/asm/vdso.h b/arch/parisc/include/asm/vdso.h index ef8206193f82..81bc1d42802a 100644 --- a/arch/parisc/include/asm/vdso.h +++ b/arch/parisc/include/asm/vdso.h @@ -2,7 +2,7 @@ #ifndef __PARISC_VDSO_H__ #define __PARISC_VDSO_H__ -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #ifdef CONFIG_64BIT #include <generated/vdso64-offsets.h> @@ -12,13 +12,11 @@ #define VDSO64_SYMBOL(tsk, name) ((tsk)->mm->context.vdso_base + (vdso64_offset_##name)) #define VDSO32_SYMBOL(tsk, name) ((tsk)->mm->context.vdso_base + (vdso32_offset_##name)) -extern struct vdso_data *vdso_data; - -#endif /* __ASSEMBLY __ */ +#endif /* __ASSEMBLER__ */ /* Default link addresses for the vDSOs */ #define VDSO_LBASE 0 -#define VDSO_VERSION_STRING LINUX_5.18 +#define VDSO_VERSION_STRING LINUX_6.11 #endif /* __PARISC_VDSO_H__ */ diff --git a/arch/parisc/include/asm/video.h b/arch/parisc/include/asm/video.h new file mode 100644 index 000000000000..c5dff3223194 --- /dev/null +++ b/arch/parisc/include/asm/video.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_VIDEO_H_ +#define _ASM_VIDEO_H_ + +#include <linux/types.h> + +struct device; + +#if defined(CONFIG_STI_CORE) +bool video_is_primary_device(struct device *dev); +#define video_is_primary_device video_is_primary_device +#endif + +#include <asm-generic/video.h> + +#endif /* _ASM_VIDEO_H_ */ diff --git a/arch/parisc/include/uapi/asm/mman.h b/arch/parisc/include/uapi/asm/mman.h index 68c44f99bc93..b6a709506987 100644 --- a/arch/parisc/include/uapi/asm/mman.h +++ b/arch/parisc/include/uapi/asm/mman.h @@ -75,6 +75,9 @@ #define MADV_HWPOISON 100 /* poison a page for testing */ #define MADV_SOFT_OFFLINE 101 /* soft offline page for testing */ +#define MADV_GUARD_INSTALL 102 /* fatal signal on access to range */ +#define MADV_GUARD_REMOVE 103 /* unguard range */ + /* compatibility flags */ #define MAP_FILE 0 diff --git a/arch/parisc/include/uapi/asm/pdc.h b/arch/parisc/include/uapi/asm/pdc.h index fef4f2e96160..65031ddf8372 100644 --- a/arch/parisc/include/uapi/asm/pdc.h +++ b/arch/parisc/include/uapi/asm/pdc.h @@ -361,7 +361,7 @@ /* size of the pdc_result buffer for firmware.c */ #define NUM_PDC_RESULT 32 -#if !defined(__ASSEMBLY__) +#if !defined(__ASSEMBLER__) /* flags for hardware_path */ #define PF_AUTOBOOT 0x80 @@ -741,6 +741,6 @@ struct pdc_firm_test_get_rtn_block { /* PDC_MODEL/PDC_FIRM_TEST_GET */ #define PIRANHA_CPU_ID 0x13 #define MAKO_CPU_ID 0x14 -#endif /* !defined(__ASSEMBLY__) */ +#endif /* !defined(__ASSEMBLER__) */ #endif /* _UAPI_PARISC_PDC_H */ diff --git a/arch/parisc/include/uapi/asm/signal.h b/arch/parisc/include/uapi/asm/signal.h index 8e4895c5ea5d..d99accf37341 100644 --- a/arch/parisc/include/uapi/asm/signal.h +++ b/arch/parisc/include/uapi/asm/signal.h @@ -57,10 +57,20 @@ #include <asm-generic/signal-defs.h> -# ifndef __ASSEMBLY__ +#define _NSIG 64 +#define _NSIG_BPW (sizeof(unsigned long) * 8) +#define _NSIG_WORDS (_NSIG / _NSIG_BPW) + +# ifndef __ASSEMBLER__ # include <linux/types.h> +typedef unsigned long old_sigset_t; /* at least 32 bits */ + +typedef struct { + 
unsigned long sig[_NSIG_WORDS]; +} sigset_t; + /* Avoid too many header ordering problems. */ struct siginfo; @@ -70,5 +80,5 @@ typedef struct sigaltstack { __kernel_size_t ss_size; } stack_t; -#endif /* !__ASSEMBLY */ +#endif /* !__ASSEMBLER__ */ #endif /* _UAPI_ASM_PARISC_SIGNAL_H */ diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h index be264c2b1a11..1f2d5b7a7f5d 100644 --- a/arch/parisc/include/uapi/asm/socket.h +++ b/arch/parisc/include/uapi/asm/socket.h @@ -132,6 +132,18 @@ #define SO_PASSPIDFD 0x404A #define SO_PEERPIDFD 0x404B +#define SCM_TS_OPT_ID 0x404C + +#define SO_RCVPRIORITY 0x404D + +#define SO_DEVMEM_LINEAR 0x404E +#define SCM_DEVMEM_LINEAR SO_DEVMEM_LINEAR +#define SO_DEVMEM_DMABUF 0x404F +#define SCM_DEVMEM_DMABUF SO_DEVMEM_DMABUF +#define SO_DEVMEM_DONTNEED 0x4050 + +#define SO_PASSRIGHTS 0x4051 + #if !defined(__KERNEL__) #if __BITS_PER_LONG == 64 diff --git a/arch/parisc/install.sh b/arch/parisc/install.sh index 933d031c249a..664c2d77f776 100755 --- a/arch/parisc/install.sh +++ b/arch/parisc/install.sh @@ -16,6 +16,8 @@ # $3 - kernel map file # $4 - default install path (blank if root directory) +set -e + if [ "$(basename $2)" = "vmlinuz" ]; then # Compressed install echo "Installing compressed kernel" diff --git a/arch/parisc/kernel/Makefile b/arch/parisc/kernel/Makefile index 5ab0467be70a..d5055ba33722 100644 --- a/arch/parisc/kernel/Makefile +++ b/arch/parisc/kernel/Makefile @@ -3,7 +3,7 @@ # Makefile for arch/parisc/kernel # -extra-y := vmlinux.lds +always-$(KBUILD_BUILTIN) := vmlinux.lds obj-y := head.o cache.o pacache.o setup.o pdt.o traps.o time.o irq.o \ syscall.o entry.o sys_parisc.o firmware.o \ diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index 422f3e1e6d9c..db531e58d70e 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c @@ -20,6 +20,7 @@ #include <linux/sched.h> #include <linux/sched/mm.h> #include <linux/syscalls.h> +#include <linux/vmalloc.h> #include <asm/pdc.h> #include <asm/cache.h> #include <asm/cacheflush.h> @@ -31,20 +32,31 @@ #include <asm/mmu_context.h> #include <asm/cachectl.h> +#define PTR_PAGE_ALIGN_DOWN(addr) PTR_ALIGN_DOWN(addr, PAGE_SIZE) + +/* + * When nonzero, use _PAGE_ACCESSED bit to try to reduce the number + * of page flushes done flush_cache_page_if_present. There are some + * pros and cons in using this option. It may increase the risk of + * random segmentation faults. + */ +#define CONFIG_FLUSH_PAGE_ACCESSED 0 + int split_tlb __ro_after_init; int dcache_stride __ro_after_init; int icache_stride __ro_after_init; EXPORT_SYMBOL(dcache_stride); +/* Internal implementation in arch/parisc/kernel/pacache.S */ void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr); EXPORT_SYMBOL(flush_dcache_page_asm); void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr); void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr); - -/* Internal implementation in arch/parisc/kernel/pacache.S */ void flush_data_cache_local(void *); /* flushes local data-cache only */ void flush_instruction_cache_local(void); /* flushes local code-cache only */ +static void flush_kernel_dcache_page_addr(const void *addr); + /* On some machines (i.e., ones with the Merced bus), there can be * only a single PxTLB broadcast at a time; this must be guaranteed * by software. 
We need a spinlock around all TLB flushes to ensure @@ -321,6 +333,18 @@ __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, { if (!static_branch_likely(&parisc_has_cache)) return; + + /* + * The TLB is the engine of coherence on parisc. The CPU is + * entitled to speculate any page with a TLB mapping, so here + * we kill the mapping then flush the page along a special flush + * only alias mapping. This guarantees that the page is no-longer + * in the cache for any process and nor may it be speculatively + * read in (until the user or kernel specifically accesses it, + * of course). + */ + flush_tlb_page(vma, vmaddr); + preempt_disable(); flush_dcache_page_asm(physaddr, vmaddr); if (vma->vm_flags & VM_EXEC) @@ -328,46 +352,44 @@ __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, preempt_enable(); } -static void flush_user_cache_page(struct vm_area_struct *vma, unsigned long vmaddr) +static void flush_kernel_dcache_page_addr(const void *addr) { - unsigned long flags, space, pgd, prot; -#ifdef CONFIG_TLB_PTLOCK - unsigned long pgd_lock; -#endif + unsigned long vaddr = (unsigned long)addr; + unsigned long flags; - vmaddr &= PAGE_MASK; + /* Purge TLB entry to remove translation on all CPUs */ + purge_tlb_start(flags); + pdtlb(SR_KERNEL, addr); + purge_tlb_end(flags); + /* Use tmpalias flush to prevent data cache move-in */ preempt_disable(); + flush_dcache_page_asm(__pa(vaddr), vaddr); + preempt_enable(); +} - /* Set context for flush */ - local_irq_save(flags); - prot = mfctl(8); - space = mfsp(SR_USER); - pgd = mfctl(25); -#ifdef CONFIG_TLB_PTLOCK - pgd_lock = mfctl(28); -#endif - switch_mm_irqs_off(NULL, vma->vm_mm, NULL); - local_irq_restore(flags); - - flush_user_dcache_range_asm(vmaddr, vmaddr + PAGE_SIZE); - if (vma->vm_flags & VM_EXEC) - flush_user_icache_range_asm(vmaddr, vmaddr + PAGE_SIZE); - flush_tlb_page(vma, vmaddr); +static void flush_kernel_icache_page_addr(const void *addr) +{ + unsigned long vaddr = (unsigned long)addr; + unsigned long flags; - /* Restore previous context */ - local_irq_save(flags); -#ifdef CONFIG_TLB_PTLOCK - mtctl(pgd_lock, 28); -#endif - mtctl(pgd, 25); - mtsp(space, SR_USER); - mtctl(prot, 8); - local_irq_restore(flags); + /* Purge TLB entry to remove translation on all CPUs */ + purge_tlb_start(flags); + pdtlb(SR_KERNEL, addr); + purge_tlb_end(flags); + /* Use tmpalias flush to prevent instruction cache move-in */ + preempt_disable(); + flush_icache_page_asm(__pa(vaddr), vaddr); preempt_enable(); } +void kunmap_flush_on_unmap(const void *addr) +{ + flush_kernel_dcache_page_addr(addr); +} +EXPORT_SYMBOL(kunmap_flush_on_unmap); + void flush_icache_pages(struct vm_area_struct *vma, struct page *page, unsigned int nr) { @@ -375,13 +397,16 @@ void flush_icache_pages(struct vm_area_struct *vma, struct page *page, for (;;) { flush_kernel_dcache_page_addr(kaddr); - flush_kernel_icache_page(kaddr); + flush_kernel_icache_page_addr(kaddr); if (--nr == 0) break; kaddr += PAGE_SIZE; } } +/* + * Walk page directory for MM to find PTEP pointer for address ADDR. + */ static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr) { pte_t *ptep = NULL; @@ -410,6 +435,41 @@ static inline bool pte_needs_flush(pte_t pte) == (_PAGE_PRESENT | _PAGE_ACCESSED); } +/* + * Return user physical address. Returns 0 if page is not present. 
+ */ +static inline unsigned long get_upa(struct mm_struct *mm, unsigned long addr) +{ + unsigned long flags, space, pgd, prot, pa; +#ifdef CONFIG_TLB_PTLOCK + unsigned long pgd_lock; +#endif + + /* Save context */ + local_irq_save(flags); + prot = mfctl(8); + space = mfsp(SR_USER); + pgd = mfctl(25); +#ifdef CONFIG_TLB_PTLOCK + pgd_lock = mfctl(28); +#endif + + /* Set context for lpa_user */ + switch_mm_irqs_off(NULL, mm, NULL); + pa = lpa_user(addr); + + /* Restore previous context */ +#ifdef CONFIG_TLB_PTLOCK + mtctl(pgd_lock, 28); +#endif + mtctl(pgd, 25); + mtsp(space, SR_USER); + mtctl(prot, 8); + local_irq_restore(flags); + + return pa; +} + void flush_dcache_folio(struct folio *folio) { struct address_space *mapping = folio_flush_mapping(folio); @@ -458,50 +518,23 @@ void flush_dcache_folio(struct folio *folio) if (addr + nr * PAGE_SIZE > vma->vm_end) nr = (vma->vm_end - addr) / PAGE_SIZE; - if (parisc_requires_coherency()) { - for (i = 0; i < nr; i++) { - pte_t *ptep = get_ptep(vma->vm_mm, - addr + i * PAGE_SIZE); - if (!ptep) - continue; - if (pte_needs_flush(*ptep)) - flush_user_cache_page(vma, - addr + i * PAGE_SIZE); - /* Optimise accesses to the same table? */ - pte_unmap(ptep); - } - } else { + if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1)) + != (addr & (SHM_COLOUR - 1))) { + for (i = 0; i < nr; i++) + __flush_cache_page(vma, + addr + i * PAGE_SIZE, + (pfn + i) * PAGE_SIZE); /* - * The TLB is the engine of coherence on parisc: - * The CPU is entitled to speculate any page - * with a TLB mapping, so here we kill the - * mapping then flush the page along a special - * flush only alias mapping. This guarantees that - * the page is no-longer in the cache for any - * process and nor may it be speculatively read - * in (until the user or kernel specifically - * accesses it, of course) + * Software is allowed to have any number + * of private mappings to a page. */ - for (i = 0; i < nr; i++) - flush_tlb_page(vma, addr + i * PAGE_SIZE); - if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1)) - != (addr & (SHM_COLOUR - 1))) { - for (i = 0; i < nr; i++) - __flush_cache_page(vma, - addr + i * PAGE_SIZE, - (pfn + i) * PAGE_SIZE); - /* - * Software is allowed to have any number - * of private mappings to a page. 
- */ - if (!(vma->vm_flags & VM_SHARED)) - continue; - if (old_addr) - pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n", - old_addr, addr, vma->vm_file); - if (nr == folio_nr_pages(folio)) - old_addr = addr; - } + if (!(vma->vm_flags & VM_SHARED)) + continue; + if (old_addr) + pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n", + old_addr, addr, vma->vm_file); + if (nr == folio_nr_pages(folio)) + old_addr = addr; } WARN_ON(++count == 4096); } @@ -578,11 +611,7 @@ void __init parisc_setup_cache_timing(void) threshold/1024); set_tlb_threshold: - if (threshold > FLUSH_TLB_THRESHOLD) - parisc_tlb_flush_threshold = threshold; - else - parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD; - + parisc_tlb_flush_threshold = max(threshold, FLUSH_TLB_THRESHOLD); printk(KERN_INFO "TLB flush threshold set to %lu KiB\n", parisc_tlb_flush_threshold/1024); } @@ -591,35 +620,28 @@ extern void purge_kernel_dcache_page_asm(unsigned long); extern void clear_user_page_asm(void *, unsigned long); extern void copy_user_page_asm(void *, void *, unsigned long); -void flush_kernel_dcache_page_addr(const void *addr) -{ - unsigned long flags; - - flush_kernel_dcache_page_asm(addr); - purge_tlb_start(flags); - pdtlb(SR_KERNEL, addr); - purge_tlb_end(flags); -} -EXPORT_SYMBOL(flush_kernel_dcache_page_addr); - static void flush_cache_page_if_present(struct vm_area_struct *vma, - unsigned long vmaddr, unsigned long pfn) + unsigned long vmaddr) { +#if CONFIG_FLUSH_PAGE_ACCESSED bool needs_flush = false; - pte_t *ptep; + pte_t *ptep, pte; - /* - * The pte check is racy and sometimes the flush will trigger - * a non-access TLB miss. Hopefully, the page has already been - * flushed. - */ ptep = get_ptep(vma->vm_mm, vmaddr); if (ptep) { - needs_flush = pte_needs_flush(*ptep); + pte = ptep_get(ptep); + needs_flush = pte_needs_flush(pte); pte_unmap(ptep); } if (needs_flush) - flush_cache_page(vma, vmaddr, pfn); + __flush_cache_page(vma, vmaddr, PFN_PHYS(pte_pfn(pte))); +#else + struct mm_struct *mm = vma->vm_mm; + unsigned long physaddr = get_upa(mm, vmaddr); + + if (physaddr) + __flush_cache_page(vma, vmaddr, PAGE_ALIGN_DOWN(physaddr)); +#endif } void copy_user_highpage(struct page *to, struct page *from, @@ -629,7 +651,7 @@ void copy_user_highpage(struct page *to, struct page *from, kfrom = kmap_local_page(from); kto = kmap_local_page(to); - flush_cache_page_if_present(vma, vaddr, page_to_pfn(from)); + __flush_cache_page(vma, vaddr, PFN_PHYS(page_to_pfn(from))); copy_page_asm(kto, kfrom); kunmap_local(kto); kunmap_local(kfrom); @@ -638,16 +660,17 @@ void copy_user_highpage(struct page *to, struct page *from, void copy_to_user_page(struct vm_area_struct *vma, struct page *page, unsigned long user_vaddr, void *dst, void *src, int len) { - flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page)); + __flush_cache_page(vma, user_vaddr, PFN_PHYS(page_to_pfn(page))); memcpy(dst, src, len); - flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len); + flush_kernel_dcache_page_addr(PTR_PAGE_ALIGN_DOWN(dst)); } void copy_from_user_page(struct vm_area_struct *vma, struct page *page, unsigned long user_vaddr, void *dst, void *src, int len) { - flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page)); + __flush_cache_page(vma, user_vaddr, PFN_PHYS(page_to_pfn(page))); memcpy(dst, src, len); + flush_kernel_dcache_page_addr(PTR_PAGE_ALIGN_DOWN(src)); } /* __flush_tlb_range() @@ -681,32 +704,10 @@ int __flush_tlb_range(unsigned long sid, unsigned long start, static void 
flush_cache_pages(struct vm_area_struct *vma, unsigned long start, unsigned long end) { - unsigned long addr, pfn; - pte_t *ptep; - - for (addr = start; addr < end; addr += PAGE_SIZE) { - bool needs_flush = false; - /* - * The vma can contain pages that aren't present. Although - * the pte search is expensive, we need the pte to find the - * page pfn and to check whether the page should be flushed. - */ - ptep = get_ptep(vma->vm_mm, addr); - if (ptep) { - needs_flush = pte_needs_flush(*ptep); - pfn = pte_pfn(*ptep); - pte_unmap(ptep); - } - if (needs_flush) { - if (parisc_requires_coherency()) { - flush_user_cache_page(vma, addr); - } else { - if (WARN_ON(!pfn_valid(pfn))) - return; - __flush_cache_page(vma, addr, PFN_PHYS(pfn)); - } - } - } + unsigned long addr; + + for (addr = start; addr < end; addr += PAGE_SIZE) + flush_cache_page_if_present(vma, addr); } static inline unsigned long mm_total_size(struct mm_struct *mm) @@ -757,21 +758,19 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled())) return; flush_tlb_range(vma, start, end); - flush_cache_all(); + if (vma->vm_flags & VM_EXEC) + flush_cache_all(); + else + flush_data_cache(); return; } - flush_cache_pages(vma, start, end); + flush_cache_pages(vma, start & PAGE_MASK, end); } void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn) { - if (WARN_ON(!pfn_valid(pfn))) - return; - if (parisc_requires_coherency()) - flush_user_cache_page(vma, vmaddr); - else - __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn)); + __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn)); } void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr) @@ -779,34 +778,133 @@ void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned lon if (!PageAnon(page)) return; - if (parisc_requires_coherency()) { - if (vma->vm_flags & VM_SHARED) - flush_data_cache(); - else - flush_user_cache_page(vma, vmaddr); + __flush_cache_page(vma, vmaddr, PFN_PHYS(page_to_pfn(page))); +} + +int ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long addr, + pte_t *ptep) +{ + pte_t pte = ptep_get(ptep); + + if (!pte_young(pte)) + return 0; + set_pte(ptep, pte_mkold(pte)); +#if CONFIG_FLUSH_PAGE_ACCESSED + __flush_cache_page(vma, addr, PFN_PHYS(pte_pfn(pte))); +#endif + return 1; +} + +/* + * After a PTE is cleared, we have no way to flush the cache for + * the physical page. On PA8800 and PA8900 processors, these lines + * can cause random cache corruption. Thus, we must flush the cache + * as well as the TLB when clearing a PTE that's valid. + */ +pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, + pte_t *ptep) +{ + struct mm_struct *mm = (vma)->vm_mm; + pte_t pte = ptep_get_and_clear(mm, addr, ptep); + unsigned long pfn = pte_pfn(pte); + + if (pfn_valid(pfn)) + __flush_cache_page(vma, addr, PFN_PHYS(pfn)); + else if (pte_accessible(mm, pte)) + flush_tlb_page(vma, addr); + + return pte; +} + +/* + * The physical address for pages in the ioremap case can be obtained + * from the vm_struct struct. I wasn't able to successfully handle the + * vmalloc and vmap cases. We have an array of struct page pointers in + * the uninitialized vmalloc case but the flush failed using page_to_pfn. 
+ */ +void flush_cache_vmap(unsigned long start, unsigned long end) +{ + unsigned long addr, physaddr; + struct vm_struct *vm; + + /* Prevent cache move-in */ + flush_tlb_kernel_range(start, end); + + if (end - start >= parisc_cache_flush_threshold) { + flush_cache_all(); return; } - flush_tlb_page(vma, vmaddr); - preempt_disable(); - flush_dcache_page_asm(page_to_phys(page), vmaddr); - preempt_enable(); + if (WARN_ON_ONCE(!is_vmalloc_addr((void *)start))) { + flush_cache_all(); + return; + } + + vm = find_vm_area((void *)start); + if (WARN_ON_ONCE(!vm)) { + flush_cache_all(); + return; + } + + /* The physical addresses of IOREMAP regions are contiguous */ + if (vm->flags & VM_IOREMAP) { + physaddr = vm->phys_addr; + for (addr = start; addr < end; addr += PAGE_SIZE) { + preempt_disable(); + flush_dcache_page_asm(physaddr, start); + flush_icache_page_asm(physaddr, start); + preempt_enable(); + physaddr += PAGE_SIZE; + } + return; + } + + flush_cache_all(); } +EXPORT_SYMBOL(flush_cache_vmap); +/* + * The vm_struct has been retired and the page table is set up. The + * last page in the range is a guard page. Its physical address can't + * be determined using lpa, so there is no way to flush the range + * using flush_dcache_page_asm. + */ +void flush_cache_vunmap(unsigned long start, unsigned long end) +{ + /* Prevent cache move-in */ + flush_tlb_kernel_range(start, end); + flush_data_cache(); +} +EXPORT_SYMBOL(flush_cache_vunmap); + +/* + * On systems with PA8800/PA8900 processors, there is no way to flush + * a vmap range other than using the architected loop to flush the + * entire cache. The page directory is not set up, so we can't use + * fdc, etc. FDCE/FICE don't work to flush a portion of the cache. + * L2 is physically indexed but FDCE/FICE instructions in virtual + * mode output their virtual address on the core bus, not their + * real address. As a result, the L2 cache index formed from the + * virtual address will most likely not be the same as the L2 index + * formed from the real address. 
+ */ void flush_kernel_vmap_range(void *vaddr, int size) { unsigned long start = (unsigned long)vaddr; unsigned long end = start + size; - if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) && - (unsigned long)size >= parisc_cache_flush_threshold) { - flush_tlb_kernel_range(start, end); - flush_data_cache(); + flush_tlb_kernel_range(start, end); + + if (!static_branch_likely(&parisc_has_dcache)) + return; + + /* If interrupts are disabled, we can only do local flush */ + if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled())) { + flush_data_cache_local(NULL); return; } - flush_kernel_dcache_range_asm(start, end); - flush_tlb_kernel_range(start, end); + flush_data_cache(); } EXPORT_SYMBOL(flush_kernel_vmap_range); @@ -818,15 +916,18 @@ void invalidate_kernel_vmap_range(void *vaddr, int size) /* Ensure DMA is complete */ asm_syncdma(); - if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) && - (unsigned long)size >= parisc_cache_flush_threshold) { - flush_tlb_kernel_range(start, end); - flush_data_cache(); + flush_tlb_kernel_range(start, end); + + if (!static_branch_likely(&parisc_has_dcache)) + return; + + /* If interrupts are disabled, we can only do local flush */ + if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled())) { + flush_data_cache_local(NULL); return; } - purge_kernel_dcache_range_asm(start, end); - flush_tlb_kernel_range(start, end); + flush_data_cache(); } EXPORT_SYMBOL(invalidate_kernel_vmap_range); diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c index ac19d685e4a5..1e793f770f71 100644 --- a/arch/parisc/kernel/drivers.c +++ b/arch/parisc/kernel/drivers.c @@ -97,7 +97,7 @@ static int for_each_padev(int (*fn)(struct device *, void *), void * data) * @driver: the PA-RISC driver to try * @dev: the PA-RISC device to try */ -static int match_device(struct parisc_driver *driver, struct parisc_device *dev) +static int match_device(const struct parisc_driver *driver, struct parisc_device *dev) { const struct parisc_device_id *ids; @@ -548,7 +548,7 @@ alloc_pa_dev(unsigned long hpa, struct hardware_path *mod_path) return dev; } -static int parisc_generic_match(struct device *dev, struct device_driver *drv) +static int parisc_generic_match(struct device *dev, const struct device_driver *drv) { return match_device(to_parisc_driver(drv), to_parisc_device(dev)); } diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index ab23e61a6f01..ea57bcc21dc5 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S @@ -1051,8 +1051,7 @@ ENTRY_CFI(intr_save) /* for os_hpmc */ STREG %r16, PT_ISR(%r29) STREG %r17, PT_IOR(%r29) -#if 0 && defined(CONFIG_64BIT) - /* Revisit when we have 64-bit code above 4Gb */ +#if defined(CONFIG_64BIT) b,n intr_save2 skip_save_ior: @@ -1060,8 +1059,7 @@ skip_save_ior: * need to adjust iasq/iaoq here in the same way we adjusted isr/ior * above. 
*/ - extrd,u,* %r8,PSW_W_BIT,1,%r1 - cmpib,COND(=),n 1,%r1,intr_save2 + bb,COND(>=),n %r8,PSW_W_BIT,intr_save2 LDREG PT_IASQ0(%r29), %r16 LDREG PT_IAOQ0(%r29), %r17 /* adjust iasq/iaoq */ diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c index 621a4b386ae4..10fd5b3e63e7 100644 --- a/arch/parisc/kernel/ftrace.c +++ b/arch/parisc/kernel/ftrace.c @@ -20,7 +20,7 @@ #include <asm/assembly.h> #include <asm/sections.h> #include <asm/ftrace.h> -#include <asm/patch.h> +#include <asm/text-patching.h> #define __hot __section(".text.hot") @@ -87,7 +87,7 @@ int ftrace_enable_ftrace_graph_caller(void) int ftrace_disable_ftrace_graph_caller(void) { - static_key_enable(&ftrace_graph_enable.key); + static_key_disable(&ftrace_graph_enable.key); return 0; } #endif @@ -206,6 +206,9 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, struct kprobe *p; int bit; + if (unlikely(kprobe_ftrace_disabled)) + return; + bit = ftrace_test_recursion_trylock(ip, parent_ip); if (bit < 0) return; diff --git a/arch/parisc/kernel/jump_label.c b/arch/parisc/kernel/jump_label.c index e253b134500d..ea51f15bf0e6 100644 --- a/arch/parisc/kernel/jump_label.c +++ b/arch/parisc/kernel/jump_label.c @@ -8,7 +8,7 @@ #include <linux/jump_label.h> #include <linux/bug.h> #include <asm/alternative.h> -#include <asm/patch.h> +#include <asm/text-patching.h> static inline int reassemble_17(int as17) { diff --git a/arch/parisc/kernel/kgdb.c b/arch/parisc/kernel/kgdb.c index b16fa9bac5f4..fee81f877525 100644 --- a/arch/parisc/kernel/kgdb.c +++ b/arch/parisc/kernel/kgdb.c @@ -16,7 +16,7 @@ #include <asm/ptrace.h> #include <asm/traps.h> #include <asm/processor.h> -#include <asm/patch.h> +#include <asm/text-patching.h> #include <asm/cacheflush.h> const struct kgdb_arch arch_kgdb_ops = { diff --git a/arch/parisc/kernel/kprobes.c b/arch/parisc/kernel/kprobes.c index 6e0b86652f30..9255adba67a3 100644 --- a/arch/parisc/kernel/kprobes.c +++ b/arch/parisc/kernel/kprobes.c @@ -12,7 +12,7 @@ #include <linux/kprobes.h> #include <linux/slab.h> #include <asm/cacheflush.h> -#include <asm/patch.h> +#include <asm/text-patching.h> DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c index d214bbe3c2af..4e5d991b2b65 100644 --- a/arch/parisc/kernel/module.c +++ b/arch/parisc/kernel/module.c @@ -41,7 +41,6 @@ #include <linux/moduleloader.h> #include <linux/elf.h> -#include <linux/vmalloc.h> #include <linux/fs.h> #include <linux/ftrace.h> #include <linux/string.h> @@ -173,17 +172,6 @@ static inline int reassemble_22(int as22) ((as22 & 0x0003ff) << 3)); } -void *module_alloc(unsigned long size) -{ - /* using RWX means less protection for modules, but it's - * easier than trying to map the text, data, init_text and - * init_data correctly */ - return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END, - GFP_KERNEL, - PAGE_KERNEL_RWX, 0, NUMA_NO_NODE, - __builtin_return_address(0)); -} - #ifndef CONFIG_64BIT static inline unsigned long count_gots(const Elf_Rela *rela, unsigned long n) { diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c index 6f0c92e8149d..509146a52725 100644 --- a/arch/parisc/kernel/parisc_ksyms.c +++ b/arch/parisc/kernel/parisc_ksyms.c @@ -22,6 +22,8 @@ EXPORT_SYMBOL(memset); #include <linux/atomic.h> EXPORT_SYMBOL(__xchg8); EXPORT_SYMBOL(__xchg32); +EXPORT_SYMBOL(__cmpxchg_u8); +EXPORT_SYMBOL(__cmpxchg_u16); EXPORT_SYMBOL(__cmpxchg_u32); 
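The freshly exported __cmpxchg_u8/__cmpxchg_u16 helpers follow the same spinlock-emulated compare-and-swap pattern that the bitops.c CMPXCHG() macro further down in this diff generates for all four widths. A minimal sketch of that pattern, using a single global raw spinlock as a stand-in for the arch's per-address _atomic_spin_lock_irqsave() hash (lock name and function name are illustrative):

#include <linux/spinlock.h>
#include <linux/types.h>

/* Stand-in for the per-address lock hash used by the real helpers. */
static DEFINE_RAW_SPINLOCK(cmpxchg_emu_lock);

static u16 cmpxchg_u16_emulated(volatile u16 *ptr, u16 old, u16 new)
{
	unsigned long flags;
	u16 prev;

	/* One lock serializes every emulated atomic in this sketch. */
	raw_spin_lock_irqsave(&cmpxchg_emu_lock, flags);
	prev = *ptr;
	if (prev == old)
		*ptr = new;
	raw_spin_unlock_irqrestore(&cmpxchg_emu_lock, flags);

	/* Callers compare the return value with 'old' to see if the swap won. */
	return prev;
}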
EXPORT_SYMBOL(__cmpxchg_u64); #ifdef CONFIG_SMP @@ -41,9 +43,6 @@ EXPORT_SYMBOL($global$); #endif #include <asm/io.h> -EXPORT_SYMBOL(memcpy_toio); -EXPORT_SYMBOL(memcpy_fromio); -EXPORT_SYMBOL(memset_io); extern void $$divI(void); extern void $$divU(void); diff --git a/arch/parisc/kernel/patch.c b/arch/parisc/kernel/patch.c index e59574f65e64..35dd764b871e 100644 --- a/arch/parisc/kernel/patch.c +++ b/arch/parisc/kernel/patch.c @@ -13,7 +13,7 @@ #include <asm/cacheflush.h> #include <asm/fixmap.h> -#include <asm/patch.h> +#include <asm/text-patching.h> struct patch { void *addr; diff --git a/arch/parisc/kernel/pdt.c b/arch/parisc/kernel/pdt.c index 0f9b3b5914cf..b70b67adb855 100644 --- a/arch/parisc/kernel/pdt.c +++ b/arch/parisc/kernel/pdt.c @@ -63,6 +63,7 @@ static unsigned long pdt_entry[MAX_PDT_ENTRIES] __page_aligned_bss; #define PDT_ADDR_PERM_ERR (pdt_type != PDT_PDC ? 2UL : 0UL) #define PDT_ADDR_SINGLE_ERR 1UL +#ifdef CONFIG_PROC_FS /* report PDT entries via /proc/meminfo */ void arch_report_meminfo(struct seq_file *m) { @@ -74,6 +75,7 @@ void arch_report_meminfo(struct seq_file *m) seq_printf(m, "PDT_cur_entries: %7lu\n", pdt_status.pdt_entries); } +#endif static int get_info_pat_new(void) { diff --git a/arch/parisc/kernel/perf.c b/arch/parisc/kernel/perf.c index b0f0816879df..5e10f98ce7b5 100644 --- a/arch/parisc/kernel/perf.c +++ b/arch/parisc/kernel/perf.c @@ -466,7 +466,6 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) } static const struct file_operations perf_fops = { - .llseek = no_llseek, .read = perf_read, .write = perf_write, .unlocked_ioctl = perf_ioctl, @@ -476,9 +475,9 @@ static const struct file_operations perf_fops = { }; static struct miscdevice perf_dev = { - MISC_DYNAMIC_MINOR, - PA_PERF_DEV, - &perf_fops + .minor = MISC_DYNAMIC_MINOR, + .name = PA_PERF_DEV, + .fops = &perf_fops, }; /* diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c index 444154271f23..b2d12ab728b1 100644 --- a/arch/parisc/kernel/smp.c +++ b/arch/parisc/kernel/smp.c @@ -297,7 +297,7 @@ smp_cpu_init(int cpunum) enter_lazy_tlb(&init_mm, current); init_IRQ(); /* make sure no IRQs are enabled or pending */ - start_cpu_itimer(); + parisc_clockevent_init(); } @@ -344,7 +344,7 @@ static int smp_boot_one_cpu(int cpuid, struct task_struct *idle) struct irq_desc *desc = irq_to_desc(i); if (desc && desc->kstat_irqs) - *per_cpu_ptr(desc->kstat_irqs, cpuid) = 0; + *per_cpu_ptr(desc->kstat_irqs, cpuid) = (struct irqstat) { }; } #endif diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c index 98af719d5f85..f852fe274abe 100644 --- a/arch/parisc/kernel/sys_parisc.c +++ b/arch/parisc/kernel/sys_parisc.c @@ -104,7 +104,9 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, struct vm_area_struct *vma, *prev; unsigned long filp_pgoff; int do_color_align; - struct vm_unmapped_area_info info; + struct vm_unmapped_area_info info = { + .length = len + }; if (unlikely(len > TASK_SIZE)) return -ENOMEM; @@ -139,7 +141,6 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, return addr; } - info.length = len; info.align_mask = do_color_align ? 
(PAGE_MASK & (SHM_COLOUR - 1)) : 0; info.align_offset = shared_align_offset(filp_pgoff, pgoff); @@ -160,14 +161,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, */ } - info.flags = 0; info.low_limit = mm->mmap_base; info.high_limit = mmap_upper_limit(NULL); return vm_unmapped_area(&info); } unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, - unsigned long len, unsigned long pgoff, unsigned long flags) + unsigned long len, unsigned long pgoff, unsigned long flags, + vm_flags_t vm_flags) { return arch_get_unmapped_area_common(filp, addr, len, pgoff, flags, UP); @@ -175,7 +176,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, - unsigned long flags) + unsigned long flags, vm_flags_t vm_flags) { return arch_get_unmapped_area_common(filp, addr, len, pgoff, flags, DOWN); diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c index 2a12a547b447..826c8e51b585 100644 --- a/arch/parisc/kernel/sys_parisc32.c +++ b/arch/parisc/kernel/sys_parisc32.c @@ -23,12 +23,3 @@ asmlinkage long sys32_unimplemented(int r26, int r25, int r24, int r23, current->comm, current->pid, r20); return -ENOSYS; } - -asmlinkage long sys32_fanotify_mark(compat_int_t fanotify_fd, compat_uint_t flags, - compat_uint_t mask0, compat_uint_t mask1, compat_int_t dfd, - const char __user * pathname) -{ - return sys_fanotify_mark(fanotify_fd, flags, - ((__u64)mask1 << 32) | mask0, - dfd, pathname); -} diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index 1f51aa9c8230..0fa81bf1466b 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S @@ -243,10 +243,10 @@ linux_gateway_entry: #ifdef CONFIG_64BIT ldil L%sys_call_table, %r1 - or,= %r2,%r2,%r2 - addil L%(sys_call_table64-sys_call_table), %r1 + or,ev %r2,%r2,%r2 + ldil L%sys_call_table64, %r1 ldo R%sys_call_table(%r1), %r19 - or,= %r2,%r2,%r2 + or,ev %r2,%r2,%r2 ldo R%sys_call_table64(%r1), %r19 #else load32 sys_call_table, %r19 @@ -379,10 +379,10 @@ tracesys_next: extrd,u %r19,63,1,%r2 /* W hidden in bottom bit */ ldil L%sys_call_table, %r1 - or,= %r2,%r2,%r2 - addil L%(sys_call_table64-sys_call_table), %r1 + or,ev %r2,%r2,%r2 + ldil L%sys_call_table64, %r1 ldo R%sys_call_table(%r1), %r19 - or,= %r2,%r2,%r2 + or,ev %r2,%r2,%r2 ldo R%sys_call_table64(%r1), %r19 #else load32 sys_call_table, %r19 @@ -1327,6 +1327,8 @@ ENTRY(sys_call_table) END(sys_call_table) #ifdef CONFIG_64BIT +#undef __SYSCALL_WITH_COMPAT +#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native) .align 8 ENTRY(sys_call_table64) #include <asm/syscall_table_64.h> /* 64-bit syscalls */ diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl index b236a84c4e12..94df3cb957e9 100644 --- a/arch/parisc/kernel/syscalls/syscall.tbl +++ b/arch/parisc/kernel/syscalls/syscall.tbl @@ -108,7 +108,7 @@ 95 common fchown sys_fchown 96 common getpriority sys_getpriority 97 common setpriority sys_setpriority -98 common recv sys_recv +98 common recv sys_recv compat_sys_recv 99 common statfs sys_statfs compat_sys_statfs 100 common fstatfs sys_fstatfs compat_sys_fstatfs 101 common stat64 sys_stat64 @@ -135,7 +135,7 @@ 120 common clone sys_clone_wrapper 121 common setdomainname sys_setdomainname 122 common sendfile sys_sendfile compat_sys_sendfile -123 common recvfrom sys_recvfrom +123 common recvfrom sys_recvfrom 
compat_sys_recvfrom 124 32 adjtimex sys_adjtimex_time32 124 64 adjtimex sys_adjtimex 125 common mprotect sys_mprotect @@ -364,7 +364,7 @@ 320 common accept4 sys_accept4 321 common prlimit64 sys_prlimit64 322 common fanotify_init sys_fanotify_init -323 common fanotify_mark sys_fanotify_mark sys32_fanotify_mark +323 common fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark 324 32 clock_adjtime sys_clock_adjtime32 324 64 clock_adjtime sys_clock_adjtime 325 common name_to_handle_at sys_name_to_handle_at @@ -460,3 +460,9 @@ 459 common lsm_get_self_attr sys_lsm_get_self_attr 460 common lsm_set_self_attr sys_lsm_set_self_attr 461 common lsm_list_modules sys_lsm_list_modules +462 common mseal sys_mseal +463 common setxattrat sys_setxattrat +464 common getxattrat sys_getxattrat +465 common listxattrat sys_listxattrat +466 common removexattrat sys_removexattrat +467 common open_tree_attr sys_open_tree_attr diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c index 9714fbd7c42d..c17e2249115f 100644 --- a/arch/parisc/kernel/time.c +++ b/arch/parisc/kernel/time.c @@ -1,166 +1,119 @@ // SPDX-License-Identifier: GPL-2.0 /* - * linux/arch/parisc/kernel/time.c + * Common time service routines for parisc machines. + * based on arch/loongarch/kernel/time.c * - * Copyright (C) 1991, 1992, 1995 Linus Torvalds - * Modifications for ARM (C) 1994, 1995, 1996,1997 Russell King - * Copyright (C) 1999 SuSE GmbH, (Philipp Rumpf, prumpf@tux.org) - * - * 1994-07-02 Alan Modra - * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime - * 1998-12-20 Updated NTP code according to technical memorandum Jan '96 - * "A Kernel Model for Precision Timekeeping" by Dave Mills + * Copyright (C) 2024 Helge Deller <deller@gmx.de> */ -#include <linux/errno.h> -#include <linux/module.h> -#include <linux/rtc.h> -#include <linux/sched.h> -#include <linux/sched/clock.h> -#include <linux/sched_clock.h> -#include <linux/kernel.h> -#include <linux/param.h> -#include <linux/string.h> -#include <linux/mm.h> -#include <linux/interrupt.h> -#include <linux/time.h> +#include <linux/clockchips.h> +#include <linux/delay.h> +#include <linux/export.h> #include <linux/init.h> -#include <linux/smp.h> -#include <linux/profile.h> -#include <linux/clocksource.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/sched_clock.h> +#include <linux/spinlock.h> +#include <linux/rtc.h> #include <linux/platform_device.h> -#include <linux/ftrace.h> +#include <asm/processor.h> -#include <linux/uaccess.h> -#include <asm/io.h> -#include <asm/irq.h> -#include <asm/page.h> -#include <asm/param.h> -#include <asm/pdc.h> -#include <asm/led.h> +static u64 cr16_clock_freq; +static unsigned long clocktick; -#include <linux/timex.h> +int time_keeper_id; /* CPU used for timekeeping */ -int time_keeper_id __read_mostly; /* CPU used for timekeeping. */ +static DEFINE_PER_CPU(struct clock_event_device, parisc_clockevent_device); -static unsigned long clocktick __ro_after_init; /* timer cycles per tick */ +static void parisc_event_handler(struct clock_event_device *dev) +{ +} -/* - * We keep time on PA-RISC Linux by using the Interval Timer which is - * a pair of registers; one is read-only and one is write-only; both - * accessed through CR16. The read-only register is 32 or 64 bits wide, - * and increments by 1 every CPU clock tick. The architecture only - * guarantees us a rate between 0.5 and 2, but all implementations use a - * rate of 1. The write-only register is 32-bits wide. 
When the lowest - * 32 bits of the read-only register compare equal to the write-only - * register, it raises a maskable external interrupt. Each processor has - * an Interval Timer of its own and they are not synchronised. - * - * We want to generate an interrupt every 1/HZ seconds. So we program - * CR16 to interrupt every @clocktick cycles. The it_value in cpu_data - * is programmed with the intended time of the next tick. We can be - * held off for an arbitrarily long period of time by interrupts being - * disabled, so we may miss one or more ticks. - */ -irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id) +static int parisc_timer_next_event(unsigned long delta, struct clock_event_device *evt) { - unsigned long now; - unsigned long next_tick; - unsigned long ticks_elapsed = 0; - unsigned int cpu = smp_processor_id(); - struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu); - - /* gcc can optimize for "read-only" case with a local clocktick */ - unsigned long cpt = clocktick; - - /* Initialize next_tick to the old expected tick time. */ - next_tick = cpuinfo->it_value; - - /* Calculate how many ticks have elapsed. */ - now = mfctl(16); - do { - ++ticks_elapsed; - next_tick += cpt; - } while (next_tick - now > cpt); - - /* Store (in CR16 cycles) up to when we are accounting right now. */ - cpuinfo->it_value = next_tick; - - /* Go do system house keeping. */ - if (IS_ENABLED(CONFIG_SMP) && (cpu != time_keeper_id)) - ticks_elapsed = 0; - legacy_timer_tick(ticks_elapsed); - - /* Skip clockticks on purpose if we know we would miss those. - * The new CR16 must be "later" than current CR16 otherwise - * itimer would not fire until CR16 wrapped - e.g 4 seconds - * later on a 1Ghz processor. We'll account for the missed - * ticks on the next timer interrupt. - * We want IT to fire modulo clocktick even if we miss/skip some. - * But those interrupts don't in fact get delivered that regularly. - * - * "next_tick - now" will always give the difference regardless - * if one or the other wrapped. If "now" is "bigger" we'll end up - * with a very large unsigned number. - */ - now = mfctl(16); - while (next_tick - now > cpt) - next_tick += cpt; - - /* Program the IT when to deliver the next interrupt. - * Only bottom 32-bits of next_tick are writable in CR16! - * Timer interrupt will be delivered at least a few hundred cycles - * after the IT fires, so if we are too close (<= 8000 cycles) to the - * next cycle, simply skip it. 
- */ - if (next_tick - now <= 8000) - next_tick += cpt; - mtctl(next_tick, 16); + unsigned long new_cr16; - return IRQ_HANDLED; -} + new_cr16 = mfctl(16) + delta; + mtctl(new_cr16, 16); + return 0; +} -unsigned long profile_pc(struct pt_regs *regs) +irqreturn_t timer_interrupt(int irq, void *data) { - unsigned long pc = instruction_pointer(regs); + struct clock_event_device *cd; + int cpu = smp_processor_id(); - if (regs->gr[0] & PSW_N) - pc -= 4; + cd = &per_cpu(parisc_clockevent_device, cpu); -#ifdef CONFIG_SMP - if (in_lock_functions(pc)) - pc = regs->gr[2]; -#endif + if (clockevent_state_periodic(cd)) + parisc_timer_next_event(clocktick, cd); - return pc; + if (clockevent_state_periodic(cd) || clockevent_state_oneshot(cd)) + cd->event_handler(cd); + + return IRQ_HANDLED; } -EXPORT_SYMBOL(profile_pc); +static int parisc_set_state_oneshot(struct clock_event_device *evt) +{ + parisc_timer_next_event(clocktick, evt); -/* clock source code */ + return 0; +} -static u64 notrace read_cr16(struct clocksource *cs) +static int parisc_set_state_periodic(struct clock_event_device *evt) { - return get_cycles(); + parisc_timer_next_event(clocktick, evt); + + return 0; } -static struct clocksource clocksource_cr16 = { - .name = "cr16", - .rating = 300, - .read = read_cr16, - .mask = CLOCKSOURCE_MASK(BITS_PER_LONG), - .flags = CLOCK_SOURCE_IS_CONTINUOUS, -}; +static int parisc_set_state_shutdown(struct clock_event_device *evt) +{ + return 0; +} -void start_cpu_itimer(void) +void parisc_clockevent_init(void) { unsigned int cpu = smp_processor_id(); - unsigned long next_tick = mfctl(16) + clocktick; + unsigned long min_delta = 0x600; /* XXX */ + unsigned long max_delta = (1UL << (BITS_PER_LONG - 1)); + struct clock_event_device *cd; + + cd = &per_cpu(parisc_clockevent_device, cpu); + + cd->name = "cr16_clockevent"; + cd->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC | + CLOCK_EVT_FEAT_PERCPU; + + cd->irq = TIMER_IRQ; + cd->rating = 320; + cd->cpumask = cpumask_of(cpu); + cd->set_state_oneshot = parisc_set_state_oneshot; + cd->set_state_oneshot_stopped = parisc_set_state_shutdown; + cd->set_state_periodic = parisc_set_state_periodic; + cd->set_state_shutdown = parisc_set_state_shutdown; + cd->set_next_event = parisc_timer_next_event; + cd->event_handler = parisc_event_handler; + + clockevents_config_and_register(cd, cr16_clock_freq, min_delta, max_delta); +} + +unsigned long notrace profile_pc(struct pt_regs *regs) +{ + unsigned long pc = instruction_pointer(regs); - mtctl(next_tick, 16); /* kick off Interval Timer (CR16) */ + if (regs->gr[0] & PSW_N) + pc -= 4; + +#ifdef CONFIG_SMP + if (in_lock_functions(pc)) + pc = regs->gr[2]; +#endif - per_cpu(cpu_data, cpu).it_value = next_tick; + return pc; } +EXPORT_SYMBOL(profile_pc); #if IS_ENABLED(CONFIG_RTC_DRV_GENERIC) static int rtc_generic_get_time(struct device *dev, struct rtc_time *tm) @@ -224,12 +177,27 @@ void read_persistent_clock64(struct timespec64 *ts) } } - static u64 notrace read_cr16_sched_clock(void) { return get_cycles(); } +static u64 notrace read_cr16(struct clocksource *cs) +{ + return get_cycles(); +} + +static struct clocksource clocksource_cr16 = { + .name = "cr16", + .rating = 300, + .read = read_cr16, + .mask = CLOCKSOURCE_MASK(BITS_PER_LONG), + .flags = CLOCK_SOURCE_IS_CONTINUOUS | + CLOCK_SOURCE_VALID_FOR_HRES | + CLOCK_SOURCE_MUST_VERIFY | + CLOCK_SOURCE_VERIFY_PERCPU, +}; + /* * timer interrupt and sched_clock() initialization @@ -237,33 +205,14 @@ static u64 notrace read_cr16_sched_clock(void) void __init 
time_init(void) { - unsigned long cr16_hz; - - clocktick = (100 * PAGE0->mem_10msec) / HZ; - start_cpu_itimer(); /* get CPU 0 started */ - - cr16_hz = 100 * PAGE0->mem_10msec; /* Hz */ + cr16_clock_freq = 100 * PAGE0->mem_10msec; /* Hz */ + clocktick = cr16_clock_freq / HZ; /* register as sched_clock source */ - sched_clock_register(read_cr16_sched_clock, BITS_PER_LONG, cr16_hz); -} + sched_clock_register(read_cr16_sched_clock, BITS_PER_LONG, cr16_clock_freq); -static int __init init_cr16_clocksource(void) -{ - /* - * The cr16 interval timers are not synchronized across CPUs. - */ - if (num_online_cpus() > 1 && !running_on_qemu) { - clocksource_cr16.name = "cr16_unstable"; - clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE; - clocksource_cr16.rating = 0; - } + parisc_clockevent_init(); /* register at clocksource framework */ - clocksource_register_hz(&clocksource_cr16, - 100 * PAGE0->mem_10msec); - - return 0; + clocksource_register_hz(&clocksource_cr16, cr16_clock_freq); } - -device_initcall(init_cr16_clocksource); diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c index 1107ca819ac8..b9b3d527bc90 100644 --- a/arch/parisc/kernel/traps.c +++ b/arch/parisc/kernel/traps.c @@ -36,7 +36,7 @@ #include <asm/io.h> #include <asm/irq.h> #include <asm/traps.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <linux/atomic.h> #include <asm/smp.h> #include <asm/pdc.h> @@ -47,6 +47,8 @@ #include <linux/kgdb.h> #include <linux/kprobes.h> +#include "unaligned.h" + #if defined(CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK) #include <asm/spinlock.h> #endif @@ -504,7 +506,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs) if (((unsigned long)regs->iaoq[0] & 3) && ((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) { /* Kill the user process later */ - regs->iaoq[0] = 0 | 3; + regs->iaoq[0] = 0 | PRIV_USER; regs->iaoq[1] = regs->iaoq[0] + 4; regs->iasq[0] = regs->iasq[1] = regs->sr[7]; regs->gr[0] &= ~PSW_B; diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c index 71e596ca5a86..00e97204783e 100644 --- a/arch/parisc/kernel/unaligned.c +++ b/arch/parisc/kernel/unaligned.c @@ -12,9 +12,10 @@ #include <linux/ratelimit.h> #include <linux/uaccess.h> #include <linux/sysctl.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <asm/hardirq.h> #include <asm/traps.h> +#include "unaligned.h" /* #define DEBUG_UNALIGNED 1 */ @@ -24,7 +25,7 @@ #define DPRINTF(fmt, args...) 
#endif -#define RFMT "%#08lx" +#define RFMT "0x%08lx" /* 1111 1100 0000 0000 0001 0011 1100 0000 */ #define OPCODE1(a,b,c) ((a)<<26|(b)<<12|(c)<<6) @@ -104,6 +105,7 @@ #define ERR_NOTHANDLED -1 int unaligned_enabled __read_mostly = 1; +int no_unaligned_warning __read_mostly; static int emulate_ldh(struct pt_regs *regs, int toreg) { @@ -399,6 +401,7 @@ void handle_unaligned(struct pt_regs *regs) } else { static DEFINE_RATELIMIT_STATE(kernel_ratelimit, 5 * HZ, 5); if (!(current->thread.flags & PARISC_UAC_NOPRINT) && + !no_unaligned_warning && __ratelimit(&kernel_ratelimit)) pr_warn("Kernel: unaligned access to " RFMT " in %pS " "(iir " RFMT ")\n", diff --git a/arch/parisc/kernel/unaligned.h b/arch/parisc/kernel/unaligned.h new file mode 100644 index 000000000000..c1aa4b12e284 --- /dev/null +++ b/arch/parisc/kernel/unaligned.h @@ -0,0 +1,3 @@ +struct pt_regs; +void handle_unaligned(struct pt_regs *regs); +int check_unaligned(struct pt_regs *regs); diff --git a/arch/parisc/kernel/vdso32/Makefile b/arch/parisc/kernel/vdso32/Makefile index 4459a48d2303..4ee8d17da229 100644 --- a/arch/parisc/kernel/vdso32/Makefile +++ b/arch/parisc/kernel/vdso32/Makefile @@ -1,11 +1,25 @@ -# List of files in the vdso, has to be asm only for now +# Include the generic Makefile to check the built vdso. +include $(srctree)/lib/vdso/Makefile.include + +KCOV_INSTRUMENT := n + +# Disable gcov profiling, ubsan and kasan for VDSO code +GCOV_PROFILE := n +UBSAN_SANITIZE := n +KASAN_SANITIZE := n +KCSAN_SANITIZE := n obj-vdso32 = note.o sigtramp.o restart_syscall.o +obj-cvdso32 = vdso32_generic.o # Build rules -targets := $(obj-vdso32) vdso32.so +targets := $(obj-vdso32) $(obj-cvdso32) vdso32.so obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32)) +obj-cvdso32 := $(addprefix $(obj)/, $(obj-cvdso32)) + +VDSO_CFLAGS_REMOVE := -pg $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_vdso32_generic.o = $(VDSO_CFLAGS_REMOVE) ccflags-y := -shared -fno-common -fbuiltin -mno-fast-indirect-calls -O2 -mno-long-calls # -march=1.1 -mschedule=7100LC @@ -19,20 +33,19 @@ KBUILD_CFLAGS += -DBUILD_VDSO -DDISABLE_BRANCH_PROFILING VDSO_LIBGCC := $(shell $(CROSS32CC) -print-libgcc-file-name) obj-y += vdso32_wrapper.o -extra-y += vdso32.lds +targets += vdso32.lds CPPFLAGS_vdso32.lds += -P -C # -U$(ARCH) $(obj)/vdso32_wrapper.o : $(obj)/vdso32.so FORCE # Force dependency (incbin is bad) # link rule for the .so file, .lds has to be first -$(obj)/vdso32.so: $(src)/vdso32.lds $(obj-vdso32) $(obj-cvdso32) $(VDSO_LIBGCC) FORCE +$(obj)/vdso32.so: $(obj)/vdso32.lds $(obj-vdso32) $(obj-cvdso32) $(VDSO_LIBGCC) FORCE $(call if_changed,vdso32ld) # assembly rules for the .S files $(obj-vdso32): %.o: %.S FORCE $(call if_changed_dep,vdso32as) - $(obj-cvdso32): %.o: %.c FORCE $(call if_changed_dep,vdso32cc) @@ -42,10 +55,10 @@ quiet_cmd_vdso32ld = VDSO32L $@ quiet_cmd_vdso32as = VDSO32A $@ cmd_vdso32as = $(CROSS32CC) $(a_flags) -c -o $@ $< quiet_cmd_vdso32cc = VDSO32C $@ - cmd_vdso32cc = $(CROSS32CC) $(c_flags) -c -fPIC -mno-fast-indirect-calls -o $@ $< + cmd_vdso32cc = $(CROSS32CC) $(c_flags) -c -o $@ $< # Generate VDSO offsets using helper script -gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh +gen-vdsosym := $(src)/gen_vdso_offsets.sh quiet_cmd_vdsosym = VDSOSYM $@ cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@ diff --git a/arch/parisc/kernel/vdso32/vdso32.lds.S b/arch/parisc/kernel/vdso32/vdso32.lds.S index d4aff3af5262..4273baa26b65 100644 --- a/arch/parisc/kernel/vdso32/vdso32.lds.S +++ b/arch/parisc/kernel/vdso32/vdso32.lds.S @@ -106,6 +106,9 @@ VERSION 
global: __kernel_sigtramp_rt32; __kernel_restart_syscall32; + __vdso_gettimeofday; + __vdso_clock_gettime; + __vdso_clock_gettime64; local: *; }; } diff --git a/arch/parisc/kernel/vdso32/vdso32_generic.c b/arch/parisc/kernel/vdso32/vdso32_generic.c new file mode 100644 index 000000000000..8d5bd59e8646 --- /dev/null +++ b/arch/parisc/kernel/vdso32/vdso32_generic.c @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include "asm/unistd.h" +#include <linux/types.h> +#include <uapi/asm/unistd_32.h> + +struct timezone; +struct old_timespec32; +struct __kernel_timespec; +struct __kernel_old_timeval; + +/* forward declarations */ +int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz); +int __vdso_clock_gettime(clockid_t clock, struct old_timespec32 *ts); +int __vdso_clock_gettime64(clockid_t clock, struct __kernel_timespec *ts); + + +int __vdso_gettimeofday(struct __kernel_old_timeval *tv, + struct timezone *tz) +{ + return syscall2(__NR_gettimeofday, (long)tv, (long)tz); +} + +int __vdso_clock_gettime(clockid_t clock, struct old_timespec32 *ts) +{ + return syscall2(__NR_clock_gettime, (long)clock, (long)ts); +} + +int __vdso_clock_gettime64(clockid_t clock, struct __kernel_timespec *ts) +{ + return syscall2(__NR_clock_gettime64, (long)clock, (long)ts); +} diff --git a/arch/parisc/kernel/vdso64/Makefile b/arch/parisc/kernel/vdso64/Makefile index f3d6045793f4..c63f4069170f 100644 --- a/arch/parisc/kernel/vdso64/Makefile +++ b/arch/parisc/kernel/vdso64/Makefile @@ -1,12 +1,25 @@ -# List of files in the vdso, has to be asm only for now +# Include the generic Makefile to check the built vdso. +include $(srctree)/lib/vdso/Makefile.include + +KCOV_INSTRUMENT := n + +# Disable gcov profiling, ubsan and kasan for VDSO code +GCOV_PROFILE := n +UBSAN_SANITIZE := n +KASAN_SANITIZE := n +KCSAN_SANITIZE := n obj-vdso64 = note.o sigtramp.o restart_syscall.o +obj-cvdso64 = vdso64_generic.o # Build rules -targets := $(obj-vdso64) vdso64.so -obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64)) +targets := $(obj-vdso64) $(obj-cvdso64) vdso64.so +obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64)) +obj-cvdso64 := $(addprefix $(obj)/, $(obj-cvdso64)) +VDSO_CFLAGS_REMOVE := -pg $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_vdso64_generic.o = $(VDSO_CFLAGS_REMOVE) ccflags-y := -shared -fno-common -fno-builtin ccflags-y += -nostdlib -Wl,-soname=linux-vdso64.so.1 \ @@ -19,28 +32,32 @@ KBUILD_CFLAGS += -DBUILD_VDSO -DDISABLE_BRANCH_PROFILING VDSO_LIBGCC := $(shell $(CC) -print-libgcc-file-name) obj-y += vdso64_wrapper.o -extra-y += vdso64.lds +targets += vdso64.lds CPPFLAGS_vdso64.lds += -P -C -U$(ARCH) $(obj)/vdso64_wrapper.o : $(obj)/vdso64.so FORCE # Force dependency (incbin is bad) # link rule for the .so file, .lds has to be first -$(obj)/vdso64.so: $(src)/vdso64.lds $(obj-vdso64) $(VDSO_LIBGCC) FORCE +$(obj)/vdso64.so: $(obj)/vdso64.lds $(obj-vdso64) $(obj-cvdso64) $(VDSO_LIBGCC) FORCE $(call if_changed,vdso64ld) # assembly rules for the .S files $(obj-vdso64): %.o: %.S FORCE $(call if_changed_dep,vdso64as) +$(obj-cvdso64): %.o: %.c FORCE + $(call if_changed_dep,vdso64cc) # actual build commands quiet_cmd_vdso64ld = VDSO64L $@ cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $(filter-out FORCE, $^) -o $@ quiet_cmd_vdso64as = VDSO64A $@ cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $< +quiet_cmd_vdso64cc = VDSO64C $@ + cmd_vdso64cc = $(CC) $(c_flags) -c -o $@ $< # Generate VDSO offsets using helper script -gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh +gen-vdsosym := $(src)/gen_vdso_offsets.sh quiet_cmd_vdsosym 
= VDSOSYM $@ cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@ diff --git a/arch/parisc/kernel/vdso64/vdso64.lds.S b/arch/parisc/kernel/vdso64/vdso64.lds.S index de1fb4b19286..10f25e4e1554 100644 --- a/arch/parisc/kernel/vdso64/vdso64.lds.S +++ b/arch/parisc/kernel/vdso64/vdso64.lds.S @@ -104,6 +104,8 @@ VERSION global: __kernel_sigtramp_rt64; __kernel_restart_syscall64; + __vdso_gettimeofday; + __vdso_clock_gettime; local: *; }; } diff --git a/arch/parisc/kernel/vdso64/vdso64_generic.c b/arch/parisc/kernel/vdso64/vdso64_generic.c new file mode 100644 index 000000000000..fc6836a0075b --- /dev/null +++ b/arch/parisc/kernel/vdso64/vdso64_generic.c @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include "asm/unistd.h" +#include <linux/types.h> + +struct timezone; +struct __kernel_timespec; +struct __kernel_old_timeval; + +/* forward declarations */ +int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz); +int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts); + + +int __vdso_gettimeofday(struct __kernel_old_timeval *tv, + struct timezone *tz) +{ + return syscall2(__NR_gettimeofday, (long)tv, (long)tz); +} + +int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts) +{ + return syscall2(__NR_clock_gettime, (long)clock, (long)ts); +} diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c index 36a314199074..9df810050642 100644 --- a/arch/parisc/lib/bitops.c +++ b/arch/parisc/lib/bitops.c @@ -56,38 +56,20 @@ unsigned long notrace __xchg8(char x, volatile char *ptr) } -u64 notrace __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new) -{ - unsigned long flags; - u64 prev; - - _atomic_spin_lock_irqsave(ptr, flags); - if ((prev = *ptr) == old) - *ptr = new; - _atomic_spin_unlock_irqrestore(ptr, flags); - return prev; -} - -unsigned long notrace __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsigned int new) -{ - unsigned long flags; - unsigned int prev; - - _atomic_spin_lock_irqsave(ptr, flags); - if ((prev = *ptr) == old) - *ptr = new; - _atomic_spin_unlock_irqrestore(ptr, flags); - return (unsigned long)prev; -} - -u8 notrace __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new) -{ - unsigned long flags; - u8 prev; - - _atomic_spin_lock_irqsave(ptr, flags); - if ((prev = *ptr) == old) - *ptr = new; - _atomic_spin_unlock_irqrestore(ptr, flags); - return prev; -} +#define CMPXCHG(T) \ + T notrace __cmpxchg_##T(volatile T *ptr, T old, T new) \ + { \ + unsigned long flags; \ + T prev; \ + \ + _atomic_spin_lock_irqsave(ptr, flags); \ + if ((prev = *ptr) == old) \ + *ptr = new; \ + _atomic_spin_unlock_irqrestore(ptr, flags); \ + return prev; \ + } + +CMPXCHG(u64) +CMPXCHG(u32) +CMPXCHG(u16) +CMPXCHG(u8) diff --git a/arch/parisc/lib/checksum.c b/arch/parisc/lib/checksum.c index 4818f3db84a5..59d8c15d81bd 100644 --- a/arch/parisc/lib/checksum.c +++ b/arch/parisc/lib/checksum.c @@ -25,15 +25,6 @@ : "=r"(_t) \ : "r"(_r), "0"(_t)); -static inline unsigned short from32to16(unsigned int x) -{ - /* 32 bits --> 16 bits + carry */ - x = (x & 0xffff) + (x >> 16); - /* 16 bits + carry --> 16 bits including carry */ - x = (x & 0xffff) + (x >> 16); - return (unsigned short)x; -} - static inline unsigned int do_csum(const unsigned char * buff, int len) { int odd, count; @@ -85,7 +76,7 @@ static inline unsigned int do_csum(const unsigned char * buff, int len) } if (len & 1) result += le16_to_cpu(*buff); - result = from32to16(result); + result = csum_from32to16(result); if (odd) result = swab16(result); out: @@ -102,7 +93,7 @@ 
__wsum csum_partial(const void *buff, int len, __wsum sum) { unsigned int result = do_csum(buff, len); addc(result, sum); - return (__force __wsum)from32to16(result); + return (__force __wsum)csum_from32to16(result); } EXPORT_SYMBOL(csum_partial); diff --git a/arch/parisc/lib/io.c b/arch/parisc/lib/io.c index 7c00496b47d4..3c7e617f5a93 100644 --- a/arch/parisc/lib/io.c +++ b/arch/parisc/lib/io.c @@ -12,114 +12,6 @@ #include <linux/module.h> #include <asm/io.h> -/* Copies a block of memory to a device in an efficient manner. - * Assumes the device can cope with 32-bit transfers. If it can't, - * don't use this function. - */ -void memcpy_toio(volatile void __iomem *dst, const void *src, int count) -{ - if (((unsigned long)dst & 3) != ((unsigned long)src & 3)) - goto bytecopy; - while ((unsigned long)dst & 3) { - writeb(*(char *)src, dst++); - src++; - count--; - } - while (count > 3) { - __raw_writel(*(u32 *)src, dst); - src += 4; - dst += 4; - count -= 4; - } - bytecopy: - while (count--) { - writeb(*(char *)src, dst++); - src++; - } -} - -/* -** Copies a block of memory from a device in an efficient manner. -** Assumes the device can cope with 32-bit transfers. If it can't, -** don't use this function. -** -** CR16 counts on C3000 reading 256 bytes from Symbios 896 RAM: -** 27341/64 = 427 cyc per int -** 61311/128 = 478 cyc per short -** 122637/256 = 479 cyc per byte -** Ergo bus latencies dominant (not transfer size). -** Minimize total number of transfers at cost of CPU cycles. -** TODO: only look at src alignment and adjust the stores to dest. -*/ -void memcpy_fromio(void *dst, const volatile void __iomem *src, int count) -{ - /* first compare alignment of src/dst */ - if ( (((unsigned long)dst ^ (unsigned long)src) & 1) || (count < 2) ) - goto bytecopy; - - if ( (((unsigned long)dst ^ (unsigned long)src) & 2) || (count < 4) ) - goto shortcopy; - - /* Then check for misaligned start address */ - if ((unsigned long)src & 1) { - *(u8 *)dst = readb(src); - src++; - dst++; - count--; - if (count < 2) goto bytecopy; - } - - if ((unsigned long)src & 2) { - *(u16 *)dst = __raw_readw(src); - src += 2; - dst += 2; - count -= 2; - } - - while (count > 3) { - *(u32 *)dst = __raw_readl(src); - dst += 4; - src += 4; - count -= 4; - } - - shortcopy: - while (count > 1) { - *(u16 *)dst = __raw_readw(src); - src += 2; - dst += 2; - count -= 2; - } - - bytecopy: - while (count--) { - *(char *)dst = readb(src); - src++; - dst++; - } -} - -/* Sets a block of memory on a device to a given value. - * Assumes the device can cope with 32-bit transfers. If it can't, - * don't use this function. - */ -void memset_io(volatile void __iomem *addr, unsigned char val, int count) -{ - u32 val32 = (val << 24) | (val << 16) | (val << 8) | val; - while ((unsigned long)addr & 3) { - writeb(val, addr++); - count--; - } - while (count > 3) { - __raw_writel(val32, addr); - addr += 4; - count -= 4; - } - while (count--) { - writeb(val, addr++); - } -} - /* * Read COUNT 8-bit bytes from port PORT into memory starting at * SRC. 
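The checksum hunk above drops the file-local from32to16() in favour of a common csum_from32to16() helper; both fold a 32-bit partial sum into 16 bits with end-around carry. A self-contained sketch of that fold (helper name chosen here for illustration):

#include <linux/types.h>

/*
 * Same arithmetic as the removed local from32to16(): fold a 32-bit
 * partial sum down to 16 bits while feeding the carries back in.
 */
static inline u16 fold32to16(u32 x)
{
	x = (x & 0xffff) + (x >> 16);	/* 32 bits -> 16 bits + carry */
	x = (x & 0xffff) + (x >> 16);	/* fold the carry back in */
	return (u16)x;
}

/* Example: fold32to16(0x0001fffe) == 0xffff. */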
@@ -170,15 +62,15 @@ void insw (unsigned long port, void *dst, unsigned long count) unsigned char *p; p = (unsigned char *)dst; - + if (!count) return; - + switch (((unsigned long)p) & 0x3) { case 0x00: /* Buffer 32-bit aligned */ while (count>=2) { - + count -= 2; l = cpu_to_le16(inw(port)) << 16; l |= cpu_to_le16(inw(port)); @@ -189,13 +81,13 @@ void insw (unsigned long port, void *dst, unsigned long count) *(unsigned short *)p = cpu_to_le16(inw(port)); } break; - + case 0x02: /* Buffer 16-bit aligned */ *(unsigned short *)p = cpu_to_le16(inw(port)); p += 2; count--; while (count>=2) { - + count -= 2; l = cpu_to_le16(inw(port)) << 16; l |= cpu_to_le16(inw(port)); @@ -206,13 +98,13 @@ void insw (unsigned long port, void *dst, unsigned long count) *(unsigned short *)p = cpu_to_le16(inw(port)); } break; - + case 0x01: /* Buffer 8-bit aligned */ case 0x03: /* I don't bother with 32bit transfers * in this case, 16bit will have to do -- DE */ --count; - + l = cpu_to_le16(inw(port)); *p = l >> 8; p++; @@ -242,10 +134,10 @@ void insl (unsigned long port, void *dst, unsigned long count) unsigned char *p; p = (unsigned char *)dst; - + if (!count) return; - + switch (((unsigned long) dst) & 0x3) { case 0x00: /* Buffer 32-bit aligned */ @@ -255,14 +147,14 @@ void insl (unsigned long port, void *dst, unsigned long count) p += 4; } break; - + case 0x02: /* Buffer 16-bit aligned */ --count; - + l = cpu_to_le32(inl(port)); *(unsigned short *)p = l >> 16; p += 2; - + while (count--) { l2 = cpu_to_le32(inl(port)); @@ -274,7 +166,7 @@ void insl (unsigned long port, void *dst, unsigned long count) break; case 0x01: /* Buffer 8-bit aligned */ --count; - + l = cpu_to_le32(inl(port)); *(unsigned char *)p = l >> 24; p++; @@ -291,7 +183,7 @@ void insl (unsigned long port, void *dst, unsigned long count) break; case 0x03: /* Buffer 8-bit aligned */ --count; - + l = cpu_to_le32(inl(port)); *p = l >> 24; p++; @@ -340,10 +232,10 @@ void outsw (unsigned long port, const void *src, unsigned long count) const unsigned char *p; p = (const unsigned char *)src; - + if (!count) return; - + switch (((unsigned long)p) & 0x3) { case 0x00: /* Buffer 32-bit aligned */ @@ -358,13 +250,13 @@ void outsw (unsigned long port, const void *src, unsigned long count) outw(le16_to_cpu(*(unsigned short*)p), port); } break; - + case 0x02: /* Buffer 16-bit aligned */ - + outw(le16_to_cpu(*(unsigned short*)p), port); p += 2; count--; - + while (count>=2) { count -= 2; l = *(unsigned int *)p; @@ -376,11 +268,11 @@ void outsw (unsigned long port, const void *src, unsigned long count) outw(le16_to_cpu(*(unsigned short *)p), port); } break; - - case 0x01: /* Buffer 8-bit aligned */ + + case 0x01: /* Buffer 8-bit aligned */ /* I don't bother with 32bit transfers * in this case, 16bit will have to do -- DE */ - + l = *p << 8; p++; count--; @@ -395,7 +287,7 @@ void outsw (unsigned long port, const void *src, unsigned long count) l2 = *(unsigned char *)p; outw (le16_to_cpu(l | l2>>8), port); break; - + } } @@ -412,10 +304,10 @@ void outsl (unsigned long port, const void *src, unsigned long count) const unsigned char *p; p = (const unsigned char *)src; - + if (!count) return; - + switch (((unsigned long)p) & 0x3) { case 0x00: /* Buffer 32-bit aligned */ @@ -425,13 +317,13 @@ void outsl (unsigned long port, const void *src, unsigned long count) p += 4; } break; - + case 0x02: /* Buffer 16-bit aligned */ --count; - + l = *(unsigned short *)p; p += 2; - + while (count--) { l2 = *(unsigned int *)p; @@ -462,7 +354,7 @@ void outsl (unsigned long port, 
const void *src, unsigned long count) break; case 0x03: /* Buffer 8-bit aligned */ --count; - + l = *p << 24; p++; diff --git a/arch/parisc/math-emu/driver.c b/arch/parisc/math-emu/driver.c index 6ce427b58836..71829cb7bc81 100644 --- a/arch/parisc/math-emu/driver.c +++ b/arch/parisc/math-emu/driver.c @@ -26,12 +26,6 @@ #define FPUDEBUG 0 -/* Format of the floating-point exception registers. */ -struct exc_reg { - unsigned int exception : 6; - unsigned int ei : 26; -}; - /* Macros for grabbing bits of the instruction format from the 'ei' field above. */ /* Major opcode 0c and 0e */ @@ -103,9 +97,19 @@ handle_fpe(struct pt_regs *regs) memcpy(regs->fr, frcopy, sizeof regs->fr); if (signalcode != 0) { - force_sig_fault(signalcode >> 24, signalcode & 0xffffff, - (void __user *) regs->iaoq[0]); - return -1; + int sig = signalcode >> 24; + + if (sig == SIGFPE) { + /* + * Clear floating point trap bit to avoid trapping + * again on the first floating-point instruction in + * the userspace signal handler. + */ + regs->fr[0] &= ~(1ULL << 38); + } + force_sig_fault(sig, signalcode & 0xffffff, + (void __user *) regs->iaoq[0]); + return -1; } return signalcode ? -1 : 0; diff --git a/arch/parisc/mm/hugetlbpage.c b/arch/parisc/mm/hugetlbpage.c index a9f7e21f6656..a94fe546d434 100644 --- a/arch/parisc/mm/hugetlbpage.c +++ b/arch/parisc/mm/hugetlbpage.c @@ -21,27 +21,6 @@ #include <asm/mmu_context.h> -unsigned long -hugetlb_get_unmapped_area(struct file *file, unsigned long addr, - unsigned long len, unsigned long pgoff, unsigned long flags) -{ - struct hstate *h = hstate_file(file); - - if (len & ~huge_page_mask(h)) - return -EINVAL; - if (len > TASK_SIZE) - return -ENOMEM; - - if (flags & MAP_FIXED) - if (prepare_hugepage_range(file, addr, len)) - return -EINVAL; - - if (addr) - addr = ALIGN(addr, huge_page_size(h)); - - /* we need to make sure the colouring is OK */ - return arch_get_unmapped_area(file, addr, len, pgoff, flags); -} pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, @@ -147,7 +126,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, - pte_t *ptep) + pte_t *ptep, unsigned long sz) { pte_t entry; @@ -180,14 +159,3 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma, } return changed; } - - -int pmd_huge(pmd_t pmd) -{ - return 0; -} - -int pud_huge(pud_t pud) -{ - return 0; -} diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index f876af56e13f..14270715d754 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -24,6 +24,7 @@ #include <linux/nodemask.h> /* for node_online_map */ #include <linux/pagemap.h> /* for release_pages */ #include <linux/compat.h> +#include <linux/execmem.h> #include <asm/pgalloc.h> #include <asm/tlb.h> @@ -376,10 +377,8 @@ static void __ref map_pages(unsigned long start_vaddr, #if CONFIG_PGTABLE_LEVELS == 3 if (pud_none(*pud)) { - pmd = memblock_alloc(PAGE_SIZE << PMD_TABLE_ORDER, + pmd = memblock_alloc_or_panic(PAGE_SIZE << PMD_TABLE_ORDER, PAGE_SIZE << PMD_TABLE_ORDER); - if (!pmd) - panic("pmd allocation failed.\n"); pud_populate(NULL, pud, pmd); } #endif @@ -387,9 +386,7 @@ static void __ref map_pages(unsigned long start_vaddr, pmd = pmd_offset(pud, vaddr); for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) { if (pmd_none(*pmd)) { - pg_table = memblock_alloc(PAGE_SIZE, PAGE_SIZE); - if (!pg_table) - panic("page table allocation failed\n"); + pg_table = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE); 
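memblock_alloc_or_panic() used here simply folds the allocate-then-panic sequence that the old early page-table code spelled out by hand. An open-coded equivalent as a sketch, with the function name and panic message invented for illustration:

#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/panic.h>

/* Open-coded equivalent of the memblock_alloc_or_panic() calls above. */
static void __init *early_table_alloc(phys_addr_t size, phys_addr_t align)
{
	void *p = memblock_alloc(size, align);

	if (!p)
		panic("%s: failed to allocate %llu bytes\n",
		      __func__, (unsigned long long)size);
	return p;
}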
pmd_populate_kernel(NULL, pmd, pg_table); } @@ -458,7 +455,6 @@ void free_initmem(void) unsigned long kernel_end = (unsigned long)&_end; /* Remap kernel text and data, but do not touch init section yet. */ - kernel_set_to_readonly = true; map_pages(init_end, __pa(init_end), kernel_end - init_end, PAGE_KERNEL, 0); @@ -481,7 +477,7 @@ void free_initmem(void) /* finally dump all the instructions which were cached, since the * pages are no-longer executable */ flush_icache_range(init_begin, init_end); - + free_initmem_default(POISON_FREE_INITMEM); /* set up a new led state on systems shipped LED State panel */ @@ -492,11 +488,18 @@ void free_initmem(void) #ifdef CONFIG_STRICT_KERNEL_RWX void mark_rodata_ro(void) { - /* rodata memory was already mapped with KERNEL_RO access rights by - pagetable_init() and map_pages(). No need to do additional stuff here */ - unsigned long roai_size = __end_ro_after_init - __start_ro_after_init; + unsigned long start = (unsigned long) &__start_rodata; + unsigned long end = (unsigned long) &__end_rodata; - pr_info("Write protected read-only-after-init data: %luk\n", roai_size >> 10); + pr_info("Write protecting the kernel read-only data: %luk\n", + (end - start) >> 10); + + kernel_set_to_readonly = true; + map_pages(start, __pa(start), end - start, PAGE_KERNEL, 0); + + /* force the kernel to see the new page table entries */ + flush_cache_all(); + flush_tlb_all(); } #endif @@ -559,10 +562,6 @@ void __init mem_init(void) BUILD_BUG_ON(TMPALIAS_MAP_START >= 0x80000000); #endif - high_memory = __va((max_pfn << PAGE_SHIFT)); - set_max_mapnr(max_low_pfn); - memblock_free_all(); - #ifdef CONFIG_PA11 if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl) { pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START); @@ -641,9 +640,7 @@ static void __init pagetable_init(void) } #endif - empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE); - if (!empty_zero_page) - panic("zero page allocation failed.\n"); + empty_zero_page = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE); } @@ -680,19 +677,15 @@ static void __init fixmap_init(void) #if CONFIG_PGTABLE_LEVELS == 3 if (pud_none(*pud)) { - pmd = memblock_alloc(PAGE_SIZE << PMD_TABLE_ORDER, + pmd = memblock_alloc_or_panic(PAGE_SIZE << PMD_TABLE_ORDER, PAGE_SIZE << PMD_TABLE_ORDER); - if (!pmd) - panic("fixmap: pmd allocation failed.\n"); pud_populate(NULL, pud, pmd); } #endif pmd = pmd_offset(pud, addr); do { - pte_t *pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE); - if (!pte) - panic("fixmap: pte allocation failed.\n"); + pte_t *pte = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE); pmd_populate_kernel(&init_mm, pmd, pte); @@ -992,3 +985,23 @@ static const pgprot_t protection_map[16] = { [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_RWX }; DECLARE_VM_GET_PAGE_PROT + +#ifdef CONFIG_EXECMEM +static struct execmem_info execmem_info __ro_after_init; + +struct execmem_info __init *execmem_arch_setup(void) +{ + execmem_info = (struct execmem_info){ + .ranges = { + [EXECMEM_DEFAULT] = { + .start = VMALLOC_START, + .end = VMALLOC_END, + .pgprot = PAGE_KERNEL_RWX, + .alignment = 1, + }, + }, + }; + + return &execmem_info; +} +#endif /* CONFIG_EXECMEM */ diff --git a/arch/parisc/mm/ioremap.c b/arch/parisc/mm/ioremap.c index fd996472dfe7..0b65c4b3baee 100644 --- a/arch/parisc/mm/ioremap.c +++ b/arch/parisc/mm/ioremap.c @@ -14,7 +14,7 @@ #include <linux/mm.h> void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size, - unsigned long prot) + pgprot_t prot) { #ifdef CONFIG_EISA unsigned long end = phys_addr + size - 1; @@ 
-41,6 +41,6 @@ void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size, } } - return generic_ioremap_prot(phys_addr, size, __pgprot(prot)); + return generic_ioremap_prot(phys_addr, size, prot); } EXPORT_SYMBOL(ioremap_prot); diff --git a/arch/parisc/net/bpf_jit_core.c b/arch/parisc/net/bpf_jit_core.c index d6ee2fd45550..06cbcd6fe87b 100644 --- a/arch/parisc/net/bpf_jit_core.c +++ b/arch/parisc/net/bpf_jit_core.c @@ -114,7 +114,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) jit_data->header = bpf_jit_binary_alloc(prog_size + extable_size, &jit_data->image, - sizeof(u32), + sizeof(long), bpf_fill_ill_insns); if (!jit_data->header) { prog = orig_prog; @@ -167,7 +167,13 @@ skip_init_ctx: bpf_flush_icache(jit_data->header, ctx->insns + ctx->ninsns); if (!prog->is_func || extra_pass) { - bpf_jit_binary_lock_ro(jit_data->header); + if (bpf_jit_binary_lock_ro(jit_data->header)) { + bpf_jit_binary_free(jit_data->header); + prog->bpf_func = NULL; + prog->jited = 0; + prog->jited_len = 0; + goto out_offset; + } prologue_len = ctx->epilogue_offset - ctx->body_len; for (i = 0; i < prog->len; i++) ctx->offset[i] += prologue_len; diff --git a/arch/parisc/video/Makefile b/arch/parisc/video/Makefile index 16a73cce4661..b5db5b42880f 100644 --- a/arch/parisc/video/Makefile +++ b/arch/parisc/video/Makefile @@ -1,3 +1,3 @@ # SPDX-License-Identifier: GPL-2.0-only -obj-$(CONFIG_STI_CORE) += fbdev.o +obj-$(CONFIG_STI_CORE) += video-sti.o diff --git a/arch/parisc/video/fbdev.c b/arch/parisc/video/video-sti.c index e4f8ac99fc9e..564661e87093 100644 --- a/arch/parisc/video/fbdev.c +++ b/arch/parisc/video/video-sti.c @@ -5,12 +5,13 @@ * Copyright (C) 2001-2002 Thomas Bogendoerfer <tsbogend@alpha.franken.de> */ -#include <linux/fb.h> #include <linux/module.h> #include <video/sticore.h> -int fb_is_primary_device(struct fb_info *info) +#include <asm/video.h> + +bool video_is_primary_device(struct device *dev) { struct sti_struct *sti; @@ -21,6 +22,6 @@ int fb_is_primary_device(struct fb_info *info) return true; /* return true if it's the default built-in framebuffer driver */ - return (sti->dev == info->device); + return (sti->dev == dev); } -EXPORT_SYMBOL(fb_is_primary_device); +EXPORT_SYMBOL(video_is_primary_device); |
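The renamed video_is_primary_device() takes a bare struct device instead of an fb_info, so it can be called from any video driver that knows its device. A hypothetical caller, with the probe function name and log message invented for illustration:

#include <linux/device.h>
#include <asm/video.h>

/* Hypothetical probe-time check: is this the firmware's boot console? */
static int example_video_probe(struct device *dev)
{
	if (video_is_primary_device(dev))
		dev_info(dev, "drives the firmware console\n");
	return 0;
}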