Diffstat (limited to 'arch/hexagon/include/asm')
-rw-r--r--   arch/hexagon/include/asm/Kbuild             |   3
-rw-r--r--   arch/hexagon/include/asm/atomic.h           |  69
-rw-r--r--   arch/hexagon/include/asm/bitops.h           |  37
-rw-r--r--   arch/hexagon/include/asm/cacheflush.h       |  10
-rw-r--r--   arch/hexagon/include/asm/cmpxchg.h          |  12
-rw-r--r--   arch/hexagon/include/asm/io.h               | 224
-rw-r--r--   arch/hexagon/include/asm/irq.h              |   3
-rw-r--r--   arch/hexagon/include/asm/page.h             |  31
-rw-r--r--   arch/hexagon/include/asm/pgalloc.h          |   9
-rw-r--r--   arch/hexagon/include/asm/pgtable.h          |  77
-rw-r--r--   arch/hexagon/include/asm/processor.h        |   4
-rw-r--r--   arch/hexagon/include/asm/ptrace.h           |  25
-rw-r--r--   arch/hexagon/include/asm/setup.h            |  20
-rw-r--r--   arch/hexagon/include/asm/spinlock_types.h   |   2
-rw-r--r--   arch/hexagon/include/asm/syscall.h          |  21
-rw-r--r--   arch/hexagon/include/asm/syscalls.h         |   6
-rw-r--r--   arch/hexagon/include/asm/unistd.h           |  10
17 files changed, 205 insertions, 358 deletions
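
Most of the churn below is the atomics rework: the hand-rolled arch_atomic_cmpxchg()/arch_atomic_xchg() wrappers are deleted and the generic atomic layer now derives them from arch_cmpxchg()/arch_xchg(). As a rough orientation aid, a hypothetical user-space sketch (C11 atomics, not the kernel's memw_locked assembly) of the contract the deleted routine implemented: "old" is the expected value, and the return is the value actually observed.

/*
 * Hypothetical user-space sketch, not kernel code: the compare-and-exchange
 * contract that arch_atomic_cmpxchg() implemented with memw_locked,
 * expressed with C11 atomics.
 */
#include <stdatomic.h>
#include <stdio.h>

static int sketch_cmpxchg(atomic_int *v, int old, int newval)
{
	int expected = old;

	/* On failure, "expected" is refreshed with the observed value. */
	atomic_compare_exchange_strong(v, &expected, newval);
	return expected;	/* the actual old value, as the asm returned */
}

int main(void)
{
	atomic_int v = 5;

	printf("%d\n", sketch_cmpxchg(&v, 5, 7));	/* 5: match, swapped */
	printf("%d\n", sketch_cmpxchg(&v, 5, 9));	/* 7: no match, no swap */
	return 0;
}
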
diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild
index 3ece3c93fe08..1efa1e993d4b 100644
--- a/arch/hexagon/include/asm/Kbuild
+++ b/arch/hexagon/include/asm/Kbuild
@@ -1,5 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0
+syscall-y += syscall_table_32.h
+
 generic-y += extable.h
 generic-y += iomap.h
 generic-y += kvm_para.h
 generic-y += mcs_spinlock.h
+generic-y += text-patching.h
diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h
index 6e94f8d04146..2447d083c432 100644
--- a/arch/hexagon/include/asm/atomic.h
+++ b/arch/hexagon/include/asm/atomic.h
@@ -28,58 +28,8 @@ static inline void arch_atomic_set(atomic_t *v, int new)
 
 #define arch_atomic_set_release(v, i)	arch_atomic_set((v), (i))
 
-/**
- * arch_atomic_read - reads a word, atomically
- * @v: pointer to atomic value
- *
- * Assumes all word reads on our architecture are atomic.
- */
 #define arch_atomic_read(v)		READ_ONCE((v)->counter)
 
-/**
- * arch_atomic_xchg - atomic
- * @v: pointer to memory to change
- * @new: new value (technically passed in a register -- see xchg)
- */
-#define arch_atomic_xchg(v, new)	(arch_xchg(&((v)->counter), (new)))
-
-
-/**
- * arch_atomic_cmpxchg - atomic compare-and-exchange values
- * @v: pointer to value to change
- * @old: desired old value to match
- * @new: new value to put in
- *
- * Parameters are then pointer, value-in-register, value-in-register,
- * and the output is the old value.
- *
- * Apparently this is complicated for archs that don't support
- * the memw_locked like we do (or it's broken or whatever).
- *
- * Kind of the lynchpin of the rest of the generically defined routines.
- * Remember V2 had that bug with dotnew predicate set by memw_locked.
- *
- * "old" is "expected" old val, __oldval is actual old value
- */
-static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
-{
-	int __oldval;
-
-	asm volatile(
-		"1:	%0 = memw_locked(%1);\n"
-		"	{ P0 = cmp.eq(%0,%2);\n"
-		"	  if (!P0.new) jump:nt 2f; }\n"
-		"	memw_locked(%1,P0) = %3;\n"
-		"	if (!P0) jump 1b;\n"
-		"2:\n"
-		: "=&r" (__oldval)
-		: "r" (&v->counter), "r" (old), "r" (new)
-		: "memory", "p0"
-	);
-
-	return __oldval;
-}
-
 #define ATOMIC_OP(op)							\
 static inline void arch_atomic_##op(int i, atomic_t *v)			\
 {									\
@@ -135,6 +85,11 @@ static inline int arch_atomic_fetch_##op(int i, atomic_t *v)	\
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
+#define arch_atomic_add_return		arch_atomic_add_return
+#define arch_atomic_sub_return		arch_atomic_sub_return
+#define arch_atomic_fetch_add		arch_atomic_fetch_add
+#define arch_atomic_fetch_sub		arch_atomic_fetch_sub
+
 #undef ATOMIC_OPS
 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
 
@@ -142,21 +97,15 @@ ATOMIC_OPS(and)
 ATOMIC_OPS(or)
 ATOMIC_OPS(xor)
 
+#define arch_atomic_fetch_and		arch_atomic_fetch_and
+#define arch_atomic_fetch_or		arch_atomic_fetch_or
+#define arch_atomic_fetch_xor		arch_atomic_fetch_xor
+
 #undef ATOMIC_OPS
 #undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
-/**
- * arch_atomic_fetch_add_unless - add unless the number is a given value
- * @v: pointer to value
- * @a: amount to add
- * @u: unless value is equal to u
- *
- * Returns old value.
- *
- */
-
 static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	int __oldval;
diff --git a/arch/hexagon/include/asm/bitops.h b/arch/hexagon/include/asm/bitops.h
index 75d6ba3643b8..160d8f37fa1a 100644
--- a/arch/hexagon/include/asm/bitops.h
+++ b/arch/hexagon/include/asm/bitops.h
@@ -127,38 +127,45 @@ static inline void change_bit(int nr, volatile void *addr)
  * be atomic, particularly for things like slab_lock and slab_unlock.
  *
  */
-static inline void __clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	test_and_clear_bit(nr, addr);
 }
 
-static inline void __set_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	test_and_set_bit(nr, addr);
 }
 
-static inline void __change_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	test_and_change_bit(nr, addr);
 }
 
 /* Apparently, at least some of these are allowed to be non-atomic */
-static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	return test_and_clear_bit(nr, addr);
 }
 
-static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	return test_and_set_bit(nr, addr);
 }
 
-static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	return test_and_change_bit(nr, addr);
 }
 
-static inline int __test_bit(int nr, const volatile unsigned long *addr)
+static __always_inline bool
+arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
 {
 	int retval;
 
@@ -172,7 +179,20 @@ static inline int __test_bit(int nr, const volatile unsigned long *addr)
 	return retval;
 }
 
-#define test_bit(nr, addr) __test_bit(nr, addr)
+static __always_inline bool
+arch_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+	int retval;
+
+	asm volatile(
+	"{P0 = tstbit(%1,%2); if (P0.new) %0 = #1; if (!P0.new) %0 = #0;}\n"
+	: "=&r" (retval)
+	: "r" (addr[BIT_WORD(nr)]), "r" (nr % BITS_PER_LONG)
+	: "p0", "memory"
+	);
+
+	return retval;
+}
 
 /*
  * ffz - find first zero in word.
@@ -271,6 +291,7 @@ static inline unsigned long __fls(unsigned long word)
 }
 
 #include <asm-generic/bitops/lock.h>
+#include <asm-generic/bitops/non-instrumented-non-atomic.h>
 
 #include <asm-generic/bitops/fls64.h>
 #include <asm-generic/bitops/sched.h>
diff --git a/arch/hexagon/include/asm/cacheflush.h b/arch/hexagon/include/asm/cacheflush.h
index 6eff0730e6ef..bfff514a81c8 100644
--- a/arch/hexagon/include/asm/cacheflush.h
+++ b/arch/hexagon/include/asm/cacheflush.h
@@ -18,7 +18,7 @@
  *  - flush_cache_range(vma, start, end) flushes a range of pages
  *  - flush_icache_range(start, end) flush a range of instructions
  *  - flush_dcache_page(pg) flushes(wback&invalidates) a page for dcache
- *  - flush_icache_page(vma, pg) flushes(invalidates) a page for icache
+ *  - flush_icache_pages(vma, pg, nr) flushes(invalidates) nr pages for icache
  *
  * Need to doublecheck which one is really needed for ptrace stuff to work.
  */
@@ -58,12 +58,16 @@ extern void flush_cache_all_hexagon(void);
  * clean the cache when the PTE is set.
 *
 */
-static inline void update_mmu_cache(struct vm_area_struct *vma,
-					unsigned long address, pte_t *ptep)
+static inline void update_mmu_cache_range(struct vm_fault *vmf,
+		struct vm_area_struct *vma, unsigned long address,
+		pte_t *ptep, unsigned int nr)
 {
 	/* generic_ptrace_pokedata doesn't wind up here, does it? */
 }
 
+#define update_mmu_cache(vma, addr, ptep) \
+	update_mmu_cache_range(NULL, vma, addr, ptep, 1)
+
 void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 		       unsigned long vaddr, void *dst, void *src, int len);
 #define copy_to_user_page copy_to_user_page
diff --git a/arch/hexagon/include/asm/cmpxchg.h b/arch/hexagon/include/asm/cmpxchg.h
index cdb705e1496a..9c58fb81f7fd 100644
--- a/arch/hexagon/include/asm/cmpxchg.h
+++ b/arch/hexagon/include/asm/cmpxchg.h
@@ -9,7 +9,7 @@
 #define _ASM_CMPXCHG_H
 
 /*
- * __xchg - atomically exchange a register and a memory location
+ * __arch_xchg - atomically exchange a register and a memory location
  * @x: value to swap
  * @ptr: pointer to memory
  * @size: size of the value
@@ -19,8 +19,8 @@
  * Note:  there was an errata for V2 about .new's and memw_locked.
  *
  */
-static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
-				   int size)
+static inline unsigned long
+__arch_xchg(unsigned long x, volatile void *ptr, int size)
 {
 	unsigned long retval;
 
@@ -42,8 +42,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
  * Atomically swap the contents of a register with memory.  Should be atomic
  * between multiple CPU's and within interrupts on the same CPU.
  */
-#define arch_xchg(ptr, v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), \
-	sizeof(*(ptr))))
+#define arch_xchg(ptr, v) ((__typeof__(*(ptr)))__arch_xchg((unsigned long)(v), (ptr), \
+					       sizeof(*(ptr))))
 
 /*
  *  see rt-mutex-design.txt; cmpxchg supposedly checks if *ptr == A and swaps.
@@ -56,7 +56,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
 	__typeof__(ptr) __ptr = (ptr);				\
 	__typeof__(*(ptr)) __old = (old);			\
 	__typeof__(*(ptr)) __new = (new);			\
-	__typeof__(*(ptr)) __oldval = 0;			\
+	__typeof__(*(ptr)) __oldval = (__typeof__(*(ptr))) 0;	\
 								\
 	asm volatile(						\
 		"1:	%0 = memw_locked(%1);\n"		\
diff --git a/arch/hexagon/include/asm/io.h b/arch/hexagon/include/asm/io.h
index c33241425a5c..83b2eb5de60c 100644
--- a/arch/hexagon/include/asm/io.h
+++ b/arch/hexagon/include/asm/io.h
@@ -8,40 +8,13 @@
 #ifndef _ASM_IO_H
 #define _ASM_IO_H
 
-#ifdef __KERNEL__
-
 #include <linux/types.h>
-#include <asm/iomap.h>
 #include <asm/page.h>
 #include <asm/cacheflush.h>
 
-/*
- * We don't have PCI yet.
- * _IO_BASE is pointing at what should be unused virtual space.
- */
-#define IO_SPACE_LIMIT 0xffff
-#define _IO_BASE ((void __iomem *)0xfe000000)
-
-#define IOMEM(x)	((void __force __iomem *)(x))
-
 extern int remap_area_pages(unsigned long start, unsigned long phys_addr,
 				unsigned long end, unsigned long flags);
 
-extern void iounmap(const volatile void __iomem *addr);
-
-/* Defined in lib/io.c, needed for smc91x driver. */
-extern void __raw_readsw(const void __iomem *addr, void *data, int wordlen);
-extern void __raw_writesw(void __iomem *addr, const void *data, int wordlen);
-
-extern void __raw_readsl(const void __iomem *addr, void *data, int wordlen);
-extern void __raw_writesl(void __iomem *addr, const void *data, int wordlen);
-
-#define readsw(p, d, l)	__raw_readsw(p, d, l)
-#define writesw(p, d, l) __raw_writesw(p, d, l)
-
-#define readsl(p, d, l)	__raw_readsl(p, d, l)
-#define writesl(p, d, l) __raw_writesl(p, d, l)
-
 /*
  * virt_to_phys - map virtual address to physical
  * @address: address to map
@@ -61,26 +34,11 @@ static inline void *phys_to_virt(unsigned long address)
 }
 
 /*
- * convert a physical pointer to a virtual kernel pointer for
- * /dev/mem access.
- */
-#define xlate_dev_mem_ptr(p)	__va(p)
-
-/*
- * IO port access primitives.  Hexagon doesn't have special IO access
- * instructions; all I/O is memory mapped.
- *
- * in/out are used for "ports", but we don't have "port instructions",
- * so these are really just memory mapped too.
- */
-
-/*
  * readb - read byte from memory mapped device
  * @addr: pointer to memory
  *
- * Operates on "I/O bus memory space"
  */
-static inline u8 readb(const volatile void __iomem *addr)
+static inline u8 __raw_readb(const volatile void __iomem *addr)
 {
 	u8 val;
 	asm volatile(
@@ -90,8 +48,9 @@ static inline u8 readb(const volatile void __iomem *addr)
 	);
 	return val;
 }
+#define __raw_readb __raw_readb
 
-static inline u16 readw(const volatile void __iomem *addr)
+static inline u16 __raw_readw(const volatile void __iomem *addr)
 {
 	u16 val;
 	asm volatile(
@@ -101,8 +60,9 @@ static inline u16 readw(const volatile void __iomem *addr)
 	);
 	return val;
 }
+#define __raw_readw __raw_readw
 
-static inline u32 readl(const volatile void __iomem *addr)
+static inline u32 __raw_readl(const volatile void __iomem *addr)
 {
 	u32 val;
 	asm volatile(
@@ -112,6 +72,7 @@ static inline u32 readl(const volatile void __iomem *addr)
 	);
 	return val;
 }
+#define __raw_readl __raw_readl
 
 /*
  * writeb - write a byte to a memory location
@@ -119,7 +80,7 @@ static inline u32 readl(const volatile void __iomem *addr)
  * @addr: pointer to memory
  *
  */
-static inline void writeb(u8 data, volatile void __iomem *addr)
+static inline void __raw_writeb(u8 data, volatile void __iomem *addr)
 {
 	asm volatile(
 		"memb(%0) = %1;"
 		:
 		: "r" (addr), "r" (data)
 		: "memory"
 	);
 }
+#define __raw_writeb __raw_writeb
 
-static inline void writew(u16 data, volatile void __iomem *addr)
+static inline void __raw_writew(u16 data, volatile void __iomem *addr)
 {
 	asm volatile(
 		"memh(%0) = %1;"
 		:
 		: "r" (addr), "r" (data)
 		: "memory"
 	);
 }
@@ -139,8 +101,9 @@
+#define __raw_writew __raw_writew
 
-static inline void writel(u32 data, volatile void __iomem *addr)
+static inline void __raw_writel(u32 data, volatile void __iomem *addr)
 {
 	asm volatile(
 		"memw(%0) = %1;"
 		:
 		: "r" (addr), "r" (data)
 		: "memory"
 	);
 }
-
-#define __raw_writeb writeb
-#define __raw_writew writew
-#define __raw_writel writel
-
-#define __raw_readb readb
-#define __raw_readw readw
-#define __raw_readl readl
-
-/*
- * http://comments.gmane.org/gmane.linux.ports.arm.kernel/117626
- */
-
-#define readb_relaxed __raw_readb
-#define readw_relaxed __raw_readw
-#define readl_relaxed __raw_readl
-
-#define writeb_relaxed __raw_writeb
-#define writew_relaxed __raw_writew
-#define writel_relaxed __raw_writel
-
-void __iomem *ioremap(unsigned long phys_addr, unsigned long size);
-#define ioremap_uc(X, Y) ioremap((X), (Y))
-
-
-#define __raw_writel writel
-
-static inline void memcpy_fromio(void *dst, const volatile void __iomem *src,
-	int count)
-{
-	memcpy(dst, (void *) src, count);
-}
-
-static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
-	int count)
-{
-	memcpy((void *) dst, src, count);
-}
-
-static inline void memset_io(volatile void __iomem *addr, int value,
-			     size_t size)
-{
-	memset((void __force *)addr, value, size);
-}
-
-#define PCI_IO_ADDR	(volatile void __iomem *)
+#define __raw_writel __raw_writel
 
 /*
- * inb - read byte from I/O port or something
- * @port: address in I/O space
- *
- * Operates on "I/O bus I/O space"
+ * I/O memory mapping functions.
  */
-static inline u8 inb(unsigned long port)
-{
-	return readb(_IO_BASE + (port & IO_SPACE_LIMIT));
-}
-
-static inline u16 inw(unsigned long port)
-{
-	return readw(_IO_BASE + (port & IO_SPACE_LIMIT));
-}
-
-static inline u32 inl(unsigned long port)
-{
-	return readl(_IO_BASE + (port & IO_SPACE_LIMIT));
-}
+#define _PAGE_IOREMAP (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
+		       (__HEXAGON_C_DEV << 6))
 
 /*
- * outb - write a byte to a memory location
- * @data: data to write to
- * @addr: address in I/O space
+ * These defines are necessary to use the generic io.h for filling in
+ * the missing parts of the API contract. This is because the platform
+ * uses (inline) functions rather than defines and the generic helper
+ * fills in the undefined.
  */
-static inline void outb(u8 data, unsigned long port)
-{
-	writeb(data, _IO_BASE + (port & IO_SPACE_LIMIT));
-}
-
-static inline void outw(u16 data, unsigned long port)
-{
-	writew(data, _IO_BASE + (port & IO_SPACE_LIMIT));
-}
-
-static inline void outl(u32 data, unsigned long port)
-{
-	writel(data, _IO_BASE + (port & IO_SPACE_LIMIT));
-}
-
-#define outb_p outb
-#define outw_p outw
-#define outl_p outl
-
-#define inb_p inb
-#define inw_p inw
-#define inl_p inl
-
-static inline void insb(unsigned long port, void *buffer, int count)
-{
-	if (count) {
-		u8 *buf = buffer;
-		do {
-			u8 x = inb(port);
-			*buf++ = x;
-		} while (--count);
-	}
-}
-
-static inline void insw(unsigned long port, void *buffer, int count)
-{
-	if (count) {
-		u16 *buf = buffer;
-		do {
-			u16 x = inw(port);
-			*buf++ = x;
-		} while (--count);
-	}
-}
-
-static inline void insl(unsigned long port, void *buffer, int count)
-{
-	if (count) {
-		u32 *buf = buffer;
-		do {
-			u32 x = inw(port);
-			*buf++ = x;
-		} while (--count);
-	}
-}
-
-static inline void outsb(unsigned long port, const void *buffer, int count)
-{
-	if (count) {
-		const u8 *buf = buffer;
-		do {
-			outb(*buf++, port);
-		} while (--count);
-	}
-}
-
-static inline void outsw(unsigned long port, const void *buffer, int count)
-{
-	if (count) {
-		const u16 *buf = buffer;
-		do {
-			outw(*buf++, port);
-		} while (--count);
-	}
-}
-
-static inline void outsl(unsigned long port, const void *buffer, int count)
-{
-	if (count) {
-		const u32 *buf = buffer;
-		do {
-			outl(*buf++, port);
-		} while (--count);
-	}
-}
-
-#endif /* __KERNEL__ */
+#define virt_to_phys virt_to_phys
+#define phys_to_virt phys_to_virt
+#include <asm-generic/io.h>
 
 #endif
diff --git a/arch/hexagon/include/asm/irq.h b/arch/hexagon/include/asm/irq.h
index 1f7f1292f701..a60d26754caa 100644
--- a/arch/hexagon/include/asm/irq.h
+++ b/arch/hexagon/include/asm/irq.h
@@ -20,4 +20,7 @@
 
 #include <asm-generic/irq.h>
 
+struct pt_regs;
+void arch_do_IRQ(struct pt_regs *);
+
 #endif
diff --git a/arch/hexagon/include/asm/page.h b/arch/hexagon/include/asm/page.h
index 7cbf719c578e..137ba7c5de48 100644
--- a/arch/hexagon/include/asm/page.h
+++ b/arch/hexagon/include/asm/page.h
@@ -13,27 +13,22 @@
 /*  This is probably not the most graceful way to handle this. */
 
 #ifdef CONFIG_PAGE_SIZE_4KB
-#define PAGE_SHIFT 12
 #define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_4KB
 #endif
 
 #ifdef CONFIG_PAGE_SIZE_16KB
-#define PAGE_SHIFT 14
 #define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_16KB
 #endif
 
 #ifdef CONFIG_PAGE_SIZE_64KB
-#define PAGE_SHIFT 16
 #define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_64KB
 #endif
 
 #ifdef CONFIG_PAGE_SIZE_256KB
-#define PAGE_SHIFT 18
 #define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_256KB
 #endif
 
 #ifdef CONFIG_PAGE_SIZE_1MB
-#define PAGE_SHIFT 20
 #define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_1MB
 #endif
 
@@ -50,8 +45,7 @@
 #define HVM_HUGEPAGE_SIZE 0x5
 #endif
 
-#define PAGE_SIZE  (1UL << PAGE_SHIFT)
-#define PAGE_MASK  (~((1 << PAGE_SHIFT) - 1))
+#include <vdso/page.h>
 
 #ifdef __KERNEL__
 #ifndef __ASSEMBLY__
@@ -78,6 +72,9 @@ typedef struct page *pgtable_t;
 #define __pgd(x) ((pgd_t) { (x) })
 #define __pgprot(x) ((pgprot_t) { (x) })
 
+/* Needed for PAGE_OFFSET used in the macro right below */
+#include <asm/mem-layout.h>
+
 /*
  * We need a __pa and a __va routine for kernel space.
  * MIPS says they're only used during mem_init.
@@ -95,7 +92,6 @@ struct page;
 /* Default vm area behavior is non-executable. */
 #define VM_DATA_DEFAULT_FLAGS	VM_DATA_FLAGS_NON_EXEC
 
-#define pfn_valid(pfn) ((pfn) < max_mapnr)
 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
 
 /*  Need to not use a define for linesize; may move this to another file.  */
@@ -120,24 +116,13 @@ static inline void clear_page(void *page)
 #define clear_user_page(page, vaddr, pg)	clear_page(page)
 #define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
 
-/*
- * page_to_phys - convert page to physical address
- * @page - pointer to page entry in mem_map
- */
-#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
-
-#define virt_to_pfn(kaddr)	(__pa(kaddr) >> PAGE_SHIFT)
-#define pfn_to_virt(pfn)	__va((pfn) << PAGE_SHIFT)
+static inline unsigned long virt_to_pfn(const void *kaddr)
+{
+	return __pa(kaddr) >> PAGE_SHIFT;
+}
 
 #define page_to_virt(page)	__va(page_to_phys(page))
 
-/*
- * For port to Hexagon Virtual Machine, MAYBE we check for attempts
- * to reference reserved HVM space, but in any case, the VM will be
- * protected.
- */
-#define kern_addr_valid(addr)   (1)
-
 #include <asm/mem-layout.h>
 #include <asm-generic/memory_model.h>
 /* XXX Todo: implement assembly-optimized version of getorder. */
diff --git a/arch/hexagon/include/asm/pgalloc.h b/arch/hexagon/include/asm/pgalloc.h
index f0c47e6a7427..937a11ef4c33 100644
--- a/arch/hexagon/include/asm/pgalloc.h
+++ b/arch/hexagon/include/asm/pgalloc.h
@@ -22,7 +22,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	pgd_t *pgd;
 
-	pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+	pgd = __pgd_alloc(mm, 0);
 
 	/*
 	 * There may be better ways to do this, but to ensure
@@ -87,10 +87,7 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
 		max_kernel_seg = pmdindex;
 }
 
-#define __pte_free_tlb(tlb, pte, addr)		\
-do {						\
-	pgtable_pte_page_dtor((pte));		\
-	tlb_remove_page((tlb), (pte));		\
-} while (0)
+#define __pte_free_tlb(tlb, pte, addr)				\
+	tlb_remove_ptdesc((tlb), page_ptdesc(pte))
 
 #endif
diff --git a/arch/hexagon/include/asm/pgtable.h b/arch/hexagon/include/asm/pgtable.h
index 0610724d6a28..fbf24d1d1ca6 100644
--- a/arch/hexagon/include/asm/pgtable.h
+++ b/arch/hexagon/include/asm/pgtable.h
@@ -61,6 +61,9 @@ extern unsigned long empty_zero_page;
  * So we'll put up with a bit of inefficiency for now...
  */
 
+/* We borrow bit 6 to store the exclusive marker in swap PTEs. */
+#define _PAGE_SWP_EXCLUSIVE	(1<<6)
+
 /*
  * Top "FOURTH" level (pgd), which for the Hexagon VM is really
  * only the second from the bottom, pgd and pud both being collapsed.
@@ -126,33 +129,6 @@ extern unsigned long _dflt_cache_att;
  */
 #define CACHEDEF	(CACHE_DEFAULT << 6)
 
-/* Private (copy-on-write) page protections. */
-#define __P000 __pgprot(_PAGE_PRESENT | _PAGE_USER | CACHEDEF)
-#define __P001 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | CACHEDEF)
-#define __P010 __P000	/* Write-only copy-on-write */
-#define __P011 __P001	/* Read/Write copy-on-write */
-#define __P100 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
-			_PAGE_EXECUTE | CACHEDEF)
-#define __P101 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_EXECUTE | \
-			_PAGE_READ | CACHEDEF)
-#define __P110 __P100	/* Write/execute copy-on-write */
-#define __P111 __P101	/* Read/Write/Execute, copy-on-write */
-
-/* Shared page protections. */
-#define __S000 __P000
-#define __S001 __P001
-#define __S010 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
-			_PAGE_WRITE | CACHEDEF)
-#define __S011 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \
-			_PAGE_WRITE | CACHEDEF)
-#define __S100 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
-			_PAGE_EXECUTE | CACHEDEF)
-#define __S101 __P101
-#define __S110 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
-			_PAGE_EXECUTE | _PAGE_WRITE | CACHEDEF)
-#define __S111 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \
-			_PAGE_EXECUTE | _PAGE_WRITE | CACHEDEF)
-
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];  /* located in head.S */
 
 /* HUGETLB not working currently */
@@ -262,9 +238,6 @@ static inline int pte_present(pte_t pte)
 	return pte_val(pte) & _PAGE_PRESENT;
 }
 
-/* mk_pte - make a PTE out of a page pointer and protection bits */
-#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
-
 /* pte_page - returns a page (frame pointer/descriptor?) based on a PTE */
 #define pte_page(x) pfn_to_page(pte_pfn(x))
 
@@ -324,7 +297,7 @@ static inline pte_t pte_wrprotect(pte_t pte)
 }
 
 /* pte_mkwrite - mark page as writable */
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite_novma(pte_t pte)
 {
 	pte_val(pte) |= _PAGE_WRITE;
 	return pte;
@@ -362,6 +335,7 @@ static inline int pte_exec(pte_t pte)
 /* __swp_entry_to_pte - extract PTE from swap entry */
 #define __swp_entry_to_pte(x) ((pte_t) { (x).val })
 
+#define PFN_PTE_SHIFT	PAGE_SHIFT
 /* pfn_pte - convert page number and protection value to page table entry */
 #define pfn_pte(pfn, pgprot) __pte((pfn << PAGE_SHIFT) | pgprot_val(pgprot))
 
@@ -369,14 +343,6 @@ static inline int pte_exec(pte_t pte)
 #define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
 #define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
 
-/*
- * set_pte_at - update page table and do whatever magic may be
- * necessary to make the underlying hardware/firmware take note.
- *
- * VM may require a virtual instruction to alert the MMU.
- */
-#define set_pte_at(mm, addr, ptep, pte) set_pte(ptep, pte)
-
 static inline unsigned long pmd_page_vaddr(pmd_t pmd)
 {
 	return (unsigned long)__va(pmd_val(pmd) & PAGE_MASK);
@@ -386,9 +352,12 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
 #define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))
 
 /*
+ * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
+ * are !pte_none() && !pte_present().
+ *
  * Swap/file PTE definitions.  If _PAGE_PRESENT is zero, the rest of the PTE is
  * interpreted as swap information.  The remaining free bits are interpreted as
- * swap type/offset tuple.  Rather than have the TLB fill handler test
+ * listed below.  Rather than have the TLB fill handler test
  * _PAGE_PRESENT, we're going to reserve the permissions bits and set them to
 * all zeros for swap entries, which speeds up the miss handler at the cost of
 * 3 bits of offset.  That trade-off can be revisited if necessary, but Hexagon
@@ -398,9 +367,10 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
 * Format of swap PTE:
 *	bit	0:	Present (zero)
 *	bits	1-5:	swap type (arch independent layer uses 5 bits max)
- *	bits	6-9:	bits 3:0 of offset
+ *	bit	6:	exclusive marker
+ *	bits	7-9:	bits 2:0 of offset
 *	bits	10-12:	effectively _PAGE_PROTNONE (all zero)
- *	bits	13-31:	bits 22:4 of swap offset
+ *	bits	13-31:	bits 21:3 of swap offset
 *
 * The split offset makes some of the following macros a little gnarly,
 * but there's plenty of precedent for this sort of thing.
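
The layout just described maps one-to-one onto the __swp_entry()/__swp_offset() macros in the hunk that follows. A standalone C sketch (hypothetical helper names, not the kernel macros) that mirrors the packing and checks the round trip:

/*
 * Standalone sketch of the swap-PTE packing described above, mirroring
 * the __swp_entry()/__swp_offset() macros in the next hunk: bit 0 stays
 * zero (not present), bits 1-5 hold the type, bit 6 the exclusive
 * marker, and the offset is split around the reserved bits 10-12.
 */
#include <assert.h>
#include <stdio.h>

static unsigned long swp_entry(unsigned long type, unsigned long offset)
{
	return ((type & 0x1f) << 1) |
	       ((offset & 0x3ffff8) << 10) | ((offset & 0x7) << 7);
}

static unsigned long swp_offset(unsigned long val)
{
	return ((val >> 7) & 0x7) | ((val >> 10) & 0x3ffff8);
}

static unsigned long swp_type(unsigned long val)
{
	return (val >> 1) & 0x1f;
}

int main(void)
{
	unsigned long e = swp_entry(3, 0x12345);

	assert(swp_type(e) == 3);
	assert(swp_offset(e) == 0x12345);
	assert((e & 1) == 0);		/* _PAGE_PRESENT clear */
	assert((e & 0x1c00) == 0);	/* permission bits 10-12 all zero */
	printf("entry=%#lx type=%lu offset=%#lx\n",
	       e, swp_type(e), swp_offset(e));
	return 0;
}

Keeping bits 10-12 zero is what lets the TLB fill handler skip the _PAGE_PRESENT test, per the comment above.
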
@@ -410,11 +380,28 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
 #define __swp_type(swp_pte)		(((swp_pte).val >> 1) & 0x1f)
 
 #define __swp_offset(swp_pte) \
-	((((swp_pte).val >> 6) & 0xf) | (((swp_pte).val >> 9) & 0x7ffff0))
+	((((swp_pte).val >> 7) & 0x7) | (((swp_pte).val >> 10) & 0x3ffff8))
 
 #define __swp_entry(type, offset) \
 	((swp_entry_t) { \
-		((type << 1) | \
-		 ((offset & 0x7ffff0) << 9) | ((offset & 0xf) << 6)) })
+		(((type & 0x1f) << 1) | \
+		 ((offset & 0x3ffff8) << 10) | ((offset & 0x7) << 7)) })
+
+static inline bool pte_swp_exclusive(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
+}
+
+static inline pte_t pte_swp_mkexclusive(pte_t pte)
+{
+	pte_val(pte) |= _PAGE_SWP_EXCLUSIVE;
+	return pte;
+}
+
+static inline pte_t pte_swp_clear_exclusive(pte_t pte)
+{
+	pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE;
+	return pte;
+}
 
 #endif
diff --git a/arch/hexagon/include/asm/processor.h b/arch/hexagon/include/asm/processor.h
index 615f7e49968e..0cd39c2cdf8f 100644
--- a/arch/hexagon/include/asm/processor.h
+++ b/arch/hexagon/include/asm/processor.h
@@ -60,10 +60,6 @@ struct thread_struct {
 #define KSTK_EIP(tsk) (pt_elr(task_pt_regs(tsk)))
 #define KSTK_ESP(tsk) (pt_psp(task_pt_regs(tsk)))
 
-/*  Free all resources held by a thread; defined in process.c  */
-extern void release_thread(struct task_struct *dead_task);
-
-/* Get wait channel for task P.  */
 extern unsigned long __get_wchan(struct task_struct *p);
 
 /*  The following stuff is pretty HEXAGON specific.  */
diff --git a/arch/hexagon/include/asm/ptrace.h b/arch/hexagon/include/asm/ptrace.h
new file mode 100644
index 000000000000..ed35da1ee685
--- /dev/null
+++ b/arch/hexagon/include/asm/ptrace.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Ptrace definitions for the Hexagon architecture
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _ASM_HEXAGON_PTRACE_H
+#define _ASM_HEXAGON_PTRACE_H
+
+#include <uapi/asm/ptrace.h>
+
+/* kprobe-based event tracer support */
+extern int regs_query_register_offset(const char *name);
+extern const char *regs_query_register_name(unsigned int offset);
+
+#define current_pt_regs() \
+	((struct pt_regs *) \
+	 ((unsigned long)current_thread_info() + THREAD_SIZE) - 1)
+
+#if CONFIG_HEXAGON_ARCH_VERSION >= 4
+#define arch_has_single_step()	(1)
+#endif
+
+#endif
diff --git a/arch/hexagon/include/asm/setup.h b/arch/hexagon/include/asm/setup.h
new file mode 100644
index 000000000000..9f2749cd4052
--- /dev/null
+++ b/arch/hexagon/include/asm/setup.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_HEXAGON_SETUP_H
+#define _ASM_HEXAGON_SETUP_H
+
+#include <linux/init.h>
+#include <uapi/asm/setup.h>
+
+extern char external_cmdline_buffer;
+
+void __init setup_arch_memory(void);
+
+#endif
diff --git a/arch/hexagon/include/asm/spinlock_types.h b/arch/hexagon/include/asm/spinlock_types.h
index d5f66495b670..63add2d863e8 100644
--- a/arch/hexagon/include/asm/spinlock_types.h
+++ b/arch/hexagon/include/asm/spinlock_types.h
@@ -9,7 +9,7 @@
 #define _ASM_SPINLOCK_TYPES_H
 
 #ifndef __LINUX_SPINLOCK_TYPES_RAW_H
-# error "please don't include this file directly"
+# error "Please do not include this file directly."
 #endif
 
 typedef struct {
diff --git a/arch/hexagon/include/asm/syscall.h b/arch/hexagon/include/asm/syscall.h
index f6e454f18038..70637261817a 100644
--- a/arch/hexagon/include/asm/syscall.h
+++ b/arch/hexagon/include/asm/syscall.h
@@ -26,6 +26,13 @@ static inline long syscall_get_nr(struct task_struct *task,
 	return regs->r06;
 }
 
+static inline void syscall_set_nr(struct task_struct *task,
+				  struct pt_regs *regs,
+				  int nr)
+{
+	regs->r06 = nr;
+}
+
 static inline void syscall_get_arguments(struct task_struct *task,
 					 struct pt_regs *regs,
 					 unsigned long *args)
@@ -33,6 +40,13 @@ static inline void syscall_get_arguments(struct task_struct *task,
 	memcpy(args, &(&regs->r00)[0], 6 * sizeof(args[0]));
 }
 
+static inline void syscall_set_arguments(struct task_struct *task,
+					 struct pt_regs *regs,
+					 unsigned long *args)
+{
+	memcpy(&(&regs->r00)[0], args, 6 * sizeof(args[0]));
+}
+
 static inline long syscall_get_error(struct task_struct *task,
 				     struct pt_regs *regs)
 {
@@ -45,6 +59,13 @@ static inline long syscall_get_return_value(struct task_struct *task,
 	return regs->r00;
 }
 
+static inline void syscall_set_return_value(struct task_struct *task,
+					    struct pt_regs *regs,
+					    int error, long val)
+{
+	regs->r00 = (long) error ?: val;
+}
+
 static inline int syscall_get_arch(struct task_struct *task)
 {
 	return AUDIT_ARCH_HEXAGON;
diff --git a/arch/hexagon/include/asm/syscalls.h b/arch/hexagon/include/asm/syscalls.h
new file mode 100644
index 000000000000..40f2d08bec92
--- /dev/null
+++ b/arch/hexagon/include/asm/syscalls.h
@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <asm-generic/syscalls.h>
+
+asmlinkage long sys_hexagon_fadvise64_64(int fd, int advice,
+					 u32 a2, u32 a3, u32 a4, u32 a5);
diff --git a/arch/hexagon/include/asm/unistd.h b/arch/hexagon/include/asm/unistd.h
new file mode 100644
index 000000000000..1f462bade75c
--- /dev/null
+++ b/arch/hexagon/include/asm/unistd.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+
+#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_SYS_VFORK
+#define __ARCH_WANT_SYS_FORK
+
+#define __ARCH_BROKEN_SYS_CLONE3
+
+#include <uapi/asm/unistd.h>
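
One last orientation aid for the bitops.h change above: arch_test_bit() and arch_test_bit_acquire() index the bitmap as addr[BIT_WORD(nr)] and test bit nr % BITS_PER_LONG, which is the job the tstbit instruction does in the inline asm. A minimal standalone sketch of that word/bit split, assuming nothing beyond hosted C (the names below shadow the kernel's for illustration only):

/*
 * Standalone sketch of the word/bit indexing used by arch_test_bit():
 * BIT_WORD(nr) selects the long that holds bit nr, and
 * nr % BITS_PER_LONG selects the bit inside it.
 */
#include <assert.h>
#include <limits.h>

#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)
#define BIT_WORD(nr)	((nr) / BITS_PER_LONG)

static int sketch_test_bit(unsigned long nr, const unsigned long *addr)
{
	return (addr[BIT_WORD(nr)] >> (nr % BITS_PER_LONG)) & 1UL;
}

int main(void)
{
	unsigned long map[2] = { 0 };

	map[BIT_WORD(37)] |= 1UL << (37 % BITS_PER_LONG);	/* set bit 37 */
	assert(sketch_test_bit(37, map));
	assert(!sketch_test_bit(36, map));
	return 0;
}
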