Diffstat (limited to 'arch/csky')
-rw-r--r--  arch/csky/Kconfig | 51
-rw-r--r--  arch/csky/Makefile | 2
-rw-r--r--  arch/csky/abiv1/alignment.c | 18
-rw-r--r--  arch/csky/abiv1/cacheflush.c | 35
-rw-r--r--  arch/csky/abiv1/inc/abi/cacheflush.h | 4
-rw-r--r--  arch/csky/abiv1/inc/abi/pgtable-bits.h | 13
-rw-r--r--  arch/csky/abiv1/inc/abi/string.h | 6
-rw-r--r--  arch/csky/abiv1/mmap.c | 15
-rw-r--r--  arch/csky/abiv2/cacheflush.c | 36
-rw-r--r--  arch/csky/abiv2/inc/abi/cacheflush.h | 12
-rw-r--r--  arch/csky/abiv2/inc/abi/pgtable-bits.h | 19
-rw-r--r--  arch/csky/boot/dts/Makefile | 4
-rw-r--r--  arch/csky/configs/defconfig | 3
-rw-r--r--  arch/csky/include/asm/Kbuild | 8
-rw-r--r--  arch/csky/include/asm/atomic.h | 35
-rw-r--r--  arch/csky/include/asm/cachetype.h | 9
-rw-r--r--  arch/csky/include/asm/cmpxchg.h | 41
-rw-r--r--  arch/csky/include/asm/ftrace.h | 6
-rw-r--r--  arch/csky/include/asm/io.h | 13
-rw-r--r--  arch/csky/include/asm/irq_work.h | 2
-rw-r--r--  arch/csky/include/asm/jump_label.h | 52
-rw-r--r--  arch/csky/include/asm/page.h | 20
-rw-r--r--  arch/csky/include/asm/pci.h | 23
-rw-r--r--  arch/csky/include/asm/pgalloc.h | 11
-rw-r--r--  arch/csky/include/asm/pgtable.h | 59
-rw-r--r--  arch/csky/include/asm/processor.h | 9
-rw-r--r--  arch/csky/include/asm/ptrace.h | 2
-rw-r--r--  arch/csky/include/asm/sections.h | 12
-rw-r--r--  arch/csky/include/asm/smp.h | 2
-rw-r--r--  arch/csky/include/asm/spinlock.h | 12
-rw-r--r--  arch/csky/include/asm/spinlock_types.h | 9
-rw-r--r--  arch/csky/include/asm/stackprotector.h | 10
-rw-r--r--  arch/csky/include/asm/syscall.h | 13
-rw-r--r--  arch/csky/include/asm/traps.h | 17
-rw-r--r--  arch/csky/include/asm/unistd.h | 3
-rw-r--r--  arch/csky/include/asm/vdso.h | 5
-rw-r--r--  arch/csky/include/asm/vdso/clocksource.h | 9
-rw-r--r--  arch/csky/include/asm/vdso/gettimeofday.h | 114
-rw-r--r--  arch/csky/include/asm/vdso/processor.h | 12
-rw-r--r--  arch/csky/include/asm/vdso/vsyscall.h | 22
-rw-r--r--  arch/csky/include/uapi/asm/Kbuild | 2
-rw-r--r--  arch/csky/include/uapi/asm/unistd.h | 14
-rw-r--r--  arch/csky/kernel/Makefile | 7
-rw-r--r--  arch/csky/kernel/Makefile.syscalls | 4
-rw-r--r--  arch/csky/kernel/entry.S | 11
-rw-r--r--  arch/csky/kernel/io.c | 91
-rw-r--r--  arch/csky/kernel/jump_label.c | 54
-rw-r--r--  arch/csky/kernel/module.c | 2
-rw-r--r--  arch/csky/kernel/perf_event.c | 3
-rw-r--r--  arch/csky/kernel/probes/ftrace.c | 3
-rw-r--r--  arch/csky/kernel/probes/kprobes.c | 4
-rw-r--r--  arch/csky/kernel/probes/uprobes.c | 3
-rw-r--r--  arch/csky/kernel/process.c | 5
-rw-r--r--  arch/csky/kernel/setup.c | 59
-rw-r--r--  arch/csky/kernel/signal.c | 2
-rw-r--r--  arch/csky/kernel/smp.c | 25
-rw-r--r--  arch/csky/kernel/stacktrace.c | 6
-rw-r--r--  arch/csky/kernel/syscall.c | 2
-rw-r--r--  arch/csky/kernel/syscall_table.c | 4
-rw-r--r--  arch/csky/kernel/traps.c | 1
-rw-r--r--  arch/csky/kernel/vdso.c | 49
-rw-r--r--  arch/csky/kernel/vdso/Makefile | 25
-rw-r--r--  arch/csky/kernel/vdso/vdso.lds.S | 4
-rw-r--r--  arch/csky/kernel/vdso/vgettimeofday.c | 28
-rw-r--r--  arch/csky/kernel/vmlinux.lds.S | 16
-rw-r--r--  arch/csky/lib/delay.c | 2
-rw-r--r--  arch/csky/mm/asid.c | 5
-rw-r--r--  arch/csky/mm/fault.c | 26
-rw-r--r--  arch/csky/mm/init.c | 87
69 files changed, 545 insertions, 747 deletions
diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig
index f55ba1745f7b..acc431c331b0 100644
--- a/arch/csky/Kconfig
+++ b/arch/csky/Kconfig
@@ -2,12 +2,42 @@
config CSKY
def_bool y
select ARCH_32BIT_OFF_T
+ select ARCH_HAS_CPU_CACHE_ALIASING
select ARCH_HAS_DMA_PREP_COHERENT
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_QUEUED_RWLOCKS
+ select ARCH_USE_QUEUED_SPINLOCKS
+ select ARCH_HAS_CURRENT_STACK_POINTER
+ select ARCH_INLINE_READ_LOCK if !PREEMPTION
+ select ARCH_INLINE_READ_LOCK_BH if !PREEMPTION
+ select ARCH_INLINE_READ_LOCK_IRQ if !PREEMPTION
+ select ARCH_INLINE_READ_LOCK_IRQSAVE if !PREEMPTION
+ select ARCH_INLINE_READ_UNLOCK if !PREEMPTION
+ select ARCH_INLINE_READ_UNLOCK_BH if !PREEMPTION
+ select ARCH_INLINE_READ_UNLOCK_IRQ if !PREEMPTION
+ select ARCH_INLINE_READ_UNLOCK_IRQRESTORE if !PREEMPTION
+ select ARCH_INLINE_WRITE_LOCK if !PREEMPTION
+ select ARCH_INLINE_WRITE_LOCK_BH if !PREEMPTION
+ select ARCH_INLINE_WRITE_LOCK_IRQ if !PREEMPTION
+ select ARCH_INLINE_WRITE_LOCK_IRQSAVE if !PREEMPTION
+ select ARCH_INLINE_WRITE_UNLOCK if !PREEMPTION
+ select ARCH_INLINE_WRITE_UNLOCK_BH if !PREEMPTION
+ select ARCH_INLINE_WRITE_UNLOCK_IRQ if !PREEMPTION
+ select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE if !PREEMPTION
+ select ARCH_INLINE_SPIN_TRYLOCK if !PREEMPTION
+ select ARCH_INLINE_SPIN_TRYLOCK_BH if !PREEMPTION
+ select ARCH_INLINE_SPIN_LOCK if !PREEMPTION
+ select ARCH_INLINE_SPIN_LOCK_BH if !PREEMPTION
+ select ARCH_INLINE_SPIN_LOCK_IRQ if !PREEMPTION
+ select ARCH_INLINE_SPIN_LOCK_IRQSAVE if !PREEMPTION
+ select ARCH_INLINE_SPIN_UNLOCK if !PREEMPTION
+ select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPTION
+ select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPTION
+ select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPTION
+ select ARCH_NEED_CMPXCHG_1_EMU
select ARCH_WANT_FRAME_POINTERS if !CPU_CK610 && $(cc-option,-mbacktrace)
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
select COMMON_CLK
@@ -34,12 +64,11 @@ config CSKY
select GENERIC_IRQ_MULTI_HANDLER
select GENERIC_SCHED_CLOCK
select GENERIC_SMP_IDLE_THREAD
- select GENERIC_TIME_VSYSCALL
- select GENERIC_VDSO_32
- select GENERIC_GETTIMEOFDAY
select GX6605S_TIMER if CPU_CK610
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_AUDITSYSCALL
+ select HAVE_ARCH_JUMP_LABEL if !CPU_CK610
+ select HAVE_ARCH_JUMP_LABEL_RELATIVE
select HAVE_ARCH_MMAP_RND_BITS
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_CONTEXT_TRACKING_USER
@@ -48,7 +77,6 @@ config CSKY
select HAVE_DEBUG_KMEMLEAK
select HAVE_DYNAMIC_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_REGS
- select HAVE_GENERIC_VDSO
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_ERROR_INJECTION
@@ -59,14 +87,16 @@ config CSKY
select HAVE_KPROBES if !CPU_CK610
select HAVE_KPROBES_ON_FTRACE if !CPU_CK610
select HAVE_KRETPROBES if !CPU_CK610
+ select HAVE_PAGE_SIZE_4KB
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_DMA_CONTIGUOUS
select HAVE_REGS_AND_STACK_ACCESS_API
- select HAVE_RSEQ
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
+ select HOTPLUG_CORE_SYNC_DEAD if HOTPLUG_CPU
+ select LOCK_MM_AND_FIND_VMA
select MAY_HAVE_SPARSE_IRQ
select MODULES_USE_ELF_RELA if MODULES
select OF
@@ -137,11 +167,6 @@ config STACKTRACE_SUPPORT
config TIME_LOW_RES
def_bool y
-config CPU_TLB_SIZE
- int
- default "128" if (CPU_CK610 || CPU_CK807 || CPU_CK810)
- default "1024" if (CPU_CK860)
-
config CPU_ASID_BITS
int
default "8" if (CPU_CK610 || CPU_CK807 || CPU_CK810)
@@ -240,7 +265,7 @@ menuconfig HAVE_TCM
bool "Tightly-Coupled/Sram Memory"
depends on !COMPILE_TEST
help
- The implementation are not only used by TCM (Tightly-Coupled Meory)
+ The implementation are not only used by TCM (Tightly-Coupled Memory)
but also used by sram on SOC bus. It follow existed linux tcm
software interface, so that old tcm application codes could be
re-used directly.
@@ -303,10 +328,6 @@ config HIGHMEM
select KMAP_LOCAL
default y
-config FORCE_MAX_ZONEORDER
- int "Maximum zone order"
- default "11"
-
config DRAM_BASE
hex "DRAM start addr (the same with memory-section in dts)"
default 0x0
diff --git a/arch/csky/Makefile b/arch/csky/Makefile
index 4e1d619fd5c6..0e4237e55758 100644
--- a/arch/csky/Makefile
+++ b/arch/csky/Makefile
@@ -59,8 +59,6 @@ LDFLAGS += -EL
KBUILD_AFLAGS += $(KBUILD_CFLAGS)
-head-y := arch/csky/kernel/head.o
-
core-y += arch/csky/$(CSKYABI)/
libs-y += arch/csky/lib/ \
diff --git a/arch/csky/abiv1/alignment.c b/arch/csky/abiv1/alignment.c
index 2df115d0e210..aee904833dec 100644
--- a/arch/csky/abiv1/alignment.c
+++ b/arch/csky/abiv1/alignment.c
@@ -300,7 +300,7 @@ bad_area:
force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)addr);
}
-static struct ctl_table alignment_tbl[5] = {
+static const struct ctl_table alignment_tbl[] = {
{
.procname = "kernel_enable",
.data = &align_kern_enable,
@@ -329,25 +329,11 @@ static struct ctl_table alignment_tbl[5] = {
.mode = 0666,
.proc_handler = &proc_dointvec
},
- {}
-};
-
-static struct ctl_table sysctl_table[2] = {
- {
- .procname = "csky_alignment",
- .mode = 0555,
- .child = alignment_tbl},
- {}
-};
-
-static struct ctl_path sysctl_path[2] = {
- {.procname = "csky"},
- {}
};
static int __init csky_alignment_init(void)
{
- register_sysctl_paths(sysctl_path, sysctl_table);
+ register_sysctl_init("csky/csky_alignment", alignment_tbl);
return 0;
}
diff --git a/arch/csky/abiv1/cacheflush.c b/arch/csky/abiv1/cacheflush.c
index fb91b069dc69..171e8fb32285 100644
--- a/arch/csky/abiv1/cacheflush.c
+++ b/arch/csky/abiv1/cacheflush.c
@@ -11,46 +11,55 @@
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>
+#include <asm/tlbflush.h>
#define PG_dcache_clean PG_arch_1
-void flush_dcache_page(struct page *page)
+void flush_dcache_folio(struct folio *folio)
{
struct address_space *mapping;
- if (page == ZERO_PAGE(0))
+ if (is_zero_pfn(folio_pfn(folio)))
return;
- mapping = page_mapping_file(page);
+ mapping = folio_flush_mapping(folio);
- if (mapping && !page_mapcount(page))
- clear_bit(PG_dcache_clean, &page->flags);
+ if (mapping && !folio_mapped(folio))
+ clear_bit(PG_dcache_clean, &folio->flags);
else {
dcache_wbinv_all();
if (mapping)
icache_inv_all();
- set_bit(PG_dcache_clean, &page->flags);
+ set_bit(PG_dcache_clean, &folio->flags);
}
}
+EXPORT_SYMBOL(flush_dcache_folio);
+
+void flush_dcache_page(struct page *page)
+{
+ flush_dcache_folio(page_folio(page));
+}
EXPORT_SYMBOL(flush_dcache_page);
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
- pte_t *ptep)
+void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep, unsigned int nr)
{
unsigned long pfn = pte_pfn(*ptep);
- struct page *page;
+ struct folio *folio;
+
+ flush_tlb_page(vma, addr);
if (!pfn_valid(pfn))
return;
- page = pfn_to_page(pfn);
- if (page == ZERO_PAGE(0))
+ if (is_zero_pfn(pfn))
return;
- if (!test_and_set_bit(PG_dcache_clean, &page->flags))
+ folio = page_folio(pfn_to_page(pfn));
+ if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
dcache_wbinv_all();
- if (page_mapping_file(page)) {
+ if (folio_flush_mapping(folio)) {
if (vma->vm_flags & VM_EXEC)
icache_inv_all();
}
diff --git a/arch/csky/abiv1/inc/abi/cacheflush.h b/arch/csky/abiv1/inc/abi/cacheflush.h
index ed62e2066ba7..d011a81575d2 100644
--- a/arch/csky/abiv1/inc/abi/cacheflush.h
+++ b/arch/csky/abiv1/inc/abi/cacheflush.h
@@ -9,6 +9,8 @@
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);
+void flush_dcache_folio(struct folio *);
+#define flush_dcache_folio flush_dcache_folio
#define flush_cache_mm(mm) dcache_wbinv_all()
#define flush_cache_page(vma, page, pfn) cache_wbinv_all()
@@ -41,9 +43,9 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
*/
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
#define flush_cache_vmap(start, end) cache_wbinv_all()
+#define flush_cache_vmap_early(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) cache_wbinv_all()
-#define flush_icache_page(vma, page) do {} while (0);
#define flush_icache_range(start, end) cache_wbinv_range(start, end)
#define flush_icache_mm_range(mm, start, end) cache_wbinv_range(start, end)
#define flush_icache_deferred(mm) do {} while (0);
diff --git a/arch/csky/abiv1/inc/abi/pgtable-bits.h b/arch/csky/abiv1/inc/abi/pgtable-bits.h
index 752c8b3f9194..ae7a2f76dd42 100644
--- a/arch/csky/abiv1/inc/abi/pgtable-bits.h
+++ b/arch/csky/abiv1/inc/abi/pgtable-bits.h
@@ -10,6 +10,9 @@
#define _PAGE_ACCESSED (1<<3)
#define _PAGE_MODIFIED (1<<4)
+/* We borrow bit 9 to store the exclusive marker in swap PTEs. */
+#define _PAGE_SWP_EXCLUSIVE (1<<9)
+
/* implemented in hardware */
#define _PAGE_GLOBAL (1<<6)
#define _PAGE_VALID (1<<7)
@@ -26,7 +29,8 @@
#define _PAGE_PROT_NONE _PAGE_READ
/*
- * Encode and decode a swap entry
+ * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
+ * are !pte_none() && !pte_present().
*
* Format of swap PTE:
* bit 0: _PAGE_PRESENT (zero)
@@ -35,15 +39,16 @@
* bit 6: _PAGE_GLOBAL (zero)
* bit 7: _PAGE_VALID (zero)
* bit 8: swap type[4]
- * bit 9 - 31: swap offset
+ * bit 9: exclusive marker
+ * bit 10 - 31: swap offset
*/
#define __swp_type(x) ((((x).val >> 2) & 0xf) | \
(((x).val >> 4) & 0x10))
-#define __swp_offset(x) ((x).val >> 9)
+#define __swp_offset(x) ((x).val >> 10)
#define __swp_entry(type, offset) ((swp_entry_t) { \
((type & 0xf) << 2) | \
((type & 0x10) << 4) | \
- ((offset) << 9)})
+ ((offset) << 10)})
#define HAVE_ARCH_UNMAPPED_AREA
diff --git a/arch/csky/abiv1/inc/abi/string.h b/arch/csky/abiv1/inc/abi/string.h
index 9d95594b0feb..de50117b904d 100644
--- a/arch/csky/abiv1/inc/abi/string.h
+++ b/arch/csky/abiv1/inc/abi/string.h
@@ -6,4 +6,10 @@
#define __HAVE_ARCH_MEMCPY
extern void *memcpy(void *, const void *, __kernel_size_t);
+#define __HAVE_ARCH_MEMMOVE
+extern void *memmove(void *, const void *, __kernel_size_t);
+
+#define __HAVE_ARCH_MEMSET
+extern void *memset(void *, int, __kernel_size_t);
+
#endif /* __ABI_CSKY_STRING_H */
diff --git a/arch/csky/abiv1/mmap.c b/arch/csky/abiv1/mmap.c
index 6792aca49999..1047865e82a9 100644
--- a/arch/csky/abiv1/mmap.c
+++ b/arch/csky/abiv1/mmap.c
@@ -23,12 +23,18 @@
*/
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff, unsigned long flags)
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags, vm_flags_t vm_flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
int do_align = 0;
- struct vm_unmapped_area_info info;
+ struct vm_unmapped_area_info info = {
+ .length = len,
+ .low_limit = mm->mmap_base,
+ .high_limit = TASK_SIZE,
+ .align_offset = pgoff << PAGE_SHIFT
+ };
/*
* We only need to do colour alignment if either the I or D
@@ -61,11 +67,6 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
return addr;
}
- info.flags = 0;
- info.length = len;
- info.low_limit = mm->mmap_base;
- info.high_limit = TASK_SIZE;
info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
- info.align_offset = pgoff << PAGE_SHIFT;
return vm_unmapped_area(&info);
}
diff --git a/arch/csky/abiv2/cacheflush.c b/arch/csky/abiv2/cacheflush.c
index 39c51399dd81..876028b1083f 100644
--- a/arch/csky/abiv2/cacheflush.c
+++ b/arch/csky/abiv2/cacheflush.c
@@ -5,31 +5,35 @@
#include <linux/highmem.h>
#include <linux/mm.h>
#include <asm/cache.h>
+#include <asm/tlbflush.h>
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
- pte_t *pte)
+void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
+ unsigned long address, pte_t *pte, unsigned int nr)
{
- unsigned long addr;
- struct page *page;
+ unsigned long pfn = pte_pfn(*pte);
+ struct folio *folio;
+ unsigned int i;
- if (!pfn_valid(pte_pfn(*pte)))
- return;
-
- page = pfn_to_page(pte_pfn(*pte));
- if (page == ZERO_PAGE(0))
- return;
+ flush_tlb_page(vma, address);
- if (test_and_set_bit(PG_dcache_clean, &page->flags))
+ if (!pfn_valid(pfn))
return;
- addr = (unsigned long) kmap_atomic(page);
+ folio = page_folio(pfn_to_page(pfn));
- dcache_wb_range(addr, addr + PAGE_SIZE);
+ if (test_and_set_bit(PG_dcache_clean, &folio->flags))
+ return;
- if (vma->vm_flags & VM_EXEC)
- icache_inv_range(addr, addr + PAGE_SIZE);
+ icache_inv_range(address, address + nr*PAGE_SIZE);
+ for (i = 0; i < folio_nr_pages(folio); i++) {
+ unsigned long addr = (unsigned long) kmap_local_folio(folio,
+ i * PAGE_SIZE);
- kunmap_atomic((void *) addr);
+ dcache_wb_range(addr, addr + PAGE_SIZE);
+ if (vma->vm_flags & VM_EXEC)
+ icache_inv_range(addr, addr + PAGE_SIZE);
+ kunmap_local((void *) addr);
+ }
}
void flush_icache_deferred(struct mm_struct *mm)
diff --git a/arch/csky/abiv2/inc/abi/cacheflush.h b/arch/csky/abiv2/inc/abi/cacheflush.h
index a565e00c3f70..6513ac5d2578 100644
--- a/arch/csky/abiv2/inc/abi/cacheflush.h
+++ b/arch/csky/abiv2/inc/abi/cacheflush.h
@@ -18,16 +18,21 @@
#define PG_dcache_clean PG_arch_1
+static inline void flush_dcache_folio(struct folio *folio)
+{
+ if (test_bit(PG_dcache_clean, &folio->flags))
+ clear_bit(PG_dcache_clean, &folio->flags);
+}
+#define flush_dcache_folio flush_dcache_folio
+
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
static inline void flush_dcache_page(struct page *page)
{
- if (test_bit(PG_dcache_clean, &page->flags))
- clear_bit(PG_dcache_clean, &page->flags);
+ flush_dcache_folio(page_folio(page));
}
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
-#define flush_icache_page(vma, page) do { } while (0)
#define flush_icache_range(start, end) cache_wbinv_range(start, end)
@@ -36,6 +41,7 @@ void flush_icache_mm_range(struct mm_struct *mm,
void flush_icache_deferred(struct mm_struct *mm);
#define flush_cache_vmap(start, end) do { } while (0)
+#define flush_cache_vmap_early(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
diff --git a/arch/csky/abiv2/inc/abi/pgtable-bits.h b/arch/csky/abiv2/inc/abi/pgtable-bits.h
index 7e7f389f546f..526152bd2156 100644
--- a/arch/csky/abiv2/inc/abi/pgtable-bits.h
+++ b/arch/csky/abiv2/inc/abi/pgtable-bits.h
@@ -10,6 +10,9 @@
#define _PAGE_PRESENT (1<<10)
#define _PAGE_MODIFIED (1<<11)
+/* We borrow bit 7 to store the exclusive marker in swap PTEs. */
+#define _PAGE_SWP_EXCLUSIVE (1<<7)
+
/* implemented in hardware */
#define _PAGE_GLOBAL (1<<0)
#define _PAGE_VALID (1<<1)
@@ -26,23 +29,25 @@
#define _PAGE_PROT_NONE _PAGE_WRITE
/*
- * Encode and decode a swap entry
+ * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
+ * are !pte_none() && !pte_present().
*
* Format of swap PTE:
* bit 0: _PAGE_GLOBAL (zero)
* bit 1: _PAGE_VALID (zero)
* bit 2 - 6: swap type
- * bit 7 - 8: swap offset[0 - 1]
+ * bit 7: exclusive marker
+ * bit 8: swap offset[0]
* bit 9: _PAGE_WRITE (zero)
* bit 10: _PAGE_PRESENT (zero)
- * bit 11 - 31: swap offset[2 - 22]
+ * bit 11 - 31: swap offset[1 - 21]
*/
#define __swp_type(x) (((x).val >> 2) & 0x1f)
-#define __swp_offset(x) ((((x).val >> 7) & 0x3) | \
- (((x).val >> 9) & 0x7ffffc))
+#define __swp_offset(x) ((((x).val >> 8) & 0x1) | \
+ (((x).val >> 10) & 0x3ffffe))
#define __swp_entry(type, offset) ((swp_entry_t) { \
((type & 0x1f) << 2) | \
- ((offset & 0x3) << 7) | \
- ((offset & 0x7ffffc) << 9)})
+ ((offset & 0x1) << 8) | \
+ ((offset & 0x3ffffe) << 10)})
#endif /* __ASM_CSKY_PGTABLE_BITS_H */
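For reference, here is a minimal standalone C sketch (an addition of this note, not part of the patch) that mirrors the new abiv2 swap-PTE encoding above and checks that the type/offset round-trip is lossless while bit 7 stays free for the exclusive marker:

/* Userspace model of the abiv2 swap-PTE layout: 5-bit type in bits 2-6,
 * offset bit 0 in PTE bit 8, offset bits 1-21 in PTE bits 11-31. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SWP_EXCLUSIVE (1u << 7)	/* _PAGE_SWP_EXCLUSIVE */

static uint32_t swp_entry(uint32_t type, uint32_t offset)
{
	return ((type & 0x1f) << 2) |
	       ((offset & 0x1) << 8) |
	       ((offset & 0x3ffffe) << 10);
}

static uint32_t swp_type(uint32_t val)
{
	return (val >> 2) & 0x1f;
}

static uint32_t swp_offset(uint32_t val)
{
	return ((val >> 8) & 0x1) | ((val >> 10) & 0x3ffffe);
}

int main(void)
{
	uint32_t val = swp_entry(0x15, 0x2aaaaa) | SWP_EXCLUSIVE;

	assert(swp_type(val) == 0x15);
	assert(swp_offset(val) == 0x2aaaaa);
	/* _PAGE_GLOBAL, _PAGE_VALID, _PAGE_WRITE, _PAGE_PRESENT stay zero */
	assert((val & ((1u << 0) | (1u << 1) | (1u << 9) | (1u << 10))) == 0);
	printf("swap PTE: 0x%08x\n", val);
	return 0;
}
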
diff --git a/arch/csky/boot/dts/Makefile b/arch/csky/boot/dts/Makefile
index 5f1f55e911ad..aabffc1912e8 100644
--- a/arch/csky/boot/dts/Makefile
+++ b/arch/csky/boot/dts/Makefile
@@ -1,4 +1,2 @@
# SPDX-License-Identifier: GPL-2.0-only
-dtstree := $(srctree)/$(src)
-
-dtb-y := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts))
+dtb-y := $(patsubst $(src)/%.dts,%.dtb, $(wildcard $(src)/*.dts))
diff --git a/arch/csky/configs/defconfig b/arch/csky/configs/defconfig
index af722e4dfb47..ff559e5162aa 100644
--- a/arch/csky/configs/defconfig
+++ b/arch/csky/configs/defconfig
@@ -34,7 +34,8 @@ CONFIG_GENERIC_PHY=y
CONFIG_EXT4_FS=y
CONFIG_FANOTIFY=y
CONFIG_QUOTA=y
-CONFIG_FSCACHE=m
+CONFIG_NETFS_SUPPORT=m
+CONFIG_FSCACHE=y
CONFIG_FSCACHE_STATS=y
CONFIG_CACHEFILES=m
CONFIG_MSDOS_FS=y
diff --git a/arch/csky/include/asm/Kbuild b/arch/csky/include/asm/Kbuild
index 103207a58f97..3a5c7f6e5aac 100644
--- a/arch/csky/include/asm/Kbuild
+++ b/arch/csky/include/asm/Kbuild
@@ -1,12 +1,14 @@
# SPDX-License-Identifier: GPL-2.0
+syscall-y := syscall_table_32.h
+
generic-y += asm-offsets.h
generic-y += extable.h
-generic-y += gpio.h
generic-y += kvm_para.h
-generic-y += spinlock.h
-generic-y += spinlock_types.h
+generic-y += mcs_spinlock.h
generic-y += qrwlock.h
generic-y += qrwlock_types.h
+generic-y += qspinlock.h
generic-y += parport.h
generic-y += user.h
generic-y += vmlinux.lds.h
+generic-y += text-patching.h
diff --git a/arch/csky/include/asm/atomic.h b/arch/csky/include/asm/atomic.h
index 60406ef9c2bb..4dab44f6143a 100644
--- a/arch/csky/include/asm/atomic.h
+++ b/arch/csky/include/asm/atomic.h
@@ -195,41 +195,6 @@ arch_atomic_dec_if_positive(atomic_t *v)
}
#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
-#define ATOMIC_OP() \
-static __always_inline \
-int arch_atomic_xchg_relaxed(atomic_t *v, int n) \
-{ \
- return __xchg_relaxed(n, &(v->counter), 4); \
-} \
-static __always_inline \
-int arch_atomic_cmpxchg_relaxed(atomic_t *v, int o, int n) \
-{ \
- return __cmpxchg_relaxed(&(v->counter), o, n, 4); \
-} \
-static __always_inline \
-int arch_atomic_cmpxchg_acquire(atomic_t *v, int o, int n) \
-{ \
- return __cmpxchg_acquire(&(v->counter), o, n, 4); \
-} \
-static __always_inline \
-int arch_atomic_cmpxchg(atomic_t *v, int o, int n) \
-{ \
- return __cmpxchg(&(v->counter), o, n, 4); \
-}
-
-#define ATOMIC_OPS() \
- ATOMIC_OP()
-
-ATOMIC_OPS()
-
-#define arch_atomic_xchg_relaxed arch_atomic_xchg_relaxed
-#define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg_relaxed
-#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg_acquire
-#define arch_atomic_cmpxchg arch_atomic_cmpxchg
-
-#undef ATOMIC_OPS
-#undef ATOMIC_OP
-
#else
#include <asm-generic/atomic.h>
#endif
diff --git a/arch/csky/include/asm/cachetype.h b/arch/csky/include/asm/cachetype.h
new file mode 100644
index 000000000000..98cbe3af662f
--- /dev/null
+++ b/arch/csky/include/asm/cachetype.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_CSKY_CACHETYPE_H
+#define __ASM_CSKY_CACHETYPE_H
+
+#include <linux/types.h>
+
+#define cpu_dcache_is_aliasing() true
+
+#endif
diff --git a/arch/csky/include/asm/cmpxchg.h b/arch/csky/include/asm/cmpxchg.h
index 5b8faccd65e4..db6dda47184e 100644
--- a/arch/csky/include/asm/cmpxchg.h
+++ b/arch/csky/include/asm/cmpxchg.h
@@ -4,9 +4,9 @@
#define __ASM_CSKY_CMPXCHG_H
#ifdef CONFIG_SMP
+#include <linux/bug.h>
#include <asm/barrier.h>
-
-extern void __bad_xchg(void);
+#include <linux/cmpxchg-emu.h>
#define __xchg_relaxed(new, ptr, size) \
({ \
@@ -15,6 +15,26 @@ extern void __bad_xchg(void);
__typeof__(*(ptr)) __ret; \
unsigned long tmp; \
switch (size) { \
+ case 2: { \
+ u32 ret; \
+ u32 shif = ((ulong)__ptr & 2) ? 16 : 0; \
+ u32 mask = 0xffff << shif; \
+ __ptr = (__typeof__(ptr))((ulong)__ptr & ~2); \
+ __asm__ __volatile__ ( \
+ "1: ldex.w %0, (%4)\n" \
+ " and %1, %0, %2\n" \
+ " or %1, %1, %3\n" \
+ " stex.w %1, (%4)\n" \
+ " bez %1, 1b\n" \
+ : "=&r" (ret), "=&r" (tmp) \
+ : "r" (~mask), \
+ "r" ((u32)__new << shif), \
+ "r" (__ptr) \
+ : "memory"); \
+ __ret = (__typeof__(*(ptr))) \
+ ((ret & mask) >> shif); \
+ break; \
+ } \
case 4: \
asm volatile ( \
"1: ldex.w %0, (%3) \n" \
@@ -26,7 +46,7 @@ extern void __bad_xchg(void);
:); \
break; \
default: \
- __bad_xchg(); \
+ BUILD_BUG(); \
} \
__ret; \
})
@@ -42,6 +62,9 @@ extern void __bad_xchg(void);
__typeof__(old) __old = (old); \
__typeof__(*(ptr)) __ret; \
switch (size) { \
+ case 1: \
+ __ret = (__typeof__(*(ptr)))cmpxchg_emu_u8((volatile u8 *)__ptr, (uintptr_t)__old, (uintptr_t)__new); \
+ break; \
case 4: \
asm volatile ( \
"1: ldex.w %0, (%3) \n" \
@@ -56,7 +79,7 @@ extern void __bad_xchg(void);
:); \
break; \
default: \
- __bad_xchg(); \
+ BUILD_BUG(); \
} \
__ret; \
})
@@ -72,6 +95,9 @@ extern void __bad_xchg(void);
__typeof__(old) __old = (old); \
__typeof__(*(ptr)) __ret; \
switch (size) { \
+ case 1: \
+ __ret = (__typeof__(*(ptr)))cmpxchg_emu_u8((volatile u8 *)__ptr, (uintptr_t)__old, (uintptr_t)__new); \
+ break; \
case 4: \
asm volatile ( \
"1: ldex.w %0, (%3) \n" \
@@ -87,7 +113,7 @@ extern void __bad_xchg(void);
:); \
break; \
default: \
- __bad_xchg(); \
+ BUILD_BUG(); \
} \
__ret; \
})
@@ -103,6 +129,9 @@ extern void __bad_xchg(void);
__typeof__(old) __old = (old); \
__typeof__(*(ptr)) __ret; \
switch (size) { \
+ case 1: \
+ __ret = (__typeof__(*(ptr)))cmpxchg_emu_u8((volatile u8 *)__ptr, (uintptr_t)__old, (uintptr_t)__new); \
+ break; \
case 4: \
asm volatile ( \
RELEASE_FENCE \
@@ -119,7 +148,7 @@ extern void __bad_xchg(void);
:); \
break; \
default: \
- __bad_xchg(); \
+ BUILD_BUG(); \
} \
__ret; \
})
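To make the new 16-bit __xchg_relaxed() case easier to follow, here is a hedged, non-atomic C model of what one ldex.w/stex.w iteration computes (illustrative only; the real code retries the store-conditional, and this assumes the little-endian layout the kernel builds with -EL):

#include <assert.h>
#include <stdint.h>

static uint16_t xchg16_model(volatile uint16_t *ptr, uint16_t new)
{
	uintptr_t p = (uintptr_t)ptr;
	unsigned int shift = (p & 2) ? 16 : 0;	/* which half of the word */
	uint32_t mask = 0xffffu << shift;
	volatile uint32_t *wp = (volatile uint32_t *)(p & ~(uintptr_t)2);
	uint32_t old = *wp;			/* ldex.w */

	/* splice the new halfword in, keep the other half untouched */
	*wp = (old & ~mask) | ((uint32_t)new << shift);	/* stex.w */
	return (uint16_t)((old & mask) >> shift);
}

int main(void)
{
	union { uint32_t w; uint16_t h[2]; } u = { .w = 0x11112222 };

	/* little-endian: h[0] is the low halfword of the 32-bit word */
	assert(xchg16_model(&u.h[0], 0x3333) == 0x2222);
	assert(u.w == 0x11113333);
	return 0;
}
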
diff --git a/arch/csky/include/asm/ftrace.h b/arch/csky/include/asm/ftrace.h
index 9b86341731b6..00f9f7647e3f 100644
--- a/arch/csky/include/asm/ftrace.h
+++ b/arch/csky/include/asm/ftrace.h
@@ -7,8 +7,6 @@
#define HAVE_FUNCTION_GRAPH_FP_TEST
-#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
-
#define ARCH_SUPPORTS_FTRACE_OPS 1
#define MCOUNT_ADDR ((unsigned long)_mcount)
@@ -26,5 +24,9 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
struct dyn_arch_ftrace {
};
+
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+ unsigned long frame_pointer);
+
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_CSKY_FTRACE_H */
diff --git a/arch/csky/include/asm/io.h b/arch/csky/include/asm/io.h
index 4725bb977b0f..536d3bf32ff1 100644
--- a/arch/csky/include/asm/io.h
+++ b/arch/csky/include/asm/io.h
@@ -32,22 +32,11 @@
#endif
/*
- * String version of I/O memory access operations.
- */
-extern void __memcpy_fromio(void *, const volatile void __iomem *, size_t);
-extern void __memcpy_toio(volatile void __iomem *, const void *, size_t);
-extern void __memset_io(volatile void __iomem *, int, size_t);
-
-#define memset_io(c,v,l) __memset_io((c),(v),(l))
-#define memcpy_fromio(a,c,l) __memcpy_fromio((a),(c),(l))
-#define memcpy_toio(c,a,l) __memcpy_toio((c),(a),(l))
-
-/*
* I/O memory mapping functions.
*/
#define ioremap_wc(addr, size) \
ioremap_prot((addr), (size), \
- (_PAGE_IOREMAP & ~_CACHE_MASK) | _CACHE_UNCACHED)
+ __pgprot((_PAGE_IOREMAP & ~_CACHE_MASK) | _CACHE_UNCACHED))
#include <asm-generic/io.h>
diff --git a/arch/csky/include/asm/irq_work.h b/arch/csky/include/asm/irq_work.h
index 33aaf39d6f94..d39fcc1f5395 100644
--- a/arch/csky/include/asm/irq_work.h
+++ b/arch/csky/include/asm/irq_work.h
@@ -7,5 +7,5 @@ static inline bool arch_irq_work_has_interrupt(void)
{
return true;
}
-extern void arch_irq_work_raise(void);
+
#endif /* __ASM_CSKY_IRQ_WORK_H */
diff --git a/arch/csky/include/asm/jump_label.h b/arch/csky/include/asm/jump_label.h
new file mode 100644
index 000000000000..ef2e37a10a0f
--- /dev/null
+++ b/arch/csky/include/asm/jump_label.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_CSKY_JUMP_LABEL_H
+#define __ASM_CSKY_JUMP_LABEL_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+#define JUMP_LABEL_NOP_SIZE 4
+
+static __always_inline bool arch_static_branch(struct static_key *key,
+ bool branch)
+{
+ asm goto(
+ "1: nop32 \n"
+ " .pushsection __jump_table, \"aw\" \n"
+ " .align 2 \n"
+ " .long 1b - ., %l[label] - . \n"
+ " .long %0 - . \n"
+ " .popsection \n"
+ : : "i"(&((char *)key)[branch]) : : label);
+
+ return false;
+label:
+ return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key,
+ bool branch)
+{
+ asm goto(
+ "1: bsr32 %l[label] \n"
+ " .pushsection __jump_table, \"aw\" \n"
+ " .align 2 \n"
+ " .long 1b - ., %l[label] - . \n"
+ " .long %0 - . \n"
+ " .popsection \n"
+ : : "i"(&((char *)key)[branch]) : : label);
+
+ return false;
+label:
+ return true;
+}
+
+enum jump_label_type;
+void arch_jump_label_transform_static(struct jump_entry *entry,
+ enum jump_label_type type);
+#define arch_jump_label_transform_static arch_jump_label_transform_static
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_CSKY_JUMP_LABEL_H */
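A hedged usage sketch of how these stubs get exercised (hypothetical module-style code, not part of the patch; the static-key calls are the generic kernel API):

#include <linux/jump_label.h>
#include <linux/printk.h>

static DEFINE_STATIC_KEY_FALSE(csky_demo_key);

void csky_demo_fast_path(void)
{
	/* Compiles to the nop32 emitted by arch_static_branch(); once the
	 * key is enabled, the core patches it to the bsr32 jump. */
	if (static_branch_unlikely(&csky_demo_key))
		pr_info("csky_demo: slow path taken\n");
}

void csky_demo_enable(void)
{
	static_branch_enable(&csky_demo_key);
}
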
diff --git a/arch/csky/include/asm/page.h b/arch/csky/include/asm/page.h
index ed7451478b1b..4911d0892b71 100644
--- a/arch/csky/include/asm/page.h
+++ b/arch/csky/include/asm/page.h
@@ -7,12 +7,8 @@
#include <asm/cache.h>
#include <linux/const.h>
-/*
- * PAGE_SHIFT determines the page size: 4KB
- */
-#define PAGE_SHIFT 12
-#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
-#define PAGE_MASK (~(PAGE_SIZE - 1))
+#include <vdso/page.h>
+
#define THREAD_SIZE (PAGE_SIZE * 2)
#define THREAD_MASK (~(THREAD_SIZE - 1))
#define THREAD_SHIFT (PAGE_SHIFT + 1)
@@ -34,12 +30,8 @@
#include <linux/pfn.h>
-#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
-#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
-
#define virt_addr_valid(kaddr) ((void *)(kaddr) >= (void *)PAGE_OFFSET && \
(void *)(kaddr) < high_memory)
-#define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
extern void *memset(void *dest, int c, size_t l);
extern void *memcpy(void *to, const void *from, size_t l);
@@ -47,9 +39,6 @@ extern void *memcpy(void *to, const void *from, size_t l);
#define clear_page(page) memset((page), 0, PAGE_SIZE)
#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
-#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
-#define phys_to_page(paddr) (pfn_to_page(PFN_DOWN(paddr)))
-
struct page;
#include <abi/page.h>
@@ -81,6 +70,11 @@ extern unsigned long va_pa_offset;
#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
+static inline unsigned long virt_to_pfn(const void *kaddr)
+{
+ return __pa(kaddr) >> PAGE_SHIFT;
+}
+
#define MAP_NR(x) PFN_DOWN((unsigned long)(x) - PAGE_OFFSET - \
PHYS_OFFSET_OFFSET)
#define virt_to_page(x) (mem_map + MAP_NR(x))
diff --git a/arch/csky/include/asm/pci.h b/arch/csky/include/asm/pci.h
index ebc765b1f78b..42724c630d30 100644
--- a/arch/csky/include/asm/pci.h
+++ b/arch/csky/include/asm/pci.h
@@ -9,26 +9,7 @@
#include <asm/io.h>
-#define PCIBIOS_MIN_IO 0
-#define PCIBIOS_MIN_MEM 0
-
-/* C-SKY shim does not initialize PCI bus */
-#define pcibios_assign_all_busses() 1
-
-extern int isa_dma_bridge_buggy;
-
-#ifdef CONFIG_PCI
-static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
-{
- /* no legacy IRQ on csky */
- return -ENODEV;
-}
-
-static inline int pci_proc_domain(struct pci_bus *bus)
-{
- /* always show the domain in /proc */
- return 1;
-}
-#endif /* CONFIG_PCI */
+/* Generic PCI */
+#include <asm-generic/pci.h>
#endif /* __ASM_CSKY_PCI_H */
diff --git a/arch/csky/include/asm/pgalloc.h b/arch/csky/include/asm/pgalloc.h
index bbbd0698b397..9ed2b15ffd94 100644
--- a/arch/csky/include/asm/pgalloc.h
+++ b/arch/csky/include/asm/pgalloc.h
@@ -29,7 +29,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
pte_t *pte;
unsigned long i;
- pte = (pte_t *) __get_free_page(GFP_KERNEL);
+ pte = __pte_alloc_one_kernel(mm);
if (!pte)
return NULL;
@@ -44,7 +44,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
pgd_t *ret;
pgd_t *init;
- ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
+ ret = __pgd_alloc(mm, 0);
if (ret) {
init = pgd_offset(&init_mm, 0UL);
pgd_init((unsigned long *)ret);
@@ -61,11 +61,8 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
return ret;
}
-#define __pte_free_tlb(tlb, pte, address) \
-do { \
- pgtable_pte_page_dtor(pte); \
- tlb_remove_page(tlb, pte); \
-} while (0)
+#define __pte_free_tlb(tlb, pte, address) \
+ tlb_remove_ptdesc((tlb), page_ptdesc(pte))
extern void pagetable_init(void);
extern void mmu_init(unsigned long min_pfn, unsigned long max_pfn);
diff --git a/arch/csky/include/asm/pgtable.h b/arch/csky/include/asm/pgtable.h
index bbe245117777..5a394be09c35 100644
--- a/arch/csky/include/asm/pgtable.h
+++ b/arch/csky/include/asm/pgtable.h
@@ -18,18 +18,17 @@
/*
* C-SKY is two-level paging structure:
*/
-#define PGD_ORDER 0
-#define PTE_ORDER 0
-#define PTRS_PER_PGD ((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
+#define PTRS_PER_PGD (PAGE_SIZE / sizeof(pgd_t))
#define PTRS_PER_PMD 1
-#define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
+#define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t))
#define pte_ERROR(e) \
pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
#define pgd_ERROR(e) \
pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+#define PFN_PTE_SHIFT PAGE_SHIFT
#define pmd_pfn(pmd) (pmd_phys(pmd) >> PAGE_SHIFT)
#define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#define pte_clear(mm, addr, ptep) set_pte((ptep), \
@@ -77,24 +76,6 @@
#define MAX_SWAPFILES_CHECK() \
BUILD_BUG_ON(MAX_SWAPFILES_SHIFT != 5)
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READ
-#define __P010 PAGE_READ
-#define __P011 PAGE_READ
-#define __P100 PAGE_READ
-#define __P101 PAGE_READ
-#define __P110 PAGE_READ
-#define __P111 PAGE_READ
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READ
-#define __S010 PAGE_WRITE
-#define __S011 PAGE_WRITE
-#define __S100 PAGE_READ
-#define __S101 PAGE_READ
-#define __S110 PAGE_WRITE
-#define __S111 PAGE_WRITE
-
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
@@ -110,7 +91,6 @@ static inline void set_pte(pte_t *p, pte_t pte)
/* prevent out of order excution */
smp_mb();
}
-#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
@@ -196,7 +176,7 @@ static inline pte_t pte_mkold(pte_t pte)
return pte;
}
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite_novma(pte_t pte)
{
pte_val(pte) |= _PAGE_WRITE;
if (pte_val(pte) & _PAGE_MODIFIED)
@@ -220,6 +200,23 @@ static inline pte_t pte_mkyoung(pte_t pte)
return pte;
}
+static inline bool pte_swp_exclusive(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
+}
+
+static inline pte_t pte_swp_mkexclusive(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_SWP_EXCLUSIVE;
+ return pte;
+}
+
+static inline pte_t pte_swp_clear_exclusive(pte_t pte)
+{
+ pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE;
+ return pte;
+}
+
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
@@ -252,11 +249,6 @@ static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
return __pgprot(prot);
}
-/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- */
-#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
@@ -266,11 +258,10 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init(void);
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
- pte_t *pte);
-
-/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
-#define kern_addr_valid(addr) (1)
+void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
+ unsigned long address, pte_t *pte, unsigned int nr);
+#define update_mmu_cache(vma, addr, ptep) \
+ update_mmu_cache_range(NULL, vma, addr, ptep, 1)
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
diff --git a/arch/csky/include/asm/processor.h b/arch/csky/include/asm/processor.h
index 9638206bc44f..e487a46d1c37 100644
--- a/arch/csky/include/asm/processor.h
+++ b/arch/csky/include/asm/processor.h
@@ -69,16 +69,9 @@ do { \
/* Forward declaration, a strange C thing */
struct task_struct;
-/* Free all resources held by a thread. */
-static inline void release_thread(struct task_struct *dead_task)
-{
-}
-
/* Prepare to copy thread state - unlazy all lazy status */
#define prepare_to_copy(tsk) do { } while (0)
-extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
-
unsigned long __get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc)
@@ -89,4 +82,6 @@ unsigned long __get_wchan(struct task_struct *p);
#define cpu_relax() barrier()
+register unsigned long current_stack_pointer __asm__("sp");
+
#endif /* __ASM_CSKY_PROCESSOR_H */
diff --git a/arch/csky/include/asm/ptrace.h b/arch/csky/include/asm/ptrace.h
index 4202aab6df42..0634b7895d81 100644
--- a/arch/csky/include/asm/ptrace.h
+++ b/arch/csky/include/asm/ptrace.h
@@ -96,5 +96,7 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
return *(unsigned long *)((unsigned long)regs + offset);
}
+asmlinkage int syscall_trace_enter(struct pt_regs *regs);
+asmlinkage void syscall_trace_exit(struct pt_regs *regs);
#endif /* __ASSEMBLY__ */
#endif /* __ASM_CSKY_PTRACE_H */
diff --git a/arch/csky/include/asm/sections.h b/arch/csky/include/asm/sections.h
new file mode 100644
index 000000000000..83e82b7c0f6c
--- /dev/null
+++ b/arch/csky/include/asm/sections.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_SECTIONS_H
+#define __ASM_SECTIONS_H
+
+#include <asm-generic/sections.h>
+
+extern char _start[];
+
+asmlinkage void csky_start(unsigned int unused, void *dtb_start);
+
+#endif /* __ASM_SECTIONS_H */
diff --git a/arch/csky/include/asm/smp.h b/arch/csky/include/asm/smp.h
index 668b79ce29ea..d3db334f3196 100644
--- a/arch/csky/include/asm/smp.h
+++ b/arch/csky/include/asm/smp.h
@@ -23,7 +23,7 @@ void __init set_send_ipi(void (*func)(const struct cpumask *mask), int irq);
int __cpu_disable(void);
-void __cpu_die(unsigned int cpu);
+static inline void __cpu_die(unsigned int cpu) { }
#endif /* CONFIG_SMP */
diff --git a/arch/csky/include/asm/spinlock.h b/arch/csky/include/asm/spinlock.h
new file mode 100644
index 000000000000..83a2005341f5
--- /dev/null
+++ b/arch/csky/include/asm/spinlock.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_SPINLOCK_H
+#define __ASM_CSKY_SPINLOCK_H
+
+#include <asm/qspinlock.h>
+#include <asm/qrwlock.h>
+
+/* See include/linux/spinlock.h */
+#define smp_mb__after_spinlock() smp_mb()
+
+#endif /* __ASM_CSKY_SPINLOCK_H */
diff --git a/arch/csky/include/asm/spinlock_types.h b/arch/csky/include/asm/spinlock_types.h
new file mode 100644
index 000000000000..75bdf3af80ba
--- /dev/null
+++ b/arch/csky/include/asm/spinlock_types.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_SPINLOCK_TYPES_H
+#define __ASM_CSKY_SPINLOCK_TYPES_H
+
+#include <asm-generic/qspinlock_types.h>
+#include <asm-generic/qrwlock_types.h>
+
+#endif /* __ASM_CSKY_SPINLOCK_TYPES_H */
diff --git a/arch/csky/include/asm/stackprotector.h b/arch/csky/include/asm/stackprotector.h
index d7cd4e51edd9..d23747447166 100644
--- a/arch/csky/include/asm/stackprotector.h
+++ b/arch/csky/include/asm/stackprotector.h
@@ -2,9 +2,6 @@
#ifndef _ASM_STACKPROTECTOR_H
#define _ASM_STACKPROTECTOR_H 1
-#include <linux/random.h>
-#include <linux/version.h>
-
extern unsigned long __stack_chk_guard;
/*
@@ -15,12 +12,7 @@ extern unsigned long __stack_chk_guard;
*/
static __always_inline void boot_init_stack_canary(void)
{
- unsigned long canary;
-
- /* Try to get a semi random initial value. */
- get_random_bytes(&canary, sizeof(canary));
- canary ^= LINUX_VERSION_CODE;
- canary &= CANARY_MASK;
+ unsigned long canary = get_random_canary();
current->stack_canary = canary;
__stack_chk_guard = current->stack_canary;
diff --git a/arch/csky/include/asm/syscall.h b/arch/csky/include/asm/syscall.h
index 0de5734950bf..717f44b4d26f 100644
--- a/arch/csky/include/asm/syscall.h
+++ b/arch/csky/include/asm/syscall.h
@@ -59,6 +59,19 @@ syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
memcpy(args, &regs->a1, 5 * sizeof(args[0]));
}
+static inline void
+syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
+ const unsigned long *args)
+{
+ memcpy(&regs->a0, args, 6 * sizeof(regs->a0));
+ /*
+ * Also copy the first argument into orig_a0
+ * so that syscall_get_arguments() would return it
+ * instead of the previous value.
+ */
+ regs->orig_a0 = regs->a0;
+}
+
static inline int
syscall_get_arch(struct task_struct *task)
{
diff --git a/arch/csky/include/asm/traps.h b/arch/csky/include/asm/traps.h
index 421a4195e2fe..6bbbbe43165f 100644
--- a/arch/csky/include/asm/traps.h
+++ b/arch/csky/include/asm/traps.h
@@ -3,6 +3,8 @@
#ifndef __ASM_CSKY_TRAPS_H
#define __ASM_CSKY_TRAPS_H
+#include <linux/linkage.h>
+
#define VEC_RESET 0
#define VEC_ALIGN 1
#define VEC_ACCESS 2
@@ -40,4 +42,19 @@ do { \
void csky_alignment(struct pt_regs *regs);
+asmlinkage void do_trap_unknown(struct pt_regs *regs);
+asmlinkage void do_trap_zdiv(struct pt_regs *regs);
+asmlinkage void do_trap_buserr(struct pt_regs *regs);
+asmlinkage void do_trap_misaligned(struct pt_regs *regs);
+asmlinkage void do_trap_bkpt(struct pt_regs *regs);
+asmlinkage void do_trap_illinsn(struct pt_regs *regs);
+asmlinkage void do_trap_fpe(struct pt_regs *regs);
+asmlinkage void do_trap_priv(struct pt_regs *regs);
+asmlinkage void trap_c(struct pt_regs *regs);
+
+asmlinkage void do_notify_resume(struct pt_regs *regs,
+ unsigned long thread_info_flags);
+
+asmlinkage void do_page_fault(struct pt_regs *regs);
+
#endif /* __ASM_CSKY_TRAPS_H */
diff --git a/arch/csky/include/asm/unistd.h b/arch/csky/include/asm/unistd.h
index 9cf97de9a26d..2c2c24de95d8 100644
--- a/arch/csky/include/asm/unistd.h
+++ b/arch/csky/include/asm/unistd.h
@@ -2,4 +2,7 @@
#include <uapi/asm/unistd.h>
+#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_SYS_CLONE
+
#define NR_syscalls (__NR_syscalls)
diff --git a/arch/csky/include/asm/vdso.h b/arch/csky/include/asm/vdso.h
index bdce581b5fcb..181a15edafe8 100644
--- a/arch/csky/include/asm/vdso.h
+++ b/arch/csky/include/asm/vdso.h
@@ -5,11 +5,6 @@
#include <linux/types.h>
-#ifndef GENERIC_TIME_VSYSCALL
-struct vdso_data {
-};
-#endif
-
/*
* The VDSO symbols are mapped into Linux so we can just use regular symbol
* addressing to get their offsets in userspace. The symbols are mapped at an
diff --git a/arch/csky/include/asm/vdso/clocksource.h b/arch/csky/include/asm/vdso/clocksource.h
deleted file mode 100644
index dfca7b4724b7..000000000000
--- a/arch/csky/include/asm/vdso/clocksource.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-#ifndef __ASM_VDSO_CSKY_CLOCKSOURCE_H
-#define __ASM_VDSO_CSKY_CLOCKSOURCE_H
-
-#define VDSO_ARCH_CLOCKMODES \
- VDSO_CLOCKMODE_ARCHTIMER
-
-#endif /* __ASM_VDSO_CSKY_CLOCKSOURCE_H */
diff --git a/arch/csky/include/asm/vdso/gettimeofday.h b/arch/csky/include/asm/vdso/gettimeofday.h
deleted file mode 100644
index 6c4f1446944f..000000000000
--- a/arch/csky/include/asm/vdso/gettimeofday.h
+++ /dev/null
@@ -1,114 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-#ifndef __ASM_VDSO_CSKY_GETTIMEOFDAY_H
-#define __ASM_VDSO_CSKY_GETTIMEOFDAY_H
-
-#ifndef __ASSEMBLY__
-
-#include <asm/barrier.h>
-#include <asm/unistd.h>
-#include <abi/regdef.h>
-#include <uapi/linux/time.h>
-
-#define VDSO_HAS_CLOCK_GETRES 1
-
-static __always_inline
-int gettimeofday_fallback(struct __kernel_old_timeval *_tv,
- struct timezone *_tz)
-{
- register struct __kernel_old_timeval *tv asm("a0") = _tv;
- register struct timezone *tz asm("a1") = _tz;
- register long ret asm("a0");
- register long nr asm(syscallid) = __NR_gettimeofday;
-
- asm volatile ("trap 0\n"
- : "=r" (ret)
- : "r"(tv), "r"(tz), "r"(nr)
- : "memory");
-
- return ret;
-}
-
-static __always_inline
-long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
-{
- register clockid_t clkid asm("a0") = _clkid;
- register struct __kernel_timespec *ts asm("a1") = _ts;
- register long ret asm("a0");
- register long nr asm(syscallid) = __NR_clock_gettime64;
-
- asm volatile ("trap 0\n"
- : "=r" (ret)
- : "r"(clkid), "r"(ts), "r"(nr)
- : "memory");
-
- return ret;
-}
-
-static __always_inline
-long clock_gettime32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
-{
- register clockid_t clkid asm("a0") = _clkid;
- register struct old_timespec32 *ts asm("a1") = _ts;
- register long ret asm("a0");
- register long nr asm(syscallid) = __NR_clock_gettime;
-
- asm volatile ("trap 0\n"
- : "=r" (ret)
- : "r"(clkid), "r"(ts), "r"(nr)
- : "memory");
-
- return ret;
-}
-
-static __always_inline
-int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
-{
- register clockid_t clkid asm("a0") = _clkid;
- register struct __kernel_timespec *ts asm("a1") = _ts;
- register long ret asm("a0");
- register long nr asm(syscallid) = __NR_clock_getres_time64;
-
- asm volatile ("trap 0\n"
- : "=r" (ret)
- : "r"(clkid), "r"(ts), "r"(nr)
- : "memory");
-
- return ret;
-}
-
-static __always_inline
-int clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
-{
- register clockid_t clkid asm("a0") = _clkid;
- register struct old_timespec32 *ts asm("a1") = _ts;
- register long ret asm("a0");
- register long nr asm(syscallid) = __NR_clock_getres;
-
- asm volatile ("trap 0\n"
- : "=r" (ret)
- : "r"(clkid), "r"(ts), "r"(nr)
- : "memory");
-
- return ret;
-}
-
-uint64_t csky_pmu_read_cc(void);
-static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
- const struct vdso_data *vd)
-{
-#ifdef CONFIG_CSKY_PMU_V1
- return csky_pmu_read_cc();
-#else
- return 0;
-#endif
-}
-
-static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
-{
- return _vdso_data;
-}
-
-#endif /* !__ASSEMBLY__ */
-
-#endif /* __ASM_VDSO_CSKY_GETTIMEOFDAY_H */
diff --git a/arch/csky/include/asm/vdso/processor.h b/arch/csky/include/asm/vdso/processor.h
deleted file mode 100644
index 39a6b561d0cc..000000000000
--- a/arch/csky/include/asm/vdso/processor.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-
-#ifndef __ASM_VDSO_CSKY_PROCESSOR_H
-#define __ASM_VDSO_CSKY_PROCESSOR_H
-
-#ifndef __ASSEMBLY__
-
-#define cpu_relax() barrier()
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* __ASM_VDSO_CSKY_PROCESSOR_H */
diff --git a/arch/csky/include/asm/vdso/vsyscall.h b/arch/csky/include/asm/vdso/vsyscall.h
deleted file mode 100644
index c276211a7c4d..000000000000
--- a/arch/csky/include/asm/vdso/vsyscall.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-#ifndef __ASM_VDSO_CSKY_VSYSCALL_H
-#define __ASM_VDSO_CSKY_VSYSCALL_H
-
-#ifndef __ASSEMBLY__
-
-#include <vdso/datapage.h>
-
-extern struct vdso_data *vdso_data;
-
-static __always_inline struct vdso_data *__csky_get_k_vdso_data(void)
-{
- return vdso_data;
-}
-#define __arch_get_k_vdso_data __csky_get_k_vdso_data
-
-#include <asm-generic/vdso/vsyscall.h>
-
-#endif /* !__ASSEMBLY__ */
-
-#endif /* __ASM_VDSO_CSKY_VSYSCALL_H */
diff --git a/arch/csky/include/uapi/asm/Kbuild b/arch/csky/include/uapi/asm/Kbuild
index e78470141932..2501e82a1a0a 100644
--- a/arch/csky/include/uapi/asm/Kbuild
+++ b/arch/csky/include/uapi/asm/Kbuild
@@ -1,2 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
+syscall-y += unistd_32.h
+
generic-y += ucontext.h
diff --git a/arch/csky/include/uapi/asm/unistd.h b/arch/csky/include/uapi/asm/unistd.h
index 7ff6a2466af1..44882179a6e1 100644
--- a/arch/csky/include/uapi/asm/unistd.h
+++ b/arch/csky/include/uapi/asm/unistd.h
@@ -1,14 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#define __ARCH_WANT_STAT64
-#define __ARCH_WANT_NEW_STAT
-#define __ARCH_WANT_SYS_CLONE
-#define __ARCH_WANT_SYS_CLONE3
-#define __ARCH_WANT_SET_GET_RLIMIT
-#define __ARCH_WANT_TIME32_SYSCALLS
-#include <asm-generic/unistd.h>
+#include <asm/unistd_32.h>
-#define __NR_set_thread_area (__NR_arch_specific_syscall + 0)
-__SYSCALL(__NR_set_thread_area, sys_set_thread_area)
-#define __NR_cacheflush (__NR_arch_specific_syscall + 1)
-__SYSCALL(__NR_cacheflush, sys_cacheflush)
+#define __NR_sync_file_range2 84
+#undef __NR_sync_file_range
diff --git a/arch/csky/kernel/Makefile b/arch/csky/kernel/Makefile
index 4eb41421ca5b..a406a4ac2378 100644
--- a/arch/csky/kernel/Makefile
+++ b/arch/csky/kernel/Makefile
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
-extra-y := head.o vmlinux.lds
+always-$(KBUILD_BUILTIN) := vmlinux.lds
-obj-y += entry.o atomic.o signal.o traps.o irq.o time.o vdso.o vdso/
-obj-y += power.o syscall.o syscall_table.o setup.o io.o
+obj-y += head.o entry.o atomic.o signal.o traps.o irq.o time.o vdso.o vdso/
+obj-y += power.o syscall.o syscall_table.o setup.o
obj-y += process.o cpu-probe.o ptrace.o stacktrace.o
obj-y += probes/
@@ -13,6 +13,7 @@ obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_CSKY_PMU_V1) += perf_event.o
obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o
obj-$(CONFIG_HAVE_PERF_REGS) += perf_regs.o
+obj-$(CONFIG_JUMP_LABEL) += jump_label.o
ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
diff --git a/arch/csky/kernel/Makefile.syscalls b/arch/csky/kernel/Makefile.syscalls
new file mode 100644
index 000000000000..3df3b5822fce
--- /dev/null
+++ b/arch/csky/kernel/Makefile.syscalls
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+
+syscall_abis_32 += csky time32 stat64 rlimit
+
diff --git a/arch/csky/kernel/entry.S b/arch/csky/kernel/entry.S
index 547b4cd1b24b..c68cdcc76d60 100644
--- a/arch/csky/kernel/entry.S
+++ b/arch/csky/kernel/entry.S
@@ -54,7 +54,7 @@ ENTRY(csky_systemcall)
lrw r9, __NR_syscalls
cmphs syscallid, r9 /* Check nr of syscall */
- bt 1f
+ bt ret_from_exception
lrw r9, sys_call_table
ixw r9, syscallid
@@ -80,11 +80,6 @@ ENTRY(csky_systemcall)
jsr syscallid
#endif
stw a0, (sp, LSAVE_A0) /* Save return value */
-1:
-#ifdef CONFIG_DEBUG_RSEQ
- mov a0, sp
- jbsr rseq_syscall
-#endif
jmpi ret_from_exception
csky_syscall_trace:
@@ -113,10 +108,6 @@ csky_syscall_trace:
stw a0, (sp, LSAVE_A0) /* Save return value */
1:
-#ifdef CONFIG_DEBUG_RSEQ
- mov a0, sp
- jbsr rseq_syscall
-#endif
mov a0, sp /* right now, sp --> pt_regs */
jbsr syscall_trace_exit
br ret_from_exception
diff --git a/arch/csky/kernel/io.c b/arch/csky/kernel/io.c
deleted file mode 100644
index 5883f13fa2b1..000000000000
--- a/arch/csky/kernel/io.c
+++ /dev/null
@@ -1,91 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include <linux/export.h>
-#include <linux/types.h>
-#include <linux/io.h>
-
-/*
- * Copy data from IO memory space to "real" memory space.
- */
-void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count)
-{
- while (count && !IS_ALIGNED((unsigned long)from, 4)) {
- *(u8 *)to = __raw_readb(from);
- from++;
- to++;
- count--;
- }
-
- while (count >= 4) {
- *(u32 *)to = __raw_readl(from);
- from += 4;
- to += 4;
- count -= 4;
- }
-
- while (count) {
- *(u8 *)to = __raw_readb(from);
- from++;
- to++;
- count--;
- }
-}
-EXPORT_SYMBOL(__memcpy_fromio);
-
-/*
- * Copy data from "real" memory space to IO memory space.
- */
-void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count)
-{
- while (count && !IS_ALIGNED((unsigned long)to, 4)) {
- __raw_writeb(*(u8 *)from, to);
- from++;
- to++;
- count--;
- }
-
- while (count >= 4) {
- __raw_writel(*(u32 *)from, to);
- from += 4;
- to += 4;
- count -= 4;
- }
-
- while (count) {
- __raw_writeb(*(u8 *)from, to);
- from++;
- to++;
- count--;
- }
-}
-EXPORT_SYMBOL(__memcpy_toio);
-
-/*
- * "memset" on IO memory space.
- */
-void __memset_io(volatile void __iomem *dst, int c, size_t count)
-{
- u32 qc = (u8)c;
-
- qc |= qc << 8;
- qc |= qc << 16;
-
- while (count && !IS_ALIGNED((unsigned long)dst, 4)) {
- __raw_writeb(c, dst);
- dst++;
- count--;
- }
-
- while (count >= 4) {
- __raw_writel(qc, dst);
- dst += 4;
- count -= 4;
- }
-
- while (count) {
- __raw_writeb(c, dst);
- dst++;
- count--;
- }
-}
-EXPORT_SYMBOL(__memset_io);
diff --git a/arch/csky/kernel/jump_label.c b/arch/csky/kernel/jump_label.c
new file mode 100644
index 000000000000..d0e8b21447e1
--- /dev/null
+++ b/arch/csky/kernel/jump_label.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/jump_label.h>
+#include <linux/kernel.h>
+#include <linux/memory.h>
+#include <linux/mutex.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+
+#define NOP32_HI 0xc400
+#define NOP32_LO 0x4820
+#define BSR_LINK 0xe000
+
+void arch_jump_label_transform(struct jump_entry *entry,
+ enum jump_label_type type)
+{
+ unsigned long addr = jump_entry_code(entry);
+ u16 insn[2];
+ int ret = 0;
+
+ if (type == JUMP_LABEL_JMP) {
+ long offset = jump_entry_target(entry) - jump_entry_code(entry);
+
+ if (WARN_ON(offset & 1 || offset < -67108864 || offset >= 67108864))
+ return;
+
+ offset = offset >> 1;
+
+ insn[0] = BSR_LINK |
+ ((uint16_t)((unsigned long) offset >> 16) & 0x3ff);
+ insn[1] = (uint16_t)((unsigned long) offset & 0xffff);
+ } else {
+ insn[0] = NOP32_HI;
+ insn[1] = NOP32_LO;
+ }
+
+ ret = copy_to_kernel_nofault((void *)addr, insn, 4);
+ WARN_ON(ret);
+
+ flush_icache_range(addr, addr + 4);
+}
+
+void arch_jump_label_transform_static(struct jump_entry *entry,
+ enum jump_label_type type)
+{
+ /*
+ * We use the same instructions in the arch_static_branch and
+ * arch_static_branch_jump inline functions, so there's no
+ * need to patch them up here.
+ * The core will call arch_jump_label_transform when those
+ * instructions need to be replaced.
+ */
+ arch_jump_label_transform(entry, type);
+}
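The following standalone sketch (userspace C, an assumption of this note) mirrors the bsr32 offset packing used by arch_jump_label_transform() above and adds the decode direction so the round-trip can be checked:

#include <assert.h>
#include <stdint.h>

#define BSR_LINK 0xe000

static void encode_bsr32(long byte_offset, uint16_t insn[2])
{
	long off = byte_offset / 2;	/* even byte offset -> halfword displacement */

	insn[0] = BSR_LINK | (uint16_t)(((unsigned long)off >> 16) & 0x3ff);
	insn[1] = (uint16_t)((unsigned long)off & 0xffff);
}

static long decode_bsr32(const uint16_t insn[2])
{
	uint32_t off26 = ((uint32_t)(insn[0] & 0x3ff) << 16) | insn[1];

	/* sign-extend the 26-bit halfword displacement, scale back to bytes */
	return ((long)(off26 ^ 0x2000000) - 0x2000000) * 2;
}

int main(void)
{
	uint16_t insn[2];

	encode_bsr32(-4096, insn);
	assert(decode_bsr32(insn) == -4096);

	encode_bsr32(67108862, insn);	/* largest even offset below 64MiB */
	assert(decode_bsr32(insn) == 67108862);
	return 0;
}
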
diff --git a/arch/csky/kernel/module.c b/arch/csky/kernel/module.c
index f11b3e573344..0b56a8cd12a3 100644
--- a/arch/csky/kernel/module.c
+++ b/arch/csky/kernel/module.c
@@ -40,7 +40,7 @@ static void jsri_2_lrw_jsr(uint32_t *location)
}
}
#else
-static void inline jsri_2_lrw_jsr(uint32_t *location)
+static inline void jsri_2_lrw_jsr(uint32_t *location)
{
return;
}
diff --git a/arch/csky/kernel/perf_event.c b/arch/csky/kernel/perf_event.c
index e5f18420ce64..e0a36acd265b 100644
--- a/arch/csky/kernel/perf_event.c
+++ b/arch/csky/kernel/perf_event.c
@@ -1139,8 +1139,7 @@ static irqreturn_t csky_pmu_handle_irq(int irq_num, void *dev)
perf_sample_data_init(&data, 0, hwc->last_period);
csky_pmu_event_set_period(event);
- if (perf_event_overflow(event, &data, regs))
- csky_pmu_stop_event(event);
+ perf_event_overflow(event, &data, regs);
}
csky_pmu_enable(&csky_pmu.pmu);
diff --git a/arch/csky/kernel/probes/ftrace.c b/arch/csky/kernel/probes/ftrace.c
index 834cffcfbce3..7ba4b98076de 100644
--- a/arch/csky/kernel/probes/ftrace.c
+++ b/arch/csky/kernel/probes/ftrace.c
@@ -12,6 +12,9 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
struct kprobe_ctlblk *kcb;
struct pt_regs *regs;
+ if (unlikely(kprobe_ftrace_disabled))
+ return;
+
bit = ftrace_test_recursion_trylock(ip, parent_ip);
if (bit < 0)
return;
diff --git a/arch/csky/kernel/probes/kprobes.c b/arch/csky/kernel/probes/kprobes.c
index 34ba684d5962..3c6e5c725d81 100644
--- a/arch/csky/kernel/probes/kprobes.c
+++ b/arch/csky/kernel/probes/kprobes.c
@@ -124,6 +124,10 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
+ if (p->ainsn.api.insn) {
+ free_insn_slot(p->ainsn.api.insn, 0);
+ p->ainsn.api.insn = NULL;
+ }
}
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
diff --git a/arch/csky/kernel/probes/uprobes.c b/arch/csky/kernel/probes/uprobes.c
index 2d31a12e46cf..936bea6fd32d 100644
--- a/arch/csky/kernel/probes/uprobes.c
+++ b/arch/csky/kernel/probes/uprobes.c
@@ -64,6 +64,7 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
struct uprobe_task *utask = current->utask;
WARN_ON_ONCE(current->thread.trap_no != UPROBE_TRAP_NR);
+ current->thread.trap_no = utask->autask.saved_trap_no;
instruction_pointer_set(regs, utask->vaddr + auprobe->insn_size);
@@ -101,6 +102,8 @@ void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
struct uprobe_task *utask = current->utask;
+ current->thread.trap_no = utask->autask.saved_trap_no;
+
/*
* Task has received a fatal signal, so reset back to probed
* address.
diff --git a/arch/csky/kernel/process.c b/arch/csky/kernel/process.c
index eedddb155669..0c6e4b17fe00 100644
--- a/arch/csky/kernel/process.c
+++ b/arch/csky/kernel/process.c
@@ -9,6 +9,7 @@
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>
+#include <linux/elfcore.h>
#include <asm/elf.h>
#include <abi/reg_ops.h>
@@ -69,12 +70,11 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
}
/* Fill in the fpu structure for a core dump. */
-int dump_fpu(struct pt_regs *regs, struct user_fp *fpu)
+int elf_core_copy_task_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
{
memcpy(fpu, &current->thread.user_fp, sizeof(*fpu));
return 1;
}
-EXPORT_SYMBOL(dump_fpu);
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *pr_regs)
{
@@ -100,6 +100,5 @@ void arch_cpu_idle(void)
#ifdef CONFIG_CPU_PM_STOP
asm volatile("stop\n");
#endif
- raw_local_irq_enable();
}
#endif
diff --git a/arch/csky/kernel/setup.c b/arch/csky/kernel/setup.c
index c64e7be2045b..e0d6ca86ea8c 100644
--- a/arch/csky/kernel/setup.c
+++ b/arch/csky/kernel/setup.c
@@ -8,20 +8,47 @@
#include <linux/of_fdt.h>
#include <linux/start_kernel.h>
#include <linux/dma-map-ops.h>
-#include <linux/screen_info.h>
#include <asm/sections.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
-#ifdef CONFIG_DUMMY_CONSOLE
-struct screen_info screen_info = {
- .orig_video_lines = 30,
- .orig_video_cols = 80,
- .orig_video_mode = 0,
- .orig_video_ega_bx = 0,
- .orig_video_isVGA = 1,
- .orig_video_points = 8
-};
+#ifdef CONFIG_BLK_DEV_INITRD
+static void __init setup_initrd(void)
+{
+ unsigned long size;
+
+ if (initrd_start >= initrd_end) {
+ pr_err("initrd not found or empty");
+ goto disable;
+ }
+
+ if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
+ pr_err("initrd extends beyond end of memory");
+ goto disable;
+ }
+
+ size = initrd_end - initrd_start;
+
+ if (memblock_is_region_reserved(__pa(initrd_start), size)) {
+ pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region",
+ __pa(initrd_start), size);
+ goto disable;
+ }
+
+ memblock_reserve(__pa(initrd_start), size);
+
+ pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",
+ (void *)(initrd_start), size);
+
+ initrd_below_start_ok = 1;
+
+ return;
+
+disable:
+ initrd_start = initrd_end = 0;
+
+ pr_err(" - disabling initrd\n");
+}
#endif
static void __init csky_memblock_init(void)
@@ -31,7 +58,7 @@ static void __init csky_memblock_init(void)
unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
signed long size;
- memblock_reserve(__pa(_stext), _end - _stext);
+ memblock_reserve(__pa(_start), _end - _start);
early_init_fdt_reserve_self();
early_init_fdt_scan_reserved_mem();
@@ -52,6 +79,10 @@ static void __init csky_memblock_init(void)
max_low_pfn = min_low_pfn + sseg_size;
}
+#ifdef CONFIG_BLK_DEV_INITRD
+ setup_initrd();
+#endif
+
max_zone_pfn[ZONE_NORMAL] = max_low_pfn;
mmu_init(min_low_pfn, max_low_pfn);
@@ -78,7 +109,7 @@ void __init setup_arch(char **cmdline_p)
pr_info("Phys. mem: %ldMB\n",
(unsigned long) memblock_phys_mem_size()/1024/1024);
- setup_initial_init_mm(_stext, _etext, _edata, _end);
+ setup_initial_init_mm(_start, _etext, _edata, _end);
parse_early_param();
@@ -124,9 +155,9 @@ asmlinkage __visible void __init csky_start(unsigned int unused,
pre_trap_init();
if (dtb_start == NULL)
- early_init_dt_scan(__dtb_start);
+ early_init_dt_scan(__dtb_start, __pa(dtb_start));
else
- early_init_dt_scan(dtb_start);
+ early_init_dt_scan(dtb_start, __pa(dtb_start));
start_kernel();
diff --git a/arch/csky/kernel/signal.c b/arch/csky/kernel/signal.c
index b7b3685283d7..10da0fefd431 100644
--- a/arch/csky/kernel/signal.c
+++ b/arch/csky/kernel/signal.c
@@ -179,8 +179,6 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
sigset_t *oldset = sigmask_to_save();
int ret;
- rseq_signal_deliver(ksig, regs);
-
/* Are we from a system call? */
if (in_syscall(regs)) {
/* Avoid additional syscall restarting via ret_from_exception */
diff --git a/arch/csky/kernel/smp.c b/arch/csky/kernel/smp.c
index 6bb38bc2f39b..92dbbf3e0205 100644
--- a/arch/csky/kernel/smp.c
+++ b/arch/csky/kernel/smp.c
@@ -140,7 +140,7 @@ void smp_send_stop(void)
on_each_cpu(ipi_stop, NULL, 1);
}
-void smp_send_reschedule(int cpu)
+void arch_smp_send_reschedule(int cpu)
{
send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}
@@ -152,10 +152,6 @@ void arch_irq_work_raise(void)
}
#endif
-void __init smp_prepare_boot_cpu(void)
-{
-}
-
void __init smp_prepare_cpus(unsigned int max_cpus)
{
}
@@ -243,11 +239,6 @@ void __init smp_cpus_done(unsigned int max_cpus)
{
}
-int setup_profiling_timer(unsigned int multiplier)
-{
- return -EINVAL;
-}
-
void csky_start_secondary(void)
{
struct mm_struct *mm = &init_mm;
@@ -296,25 +287,21 @@ int __cpu_disable(void)
return 0;
}
-void __cpu_die(unsigned int cpu)
+void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu)
{
- if (!cpu_wait_death(cpu, 5)) {
- pr_crit("CPU%u: shutdown failed\n", cpu);
- return;
- }
pr_notice("CPU%u: shutdown\n", cpu);
}
-void arch_cpu_idle_dead(void)
+void __noreturn arch_cpu_idle_dead(void)
{
idle_task_exit();
- cpu_report_death();
+ cpuhp_ap_report_dead();
while (!secondary_stack)
arch_cpu_idle();
- local_irq_disable();
+ raw_local_irq_disable();
asm volatile(
"mov sp, %0\n"
@@ -322,5 +309,7 @@ void arch_cpu_idle_dead(void)
"jmpi csky_start_secondary"
:
: "r" (secondary_stack));
+
+ BUG();
}
#endif
diff --git a/arch/csky/kernel/stacktrace.c b/arch/csky/kernel/stacktrace.c
index 9f78f5d21511..27ecd63e321b 100644
--- a/arch/csky/kernel/stacktrace.c
+++ b/arch/csky/kernel/stacktrace.c
@@ -23,10 +23,9 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
sp = user_stack_pointer(regs);
pc = instruction_pointer(regs);
} else if (task == NULL || task == current) {
- const register unsigned long current_sp __asm__ ("sp");
const register unsigned long current_fp __asm__ ("r8");
fp = current_fp;
- sp = current_sp;
+ sp = current_stack_pointer;
pc = (unsigned long)walk_stackframe;
} else {
/* task blocked in __switch_to */
@@ -68,8 +67,7 @@ static void notrace walk_stackframe(struct task_struct *task,
sp = user_stack_pointer(regs);
pc = instruction_pointer(regs);
} else if (task == NULL || task == current) {
- const register unsigned long current_sp __asm__ ("sp");
- sp = current_sp;
+ sp = current_stack_pointer;
pc = (unsigned long)walk_stackframe;
} else {
/* task blocked in __switch_to */
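Both stack walkers now read the stack pointer through current_stack_pointer instead of an open-coded "sp" register variable. A sketch of the definition this relies on, assuming csky declares it the way other ARCH_HAS_CURRENT_STACK_POINTER ports do (in asm/processor.h):

/* Assumed shape of the csky definition: a global register variable bound
 * to the stack-pointer register. */
register unsigned long current_stack_pointer __asm__("sp");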
diff --git a/arch/csky/kernel/syscall.c b/arch/csky/kernel/syscall.c
index 3d30e58a45d2..4540a271ee39 100644
--- a/arch/csky/kernel/syscall.c
+++ b/arch/csky/kernel/syscall.c
@@ -20,7 +20,7 @@ SYSCALL_DEFINE6(mmap2,
unsigned long, prot,
unsigned long, flags,
unsigned long, fd,
- off_t, offset)
+ unsigned long, offset)
{
if (unlikely(offset & (~PAGE_MASK >> 12)))
return -EINVAL;
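mmap2 passes its offset in 4096-byte units, so the check above rejects offsets that are not page-aligned once PAGE_SIZE is larger than 4 KiB, and widening the parameter to unsigned long avoids sign-extending large offsets. A sketch of the usual body that follows this check (the conversion shown is the common pattern, assumed rather than quoted from the csky file):

	/* Sketch: convert the 4 KiB-unit offset into a page offset. */
	return ksys_mmap_pgoff(addr, len, prot, flags, fd,
			       offset >> (PAGE_SHIFT - 12));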
diff --git a/arch/csky/kernel/syscall_table.c b/arch/csky/kernel/syscall_table.c
index a0c238c5377a..a6eb91a0e2f6 100644
--- a/arch/csky/kernel/syscall_table.c
+++ b/arch/csky/kernel/syscall_table.c
@@ -6,9 +6,11 @@
#undef __SYSCALL
#define __SYSCALL(nr, call)[nr] = (call),
+#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native)
#define sys_fadvise64_64 sys_csky_fadvise64_64
+#define sys_sync_file_range sys_sync_file_range2
void * const sys_call_table[__NR_syscalls] __page_aligned_data = {
[0 ... __NR_syscalls - 1] = sys_ni_syscall,
-#include <asm/unistd.h>
+#include <asm/syscall_table_32.h>
};
diff --git a/arch/csky/kernel/traps.c b/arch/csky/kernel/traps.c
index 6e426fba0119..c2246b07cc9c 100644
--- a/arch/csky/kernel/traps.c
+++ b/arch/csky/kernel/traps.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/kernel.h>
diff --git a/arch/csky/kernel/vdso.c b/arch/csky/kernel/vdso.c
index 16c20d64d165..c54d019d66bc 100644
--- a/arch/csky/kernel/vdso.c
+++ b/arch/csky/kernel/vdso.c
@@ -8,33 +8,19 @@
#include <linux/slab.h>
#include <asm/page.h>
-#ifdef GENERIC_TIME_VSYSCALL
-#include <vdso/datapage.h>
-#else
-#include <asm/vdso.h>
-#endif
extern char vdso_start[], vdso_end[];
static unsigned int vdso_pages;
static struct page **vdso_pagelist;
-/*
- * The vDSO data page.
- */
-static union {
- struct vdso_data data;
- u8 page[PAGE_SIZE];
-} vdso_data_store __page_aligned_data;
-struct vdso_data *vdso_data = &vdso_data_store.data;
-
static int __init vdso_init(void)
{
unsigned int i;
vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
vdso_pagelist =
- kcalloc(vdso_pages + 1, sizeof(struct page *), GFP_KERNEL);
+ kcalloc(vdso_pages, sizeof(struct page *), GFP_KERNEL);
if (unlikely(vdso_pagelist == NULL)) {
pr_err("vdso: pagelist allocation failed\n");
return -ENOMEM;
@@ -46,7 +32,6 @@ static int __init vdso_init(void)
pg = virt_to_page(vdso_start + (i << PAGE_SHIFT));
vdso_pagelist[i] = pg;
}
- vdso_pagelist[i] = virt_to_page(vdso_data);
return 0;
}
@@ -55,11 +40,15 @@ arch_initcall(vdso_init);
int arch_setup_additional_pages(struct linux_binprm *bprm,
int uses_interp)
{
+ struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
unsigned long vdso_base, vdso_len;
int ret;
+ static struct vm_special_mapping vdso_mapping = {
+ .name = "[vdso]",
+ };
- vdso_len = (vdso_pages + 1) << PAGE_SHIFT;
+ vdso_len = vdso_pages << PAGE_SHIFT;
mmap_write_lock(mm);
vdso_base = get_unmapped_area(NULL, 0, vdso_len, 0, 0);
@@ -75,33 +64,21 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
*/
mm->context.vdso = (void *)vdso_base;
- ret =
- install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
+ vdso_mapping.pages = vdso_pagelist;
+ vma =
+ _install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
(VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
- vdso_pagelist);
+ &vdso_mapping);
- if (unlikely(ret)) {
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
mm->context.vdso = NULL;
goto end;
}
vdso_base += (vdso_pages << PAGE_SHIFT);
- ret = install_special_mapping(mm, vdso_base, PAGE_SIZE,
- (VM_READ | VM_MAYREAD), &vdso_pagelist[vdso_pages]);
-
- if (unlikely(ret))
- mm->context.vdso = NULL;
+ ret = 0;
end:
mmap_write_unlock(mm);
return ret;
}
-
-const char *arch_vma_name(struct vm_area_struct *vma)
-{
- if (vma->vm_mm && (vma->vm_start == (long)vma->vm_mm->context.vdso))
- return "[vdso]";
- if (vma->vm_mm && (vma->vm_start ==
- (long)vma->vm_mm->context.vdso + PAGE_SIZE))
- return "[vdso_data]";
- return NULL;
-}
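Switching from install_special_mapping() to _install_special_mapping() lets the VMA carry a struct vm_special_mapping whose .name ("[vdso]") is reported in /proc/<pid>/maps automatically, so the arch_vma_name() hook and the separate data-page mapping can be dropped. A condensed sketch of the pattern (identifiers prefixed demo_ are illustrative):

static struct vm_special_mapping demo_vdso_mapping = {
	.name = "[vdso]",
};

/* Returns 0 on success; _install_special_mapping() hands back the new VMA
 * or an ERR_PTR() on failure. */
static int demo_map_vdso(struct mm_struct *mm, unsigned long base,
			 unsigned long len, struct page **pages)
{
	struct vm_area_struct *vma;

	demo_vdso_mapping.pages = pages;
	vma = _install_special_mapping(mm, base, len,
				       VM_READ | VM_EXEC |
				       VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
				       &demo_vdso_mapping);
	return IS_ERR(vma) ? PTR_ERR(vma) : 0;
}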
diff --git a/arch/csky/kernel/vdso/Makefile b/arch/csky/kernel/vdso/Makefile
index 0b6909f10667..a3042287a070 100644
--- a/arch/csky/kernel/vdso/Makefile
+++ b/arch/csky/kernel/vdso/Makefile
@@ -1,13 +1,10 @@
# SPDX-License-Identifier: GPL-2.0-only
-# Absolute relocation type $(ARCH_REL_TYPE_ABS) needs to be defined before
-# the inclusion of generic Makefile.
-ARCH_REL_TYPE_ABS := R_CKCORE_ADDR32|R_CKCORE_JUMP_SLOT
-include $(srctree)/lib/vdso/Makefile
+# Include the generic Makefile to check the built vdso.
+include $(srctree)/lib/vdso/Makefile.include
# Symbols present in the vdso
vdso-syms += rt_sigreturn
-vdso-syms += vgettimeofday
# Files to link into the vdso
obj-vdso = $(patsubst %, %.o, $(vdso-syms)) note.o
@@ -25,15 +22,11 @@ obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
obj-y += vdso.o vdso-syms.o
CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
-# Disable gcov profiling for VDSO code
-GCOV_PROFILE := n
-KCOV_INSTRUMENT := n
-
# Force dependency
$(obj)/vdso.o: $(obj)/vdso.so
SYSCFLAGS_vdso.so.dbg = $(c_flags)
-$(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE
+$(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE
$(call if_changed,vdsold)
SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
-Wl,--build-id=sha1 -Wl,--hash-style=both
@@ -59,14 +52,4 @@ quiet_cmd_vdsold = VDSOLD $@
# Extracts symbol offsets from the VDSO, converting them into an assembly file
# that contains the same symbols at the same offsets.
quiet_cmd_so2s = SO2S $@
- cmd_so2s = $(NM) -D $< | $(srctree)/$(src)/so2s.sh > $@
-
-# install commands for the unstripped file
-quiet_cmd_vdso_install = INSTALL $@
- cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
-
-vdso.so: $(obj)/vdso.so.dbg
- @mkdir -p $(MODLIB)/vdso
- $(call cmd,vdso_install)
-
-vdso_install: vdso.so
+ cmd_so2s = $(NM) -D $< | $(src)/so2s.sh > $@
diff --git a/arch/csky/kernel/vdso/vdso.lds.S b/arch/csky/kernel/vdso/vdso.lds.S
index 590a6c79fff7..8d226252d439 100644
--- a/arch/csky/kernel/vdso/vdso.lds.S
+++ b/arch/csky/kernel/vdso/vdso.lds.S
@@ -49,10 +49,6 @@ VERSION
LINUX_5.10 {
global:
__vdso_rt_sigreturn;
- __vdso_clock_gettime;
- __vdso_clock_gettime64;
- __vdso_gettimeofday;
- __vdso_clock_getres;
local: *;
};
}
diff --git a/arch/csky/kernel/vdso/vgettimeofday.c b/arch/csky/kernel/vdso/vgettimeofday.c
deleted file mode 100644
index da491832c098..000000000000
--- a/arch/csky/kernel/vdso/vgettimeofday.c
+++ /dev/null
@@ -1,28 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-
-#include <linux/time.h>
-#include <linux/types.h>
-
-int __vdso_clock_gettime(clockid_t clock,
- struct old_timespec32 *ts)
-{
- return __cvdso_clock_gettime32(clock, ts);
-}
-
-int __vdso_clock_gettime64(clockid_t clock,
- struct __kernel_timespec *ts)
-{
- return __cvdso_clock_gettime(clock, ts);
-}
-
-int __vdso_gettimeofday(struct __kernel_old_timeval *tv,
- struct timezone *tz)
-{
- return __cvdso_gettimeofday(tv, tz);
-}
-
-int __vdso_clock_getres(clockid_t clock_id,
- struct old_timespec32 *res)
-{
- return __cvdso_clock_getres_time32(clock_id, res);
-}
diff --git a/arch/csky/kernel/vmlinux.lds.S b/arch/csky/kernel/vmlinux.lds.S
index e8b1a4a49798..d718961786d2 100644
--- a/arch/csky/kernel/vmlinux.lds.S
+++ b/arch/csky/kernel/vmlinux.lds.S
@@ -22,23 +22,18 @@ SECTIONS
{
. = PAGE_OFFSET + PHYS_OFFSET_OFFSET;
- _stext = .;
- __init_begin = .;
+ _start = .;
HEAD_TEXT_SECTION
- INIT_TEXT_SECTION(PAGE_SIZE)
- INIT_DATA_SECTION(PAGE_SIZE)
- PERCPU_SECTION(L1_CACHE_BYTES)
. = ALIGN(PAGE_SIZE);
- __init_end = .;
.text : AT(ADDR(.text) - LOAD_OFFSET) {
_text = .;
+ _stext = .;
VBR_BASE
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
TEXT_TEXT
SCHED_TEXT
- CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
*(.fixup)
@@ -48,7 +43,12 @@ SECTIONS
/* __init_begin __init_end must be page aligned for free_initmem */
. = ALIGN(PAGE_SIZE);
-
+ __init_begin = .;
+ INIT_TEXT_SECTION(PAGE_SIZE)
+ INIT_DATA_SECTION(PAGE_SIZE)
+ PERCPU_SECTION(L1_CACHE_BYTES)
+ . = ALIGN(PAGE_SIZE);
+ __init_end = .;
_sdata = .;
RO_DATA(PAGE_SIZE)
diff --git a/arch/csky/lib/delay.c b/arch/csky/lib/delay.c
index 22570b0790d6..f5db317313bb 100644
--- a/arch/csky/lib/delay.c
+++ b/arch/csky/lib/delay.c
@@ -5,7 +5,7 @@
#include <linux/init.h>
#include <linux/delay.h>
-void __delay(unsigned long loops)
+void __aligned(8) __delay(unsigned long loops)
{
asm volatile (
"mov r0, r0\n"
diff --git a/arch/csky/mm/asid.c b/arch/csky/mm/asid.c
index b2e914745c1d..7fb6c417bbac 100644
--- a/arch/csky/mm/asid.c
+++ b/arch/csky/mm/asid.c
@@ -27,7 +27,7 @@ static void flush_context(struct asid_info *info)
u64 asid;
/* Update the list of reserved ASIDs and the ASID bitmap. */
- bitmap_clear(info->map, 0, NUM_CTXT_ASIDS(info));
+ bitmap_zero(info->map, NUM_CTXT_ASIDS(info));
for_each_possible_cpu(i) {
asid = atomic64_xchg_relaxed(&active_asid(info, i), 0);
@@ -178,8 +178,7 @@ int asid_allocator_init(struct asid_info *info,
*/
WARN_ON(NUM_CTXT_ASIDS(info) - 1 <= num_possible_cpus());
atomic64_set(&info->generation, ASID_FIRST_VERSION(info));
- info->map = kcalloc(BITS_TO_LONGS(NUM_CTXT_ASIDS(info)),
- sizeof(*info->map), GFP_KERNEL);
+ info->map = bitmap_zalloc(NUM_CTXT_ASIDS(info), GFP_KERNEL);
if (!info->map)
return -ENOMEM;
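bitmap_zalloc() takes a size in bits and returns a zeroed bitmap, which is exactly what the open-coded kcalloc(BITS_TO_LONGS(...), sizeof(*info->map), ...) did; bitmap_zero() likewise takes the bit count directly. An illustrative pairing (demo_ helpers, not from the patched file):

#include <linux/bitmap.h>

/* Illustrative: bitmap_zalloc(nbits, gfp) pairs with bitmap_free(). */
static unsigned long *demo_alloc_asid_map(unsigned int nbits)
{
	return bitmap_zalloc(nbits, GFP_KERNEL);
}

static void demo_free_asid_map(unsigned long *map)
{
	bitmap_free(map);
}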
diff --git a/arch/csky/mm/fault.c b/arch/csky/mm/fault.c
index 7215a46b6b8e..a885518ce1dd 100644
--- a/arch/csky/mm/fault.c
+++ b/arch/csky/mm/fault.c
@@ -97,13 +97,12 @@ static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_f
BUG();
}
-static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
+static inline void bad_area_nosemaphore(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
{
/*
* Something tried to access memory that isn't in our memory map.
* Fix it, but check if it's kernel or user first.
*/
- mmap_read_unlock(mm);
/* User mode accesses just cause a SIGSEGV */
if (user_mode(regs)) {
do_trap(regs, SIGSEGV, code, addr);
@@ -238,20 +237,9 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
if (is_write(regs))
flags |= FAULT_FLAG_WRITE;
retry:
- mmap_read_lock(mm);
- vma = find_vma(mm, addr);
+ vma = lock_mm_and_find_vma(mm, addr, regs);
if (unlikely(!vma)) {
- bad_area(regs, mm, code, addr);
- return;
- }
- if (likely(vma->vm_start <= addr))
- goto good_area;
- if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
- bad_area(regs, mm, code, addr);
- return;
- }
- if (unlikely(expand_stack(vma, addr))) {
- bad_area(regs, mm, code, addr);
+ bad_area_nosemaphore(regs, mm, code, addr);
return;
}
@@ -259,11 +247,11 @@ retry:
* Ok, we have a good vm_area for this memory access, so
* we can handle it.
*/
-good_area:
code = SEGV_ACCERR;
if (unlikely(access_error(regs, vma))) {
- bad_area(regs, mm, code, addr);
+ mmap_read_unlock(mm);
+ bad_area_nosemaphore(regs, mm, code, addr);
return;
}
@@ -285,6 +273,10 @@ good_area:
return;
}
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return;
+
if (unlikely((fault & VM_FAULT_RETRY) && (flags & FAULT_FLAG_ALLOW_RETRY))) {
flags |= FAULT_FLAG_TRIED;
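lock_mm_and_find_vma() takes the mmap read lock, looks up the VMA covering addr and expands a grows-down stack when needed; on failure it returns NULL with the lock already released, which is why the error path becomes bad_area_nosemaphore(). VM_FAULT_COMPLETED means the core MM finished the fault and dropped the lock itself. A condensed skeleton of the converted path (retry bookkeeping and signal handling omitted; see the hunks above for the real code):

static void demo_page_fault(struct pt_regs *regs, struct mm_struct *mm,
			    unsigned long addr, unsigned int flags, int code)
{
	struct vm_area_struct *vma;
	vm_fault_t fault;

	vma = lock_mm_and_find_vma(mm, addr, regs);
	if (!vma) {
		/* the helper already dropped the mmap lock */
		bad_area_nosemaphore(regs, mm, code, addr);
		return;
	}

	fault = handle_mm_fault(vma, addr, flags, regs);

	/* fault fully handled, mmap lock released by the core MM */
	if (fault & VM_FAULT_COMPLETED)
		return;

	mmap_read_unlock(mm);
}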
diff --git a/arch/csky/mm/init.c b/arch/csky/mm/init.c
index bf2004aa811a..573da66b2543 100644
--- a/arch/csky/mm/init.c
+++ b/arch/csky/mm/init.c
@@ -42,73 +42,6 @@ unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
__page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);
-#ifdef CONFIG_BLK_DEV_INITRD
-static void __init setup_initrd(void)
-{
- unsigned long size;
-
- if (initrd_start >= initrd_end) {
- pr_err("initrd not found or empty");
- goto disable;
- }
-
- if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
- pr_err("initrd extends beyond end of memory");
- goto disable;
- }
-
- size = initrd_end - initrd_start;
-
- if (memblock_is_region_reserved(__pa(initrd_start), size)) {
- pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region",
- __pa(initrd_start), size);
- goto disable;
- }
-
- memblock_reserve(__pa(initrd_start), size);
-
- pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",
- (void *)(initrd_start), size);
-
- initrd_below_start_ok = 1;
-
- return;
-
-disable:
- initrd_start = initrd_end = 0;
-
- pr_err(" - disabling initrd\n");
-}
-#endif
-
-void __init mem_init(void)
-{
-#ifdef CONFIG_HIGHMEM
- unsigned long tmp;
-
- set_max_mapnr(highend_pfn - ARCH_PFN_OFFSET);
-#else
- set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET);
-#endif
- high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
-
-#ifdef CONFIG_BLK_DEV_INITRD
- setup_initrd();
-#endif
-
- memblock_free_all();
-
-#ifdef CONFIG_HIGHMEM
- for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
- struct page *page = pfn_to_page(tmp);
-
- /* FIXME not sure about */
- if (!memblock_is_reserved(tmp << PAGE_SHIFT))
- free_highmem_page(page);
- }
-#endif
-}
-
void free_initmem(void)
{
free_initmem_default(-1);
@@ -197,3 +130,23 @@ void __init fixaddr_init(void)
vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
fixrange_init(vaddr, vaddr + PMD_SIZE, swapper_pg_dir);
}
+
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = PAGE_NONE,
+ [VM_READ] = PAGE_READ,
+ [VM_WRITE] = PAGE_READ,
+ [VM_WRITE | VM_READ] = PAGE_READ,
+ [VM_EXEC] = PAGE_READ,
+ [VM_EXEC | VM_READ] = PAGE_READ,
+ [VM_EXEC | VM_WRITE] = PAGE_READ,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_READ,
+ [VM_SHARED] = PAGE_NONE,
+ [VM_SHARED | VM_READ] = PAGE_READ,
+ [VM_SHARED | VM_WRITE] = PAGE_WRITE,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_WRITE,
+ [VM_SHARED | VM_EXEC] = PAGE_READ,
+ [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READ,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_WRITE,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_WRITE
+};
+DECLARE_VM_GET_PAGE_PROT
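DECLARE_VM_GET_PAGE_PROT provides the generic vm_get_page_prot() that indexes this table; its expansion in include/linux/pgtable.h is approximately:

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return protection_map[vm_flags &
			      (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
}
EXPORT_SYMBOL(vm_get_page_prot);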