/*
 * include/asm-sh/cpu-sh3/cacheflush.h
 *
 * Copyright (C) 1999 Niibe Yutaka
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#ifndef __ASM_CPU_SH3_CACHEFLUSH_H
#define __ASM_CPU_SH3_CACHEFLUSH_H

/*
 * Cache flushing:
 *
 *  - flush_cache_all() flushes the entire cache
 *  - flush_cache_mm(mm) flushes the specified mm context's cache lines
 *  - flush_cache_page(mm, vmaddr, pfn) flushes a single page
 *  - flush_cache_range(vma, start, end) flushes a range of pages
 *
 *  - flush_dcache_page(pg) flushes (writes back & invalidates) a page for the dcache
 *  - flush_icache_range(start, end) flushes (invalidates) a range for the icache
 *  - flush_icache_page(vma, pg) flushes (invalidates) a page for the icache
 *
 *  Caches are indexed (effectively) by physical address on SH-3, so the
 *  generic SH-3 configuration does not need these operations and defines
 *  them as no-ops below; only the SH7705 with its 32KB cache needs real
 *  implementations.  (See the illustrative usage sketch below.)
 */
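/*
 * A minimal usage sketch (illustrative only, not part of this header):
 * code that writes instructions into memory and then executes them would
 * pair the store with an icache flush of the same range, e.g.
 *
 *	memcpy(code_buf, insns, len);	/- code_buf, insns, len are hypothetical -/
 *	flush_icache_range((unsigned long)code_buf,
 *			   (unsigned long)code_buf + len);
 *
 * (The inner comment delimiters are written as /- -/ only to keep this
 * block a single comment.)  On the generic SH-3 configuration this
 * compiles away to nothing; on the SH7705 it expands to the real routine
 * declared below.
 */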

#if defined(CONFIG_SH7705_CACHE_32KB)

/*
 * The SH7705 is an SH-3 processor with a 32KB cache. It has alias issues
 * like the SH-4, but unlike the SH-4 its cache is unified, so we also
 * need to do some work in mmap when 'exec'ing a new binary.
 *
 * 32KB cache with 4KB pages: alias detection needs to check bit 12
 * (see the sketch below).
 */
#define CACHE_ALIAS 0x00001000
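/*
 * A hedged sketch of the alias test this mask implies (the helper name is
 * hypothetical, not part of this header): two virtual mappings of the same
 * physical page end up in different cache lines, i.e. alias, whenever they
 * differ in bit 12:
 *
 *	static inline int sh7705_mappings_would_alias(unsigned long va1,
 *						      unsigned long va2)
 *	{
 *		return (va1 ^ va2) & CACHE_ALIAS;
 *	}
 *
 * get_unmapped_area() (see HAVE_ARCH_UNMAPPED_AREA below) tries to keep
 * this expression zero for shared mappings.
 */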

struct page;
struct mm_struct;
struct vm_area_struct;

extern void flush_cache_all(void);
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
                              unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn);
extern void flush_dcache_page(struct page *pg);
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);

#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

/* The SH-3 has a unified cache, so no special action is needed here */
#define flush_cache_sigtramp(vaddr)		do { } while (0)
#define flush_page_to_ram(page)			do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)

#define p3_cache_init()				do { } while (0)

#define PG_mapped	PG_arch_1

/* We provide our own get_unmapped_area() to avoid the cache alias issue */
#define HAVE_ARCH_UNMAPPED_AREA
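/*
 * A hedged sketch of the colouring idea behind HAVE_ARCH_UNMAPPED_AREA
 * (conceptual only, not the real arch hook): the arch-specific
 * get_unmapped_area() picks a candidate address whose alias bit matches
 * the file's page offset (pgoff), roughly
 *
 *	addr = (addr & ~CACHE_ALIAS) |
 *	       ((pgoff << PAGE_SHIFT) & CACHE_ALIAS);
 *
 * so every mapping of a given file page falls in the same cache colour
 * and the unified cache never holds two aliasing copies.
 */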

#else

#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_range(vma, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
#define flush_dcache_page(page)			do { } while (0)
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
#define flush_icache_range(start, end)		do { } while (0)
#define flush_icache_page(vma,pg)		do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
#define flush_cache_sigtramp(vaddr)		do { } while (0)

#define p3_cache_init()				do { } while (0)

#define HAVE_ARCH_UNMAPPED_AREA

#endif

#endif /* __ASM_CPU_SH3_CACHEFLUSH_H */