arch/mips/include/asm/octeon/cvmx-asm.h
/***********************license start***************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2008 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 ***********************license end**************************************/

/*
 *
 * This file defines ASM primitives for the executive.
 */
#ifndef __CVMX_ASM_H__
#define __CVMX_ASM_H__

#include <asm/octeon/octeon-model.h>

/* other useful stuff */
#define CVMX_SYNC asm volatile ("sync" : : : "memory")
/* String version of SYNCW macro for using in inline asm constructs */
#define CVMX_SYNCW_STR "syncw\nsyncw\n"
#ifdef __OCTEON__

/* Deprecated, will be removed in future release */
#define CVMX_SYNCIO asm volatile ("nop")

#define CVMX_SYNCIOBDMA asm volatile ("synciobdma" : : : "memory")

/* Deprecated, will be removed in future release */
#define CVMX_SYNCIOALL asm volatile ("nop")

/*
 * We actually use two syncw instructions in a row when we need a write
 * memory barrier. This is because the CN3XXX series of Octeons has
 * erratum Core-401, which can cause a single syncw not to enforce
 * ordering under very rare conditions. Even if it is rare, better safe
 * than sorry. (A usage sketch follows the #endif below.)
 */
#define CVMX_SYNCW asm volatile ("syncw\n\tsyncw" : : : "memory")

/*
 * Define new sync instructions to be normal SYNC instructions for
 * operating systems that use threads.
 */
#define CVMX_SYNCWS CVMX_SYNCW
#define CVMX_SYNCS  CVMX_SYNC
#define CVMX_SYNCWS_STR CVMX_SYNCW_STR
#else
/*
 * Not using a Cavium compiler; always use the slower sync so the
 * assembler stays happy.
 */
/* Deprecated, will be removed in future release */
#define CVMX_SYNCIO asm volatile ("nop")

#define CVMX_SYNCIOBDMA asm volatile ("sync" : : : "memory")

/* Deprecated, will be removed in future release */
#define CVMX_SYNCIOALL asm volatile ("nop")

#define CVMX_SYNCW asm volatile ("sync" : : : "memory")
#define CVMX_SYNCWS CVMX_SYNCW
#define CVMX_SYNCS  CVMX_SYNC
#define CVMX_SYNCWS_STR CVMX_SYNCW_STR
#endif
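
/*
 * Illustrative sketch (not part of the SDK API): a typical use of the
 * write barrier is ordering a command-buffer store ahead of a doorbell
 * store, and the string form is meant to be pasted into larger inline
 * asm sequences. The function names and the doorbell layout below are
 * assumptions for this example only.
 */
static inline void cvmx_example_ring_doorbell(volatile unsigned long long *cmd,
					      unsigned long long word,
					      volatile unsigned long long *doorbell)
{
	*cmd = word;	/* fill the command word */
	CVMX_SYNCWS;	/* make the command visible before the doorbell */
	*doorbell = 1;	/* notify the hardware */
}

static inline void cvmx_example_syncws_str(void)
{
	/* The _STR form is intended for embedding in inline asm blocks. */
	asm volatile (CVMX_SYNCWS_STR : : : "memory");
}
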

/*
 * CVMX_PREPARE_FOR_STORE makes each byte of the block unpredictable
 * (actually the old value or zero) until that byte is stored to (by this
 * or another processor). Note that the value of each byte is not only
 * unpredictable, but may also change again, up until the point when one
 * of the cores stores to the byte.
 */
#define CVMX_PREPARE_FOR_STORE(address, offset) \
	asm volatile ("pref 30, " CVMX_TMP_STR(offset) "(%[rbase])" : : \
	[rbase] "d" (address))
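
/*
 * Illustrative sketch (not part of the SDK API): prepare a cache block
 * before completely overwriting it, so the stale data never has to be
 * fetched from memory. The 128-byte block size, function name and loop
 * are assumptions for this example; every byte of the prepared block
 * must be written afterwards.
 */
static inline void cvmx_example_fill_block(unsigned char *block)
{
	int i;

	CVMX_PREPARE_FOR_STORE(block, 0);
	for (i = 0; i < 128; i++)
		block[i] = 0;	/* store to every byte of the block */
}
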
/*
 * This is a command headed to the L2 controller to tell it to clear
 * its dirty bit for a block. Basically, SW is telling HW that the
 * current version of the block will not be used.
 */
#define CVMX_DONT_WRITE_BACK(address, offset) \
	asm volatile ("pref 29, " CVMX_TMP_STR(offset) "(%[rbase])" : : \
	[rbase] "d" (address))
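
/*
 * Illustrative sketch (not part of the SDK API): once a buffer's
 * contents have been fully consumed, tell the L2 controller the dirty
 * block no longer needs to reach DRAM. The function name is an
 * assumption for this example only.
 */
static inline void cvmx_example_discard_block(void *buffer)
{
	/* The block contents must not be relied upon after this point. */
	CVMX_DONT_WRITE_BACK(buffer, 0);
}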

/* flush stores, invalidate entire icache */
#define CVMX_ICACHE_INVALIDATE \
	{ CVMX_SYNC; asm volatile ("synci 0($0)" : : ); }

/* flush stores, invalidate entire icache */
#define CVMX_ICACHE_INVALIDATE2 \
	{ CVMX_SYNC; asm volatile ("cache 0, 0($0)" : : ); }

/* complete prefetches, invalidate entire dcache */
#define CVMX_DCACHE_INVALIDATE \
	{ CVMX_SYNC; asm volatile ("cache 9, 0($0)" : : ); }
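
/*
 * Illustrative sketch (not part of the SDK API): after patching an
 * instruction word in memory, flush the store and invalidate the icache
 * so the core fetches the new instruction. The function name is an
 * assumption for this example only.
 */
static inline void cvmx_example_patch_insn(unsigned int *insn,
					   unsigned int new_insn)
{
	*insn = new_insn;	/* store the new instruction word */
	CVMX_ICACHE_INVALIDATE;	/* sync stores, then invalidate the icache */
}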

#define CVMX_CACHE(op, address, offset)					\
	asm volatile ("cache " CVMX_TMP_STR(op) ", " CVMX_TMP_STR(offset) "(%[rbase])" \
		: : [rbase] "d" (address) )
/* fetch and lock the state. */
#define CVMX_CACHE_LCKL2(address, offset) CVMX_CACHE(31, address, offset)
/* unlock the state. */
#define CVMX_CACHE_WBIL2(address, offset) CVMX_CACHE(23, address, offset)
/* invalidate the cache block and clear the USED bits for the block */
#define CVMX_CACHE_WBIL2I(address, offset) CVMX_CACHE(3, address, offset)
/* load virtual tag and data for the L2 cache block into L2C_TAD0_TAG register */
#define CVMX_CACHE_LTGL2I(address, offset) CVMX_CACHE(7, address, offset)
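
/*
 * Illustrative sketch (not part of the SDK API): pin the cache block
 * holding a hot data structure in L2 while it is in heavy use, then
 * release it with a writeback-invalidate. The function names and the
 * single-block assumption are for this example only.
 */
static inline void cvmx_example_l2_lock(void *hot_data)
{
	CVMX_CACHE_LCKL2(hot_data, 0);	/* fetch and lock the block in L2 */
}

static inline void cvmx_example_l2_unlock(void *hot_data)
{
	CVMX_CACHE_WBIL2(hot_data, 0);	/* unlock the block, per the comment above */
}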

#define CVMX_POP(result, input) \
	asm ("pop %[rd],%[rs]" : [rd] "=d" (result) : [rs] "d" (input))
#define CVMX_DPOP(result, input) \
	asm ("dpop %[rd],%[rs]" : [rd] "=d" (result) : [rs] "d" (input))

/* some new cop0-like stuff */
#define CVMX_RDHWR(result, regstr) \
	asm volatile ("rdhwr %[rt],$" CVMX_TMP_STR(regstr) : [rt] "=d" (result))
#define CVMX_RDHWRNV(result, regstr) \
	asm ("rdhwr %[rt],$" CVMX_TMP_STR(regstr) : [rt] "=d" (result))
#endif /* __CVMX_ASM_H__ */