/*
 * Interrupt Entries
 *
 * Copyright 2005-2009 Analog Devices Inc.
 *               D. Jeff Dionne <jeff@ryeham.ee.ryerson.ca>
 *               Kenneth Albanowski <kjahds@kjahds.com>
 *
 * Licensed under the GPL-2 or later.
 */

#include <asm/blackfin.h>
#include <mach/irq.h>
#include <linux/linkage.h>
#include <asm/entry.h>
#include <asm/asm-offsets.h>
#include <asm/trace.h>
#include <asm/traps.h>
#include <asm/thread_info.h>

#include <asm/context.S>

.extern _ret_from_exception

#ifdef CONFIG_I_ENTRY_L1
.section .l1.text
#else
.text
#endif

.align 4 	/* just in case */

/* Common interrupt entry code.	 First we do CLI, then push
 * RETI, to keep interrupts disabled, but to allow this state to be changed
 * by local_bh_enable.
 * R0 contains the interrupt number, while R1 may contain the value of IPEND,
 * or garbage if IPEND won't be needed by the ISR.  */
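/* Note: this continues the pt_regs frame begun by the INTERRUPT_ENTRY
 * macros below (see asm/entry.h); the push order must match struct pt_regs
 * so that the PT_* offsets from asm-offsets.h line up.  RESTORE_CONTEXT
 * unwinds the frame in reverse.  */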
__common_int_entry:
	[--sp] = fp;
	[--sp] = usp;

	[--sp] = i0;
	[--sp] = i1;
	[--sp] = i2;
	[--sp] = i3;

	[--sp] = m0;
	[--sp] = m1;
	[--sp] = m2;
	[--sp] = m3;

	[--sp] = l0;
	[--sp] = l1;
	[--sp] = l2;
	[--sp] = l3;

	[--sp] = b0;
	[--sp] = b1;
	[--sp] = b2;
	[--sp] = b3;
	[--sp] = a0.x;
	[--sp] = a0.w;
	[--sp] = a1.x;
	[--sp] = a1.w;

	[--sp] = LC0;
	[--sp] = LC1;
	[--sp] = LT0;
	[--sp] = LT1;
	[--sp] = LB0;
	[--sp] = LB1;

	[--sp] = ASTAT;

	[--sp] = r0;	/* Skip reserved */
	[--sp] = RETS;
	r2 = RETI;
	[--sp] = r2;
	[--sp] = RETX;
	[--sp] = RETN;
	[--sp] = RETE;
	[--sp] = SEQSTAT;
	[--sp] = r1;	/* IPEND - R1 may or may not be set up before jumping here. */

	/* Switch to the other method of keeping interrupts disabled: RETI is
	 * saved on the stack now, so masking with CLI/STI is sufficient.  */
#ifdef CONFIG_DEBUG_HWERR
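	/* Leave only the low system events (emulation, reset, NMI, exception,
	 * global enable) and the hardware error interrupt (IVHW) unmasked, so
	 * hardware errors are still reported while normal IRQs stay off.  */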
	r1 = 0x3f;
	sti r1;
#else
	cli r1;
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	[--sp] = r0;
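	/* Blackfin C ABI: the caller reserves 12 bytes of outgoing-argument
	 * space on the stack around every call, hence the SP += -12 / +12
	 * pairs throughout this file.  */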
	sp += -12;
	call _trace_hardirqs_off;
	sp += 12;
	r0 = [sp++];
#endif
	[--sp] = RETI;  /* orig_pc */
	/* Clear all L registers: nonzero values left by user space would
	 * enable circular addressing on the DAG registers used below.  */
	r1 = 0 (x);
	l0 = r1;
	l1 = r1;
	l2 = r1;
	l3 = r1;
#ifdef CONFIG_FRAME_POINTER
	fp = 0;
#endif

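	/* Work around silicon anomalies 05000283/05000315 (see the macro in
	 * asm/context.S); p5 and r7 are used as scratch and clobbered.  */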
	ANOMALY_283_315_WORKAROUND(p5, r7)

	r1 = sp;	/* pt_regs pointer: second argument of the IRQ handler */
	SP += -12;
#ifdef CONFIG_IPIPE
	call ___ipipe_grab_irq;
	SP += 12;
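	/* A zero return from ___ipipe_grab_irq means there is nothing for the
	 * root domain to do now: skip _return_from_int and restore the
	 * interrupted context directly.  */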
	cc = r0 == 0;
	if cc jump .Lcommon_restore_context;
#else /* CONFIG_IPIPE */

#ifdef CONFIG_PREEMPT
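	/* thread_info lives at the base of the kernel stack, so aligning SP
	 * down with ALIGN_PAGE_MASK locates it; bump the preempt count so
	 * this interrupt cannot trigger a task switch mid-handler.  */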
	r7 = sp;
	r4.l = lo(ALIGN_PAGE_MASK);
	r4.h = hi(ALIGN_PAGE_MASK);
	r7 = r7 & r4;
	p5 = r7;
	r7 = [p5 + TI_PREEMPT]; /* get preempt count */
	r7 += 1;                /* increment it */
	[p5 + TI_PREEMPT] = r7;
#endif
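	/* pseudo_long_call is a plain call normally, but goes indirect
	 * through the named scratch P register when the target may sit
	 * outside the direct call range (e.g. CONFIG_ROMKERNEL).  */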
	pseudo_long_call _do_irq, p2;

#ifdef CONFIG_PREEMPT
	r7 += -1;
	[p5 + TI_PREEMPT] = r7; /* restore preempt count */
#endif

	SP += 12;
#endif /* CONFIG_IPIPE */
	pseudo_long_call _return_from_int, p2;
.Lcommon_restore_context:
	RESTORE_CONTEXT
	rti;

/* interrupt routine for ivhw - 5 */
ENTRY(_evt_ivhw)
	/* A single action can kick off multiple memory transactions (like a
	 * cache line fetch), which can cause multiple hardware errors; let's
	 * catch them all.  First, make sure all the actions are complete and
	 * the core sees the hardware errors.
	 */
	SSYNC;
	SSYNC;

	SAVE_ALL_SYS
#ifdef CONFIG_FRAME_POINTER
	fp = 0;
#endif

	ANOMALY_283_315_WORKAROUND(p5, r7)

	/* Handle all stacked hardware errors.
	 * To make sure we don't hang forever, give up after 10 iterations.
	 */
	R0 = 0;		/* iteration count */
	R2 = 10;	/* iteration limit */
1:
	P0.L = LO(ILAT);
	P0.H = HI(ILAT);
	R1 = [P0];
	CC = BITTST(R1, EVT_IVHW_P);
	IF ! CC JUMP 2f;
	/* OK a hardware error is pending - clear it by writing the IVHW
	 * mask (EVT_IVHW), not the bit position, back to ILAT */
	R1 = EVT_IVHW;
	[P0] = R1;
	R0 += 1;
	CC = R0 == R2;	/* test the iteration count, not the clear mask */
	IF CC JUMP 2f;
	JUMP 1b;
2:
	/* We are going to dump something out, so make sure we print IPEND properly */
	p2.l = lo(IPEND);
	p2.h = hi(IPEND);
	r0 = [p2];
	[sp + PT_IPEND] = r0;

	/* set the EXCAUSE to HWERR for trap_c */
	r0 = [sp + PT_SEQSTAT];
	R1.L = LO(VEC_HWERR);
	R1.H = HI(VEC_HWERR);
	R0 = R0 | R1;
	[sp + PT_SEQSTAT] = R0;

	r0 = sp;        /* stack frame pt_regs pointer argument ==> r0 */
	SP += -12;
	pseudo_long_call _trap_c, p5;
	SP += 12;

#ifdef EBIU_ERRMST
	/* make sure EBIU_ERRMST is clear */
	p0.l = LO(EBIU_ERRMST);
	p0.h = HI(EBIU_ERRMST);
	r0.l = (CORE_ERROR | CORE_MERROR);
	w[p0] = r0.l;
#endif

	pseudo_long_call _ret_from_exception, p2;

.Lcommon_restore_all_sys:
	RESTORE_ALL_SYS
	rti;
ENDPROC(_evt_ivhw)

/* Interrupt routine for evt2 (NMI).
 * For inner circle type details, please see:
 * http://docs.blackfin.uclinux.org/doku.php?id=linux-kernel:nmi
 */
ENTRY(_evt_nmi)
#ifndef CONFIG_NMI_WATCHDOG
.weak _evt_nmi
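	/* Without the NMI watchdog, this is only a weak "rtn" stub that
	 * other code may override.  */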
#else
	/* No need to take account of CPLBs, since this handler will not return */
	SAVE_ALL_SYS
	r0 = sp;		/* pt_regs pointer argument for _do_nmi */
	r1 = retn;		/* RETN holds the NMI return address */
	[sp + PT_PC] = r1;	/* report it as the PC in the dump */
	trace_buffer_save(p4,r5);

	ANOMALY_283_315_WORKAROUND(p4, r5)

	SP += -12;
	call _do_nmi;
	SP += 12;
1:
	jump 1b;
#endif
	rtn;
ENDPROC(_evt_nmi)

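/* The INTERRUPT_ENTRY/TIMER_INTERRUPT_ENTRY macros (asm/entry.h) save the
 * scratch register state, load the vector number into R0 (and IPEND into
 * R1 where needed), and jump to __common_int_entry above.  */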
/* interrupt routine for core timer - 6 */
ENTRY(_evt_timer)
	TIMER_INTERRUPT_ENTRY(EVT_IVTMR_P)

/* interrupt routines for evt7 through evt13 - 7..13 */
ENTRY(_evt_evt7)
	INTERRUPT_ENTRY(EVT_IVG7_P)
ENTRY(_evt_evt8)
	INTERRUPT_ENTRY(EVT_IVG8_P)
ENTRY(_evt_evt9)
	INTERRUPT_ENTRY(EVT_IVG9_P)
ENTRY(_evt_evt10)
	INTERRUPT_ENTRY(EVT_IVG10_P)
ENTRY(_evt_evt11)
	INTERRUPT_ENTRY(EVT_IVG11_P)
ENTRY(_evt_evt12)
	INTERRUPT_ENTRY(EVT_IVG12_P)
ENTRY(_evt_evt13)
	INTERRUPT_ENTRY(EVT_IVG13_P)


/* interrupt routine for system_call - 15 */
ENTRY(_evt_system_call)
	SAVE_CONTEXT_SYSCALL
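	/* SAVE_CONTEXT_SYSCALL (asm/context.S) builds the pt_regs frame while
	 * recording the original R0 and P0 (the syscall number) for syscall
	 * restart handling.  */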
#ifdef CONFIG_FRAME_POINTER
	fp = 0;
#endif
	pseudo_long_call _system_call, p2;
	jump .Lcommon_restore_context;
ENDPROC(_evt_system_call)

#ifdef CONFIG_IPIPE
/*
 * __ipipe_call_irqtail: lowers the current priority level to EVT15
 * before running a user-defined routine, then raises the priority
 * level to EVT14 to prepare the caller for a normal interrupt
 * return through RTI.
 *
 * We currently use this facility on two occasions:
 *
 * - to branch to __ipipe_irq_tail_hook as requested by a high
 *   priority domain (such as Xenomai) after the pipeline has
 *   delivered an interrupt, in order to start its rescheduling
 *   procedure.  Since we may not switch tasks while IRQ levels are
 *   nested on the Blackfin, we have to fake an interrupt return so
 *   that we may reschedule immediately.
 *
 * - to branch to sync_root_irqs, in order to play any interrupts
 *   pending for the root domain (i.e. the Linux kernel).  This lowers
 *   the core priority level enough that Linux IRQ handlers never
 *   delay interrupts handled by high priority domains; we defer
 *   those handlers until this point instead.  This is a substitute
 *   for using a threaded interrupt model for the Linux kernel.
 *
 * r0: address of user-defined routine
 * context: caller must have preempted EVT15, hw interrupts must be off.
 */
ENTRY(___ipipe_call_irqtail)
	p0 = r0;		/* user-defined routine to call */
	r0.l = 1f;
	r0.h = 1f;
	reti = r0;		/* "return" to the label below, */
	rti;			/* dismissing the current interrupt level */
1:
	[--sp] = rets;
	[--sp] = ( r7:4, p5:3 );
	sp += -12;
	call (p0);
	sp += 12;
	( r7:4, p5:3 ) = [sp++];
	rets = [sp++];

#ifdef CONFIG_DEBUG_HWERR
	/* enable irq14 & hwerr interrupt, until we transition to _evt_evt14 */
	r0 = (EVT_IVG14 | EVT_IVHW | \
		EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
#else
	/* Only enable irq14 interrupt, until we transition to _evt_evt14 */
	r0 = (EVT_IVG14 | \
		EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
#endif
	sti r0;
	raise 14;		/* Branches to _evt_evt14 */
2:
	jump 2b;                /* Likely paranoid. */
ENDPROC(___ipipe_call_irqtail)

#endif /* CONFIG_IPIPE */