path: root/arch/sparc64/kernel/sun4v_ivec.S
/* sun4v_ivec.S: Sun4v interrupt vector handling.
 *
 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
 */

#include <asm/cpudata.h>
#include <asm/intr_queue.h>
#include <asm/pil.h>

	.text
	.align	32
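
/* Each of the handlers below services one of the per-cpu sun4v
 * interrupt queues (cpu mondo, device mondo, resumable error,
 * non-resumable error).  The hypervisor appends 64-byte entries and
 * advances the tail offset; the kernel consumes entries and advances
 * the head offset.  Head and tail are byte offsets into the queue,
 * read and written via ASI_QUEUE.
 */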

sun4v_cpu_mondo:
	/* Head offset in %g2, tail offset in %g4.
	 * If they are the same, no work.
	 */
	mov	INTRQ_CPU_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_CPU_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_cpu_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g4.  */
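	/* The scratchpad register holds the address of this cpu's
	 * fault status area, which lives TRAP_PER_CPU_FAULT_INFO
	 * bytes into the trap_per_cpu entry, so subtracting that
	 * offset yields the base of the entry.
	 */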
	ldxa	[%g0] ASI_SCRATCHPAD, %g4
	sub	%g4, TRAP_PER_CPU_FAULT_INFO, %g4

	/* Get CPU mondo queue base phys address into %g7.  */
	ldx	[%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7

	/* Now get the cross-call arguments and handler PC, same
	 * layout as sun4u:
	 *
	 * 1st 64-bit word: low half is 32-bit PC, put into %g3 and jmpl to it
	 *                  high half is context arg to MMU flushes, into %g5
	 * 2nd 64-bit word: 64-bit arg, load into %g1
	 * 3rd 64-bit word: 64-bit arg, load into %g7
	 */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g3
	add	%g2, 0x8, %g2
	srlx	%g3, 32, %g5
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
	add	%g2, 0x8, %g2
	srl	%g3, 0, %g3
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g7
	add	%g2, 0x40 - 0x8 - 0x8, %g2
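	/* Each queue entry is 64 bytes; 0x10 was already added while
	 * stepping through the first two words, so this moves the
	 * head offset to the start of the next entry.
	 */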

	/* Update queue head pointer.  */
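	/* ANDing with the queue mask wraps the offset around the
	 * end of the circular queue.
	 */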
	lduw	[%g4 + TRAP_PER_CPU_CPU_MONDO_QMASK], %g4
	and	%g2, %g4, %g2

	mov	INTRQ_CPU_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	jmpl	%g3, %g0
	 nop

sun4v_cpu_mondo_queue_empty:
	retry

sun4v_dev_mondo:
	/* Head offset in %g2, tail offset in %g4.  */
	mov	INTRQ_DEVICE_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_DEVICE_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_dev_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g4.  */
	ldxa	[%g0] ASI_SCRATCHPAD, %g4
	sub	%g4, TRAP_PER_CPU_FAULT_INFO, %g4

	/* Get DEV mondo queue base phys address into %g5.  */
	ldx	[%g4 + TRAP_PER_CPU_DEV_MONDO_PA], %g5

	/* Load IVEC into %g3.  */
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	add	%g2, 0x40, %g2

	/* XXX There can be a full 64-byte block of data here.
	 * XXX This is how we can get at MSI vector data.
 * XXX Currently we do not capture this, but when we do we'll
	 * XXX need to add a 64-byte storage area in the struct ino_bucket
	 * XXX or the struct irq_desc.
	 */

	/* Update queue head pointer, this frees up some registers.  */
	lduw	[%g4 + TRAP_PER_CPU_DEV_MONDO_QMASK], %g4
	and	%g2, %g4, %g2

	mov	INTRQ_DEVICE_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	/* Get &__irq_work[smp_processor_id()] into %g1.  */
	TRAP_LOAD_IRQ_WORK(%g1, %g4)

	/* Get &ivector_table[IVEC] into %g4.  */
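	/* Each ivector_table[] entry is 8 bytes, hence the shift by
	 * 3 to scale the IVEC into a table offset.
	 */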
	sethi	%hi(ivector_table), %g4
	sllx	%g3, 3, %g3
	or	%g4, %lo(ivector_table), %g4
	add	%g4, %g3, %g4

	/* Insert ivector_table[] entry into __irq_work[] queue.  */
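	/* This is a simple LIFO push: the bucket inherits the old
	 * list head in its irq_chain field and then becomes the new
	 * head of this cpu's pending list.
	 */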
	lduw	[%g1], %g2		/* g2 = irq_work(cpu) */
	stw	%g2, [%g4 + 0x00]	/* bucket->irq_chain = g2 */
	stw	%g4, [%g1]		/* irq_work(cpu) = bucket */

	/* Signal the interrupt by setting (1 << pil) in %softint.  */
	wr	%g0, 1 << PIL_DEVICE_IRQ, %set_softint
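	/* The softint is actually taken, and the device interrupt
	 * handler run, once the retry below returns from this trap.
	 */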

sun4v_dev_mondo_queue_empty:
	retry

sun4v_res_mondo:
	/* Head offset in %g2, tail offset in %g4.  */
	mov	INTRQ_RESUM_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_RESUM_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_res_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g3.  */
	ldxa	[%g0] ASI_SCRATCHPAD, %g3
	sub	%g3, TRAP_PER_CPU_FAULT_INFO, %g3

	/* Get RES mondo queue base phys address into %g5.  */
	ldx	[%g3 + TRAP_PER_CPU_RESUM_MONDO_PA], %g5

	/* Get RES kernel buffer base phys address into %g7.  */
	ldx	[%g3 + TRAP_PER_CPU_RESUM_KBUF_PA], %g7
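	/* Entries are copied out of the hypervisor queue into this
	 * per-cpu buffer so the queue slot can be freed (head
	 * advanced) before we drop into C code to log the error.
	 */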

	/* If the first word of the kernel buffer slot at this offset
	 * is non-zero, the entry copied there earlier has not been
	 * consumed yet, so treat the queue as full.
	 */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
	brnz,pn	%g1, sun4v_res_mondo_queue_full
	 nop

	lduw	[%g3 + TRAP_PER_CPU_RESUM_QMASK], %g4

	/* Remember this entry's offset in %g1.  */
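	/* It is handed to sun4v_resum_error() (via %g4 and %l4) so
	 * the C code can locate the copied entry in the kernel
	 * buffer.
	 */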
	mov	%g2, %g1

	/* Copy 64-byte queue entry into kernel buffer.  */
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2

	/* Update queue head pointer.  */
	and	%g2, %g4, %g2

	mov	INTRQ_RESUM_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	/* Disable interrupts and save register state so we can call
	 * C code.  The etrap handling will leave %g4 in %l4 for us
	 * when it's done.
	 */
	rdpr	%pil, %g2
	wrpr	%g0, 15, %pil
	mov	%g1, %g4
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call		trace_hardirqs_off
	 nop
#endif
	/* Log the event.  */
	add	%sp, PTREGS_OFF, %o0
	call	sun4v_resum_error
	 mov	%l4, %o1

	/* Return from trap.  */
	ba,pt	%xcc, rtrap_irq
	 nop

sun4v_res_mondo_queue_empty:
	retry

sun4v_res_mondo_queue_full:
	/* The queue is full, consolidate our damage by setting
	 * the head equal to the tail.  We'll just trap again otherwise.
	 * Call C code to log the event.
	 */
	mov	INTRQ_RESUM_MONDO_HEAD, %g2
	stxa	%g4, [%g2] ASI_QUEUE
	membar	#Sync

	rdpr	%pil, %g2
	wrpr	%g0, 15, %pil
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call		trace_hardirqs_off
	 nop
#endif
	call	sun4v_resum_overflow
	 add	%sp, PTREGS_OFF, %o0

	ba,pt	%xcc, rtrap_irq
	 nop

sun4v_nonres_mondo:
	/* Head offset in %g2, tail offset in %g4.  */
	mov	INTRQ_NONRESUM_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_NONRESUM_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_nonres_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g3.  */
	ldxa	[%g0] ASI_SCRATCHPAD, %g3
	sub	%g3, TRAP_PER_CPU_FAULT_INFO, %g3

	/* Get NONRESUM mondo queue base phys address into %g5.  */
	ldx	[%g3 + TRAP_PER_CPU_NONRESUM_MONDO_PA], %g5

	/* Get NONRESUM kernel buffer base phys address into %g7.  */
	ldx	[%g3 + TRAP_PER_CPU_NONRESUM_KBUF_PA], %g7

	/* If the first word of the kernel buffer slot at this offset
	 * is non-zero, the entry copied there earlier has not been
	 * consumed yet, so treat the queue as full.
	 */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
	brnz,pn	%g1, sun4v_nonres_mondo_queue_full
	 nop

	lduw	[%g3 + TRAP_PER_CPU_NONRESUM_QMASK], %g4

	/* Remember this entry's offset in %g1.  */
	mov	%g2, %g1

	/* Copy 64-byte queue entry into kernel buffer.  */
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2

	/* Update queue head pointer.  */
	and	%g2, %g4, %g2

	mov	INTRQ_NONRESUM_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	/* Disable interrupts and save register state so we can call
	 * C code.  The etrap handling will leave %g4 in %l4 for us
	 * when it's done.
	 */
	rdpr	%pil, %g2
	wrpr	%g0, 15, %pil
	mov	%g1, %g4
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call		trace_hardirqs_off
	 nop
#endif
	/* Log the event.  */
	add	%sp, PTREGS_OFF, %o0
	call	sun4v_nonresum_error
	 mov	%l4, %o1

	/* Return from trap.  */
	ba,pt	%xcc, rtrap_irq
	 nop

sun4v_nonres_mondo_queue_empty:
	retry

sun4v_nonres_mondo_queue_full:
	/* The queue is full, consolidate our damage by setting
	 * the head equal to the tail.  We'll just trap again otherwise.
	 * Call C code to log the event.
	 */
	mov	INTRQ_NONRESUM_MONDO_HEAD, %g2
	stxa	%g4, [%g2] ASI_QUEUE
	membar	#Sync

	rdpr	%pil, %g2
	wrpr	%g0, 15, %pil
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call		trace_hardirqs_off
	 nop
#endif
	call	sun4v_nonresum_overflow
	 add	%sp, PTREGS_OFF, %o0

	ba,pt	%xcc, rtrap_irq
	 nop