1 /*
2 * arch/arm/vpl011.c
3 *
4 * Virtual PL011 UART
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; If not, see <http://www.gnu.org/licenses/>.
17 */
18
19 #define XEN_WANT_FLEX_CONSOLE_RING 1
20
21 /* We assume the PL011 default of "1/2 way" for the FIFO trigger level. */
22 #define SBSA_UART_FIFO_LEVEL (SBSA_UART_FIFO_SIZE / 2)
23
24 #include <xen/errno.h>
25 #include <xen/event.h>
26 #include <xen/guest_access.h>
27 #include <xen/init.h>
28 #include <xen/lib.h>
29 #include <xen/mm.h>
30 #include <xen/sched.h>
31 #include <public/domctl.h>
32 #include <public/io/console.h>
33 #include <asm/pl011-uart.h>
34 #include <asm/vgic-emul.h>
35 #include <asm/vpl011.h>
36
37 /*
38 * Since pl011 registers are 32-bit registers, all registers
39 * are handled similarly allowing 8-bit, 16-bit and 32-bit
40 * accesses except 64-bit access.
41 */
vpl011_reg32_check_access(struct hsr_dabt dabt)42 static bool vpl011_reg32_check_access(struct hsr_dabt dabt)
43 {
44 return (dabt.size != DABT_DOUBLE_WORD);
45 }
46
vpl011_update_interrupt_status(struct domain * d)47 static void vpl011_update_interrupt_status(struct domain *d)
48 {
49 struct vpl011 *vpl011 = &d->arch.vpl011;
50 uint32_t uartmis = vpl011->uartris & vpl011->uartimsc;
51
52 /*
53 * This function is expected to be called with the lock taken.
54 */
55 ASSERT(spin_is_locked(&vpl011->lock));
56
57 /*
58 * TODO: PL011 interrupts are level triggered which means
59 * that interrupt needs to be set/clear instead of being
60 * injected. However, currently vGIC does not handle level
61 * triggered interrupts properly. This function needs to be
62 * revisited once vGIC starts handling level triggered
63 * interrupts.
64 */
65
66 /*
67 * Raise an interrupt only if any additional interrupt
68 * status bit has been set since the last time.
69 */
70 if ( uartmis & ~vpl011->shadow_uartmis )
71 vgic_vcpu_inject_spi(d, GUEST_VPL011_SPI);
72
73 vpl011->shadow_uartmis = uartmis;
74 }
75
/*
 * Consume one character from the IN (backend -> guest) ring buffer and
 * update the RX FIFO flags and interrupt status accordingly.
 *
 * Returns the consumed byte, or 0 if the ring was unexpectedly empty.
 */
static uint8_t vpl011_read_data(struct domain *d)
{
    unsigned long flags;
    uint8_t data = 0;
    struct vpl011 *vpl011 = &d->arch.vpl011;
    struct xencons_interface *intf = vpl011->ring_buf;
    XENCONS_RING_IDX in_cons, in_prod;

    VPL011_LOCK(d, flags);

    in_cons = intf->in_cons;
    in_prod = intf->in_prod;

    /* Read the indexes before the ring contents (pairs with the
     * producer's write barrier). */
    smp_rmb();

    /*
     * It is expected that there will be data in the ring buffer when this
     * function is called since the guest is expected to read the data register
     * only if the RXFE flag is not set.
     * If the guest still does read when the RXFE bit is set then 0 will be
     * returned.
     */
    if ( xencons_queued(in_prod, in_cons, sizeof(intf->in)) > 0 )
    {
        unsigned int fifo_level;

        data = intf->in[xencons_mask(in_cons, sizeof(intf->in))];
        in_cons += 1;
        /* Finish reading the character before publishing the new index. */
        smp_mb();
        intf->in_cons = in_cons;

        fifo_level = xencons_queued(in_prod, in_cons, sizeof(intf->in));

        /* If the FIFO is now empty, we clear the receive timeout interrupt. */
        if ( fifo_level == 0 )
        {
            vpl011->uartfr |= RXFE;
            vpl011->uartris &= ~RTI;
        }

        /* If the FIFO is more than half empty, we clear the RX interrupt. */
        if ( fifo_level < sizeof(intf->in) - SBSA_UART_FIFO_LEVEL )
            vpl011->uartris &= ~RXI;

        vpl011_update_interrupt_status(d);
    }
    else
        gprintk(XENLOG_ERR, "vpl011: Unexpected IN ring buffer empty\n");

    /*
     * We have consumed a character or the FIFO was empty, so clear the
     * "FIFO full" bit.
     */
    vpl011->uartfr &= ~RXFF;

    VPL011_UNLOCK(d, flags);

    /*
     * Send an event to console backend to indicate that data has been
     * read from the IN ring buffer.
     */
    notify_via_xen_event_channel(d, vpl011->evtchn);

    return data;
}
140
vpl011_update_tx_fifo_status(struct vpl011 * vpl011,unsigned int fifo_level)141 static void vpl011_update_tx_fifo_status(struct vpl011 *vpl011,
142 unsigned int fifo_level)
143 {
144 struct xencons_interface *intf = vpl011->ring_buf;
145 unsigned int fifo_threshold = sizeof(intf->out) - SBSA_UART_FIFO_LEVEL;
146
147 BUILD_BUG_ON(sizeof(intf->out) < SBSA_UART_FIFO_SIZE);
148
149 /*
150 * Set the TXI bit only when there is space for fifo_size/2 bytes which
151 * is the trigger level for asserting/de-assterting the TX interrupt.
152 */
153 if ( fifo_level <= fifo_threshold )
154 vpl011->uartris |= TXI;
155 else
156 vpl011->uartris &= ~TXI;
157 }
158
/*
 * Queue one character into the OUT (guest -> backend) ring buffer and
 * update the TX FIFO flags and interrupt status accordingly.
 *
 * If the ring is already full the character is silently dropped.
 */
static void vpl011_write_data(struct domain *d, uint8_t data)
{
    unsigned long flags;
    struct vpl011 *vpl011 = &d->arch.vpl011;
    struct xencons_interface *intf = vpl011->ring_buf;
    XENCONS_RING_IDX out_cons, out_prod;

    VPL011_LOCK(d, flags);

    out_cons = intf->out_cons;
    out_prod = intf->out_prod;

    /* Read both indexes before touching the ring contents. */
    smp_mb();

    /*
     * It is expected that the ring is not full when this function is called
     * as the guest is expected to write to the data register only when the
     * TXFF flag is not set.
     * In case the guest does write even when the TXFF flag is set then the
     * data will be silently dropped.
     */
    if ( xencons_queued(out_prod, out_cons, sizeof(intf->out)) !=
         sizeof (intf->out) )
    {
        unsigned int fifo_level;

        intf->out[xencons_mask(out_prod, sizeof(intf->out))] = data;
        out_prod += 1;
        /* Make the new character visible before publishing the index. */
        smp_wmb();
        intf->out_prod = out_prod;

        fifo_level = xencons_queued(out_prod, out_cons, sizeof(intf->out));

        if ( fifo_level == sizeof(intf->out) )
        {
            vpl011->uartfr |= TXFF;

            /*
             * This bit is set only when FIFO becomes full. This ensures that
             * the SBSA UART driver can write the early console data as fast as
             * possible, without waiting for the BUSY bit to get cleared before
             * writing each byte.
             */
            vpl011->uartfr |= BUSY;
        }

        vpl011_update_tx_fifo_status(vpl011, fifo_level);

        vpl011_update_interrupt_status(d);
    }
    else
        gprintk(XENLOG_ERR, "vpl011: Unexpected OUT ring buffer full\n");

    /* A write was attempted, so the TX FIFO can no longer be empty. */
    vpl011->uartfr &= ~TXFE;

    VPL011_UNLOCK(d, flags);

    /*
     * Send an event to console backend to indicate that there is
     * data in the OUT ring buffer.
     */
    notify_via_xen_event_channel(d, vpl011->evtchn);
}
222
/*
 * MMIO read handler for the emulated PL011 register window.
 *
 * Returns 1 when the access has been handled (result placed in *r),
 * 0 for invalid/unhandled accesses. A bad access width crashes the
 * domain. Register state is read under the vpl011 lock.
 */
static int vpl011_mmio_read(struct vcpu *v,
                            mmio_info_t *info,
                            register_t *r,
                            void *priv)
{
    struct hsr_dabt dabt = info->dabt;
    uint32_t vpl011_reg = (uint32_t)(info->gpa - GUEST_PL011_BASE);
    struct vpl011 *vpl011 = &v->domain->arch.vpl011;
    struct domain *d = v->domain;
    unsigned long flags;

    switch ( vpl011_reg )
    {
    case DR:
        if ( !vpl011_reg32_check_access(dabt) ) goto bad_width;

        /* Pop one character from the IN ring (may notify the backend). */
        *r = vreg_reg32_extract(vpl011_read_data(d), info);
        return 1;

    case RSR:
        if ( !vpl011_reg32_check_access(dabt) ) goto bad_width;

        /* It always returns 0 as there are no physical errors. */
        *r = 0;
        return 1;

    case FR:
        if ( !vpl011_reg32_check_access(dabt) ) goto bad_width;

        VPL011_LOCK(d, flags);
        *r = vreg_reg32_extract(vpl011->uartfr, info);
        VPL011_UNLOCK(d, flags);
        return 1;

    case RIS:
        if ( !vpl011_reg32_check_access(dabt) ) goto bad_width;

        /* Raw (unmasked) interrupt status. */
        VPL011_LOCK(d, flags);
        *r = vreg_reg32_extract(vpl011->uartris, info);
        VPL011_UNLOCK(d, flags);
        return 1;

    case MIS:
        if ( !vpl011_reg32_check_access(dabt) ) goto bad_width;

        /* Masked interrupt status: raw status ANDed with the mask. */
        VPL011_LOCK(d, flags);
        *r = vreg_reg32_extract(vpl011->uartris & vpl011->uartimsc,
                                info);
        VPL011_UNLOCK(d, flags);
        return 1;

    case IMSC:
        if ( !vpl011_reg32_check_access(dabt) ) goto bad_width;

        VPL011_LOCK(d, flags);
        *r = vreg_reg32_extract(vpl011->uartimsc, info);
        VPL011_UNLOCK(d, flags);
        return 1;

    case ICR:
        if ( !vpl011_reg32_check_access(dabt) ) goto bad_width;

        /* Only write is valid. */
        return 0;

    default:
        gprintk(XENLOG_ERR, "vpl011: unhandled read r%d offset %#08x\n",
                dabt.reg, vpl011_reg);
        return 0;
    }

    return 1;

bad_width:
    gprintk(XENLOG_ERR, "vpl011: bad read width %d r%d offset %#08x\n",
            dabt.size, dabt.reg, vpl011_reg);
    domain_crash_synchronous();
    return 0;

}
303
/*
 * MMIO write handler for the emulated PL011 register window.
 *
 * Returns 1 when the access has been handled, 0 for invalid/unhandled
 * accesses. A bad access width crashes the domain. Register state is
 * updated under the vpl011 lock.
 */
static int vpl011_mmio_write(struct vcpu *v,
                             mmio_info_t *info,
                             register_t r,
                             void *priv)
{
    struct hsr_dabt dabt = info->dabt;
    uint32_t vpl011_reg = (uint32_t)(info->gpa - GUEST_PL011_BASE);
    struct vpl011 *vpl011 = &v->domain->arch.vpl011;
    struct domain *d = v->domain;
    unsigned long flags;

    switch ( vpl011_reg )
    {
    case DR:
    {
        uint32_t data = 0;

        if ( !vpl011_reg32_check_access(dabt) ) goto bad_width;

        vreg_reg32_update(&data, r, info);
        /* Only the low byte of DR is transmitted. */
        data &= 0xFF;
        vpl011_write_data(v->domain, data);
        return 1;
    }

    case RSR: /* Nothing to clear. */
        if ( !vpl011_reg32_check_access(dabt) ) goto bad_width;

        return 1;

    case FR:
    case RIS:
    case MIS:
        /* Read-only registers: writes are silently ignored. */
        goto write_ignore;

    case IMSC:
        if ( !vpl011_reg32_check_access(dabt) ) goto bad_width;

        /* Changing the mask may newly expose pending bits: re-evaluate. */
        VPL011_LOCK(d, flags);
        vreg_reg32_update(&vpl011->uartimsc, r, info);
        vpl011_update_interrupt_status(v->domain);
        VPL011_UNLOCK(d, flags);
        return 1;

    case ICR:
        if ( !vpl011_reg32_check_access(dabt) ) goto bad_width;

        /* Write-1-to-clear interrupt status bits. */
        VPL011_LOCK(d, flags);
        vreg_reg32_clearbits(&vpl011->uartris, r, info);
        vpl011_update_interrupt_status(d);
        VPL011_UNLOCK(d, flags);
        return 1;

    default:
        gprintk(XENLOG_ERR, "vpl011: unhandled write r%d offset %#08x\n",
                dabt.reg, vpl011_reg);
        return 0;
    }

write_ignore:
    return 1;

bad_width:
    gprintk(XENLOG_ERR, "vpl011: bad write width %d r%d offset %#08x\n",
            dabt.size, dabt.reg, vpl011_reg);
    domain_crash_synchronous();
    return 0;

}
373
/* Accessor pair registered for the GUEST_PL011_BASE MMIO region. */
static const struct mmio_handler_ops vpl011_mmio_handler = {
    .read = vpl011_mmio_read,
    .write = vpl011_mmio_write,
};
378
/*
 * Re-derive the emulated UART RX/TX state from the current IN/OUT ring
 * levels and raise any newly pending interrupts. Called when the console
 * backend signals the event channel (it may have produced input and/or
 * consumed output).
 */
static void vpl011_data_avail(struct domain *d)
{
    unsigned long flags;
    struct vpl011 *vpl011 = &d->arch.vpl011;
    struct xencons_interface *intf = vpl011->ring_buf;
    XENCONS_RING_IDX in_cons, in_prod, out_cons, out_prod;
    XENCONS_RING_IDX in_fifo_level, out_fifo_level;

    VPL011_LOCK(d, flags);

    in_cons = intf->in_cons;
    in_prod = intf->in_prod;
    out_cons = intf->out_cons;
    out_prod = intf->out_prod;

    /* Read the indexes before acting on the ring contents. */
    smp_rmb();

    in_fifo_level = xencons_queued(in_prod,
                                   in_cons,
                                   sizeof(intf->in));

    out_fifo_level = xencons_queued(out_prod,
                                    out_cons,
                                    sizeof(intf->out));

    /**** Update the UART RX state ****/

    /* Clear the FIFO_EMPTY bit if the FIFO holds at least one character. */
    if ( in_fifo_level > 0 )
        vpl011->uartfr &= ~RXFE;

    /* Set the FIFO_FULL bit if the Xen buffer is full. */
    if ( in_fifo_level == sizeof(intf->in) )
        vpl011->uartfr |= RXFF;

    /* Assert the RX interrupt if the FIFO is more than half way filled. */
    if ( in_fifo_level >= sizeof(intf->in) - SBSA_UART_FIFO_LEVEL )
        vpl011->uartris |= RXI;

    /*
     * If the input queue is not empty, we assert the receive timeout interrupt.
     * As we don't emulate any timing here, so we ignore the actual timeout
     * of 32 baud cycles.
     */
    if ( in_fifo_level > 0 )
        vpl011->uartris |= RTI;

    /**** Update the UART TX state ****/

    if ( out_fifo_level != sizeof(intf->out) )
    {
        vpl011->uartfr &= ~TXFF;

        /*
         * Clear the BUSY bit as soon as space becomes available
         * so that the SBSA UART driver can start writing more data
         * without any further delay.
         */
        vpl011->uartfr &= ~BUSY;

        vpl011_update_tx_fifo_status(vpl011, out_fifo_level);
    }

    vpl011_update_interrupt_status(d);

    /* The backend drained everything: mark the TX FIFO empty. */
    if ( out_fifo_level == 0 )
        vpl011->uartfr |= TXFE;

    VPL011_UNLOCK(d, flags);
}
449
vpl011_notification(struct vcpu * v,unsigned int port)450 static void vpl011_notification(struct vcpu *v, unsigned int port)
451 {
452 vpl011_data_avail(v->domain);
453 }
454
domain_vpl011_init(struct domain * d,struct vpl011_init_info * info)455 int domain_vpl011_init(struct domain *d, struct vpl011_init_info *info)
456 {
457 int rc;
458 struct vpl011 *vpl011 = &d->arch.vpl011;
459
460 if ( vpl011->ring_buf )
461 return -EINVAL;
462
463 /* Map the guest PFN to Xen address space. */
464 rc = prepare_ring_for_helper(d,
465 gfn_x(info->gfn),
466 &vpl011->ring_page,
467 &vpl011->ring_buf);
468 if ( rc < 0 )
469 goto out;
470
471 rc = vgic_reserve_virq(d, GUEST_VPL011_SPI);
472 if ( !rc )
473 {
474 rc = -EINVAL;
475 goto out1;
476 }
477
478 rc = alloc_unbound_xen_event_channel(d, 0, info->console_domid,
479 vpl011_notification);
480 if ( rc < 0 )
481 goto out2;
482
483 vpl011->evtchn = info->evtchn = rc;
484
485 spin_lock_init(&vpl011->lock);
486
487 register_mmio_handler(d, &vpl011_mmio_handler,
488 GUEST_PL011_BASE, GUEST_PL011_SIZE, NULL);
489
490 return 0;
491
492 out2:
493 vgic_free_virq(d, GUEST_VPL011_SPI);
494
495 out1:
496 destroy_ring_for_helper(&vpl011->ring_buf, vpl011->ring_page);
497
498 out:
499 return rc;
500 }
501
domain_vpl011_deinit(struct domain * d)502 void domain_vpl011_deinit(struct domain *d)
503 {
504 struct vpl011 *vpl011 = &d->arch.vpl011;
505
506 if ( !vpl011->ring_buf )
507 return;
508
509 free_xen_event_channel(d, vpl011->evtchn);
510 destroy_ring_for_helper(&vpl011->ring_buf, vpl011->ring_page);
511 }
512
513 /*
514 * Local variables:
515 * mode: C
516 * c-file-style: "BSD"
517 * c-basic-offset: 4
518 * indent-tabs-mode: nil
519 * End:
520 */
521