/******************************************************************************
 * event_channel.c
 *
 * Event notifications from VIRQs, PIRQs, and other domains.
 *
 * Copyright (c) 2003-2006, K A Fraser.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; If not, see <http://www.gnu.org/licenses/>.
 */

#include <xen/init.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/event.h>
#include <xen/irq.h>
#include <xen/iocap.h>
#include <xen/compat.h>
#include <xen/guest_access.h>
#include <xen/keyhandler.h>
#include <xen/event_fifo.h>
#include <asm/current.h>

#include <public/xen.h>
#include <public/event_channel.h>
#include <xsm/xsm.h>

#define ERROR_EXIT(_errno)                                          \
    do {                                                            \
        gdprintk(XENLOG_WARNING,                                    \
                "EVTCHNOP failure: error %d\n",                     \
                (_errno));                                          \
        rc = (_errno);                                              \
        goto out;                                                   \
    } while ( 0 )
#define ERROR_EXIT_DOM(_errno, _dom)                                \
    do {                                                            \
        gdprintk(XENLOG_WARNING,                                    \
                "EVTCHNOP failure: domain %d, error %d\n",          \
                (_dom)->domain_id, (_errno));                       \
        rc = (_errno);                                              \
        goto out;                                                   \
    } while ( 0 )

#define consumer_is_xen(e) (!!(e)->xen_consumer)

/*
 * Lock an event channel exclusively. This is allowed only when the channel is
 * free or unbound either when taking or when releasing the lock, as any
 * concurrent operation on the event channel using evtchn_read_trylock() will
 * just assume the event channel is free or unbound at the moment when the
 * evtchn_read_trylock() returns false.
 */
static inline void evtchn_write_lock(struct evtchn *evtchn)
{
    write_lock(&evtchn->lock);

#ifndef NDEBUG
    evtchn->old_state = evtchn->state;
#endif
}

static inline unsigned int old_state(const struct evtchn *evtchn)
{
#ifndef NDEBUG
    return evtchn->old_state;
#else
    return ECS_RESERVED; /* Just to allow things to build. */
#endif
}

static inline void evtchn_write_unlock(struct evtchn *evtchn)
{
    /* Enforce lock discipline. */
    ASSERT(old_state(evtchn) == ECS_FREE || old_state(evtchn) == ECS_UNBOUND ||
           evtchn->state == ECS_FREE || evtchn->state == ECS_UNBOUND);

    write_unlock(&evtchn->lock);
}

/*
 * The function alloc_unbound_xen_event_channel() allows an arbitrary
 * notifier function to be specified. However, very few unique functions
 * are specified in practice, so to prevent bloating the evtchn structure
 * with a pointer, we stash them dynamically in a small lookup array which
 * can be indexed by a small integer.
 */
static xen_event_channel_notification_t xen_consumers[NR_XEN_CONSUMERS];

/* Default notification action: wake up from wait_on_xen_event_channel(). */
static void default_xen_notification_fn(struct vcpu *v, unsigned int port)
{
    /* Consumer needs notification only if blocked. */
    if ( test_and_clear_bit(_VPF_blocked_in_xen, &v->pause_flags) )
        vcpu_wake(v);
}

/*
 * Given a notification function, return the value to stash in
 * the evtchn->xen_consumer field.
 */
static uint8_t get_xen_consumer(xen_event_channel_notification_t fn)
{
    unsigned int i;

    if ( fn == NULL )
        fn = default_xen_notification_fn;

    for ( i = 0; i < ARRAY_SIZE(xen_consumers); i++ )
    {
        if ( xen_consumers[i] == NULL )
            xen_consumers[i] = fn;
        if ( xen_consumers[i] == fn )
            break;
    }

    BUG_ON(i >= ARRAY_SIZE(xen_consumers));
    return i+1;
}

/* Get the notification function for a given Xen-bound event channel. */
#define xen_notification_fn(e) (xen_consumers[(e)->xen_consumer-1])

static bool virq_is_global(unsigned int virq)
{
    switch ( virq )
    {
    case VIRQ_TIMER:
    case VIRQ_DEBUG:
    case VIRQ_XENOPROF:
    case VIRQ_XENPMU:
        return false;

    case VIRQ_ARCH_0 ... VIRQ_ARCH_7:
        return arch_virq_is_global(virq);
    }

    ASSERT(virq < NR_VIRQS);
    return true;
}


static struct evtchn *alloc_evtchn_bucket(struct domain *d, unsigned int port)
{
    struct evtchn *chn;
    unsigned int i;

    chn = xzalloc_array(struct evtchn, EVTCHNS_PER_BUCKET);
    if ( !chn )
        return NULL;

    for ( i = 0; i < EVTCHNS_PER_BUCKET; i++ )
    {
        if ( xsm_alloc_security_evtchn(&chn[i]) )
        {
            while ( i-- )
                xsm_free_security_evtchn(&chn[i]);
            xfree(chn);
            return NULL;
        }
        chn[i].port = port + i;
        rwlock_init(&chn[i].lock);
    }
    return chn;
}

static void free_evtchn_bucket(struct domain *d, struct evtchn *bucket)
{
    unsigned int i;

    if ( !bucket )
        return;

    for ( i = 0; i < EVTCHNS_PER_BUCKET; i++ )
        xsm_free_security_evtchn(bucket + i);

    xfree(bucket);
}

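/*
 * Allocate a specific port for domain d, growing the group/bucket data
 * structures on demand.  Returns 0 on success, -EBUSY if the port is
 * already in use, -ENOSPC if it is out of range, and -ENOMEM on
 * allocation failure.
 */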
int evtchn_allocate_port(struct domain *d, evtchn_port_t port)
{
    if ( port > d->max_evtchn_port || port >= max_evtchns(d) )
        return -ENOSPC;

    if ( port_is_valid(d, port) )
    {
        const struct evtchn *chn = evtchn_from_port(d, port);

        if ( chn->state != ECS_FREE || evtchn_is_busy(d, chn) )
            return -EBUSY;
    }
    else
    {
        struct evtchn *chn;
        struct evtchn **grp;

        if ( !group_from_port(d, port) )
        {
            grp = xzalloc_array(struct evtchn *, BUCKETS_PER_GROUP);
            if ( !grp )
                return -ENOMEM;
            group_from_port(d, port) = grp;
        }

        chn = alloc_evtchn_bucket(d, port);
        if ( !chn )
            return -ENOMEM;
        bucket_from_port(d, port) = chn;

        /*
         * d->valid_evtchns is used to check whether the bucket can be
         * accessed without the per-domain lock. Therefore,
         * d->valid_evtchns should be seen *after* the new bucket has
         * been setup.
         */
        smp_wmb();
        write_atomic(&d->valid_evtchns, d->valid_evtchns + EVTCHNS_PER_BUCKET);
    }

    write_atomic(&d->active_evtchns, d->active_evtchns + 1);

    return 0;
}

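/*
 * Find the lowest free port in domain d and allocate it.  Returns the
 * port number, or a negative errno value on failure.
 */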
static int get_free_port(struct domain *d)
{
    int            port;

    if ( d->is_dying )
        return -EINVAL;

    for ( port = 0; port <= d->max_evtchn_port; port++ )
    {
        int rc = evtchn_allocate_port(d, port);

        if ( rc == 0 )
            return port;
        else if ( rc != -EBUSY )
            return rc;
    }

    return -ENOSPC;
}

/*
 * Check whether a port is still marked free, and if so update the domain
 * counter accordingly.  To be used on function exit paths.
 */
static void check_free_port(struct domain *d, evtchn_port_t port)
{
    if ( port_is_valid(d, port) &&
         evtchn_from_port(d, port)->state == ECS_FREE )
        write_atomic(&d->active_evtchns, d->active_evtchns - 1);
}

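/*
 * Return a channel to ECS_FREE and undo the accounting done at bind
 * time.  Callers in this file invoke this with the channel's write
 * lock held.
 */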
void evtchn_free(struct domain *d, struct evtchn *chn)
{
    /* Clear pending event to avoid unexpected behavior on re-bind. */
    evtchn_port_clear_pending(d, chn);

    if ( consumer_is_xen(chn) )
        write_atomic(&d->xen_evtchns, d->xen_evtchns - 1);
    write_atomic(&d->active_evtchns, d->active_evtchns - 1);

    /* Reset binding to vcpu0 when the channel is freed. */
    chn->state          = ECS_FREE;
    chn->notify_vcpu_id = 0;
    chn->xen_consumer   = 0;

    xsm_evtchn_close_post(chn);
}

static long evtchn_alloc_unbound(evtchn_alloc_unbound_t *alloc)
{
    struct evtchn *chn;
    struct domain *d;
    int            port;
    domid_t        dom = alloc->dom;
    long           rc;

    d = rcu_lock_domain_by_any_id(dom);
    if ( d == NULL )
        return -ESRCH;

    spin_lock(&d->event_lock);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT_DOM(port, d);
    chn = evtchn_from_port(d, port);

    rc = xsm_evtchn_unbound(XSM_TARGET, d, chn, alloc->remote_dom);
    if ( rc )
        goto out;

    evtchn_write_lock(chn);

    chn->state = ECS_UNBOUND;
    if ( (chn->u.unbound.remote_domid = alloc->remote_dom) == DOMID_SELF )
        chn->u.unbound.remote_domid = current->domain->domain_id;
    evtchn_port_init(d, chn);

    evtchn_write_unlock(chn);

    alloc->port = port;

 out:
    check_free_port(d, port);
    spin_unlock(&d->event_lock);
    rcu_unlock_domain(d);

    return rc;
}


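/*
 * Lock two channels in address order so that concurrent binds/closes
 * touching the same pair cannot deadlock on each other's locks.
 */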
static void double_evtchn_lock(struct evtchn *lchn, struct evtchn *rchn)
{
    if ( lchn <= rchn )
    {
        evtchn_write_lock(lchn);
        if ( lchn != rchn )
            evtchn_write_lock(rchn);
    }
    else
    {
        evtchn_write_lock(rchn);
        evtchn_write_lock(lchn);
    }
}

static void double_evtchn_unlock(struct evtchn *lchn, struct evtchn *rchn)
{
    if ( lchn != rchn )
        evtchn_write_unlock(lchn);
    evtchn_write_unlock(rchn);
}

static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
{
    struct evtchn *lchn, *rchn;
    struct domain *ld = current->domain, *rd;
    int            lport, rport = bind->remote_port;
    domid_t        rdom = bind->remote_dom;
    long           rc;

    if ( rdom == DOMID_SELF )
        rdom = current->domain->domain_id;

    if ( (rd = rcu_lock_domain_by_id(rdom)) == NULL )
        return -ESRCH;

    /* Avoid deadlock by first acquiring lock of domain with smaller id. */
    if ( ld < rd )
    {
        spin_lock(&ld->event_lock);
        spin_lock(&rd->event_lock);
    }
    else
    {
        if ( ld != rd )
            spin_lock(&rd->event_lock);
        spin_lock(&ld->event_lock);
    }

    if ( (lport = get_free_port(ld)) < 0 )
        ERROR_EXIT(lport);
    lchn = evtchn_from_port(ld, lport);

    if ( !port_is_valid(rd, rport) )
        ERROR_EXIT_DOM(-EINVAL, rd);
    rchn = evtchn_from_port(rd, rport);
    if ( (rchn->state != ECS_UNBOUND) ||
         (rchn->u.unbound.remote_domid != ld->domain_id) )
        ERROR_EXIT_DOM(-EINVAL, rd);

    rc = xsm_evtchn_interdomain(XSM_HOOK, ld, lchn, rd, rchn);
    if ( rc )
        goto out;

    double_evtchn_lock(lchn, rchn);

    lchn->u.interdomain.remote_dom  = rd;
    lchn->u.interdomain.remote_port = rport;
    lchn->state                     = ECS_INTERDOMAIN;
    evtchn_port_init(ld, lchn);

    rchn->u.interdomain.remote_dom  = ld;
    rchn->u.interdomain.remote_port = lport;
    rchn->state                     = ECS_INTERDOMAIN;

    /*
     * We may have lost notifications on the remote unbound port. Fix that up
     * here by conservatively always setting a notification on the local port.
     */
    evtchn_port_set_pending(ld, lchn->notify_vcpu_id, lchn);

    double_evtchn_unlock(lchn, rchn);

    bind->local_port = lport;

 out:
    check_free_port(ld, lport);
    spin_unlock(&ld->event_lock);
    if ( ld != rd )
        spin_unlock(&rd->event_lock);

    rcu_unlock_domain(rd);

    return rc;
}


int evtchn_bind_virq(evtchn_bind_virq_t *bind, evtchn_port_t port)
{
    struct evtchn *chn;
    struct vcpu   *v;
    struct domain *d = current->domain;
    int            virq = bind->virq, vcpu = bind->vcpu;
    int            rc = 0;

    if ( (virq < 0) || (virq >= ARRAY_SIZE(v->virq_to_evtchn)) )
        return -EINVAL;

    /*
     * Make sure the guest controlled value virq is bounded even during
     * speculative execution.
     */
    virq = array_index_nospec(virq, ARRAY_SIZE(v->virq_to_evtchn));

    if ( virq_is_global(virq) && (vcpu != 0) )
        return -EINVAL;

    if ( (v = domain_vcpu(d, vcpu)) == NULL )
        return -ENOENT;

    spin_lock(&d->event_lock);

    if ( v->virq_to_evtchn[virq] != 0 )
        ERROR_EXIT(-EEXIST);

    if ( port != 0 )
    {
        if ( (rc = evtchn_allocate_port(d, port)) != 0 )
            ERROR_EXIT(rc);
    }
    else
    {
        int alloc_port = get_free_port(d);

        if ( alloc_port < 0 )
            ERROR_EXIT(alloc_port);
        port = alloc_port;
    }

    chn = evtchn_from_port(d, port);

    evtchn_write_lock(chn);

    chn->state          = ECS_VIRQ;
    chn->notify_vcpu_id = vcpu;
    chn->u.virq         = virq;
    evtchn_port_init(d, chn);

    evtchn_write_unlock(chn);

    v->virq_to_evtchn[virq] = bind->port = port;

 out:
    spin_unlock(&d->event_lock);

    return rc;
}


static long evtchn_bind_ipi(evtchn_bind_ipi_t *bind)
{
    struct evtchn *chn;
    struct domain *d = current->domain;
    int            port, vcpu = bind->vcpu;
    long           rc = 0;

    if ( domain_vcpu(d, vcpu) == NULL )
        return -ENOENT;

    spin_lock(&d->event_lock);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);

    evtchn_write_lock(chn);

    chn->state          = ECS_IPI;
    chn->notify_vcpu_id = vcpu;
    evtchn_port_init(d, chn);

    evtchn_write_unlock(chn);

    bind->port = port;

 out:
    spin_unlock(&d->event_lock);

    return rc;
}


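/*
 * Per-vCPU list of PIRQ-bound ports, linked through
 * chn->u.pirq.{prev,next}_port.  Port 0 is reserved (ECS_RESERVED), so
 * a zero link value acts as the list terminator.
 */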
static void link_pirq_port(int port, struct evtchn *chn, struct vcpu *v)
{
    chn->u.pirq.prev_port = 0;
    chn->u.pirq.next_port = v->pirq_evtchn_head;
    if ( v->pirq_evtchn_head )
        evtchn_from_port(v->domain, v->pirq_evtchn_head)
            ->u.pirq.prev_port = port;
    v->pirq_evtchn_head = port;
}

static void unlink_pirq_port(struct evtchn *chn, struct vcpu *v)
{
    struct domain *d = v->domain;

    if ( chn->u.pirq.prev_port )
        evtchn_from_port(d, chn->u.pirq.prev_port)->u.pirq.next_port =
            chn->u.pirq.next_port;
    else
        v->pirq_evtchn_head = chn->u.pirq.next_port;
    if ( chn->u.pirq.next_port )
        evtchn_from_port(d, chn->u.pirq.next_port)->u.pirq.prev_port =
            chn->u.pirq.prev_port;
}


static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
{
    struct evtchn *chn;
    struct domain *d = current->domain;
    struct vcpu   *v = d->vcpu[0];
    struct pirq   *info;
    int            port = 0, pirq = bind->pirq;
    long           rc;

    if ( (pirq < 0) || (pirq >= d->nr_pirqs) )
        return -EINVAL;

    if ( !is_hvm_domain(d) && !pirq_access_permitted(d, pirq) )
        return -EPERM;

    spin_lock(&d->event_lock);

    if ( pirq_to_evtchn(d, pirq) != 0 )
        ERROR_EXIT(-EEXIST);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);

    info = pirq_get_info(d, pirq);
    if ( !info )
        ERROR_EXIT(-ENOMEM);
    info->evtchn = port;
    rc = (!is_hvm_domain(d)
          ? pirq_guest_bind(v, info,
                            !!(bind->flags & BIND_PIRQ__WILL_SHARE))
          : 0);
    if ( rc != 0 )
    {
        info->evtchn = 0;
        pirq_cleanup_check(info, d);
        goto out;
    }

    evtchn_write_lock(chn);

    chn->state  = ECS_PIRQ;
    chn->u.pirq.irq = pirq;
    link_pirq_port(port, chn, v);
    evtchn_port_init(d, chn);

    evtchn_write_unlock(chn);

    bind->port = port;

    arch_evtchn_bind_pirq(d, pirq);

 out:
    check_free_port(d, port);
    spin_unlock(&d->event_lock);

    return rc;
}


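/*
 * Close an event channel, tearing down any VIRQ/PIRQ/interdomain
 * binding first.  'guest' indicates a guest-requested close, which is
 * refused for Xen-attached channels.
 */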
int evtchn_close(struct domain *d1, int port1, bool guest)
{
    struct domain *d2 = NULL;
    struct vcpu   *v;
    struct evtchn *chn1, *chn2;
    int            port2;
    long           rc = 0;

 again:
    spin_lock(&d1->event_lock);

    if ( !port_is_valid(d1, port1) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn1 = evtchn_from_port(d1, port1);

    /* Guest cannot close a Xen-attached event channel. */
    if ( unlikely(consumer_is_xen(chn1)) && guest )
    {
        rc = -EINVAL;
        goto out;
    }

    switch ( chn1->state )
    {
    case ECS_FREE:
    case ECS_RESERVED:
        rc = -EINVAL;
        goto out;

    case ECS_UNBOUND:
        break;

    case ECS_PIRQ: {
        struct pirq *pirq = pirq_info(d1, chn1->u.pirq.irq);

        if ( !pirq )
            break;
        if ( !is_hvm_domain(d1) )
            pirq_guest_unbind(d1, pirq);
        pirq->evtchn = 0;
        pirq_cleanup_check(pirq, d1);
        unlink_pirq_port(chn1, d1->vcpu[chn1->notify_vcpu_id]);
#ifdef CONFIG_X86
        if ( is_hvm_domain(d1) && domain_pirq_to_irq(d1, pirq->pirq) > 0 )
            unmap_domain_pirq_emuirq(d1, pirq->pirq);
#endif
        break;
    }

    case ECS_VIRQ:
        for_each_vcpu ( d1, v )
        {
            if ( v->virq_to_evtchn[chn1->u.virq] != port1 )
                continue;
            v->virq_to_evtchn[chn1->u.virq] = 0;
            spin_barrier(&v->virq_lock);
        }
        break;

    case ECS_IPI:
        break;

    case ECS_INTERDOMAIN:
        if ( d2 == NULL )
        {
            d2 = chn1->u.interdomain.remote_dom;

            /* If we unlock d1 then we could lose d2. Must get a reference. */
            if ( unlikely(!get_domain(d2)) )
                BUG();

            if ( d1 < d2 )
            {
                spin_lock(&d2->event_lock);
            }
            else if ( d1 != d2 )
            {
                spin_unlock(&d1->event_lock);
                spin_lock(&d2->event_lock);
                goto again;
            }
        }
        else if ( d2 != chn1->u.interdomain.remote_dom )
        {
            /*
             * We can only get here if the port was closed and re-bound after
             * unlocking d1 but before locking d2 above. We could retry but
             * it is easier to return the same error as if we had seen the
             * port in ECS_CLOSED. It must have passed through that state for
             * us to end up here, so it's a valid error to return.
             */
            rc = -EINVAL;
            goto out;
        }

        port2 = chn1->u.interdomain.remote_port;
        BUG_ON(!port_is_valid(d2, port2));

        chn2 = evtchn_from_port(d2, port2);
        BUG_ON(chn2->state != ECS_INTERDOMAIN);
        BUG_ON(chn2->u.interdomain.remote_dom != d1);

        double_evtchn_lock(chn1, chn2);

        evtchn_free(d1, chn1);

        chn2->state = ECS_UNBOUND;
        chn2->u.unbound.remote_domid = d1->domain_id;

        double_evtchn_unlock(chn1, chn2);

        goto out;

    default:
        BUG();
    }

    evtchn_write_lock(chn1);
    evtchn_free(d1, chn1);
    evtchn_write_unlock(chn1);

 out:
    if ( d2 != NULL )
    {
        if ( d1 != d2 )
            spin_unlock(&d2->event_lock);
        put_domain(d2);
    }

    spin_unlock(&d1->event_lock);

    return rc;
}

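/*
 * Notify the remote end of an interdomain channel, or self via an IPI
 * channel.  Sends on unbound channels are silently dropped.
 */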
int evtchn_send(struct domain *ld, unsigned int lport)
{
    struct evtchn *lchn, *rchn;
    struct domain *rd;
    int            rport, ret = 0;

    if ( !port_is_valid(ld, lport) )
        return -EINVAL;

    lchn = evtchn_from_port(ld, lport);

    evtchn_read_lock(lchn);

    /* Guest cannot send via a Xen-attached event channel. */
    if ( unlikely(consumer_is_xen(lchn)) )
    {
        ret = -EINVAL;
        goto out;
    }

    ret = xsm_evtchn_send(XSM_HOOK, ld, lchn);
    if ( ret )
        goto out;

    switch ( lchn->state )
    {
    case ECS_INTERDOMAIN:
        rd    = lchn->u.interdomain.remote_dom;
        rport = lchn->u.interdomain.remote_port;
        rchn  = evtchn_from_port(rd, rport);
        if ( consumer_is_xen(rchn) )
            xen_notification_fn(rchn)(rd->vcpu[rchn->notify_vcpu_id], rport);
        else
            evtchn_port_set_pending(rd, rchn->notify_vcpu_id, rchn);
        break;
    case ECS_IPI:
        evtchn_port_set_pending(ld, lchn->notify_vcpu_id, lchn);
        break;
    case ECS_UNBOUND:
        /* Silently drop the notification. */
        break;
    default:
        ret = -EINVAL;
    }

 out:
    evtchn_read_unlock(lchn);

    return ret;
}

int guest_enabled_event(struct vcpu *v, uint32_t virq)
{
    return ((v != NULL) && (v->virq_to_evtchn[virq] != 0));
}

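/* Deliver a per-vCPU VIRQ (e.g. VIRQ_TIMER) to the given vCPU, if bound. */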
void send_guest_vcpu_virq(struct vcpu *v, uint32_t virq)
{
    unsigned long flags;
    int port;
    struct domain *d;
    struct evtchn *chn;

    ASSERT(!virq_is_global(virq));

    spin_lock_irqsave(&v->virq_lock, flags);

    port = v->virq_to_evtchn[virq];
    if ( unlikely(port == 0) )
        goto out;

    d = v->domain;
    chn = evtchn_from_port(d, port);
    if ( evtchn_read_trylock(chn) )
    {
        evtchn_port_set_pending(d, v->vcpu_id, chn);
        evtchn_read_unlock(chn);
    }

 out:
    spin_unlock_irqrestore(&v->virq_lock, flags);
}

void send_guest_global_virq(struct domain *d, uint32_t virq)
{
    unsigned long flags;
    int port;
    struct vcpu *v;
    struct evtchn *chn;

    ASSERT(virq_is_global(virq));

    if ( unlikely(d == NULL) || unlikely(d->vcpu == NULL) )
        return;

    v = d->vcpu[0];
    if ( unlikely(v == NULL) )
        return;

    spin_lock_irqsave(&v->virq_lock, flags);

    port = v->virq_to_evtchn[virq];
    if ( unlikely(port == 0) )
        goto out;

    chn = evtchn_from_port(d, port);
    if ( evtchn_read_trylock(chn) )
    {
        evtchn_port_set_pending(d, chn->notify_vcpu_id, chn);
        evtchn_read_unlock(chn);
    }

 out:
    spin_unlock_irqrestore(&v->virq_lock, flags);
}

void send_guest_pirq(struct domain *d, const struct pirq *pirq)
{
    int port;
    struct evtchn *chn;

    /*
     * PV guests: It should not be possible to race with __evtchn_close(). The
     *     caller of this function must synchronise with pirq_guest_unbind().
     * HVM guests: Port is legitimately zero when the guest disables the
     *     emulated interrupt/evtchn.
     */
    if ( pirq == NULL || (port = pirq->evtchn) == 0 )
    {
        BUG_ON(!is_hvm_domain(d));
        return;
    }

    chn = evtchn_from_port(d, port);
    if ( evtchn_read_trylock(chn) )
    {
        evtchn_port_set_pending(d, chn->notify_vcpu_id, chn);
        evtchn_read_unlock(chn);
    }
}

static struct domain *global_virq_handlers[NR_VIRQS] __read_mostly;

static DEFINE_SPINLOCK(global_virq_handlers_lock);

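/*
 * Deliver a global VIRQ to its registered handler domain, falling back
 * to the hardware domain when no handler has been registered.
 */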
void send_global_virq(uint32_t virq)
{
    ASSERT(virq_is_global(virq));

    send_guest_global_virq(global_virq_handlers[virq] ?: hardware_domain, virq);
}

int set_global_virq_handler(struct domain *d, uint32_t virq)
{
    struct domain *old;

    if ( virq >= NR_VIRQS )
        return -EINVAL;
    if ( !virq_is_global(virq) )
        return -EINVAL;

    if ( global_virq_handlers[virq] == d )
        return 0;

    if ( unlikely(!get_domain(d)) )
        return -EINVAL;

    spin_lock(&global_virq_handlers_lock);
    old = global_virq_handlers[virq];
    global_virq_handlers[virq] = d;
    spin_unlock(&global_virq_handlers_lock);

    if ( old != NULL )
        put_domain(old);

    return 0;
}

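/*
 * On domain teardown, drop d as the handler of any global VIRQs and
 * release the references taken in set_global_virq_handler().
 */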
static void clear_global_virq_handlers(struct domain *d)
{
    uint32_t virq;
    int put_count = 0;

    spin_lock(&global_virq_handlers_lock);

    for ( virq = 0; virq < NR_VIRQS; virq++ )
    {
        if ( global_virq_handlers[virq] == d )
        {
            global_virq_handlers[virq] = NULL;
            put_count++;
        }
    }

    spin_unlock(&global_virq_handlers_lock);

    while ( put_count )
    {
        put_domain(d);
        put_count--;
    }
}

int evtchn_status(evtchn_status_t *status)
{
    struct domain   *d;
    domid_t          dom = status->dom;
    int              port = status->port;
    struct evtchn   *chn;
    long             rc = 0;

    d = rcu_lock_domain_by_any_id(dom);
    if ( d == NULL )
        return -ESRCH;

    spin_lock(&d->event_lock);

    if ( !port_is_valid(d, port) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn = evtchn_from_port(d, port);

    rc = xsm_evtchn_status(XSM_TARGET, d, chn);
    if ( rc )
        goto out;

    switch ( chn->state )
    {
    case ECS_FREE:
    case ECS_RESERVED:
        status->status = EVTCHNSTAT_closed;
        break;
    case ECS_UNBOUND:
        status->status = EVTCHNSTAT_unbound;
        status->u.unbound.dom = chn->u.unbound.remote_domid;
        break;
    case ECS_INTERDOMAIN:
        status->status = EVTCHNSTAT_interdomain;
        status->u.interdomain.dom  =
            chn->u.interdomain.remote_dom->domain_id;
        status->u.interdomain.port = chn->u.interdomain.remote_port;
        break;
    case ECS_PIRQ:
        status->status = EVTCHNSTAT_pirq;
        status->u.pirq = chn->u.pirq.irq;
        break;
    case ECS_VIRQ:
        status->status = EVTCHNSTAT_virq;
        status->u.virq = chn->u.virq;
        break;
    case ECS_IPI:
        status->status = EVTCHNSTAT_ipi;
        break;
    default:
        BUG();
    }

    status->vcpu = chn->notify_vcpu_id;

 out:
    spin_unlock(&d->event_lock);
    rcu_unlock_domain(d);

    return rc;
}


long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id)
{
    struct domain *d = current->domain;
    struct evtchn *chn;
    long           rc = 0;
    struct vcpu   *v;

    /* Use the vcpu info to prevent speculative out-of-bounds accesses. */
    if ( (v = domain_vcpu(d, vcpu_id)) == NULL )
        return -ENOENT;

    spin_lock(&d->event_lock);

    if ( !port_is_valid(d, port) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn = evtchn_from_port(d, port);

    /* Guest cannot re-bind a Xen-attached event channel. */
    if ( unlikely(consumer_is_xen(chn)) )
    {
        rc = -EINVAL;
        goto out;
    }

    switch ( chn->state )
    {
    case ECS_VIRQ:
        if ( virq_is_global(chn->u.virq) )
            chn->notify_vcpu_id = v->vcpu_id;
        else
            rc = -EINVAL;
        break;
    case ECS_UNBOUND:
    case ECS_INTERDOMAIN:
        chn->notify_vcpu_id = v->vcpu_id;
        break;
    case ECS_PIRQ:
        if ( chn->notify_vcpu_id == v->vcpu_id )
            break;
        unlink_pirq_port(chn, d->vcpu[chn->notify_vcpu_id]);
        chn->notify_vcpu_id = v->vcpu_id;
        pirq_set_affinity(d, chn->u.pirq.irq,
                          cpumask_of(v->processor));
        link_pirq_port(port, chn, v);
        break;
    default:
        rc = -EINVAL;
        break;
    }

 out:
    spin_unlock(&d->event_lock);

    return rc;
}


int evtchn_unmask(unsigned int port)
{
    struct domain *d = current->domain;
    struct evtchn *evtchn;

    if ( unlikely(!port_is_valid(d, port)) )
        return -EINVAL;

    evtchn = evtchn_from_port(d, port);

    evtchn_read_lock(evtchn);

    evtchn_port_unmask(d, evtchn);

    evtchn_read_unlock(evtchn);

    return 0;
}

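/*
 * EVTCHNOP_reset: close all of the domain's event channels.  Once only
 * Xen-attached channels remain, a domain that was using the FIFO ABI is
 * switched back to the 2-level ABI.  Returns -ERESTART when preempted;
 * the hypercall is then continued with resuming == true.
 */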
int evtchn_reset(struct domain *d, bool resuming)
{
    unsigned int i;
    int rc = 0;

    if ( d != current->domain && !d->controller_pause_count )
        return -EINVAL;

    spin_lock(&d->event_lock);

    /*
     * If we are resuming, then start where we stopped. Otherwise, check
     * that a reset operation is not already in progress, and if none is,
     * record that this is now the case.
     */
    i = resuming ? d->next_evtchn : !d->next_evtchn;
    if ( i > d->next_evtchn )
        d->next_evtchn = i;

    spin_unlock(&d->event_lock);

    if ( !i )
        return -EBUSY;

    for ( ; port_is_valid(d, i); i++ )
    {
        evtchn_close(d, i, 1);

        /* NB: Choice of frequency is arbitrary. */
        if ( !(i & 0x3f) && hypercall_preempt_check() )
        {
            spin_lock(&d->event_lock);
            d->next_evtchn = i;
            spin_unlock(&d->event_lock);
            return -ERESTART;
        }
    }

    spin_lock(&d->event_lock);

    d->next_evtchn = 0;

    if ( d->active_evtchns > d->xen_evtchns )
        rc = -EAGAIN;
    else if ( d->evtchn_fifo )
    {
        /* Switching back to 2-level ABI. */
        evtchn_fifo_destroy(d);
        evtchn_2l_init(d);
    }

    spin_unlock(&d->event_lock);

    return rc;
}

static long evtchn_set_priority(const struct evtchn_set_priority *set_priority)
{
    struct domain *d = current->domain;
    unsigned int port = set_priority->port;
    long ret;

    spin_lock(&d->event_lock);

    if ( !port_is_valid(d, port) )
    {
        spin_unlock(&d->event_lock);
        return -EINVAL;
    }

    ret = evtchn_port_set_priority(d, evtchn_from_port(d, port),
                                   set_priority->priority);

    spin_unlock(&d->event_lock);

    return ret;
}

long do_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
{
    long rc;

    switch ( cmd )
    {
    case EVTCHNOP_alloc_unbound: {
        struct evtchn_alloc_unbound alloc_unbound;
        if ( copy_from_guest(&alloc_unbound, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_alloc_unbound(&alloc_unbound);
        if ( !rc && __copy_to_guest(arg, &alloc_unbound, 1) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_interdomain: {
        struct evtchn_bind_interdomain bind_interdomain;
        if ( copy_from_guest(&bind_interdomain, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_interdomain(&bind_interdomain);
        if ( !rc && __copy_to_guest(arg, &bind_interdomain, 1) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_virq: {
        struct evtchn_bind_virq bind_virq;
        if ( copy_from_guest(&bind_virq, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_virq(&bind_virq, 0);
        if ( !rc && __copy_to_guest(arg, &bind_virq, 1) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_ipi: {
        struct evtchn_bind_ipi bind_ipi;
        if ( copy_from_guest(&bind_ipi, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_ipi(&bind_ipi);
        if ( !rc && __copy_to_guest(arg, &bind_ipi, 1) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_pirq: {
        struct evtchn_bind_pirq bind_pirq;
        if ( copy_from_guest(&bind_pirq, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_pirq(&bind_pirq);
        if ( !rc && __copy_to_guest(arg, &bind_pirq, 1) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_close: {
        struct evtchn_close close;
        if ( copy_from_guest(&close, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_close(current->domain, close.port, 1);
        break;
    }

    case EVTCHNOP_send: {
        struct evtchn_send send;
        if ( copy_from_guest(&send, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_send(current->domain, send.port);
        break;
    }

    case EVTCHNOP_status: {
        struct evtchn_status status;
        if ( copy_from_guest(&status, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_status(&status);
        if ( !rc && __copy_to_guest(arg, &status, 1) )
            rc = -EFAULT;
        break;
    }

    case EVTCHNOP_bind_vcpu: {
        struct evtchn_bind_vcpu bind_vcpu;
        if ( copy_from_guest(&bind_vcpu, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_vcpu(bind_vcpu.port, bind_vcpu.vcpu);
        break;
    }

    case EVTCHNOP_unmask: {
        struct evtchn_unmask unmask;
        if ( copy_from_guest(&unmask, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_unmask(unmask.port);
        break;
    }

    case EVTCHNOP_reset:
    case EVTCHNOP_reset_cont: {
        struct evtchn_reset reset;
        struct domain *d;

        if ( copy_from_guest(&reset, arg, 1) != 0 )
            return -EFAULT;

        d = rcu_lock_domain_by_any_id(reset.dom);
        if ( d == NULL )
            return -ESRCH;

        rc = xsm_evtchn_reset(XSM_TARGET, current->domain, d);
        if ( !rc )
            rc = evtchn_reset(d, cmd == EVTCHNOP_reset_cont);

        rcu_unlock_domain(d);

        if ( rc == -ERESTART )
            rc = hypercall_create_continuation(__HYPERVISOR_event_channel_op,
                                               "ih", EVTCHNOP_reset_cont, arg);
        break;
    }

    case EVTCHNOP_init_control: {
        struct evtchn_init_control init_control;
        if ( copy_from_guest(&init_control, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_fifo_init_control(&init_control);
        if ( !rc && __copy_to_guest(arg, &init_control, 1) )
            rc = -EFAULT;
        break;
    }

    case EVTCHNOP_expand_array: {
        struct evtchn_expand_array expand_array;
        if ( copy_from_guest(&expand_array, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_fifo_expand_array(&expand_array);
        break;
    }

    case EVTCHNOP_set_priority: {
        struct evtchn_set_priority set_priority;
        if ( copy_from_guest(&set_priority, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_set_priority(&set_priority);
        break;
    }

    default:
        rc = -ENOSYS;
        break;
    }

    return rc;
}


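/*
 * Create an unbound event channel whose local end is consumed by Xen
 * itself (via the xen_consumers[] notification table) rather than by
 * the guest.  Returns the allocated port or a negative errno value.
 */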
int alloc_unbound_xen_event_channel(
    struct domain *ld, unsigned int lvcpu, domid_t remote_domid,
    xen_event_channel_notification_t notification_fn)
{
    struct evtchn *chn;
    int            port, rc;

    spin_lock(&ld->event_lock);

    port = rc = get_free_port(ld);
    if ( rc < 0 )
        goto out;
    chn = evtchn_from_port(ld, port);

    rc = xsm_evtchn_unbound(XSM_TARGET, ld, chn, remote_domid);
    if ( rc )
        goto out;

    evtchn_write_lock(chn);

    chn->state = ECS_UNBOUND;
    chn->xen_consumer = get_xen_consumer(notification_fn);
    chn->notify_vcpu_id = lvcpu;
    chn->u.unbound.remote_domid = remote_domid;

    evtchn_write_unlock(chn);

    write_atomic(&ld->xen_evtchns, ld->xen_evtchns + 1);

 out:
    check_free_port(ld, port);
    spin_unlock(&ld->event_lock);

    return rc < 0 ? rc : port;
}

void free_xen_event_channel(struct domain *d, int port)
{
    if ( !port_is_valid(d, port) )
    {
        /*
         * Make sure ->is_dying is read /after/ ->valid_evtchns, pairing
         * with the spin_barrier() and BUG_ON() in evtchn_destroy().
         */
        smp_rmb();
        BUG_ON(!d->is_dying);
        return;
    }

    evtchn_close(d, port, 0);
}


void notify_via_xen_event_channel(struct domain *ld, int lport)
{
    struct evtchn *lchn, *rchn;
    struct domain *rd;

    if ( !port_is_valid(ld, lport) )
    {
        /*
         * Make sure ->is_dying is read /after/ ->valid_evtchns, pairing
         * with the spin_barrier() and BUG_ON() in evtchn_destroy().
         */
        smp_rmb();
        ASSERT(ld->is_dying);
        return;
    }

    lchn = evtchn_from_port(ld, lport);

    if ( !evtchn_read_trylock(lchn) )
        return;

    if ( likely(lchn->state == ECS_INTERDOMAIN) )
    {
        ASSERT(consumer_is_xen(lchn));
        rd    = lchn->u.interdomain.remote_dom;
        rchn  = evtchn_from_port(rd, lchn->u.interdomain.remote_port);
        evtchn_port_set_pending(rd, rchn->notify_vcpu_id, rchn);
    }

    evtchn_read_unlock(lchn);
}

void evtchn_check_pollers(struct domain *d, unsigned int port)
{
    struct vcpu *v;
    unsigned int vcpuid;

    /* Check if some VCPU might be polling for this event. */
    if ( likely(bitmap_empty(d->poll_mask, d->max_vcpus)) )
        return;

    /* Wake any interested (or potentially interested) pollers. */
    for ( vcpuid = find_first_bit(d->poll_mask, d->max_vcpus);
          vcpuid < d->max_vcpus;
          vcpuid = find_next_bit(d->poll_mask, d->max_vcpus, vcpuid+1) )
    {
        v = d->vcpu[vcpuid];
        if ( ((v->poll_evtchn <= 0) || (v->poll_evtchn == port)) &&
             test_and_clear_bit(vcpuid, d->poll_mask) )
        {
            v->poll_evtchn = 0;
            vcpu_unblock(v);
        }
    }
}

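/*
 * Domain creation: install the 2-level ABI as the default, allocate the
 * first bucket, and reserve port 0 so it can never be bound.
 */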
int evtchn_init(struct domain *d, unsigned int max_port)
{
    evtchn_2l_init(d);
    d->max_evtchn_port = min_t(unsigned int, max_port, INT_MAX);

    d->evtchn = alloc_evtchn_bucket(d, 0);
    if ( !d->evtchn )
        return -ENOMEM;
    d->valid_evtchns = EVTCHNS_PER_BUCKET;

    spin_lock_init_prof(d, event_lock);
    if ( get_free_port(d) != 0 )
    {
        free_evtchn_bucket(d, d->evtchn);
        return -EINVAL;
    }
    evtchn_from_port(d, 0)->state = ECS_RESERVED;
    write_atomic(&d->active_evtchns, 0);

#if MAX_VIRT_CPUS > BITS_PER_LONG
    d->poll_mask = xzalloc_array(unsigned long, BITS_TO_LONGS(d->max_vcpus));
    if ( !d->poll_mask )
    {
        free_evtchn_bucket(d, d->evtchn);
        return -ENOMEM;
    }
#endif

    return 0;
}

int evtchn_destroy(struct domain *d)
{
    unsigned int i;

    /* After this barrier no new event-channel allocations can occur. */
    BUG_ON(!d->is_dying);
    spin_barrier(&d->event_lock);

    /* Close all existing event channels. */
    for ( i = d->valid_evtchns; --i; )
    {
        evtchn_close(d, i, 0);

        /*
         * Avoid preempting when called from domain_create()'s error path,
         * and don't check too often (choice of frequency is arbitrary).
         */
        if ( i && !(i & 0x3f) && d->is_dying != DOMDYING_dead &&
             hypercall_preempt_check() )
        {
            write_atomic(&d->valid_evtchns, i);
            return -ERESTART;
        }
    }

    ASSERT(!d->active_evtchns);

    clear_global_virq_handlers(d);

    evtchn_fifo_destroy(d);

    return 0;
}


void evtchn_destroy_final(struct domain *d)
{
    unsigned int i, j;

    /* Free all event-channel buckets. */
    for ( i = 0; i < NR_EVTCHN_GROUPS; i++ )
    {
        if ( !d->evtchn_group[i] )
            continue;
        for ( j = 0; j < BUCKETS_PER_GROUP; j++ )
            free_evtchn_bucket(d, d->evtchn_group[i][j]);
        xfree(d->evtchn_group[i]);
    }
    free_evtchn_bucket(d, d->evtchn);

#if MAX_VIRT_CPUS > BITS_PER_LONG
    xfree(d->poll_mask);
    d->poll_mask = NULL;
#endif
}


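/*
 * When a vCPU moves to a new pCPU, re-target the hardware affinity of
 * every PIRQ whose event channel notifies that vCPU.
 */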
void evtchn_move_pirqs(struct vcpu *v)
{
    struct domain *d = v->domain;
    const cpumask_t *mask = cpumask_of(v->processor);
    unsigned int port;
    struct evtchn *chn;

    spin_lock(&d->event_lock);
    for ( port = v->pirq_evtchn_head; port; port = chn->u.pirq.next_port )
    {
        chn = evtchn_from_port(d, port);
        pirq_set_affinity(d, chn->u.pirq.irq, mask);
    }
    spin_unlock(&d->event_lock);
}


static void domain_dump_evtchn_info(struct domain *d)
{
    unsigned int port;
    int irq;

    printk("Event channel information for domain %d:\n"
           "Polling vCPUs: {%*pbl}\n"
           "    port [p/m/s]\n", d->domain_id, d->max_vcpus, d->poll_mask);

    spin_lock(&d->event_lock);

    for ( port = 1; port_is_valid(d, port); ++port )
    {
        const struct evtchn *chn;
        char *ssid;

        chn = evtchn_from_port(d, port);
        if ( chn->state == ECS_FREE )
            continue;

        printk("    %4u [%d/%d/",
               port,
               evtchn_is_pending(d, chn),
               evtchn_is_masked(d, chn));
        evtchn_port_print_state(d, chn);
        printk("]: s=%d n=%d x=%d",
               chn->state, chn->notify_vcpu_id, chn->xen_consumer);

        switch ( chn->state )
        {
        case ECS_UNBOUND:
            printk(" d=%d", chn->u.unbound.remote_domid);
            break;
        case ECS_INTERDOMAIN:
            printk(" d=%d p=%d",
                   chn->u.interdomain.remote_dom->domain_id,
                   chn->u.interdomain.remote_port);
            break;
        case ECS_PIRQ:
            irq = domain_pirq_to_irq(d, chn->u.pirq.irq);
            printk(" p=%d i=%d", chn->u.pirq.irq, irq);
            break;
        case ECS_VIRQ:
            printk(" v=%d", chn->u.virq);
            break;
        }

        ssid = xsm_show_security_evtchn(d, chn);
        if ( ssid )
        {
            printk(" Z=%s\n", ssid);
            xfree(ssid);
        }
        else
            printk("\n");
    }

    spin_unlock(&d->event_lock);
}

static void dump_evtchn_info(unsigned char key)
{
    struct domain *d;

    printk("'%c' pressed -> dumping event-channel info\n", key);

    rcu_read_lock(&domlist_read_lock);

    for_each_domain ( d )
        domain_dump_evtchn_info(d);

    rcu_read_unlock(&domlist_read_lock);
}

static int __init dump_evtchn_info_key_init(void)
{
    register_keyhandler('e', dump_evtchn_info, "dump evtchn info", 1);
    return 0;
}
__initcall(dump_evtchn_info_key_init);

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */