/******************************************************************************
 * event.h
 *
 * A nice interface for passing asynchronous events to guest OSes.
 *
 * Copyright (c) 2002-2006, K A Fraser
 */

#ifndef __XEN_EVENT_H__
#define __XEN_EVENT_H__

#include <xen/sched.h>
#include <xen/smp.h>
#include <xen/softirq.h>
#include <xen/bitops.h>
#include <xen/nospec.h>
#include <asm/event.h>

/*
 * send_guest_vcpu_virq: Notify guest via a per-VCPU VIRQ.
 * @v: VCPU to which virtual IRQ should be sent
 * @virq: Virtual IRQ number (VIRQ_*)
 */
void send_guest_vcpu_virq(struct vcpu *v, uint32_t virq);

/*
 * send_global_virq: Notify the domain handling a global VIRQ.
 * @virq: Virtual IRQ number (VIRQ_*)
 */
void send_global_virq(uint32_t virq);

/*
 * send_guest_global_virq:
 * @d: Domain to which VIRQ should be sent
 * @virq: Virtual IRQ number (VIRQ_*), must be global
 */
void send_guest_global_virq(struct domain *d, uint32_t virq);

/*
 * set_global_virq_handler: Set a global VIRQ handler.
 * @d: New target domain for this VIRQ
 * @virq: Virtual IRQ number (VIRQ_*), must be global
 */
int set_global_virq_handler(struct domain *d, uint32_t virq);
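
/*
 * Usage sketch (illustrative only, not part of this interface): a Xen
 * subsystem pokes the domain registered for a global VIRQ, or a specific
 * vCPU for a per-VCPU VIRQ.  The VIRQ numbers below are merely examples:
 *
 *     send_global_virq(VIRQ_CON_RING);      e.g. console ring activity
 *     send_guest_vcpu_virq(v, VIRQ_TIMER);  e.g. timer event for vCPU v
 */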

/*
 * send_guest_pirq:
 * @d: Domain to which physical IRQ should be sent
 * @pirq: Physical IRQ number
 */
void send_guest_pirq(struct domain *, const struct pirq *);

/* Send a notification from a given domain's event-channel port. */
int evtchn_send(struct domain *d, unsigned int lport);

/* Bind a local event-channel port to the specified VCPU. */
long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id);

/* Bind a VIRQ. */
int evtchn_bind_virq(evtchn_bind_virq_t *bind, evtchn_port_t port);

/* Get the status of an event channel port. */
int evtchn_status(evtchn_status_t *status);

/* Close an event channel. */
int evtchn_close(struct domain *d1, int port1, bool guest);

/* Free an event channel. */
void evtchn_free(struct domain *d, struct evtchn *chn);

/* Allocate a specific event channel port. */
int evtchn_allocate_port(struct domain *d, unsigned int port);

/* Unmask a local event-channel port. */
int evtchn_unmask(unsigned int port);

/* Move all PIRQs after a vCPU was moved to another pCPU. */
void evtchn_move_pirqs(struct vcpu *v);

/* Allocate/free a Xen-attached event channel port. */
typedef void (*xen_event_channel_notification_t)(
    struct vcpu *v, unsigned int port);
int alloc_unbound_xen_event_channel(
    struct domain *ld, unsigned int lvcpu, domid_t remote_domid,
    xen_event_channel_notification_t notification_fn);
void free_xen_event_channel(struct domain *d, int port);

/* Query if event channel is in use by the guest. */
int guest_enabled_event(struct vcpu *v, uint32_t virq);

/* Notify remote end of a Xen-attached event channel. */
void notify_via_xen_event_channel(struct domain *ld, int lport);
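
/*
 * Lifecycle sketch for a Xen-attached channel (illustrative only; error
 * handling and locking are omitted and the notification callback name is
 * hypothetical):
 *
 *     static void my_notification_fn(struct vcpu *v, unsigned int port);
 *
 *     int port = alloc_unbound_xen_event_channel(d, 0, remote_domid,
 *                                                my_notification_fn);
 *     if ( port >= 0 )
 *     {
 *         notify_via_xen_event_channel(d, port);
 *         ...
 *         free_xen_event_channel(d, port);
 *     }
 */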

/*
 * Internal event channel object storage.
 *
 * The objects (struct evtchn) are indexed using a two level scheme of
 * groups and buckets. Each group is a page of bucket pointers. Each
 * bucket is a page-sized array of struct evtchn's.
 *
 * The first bucket is directly accessed via d->evtchn.
 */
#define group_from_port(d, p) \
    array_access_nospec((d)->evtchn_group, (p) / EVTCHNS_PER_GROUP)
#define bucket_from_port(d, p) \
    ((group_from_port(d, p))[((p) % EVTCHNS_PER_GROUP) / EVTCHNS_PER_BUCKET])
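
/*
 * Worked example of the lookup above, with purely illustrative sizes (the
 * real EVTCHNS_PER_BUCKET and EVTCHNS_PER_GROUP are derived from the page
 * size and struct evtchn): assuming EVTCHNS_PER_BUCKET = 128 and
 * EVTCHNS_PER_GROUP = 8192, port 10000 lands in group 10000 / 8192 = 1,
 * bucket (10000 % 8192) / 128 = 14 within that group, and slot
 * 10000 % 128 = 16 within that bucket.
 */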

static inline unsigned int max_evtchns(const struct domain *d)
{
    return d->evtchn_fifo ? EVTCHN_FIFO_NR_CHANNELS
                          : BITS_PER_EVTCHN_WORD(d) * BITS_PER_EVTCHN_WORD(d);
}

static inline void evtchn_read_lock(struct evtchn *evtchn)
{
    read_lock(&evtchn->lock);
}

static inline bool evtchn_read_trylock(struct evtchn *evtchn)
{
    return read_trylock(&evtchn->lock);
}

static inline void evtchn_read_unlock(struct evtchn *evtchn)
{
    read_unlock(&evtchn->lock);
}
static inline bool_t port_is_valid(struct domain *d, unsigned int p)
{
    if ( p >= read_atomic(&d->valid_evtchns) )
        return false;

    /*
     * The caller will usually access the event channel afterwards and
     * may be done without taking the per-domain lock. The barrier pairs
     * with the smp_wmb() in evtchn_allocate_port().
     */
    smp_rmb();

    return true;
}

static inline struct evtchn *evtchn_from_port(struct domain *d, unsigned int p)
{
    if ( p < EVTCHNS_PER_BUCKET )
        return &d->evtchn[array_index_nospec(p, EVTCHNS_PER_BUCKET)];
    return bucket_from_port(d, p) + (p % EVTCHNS_PER_BUCKET);
}

/*
 * "usable" as in "by a guest", i.e. Xen consumed channels are assumed to be
 * taken care of separately where used for Xen's internal purposes.
 */
static bool evtchn_usable(const struct evtchn *evtchn)
{
    if ( evtchn->xen_consumer )
        return false;

#ifdef arch_evtchn_is_special
    if ( arch_evtchn_is_special(evtchn) )
        return true;
#endif

    BUILD_BUG_ON(ECS_FREE > ECS_RESERVED);
    return evtchn->state > ECS_RESERVED;
}

/* Wait on a Xen-attached event channel. */
#define wait_on_xen_event_channel(port, condition)                      \
    do {                                                                \
        if ( condition )                                                \
            break;                                                      \
        set_bit(_VPF_blocked_in_xen, &current->pause_flags);            \
        smp_mb(); /* set blocked status /then/ re-evaluate condition */ \
        if ( condition )                                                \
        {                                                               \
            clear_bit(_VPF_blocked_in_xen, &current->pause_flags);      \
            break;                                                      \
        }                                                               \
        raise_softirq(SCHEDULE_SOFTIRQ);                                \
        do_softirq();                                                   \
    } while ( 0 )

#define prepare_wait_on_xen_event_channel(port)                         \
    do {                                                                \
        set_bit(_VPF_blocked_in_xen, &current->pause_flags);            \
        raise_softirq(SCHEDULE_SOFTIRQ);                                \
        smp_mb(); /* set blocked status /then/ caller does his work */  \
    } while ( 0 )
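
/*
 * Usage sketch (illustrative only): a Xen-internal consumer of such a
 * channel typically kicks the remote end and then blocks until the shared
 * state it is waiting on changes.  The request structure and its DONE
 * state below are hypothetical:
 *
 *     prepare_wait_on_xen_event_channel(port);
 *     notify_via_xen_event_channel(d, port);
 *     wait_on_xen_event_channel(port, read_atomic(&req->state) == DONE);
 */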

void evtchn_check_pollers(struct domain *d, unsigned int port);

void evtchn_2l_init(struct domain *d);

/* Close all event channels and reset to 2-level ABI. */
int evtchn_reset(struct domain *d, bool resuming);

/*
 * Low-level event channel port ops.
 *
 * All hooks have to be called with a lock held which prevents the channel
 * from changing state. This may be the domain event lock, the per-channel
 * lock, or in the case of sending interdomain events also the other side's
 * per-channel lock. Exceptions apply in certain cases for the PV shim.
 */
struct evtchn_port_ops {
    void (*init)(struct domain *d, struct evtchn *evtchn);
    void (*set_pending)(struct vcpu *v, struct evtchn *evtchn);
    void (*clear_pending)(struct domain *d, struct evtchn *evtchn);
    void (*unmask)(struct domain *d, struct evtchn *evtchn);
    bool (*is_pending)(const struct domain *d, const struct evtchn *evtchn);
    bool (*is_masked)(const struct domain *d, const struct evtchn *evtchn);
    /*
     * Is the port unavailable because it's still being cleaned up
     * after being closed?
     */
    bool (*is_busy)(const struct domain *d, const struct evtchn *evtchn);
    int (*set_priority)(struct domain *d, struct evtchn *evtchn,
                        unsigned int priority);
    void (*print_state)(struct domain *d, const struct evtchn *evtchn);
};
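
/*
 * Sketch of how an event channel ABI backend wires itself up (illustrative;
 * the real 2-level and FIFO implementations live under xen/common/, and the
 * instance and hook names below are hypothetical):
 *
 *     static const struct evtchn_port_ops example_ops = {
 *         .set_pending   = example_set_pending,
 *         .clear_pending = example_clear_pending,
 *         .unmask        = example_unmask,
 *         .is_pending    = example_is_pending,
 *         .is_masked     = example_is_masked,
 *     };
 *
 *     d->evtchn_port_ops = &example_ops;
 */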

static inline void evtchn_port_init(struct domain *d, struct evtchn *evtchn)
{
    if ( d->evtchn_port_ops->init )
        d->evtchn_port_ops->init(d, evtchn);
}

static inline void evtchn_port_set_pending(struct domain *d,
                                           unsigned int vcpu_id,
                                           struct evtchn *evtchn)
{
    if ( evtchn_usable(evtchn) )
        d->evtchn_port_ops->set_pending(d->vcpu[vcpu_id], evtchn);
}

static inline void evtchn_port_clear_pending(struct domain *d,
                                             struct evtchn *evtchn)
{
    if ( evtchn_usable(evtchn) )
        d->evtchn_port_ops->clear_pending(d, evtchn);
}

static inline void evtchn_port_unmask(struct domain *d,
                                      struct evtchn *evtchn)
{
    if ( evtchn_usable(evtchn) )
        d->evtchn_port_ops->unmask(d, evtchn);
}

static inline bool evtchn_is_pending(const struct domain *d,
                                     const struct evtchn *evtchn)
{
    return evtchn_usable(evtchn) && d->evtchn_port_ops->is_pending(d, evtchn);
}

static inline bool evtchn_port_is_pending(struct domain *d, evtchn_port_t port)
{
    struct evtchn *evtchn = evtchn_from_port(d, port);
    bool rc;

    evtchn_read_lock(evtchn);
    rc = evtchn_is_pending(d, evtchn);
    evtchn_read_unlock(evtchn);

    return rc;
}

static inline bool evtchn_is_masked(const struct domain *d,
                                    const struct evtchn *evtchn)
{
    return !evtchn_usable(evtchn) || d->evtchn_port_ops->is_masked(d, evtchn);
}

static inline bool evtchn_port_is_masked(struct domain *d, evtchn_port_t port)
{
    struct evtchn *evtchn = evtchn_from_port(d, port);
    bool rc;

    evtchn_read_lock(evtchn);

    rc = evtchn_is_masked(d, evtchn);

    evtchn_read_unlock(evtchn);

    return rc;
}

static inline bool evtchn_is_busy(const struct domain *d,
                                  const struct evtchn *evtchn)
{
    return d->evtchn_port_ops->is_busy &&
           d->evtchn_port_ops->is_busy(d, evtchn);
}

static inline int evtchn_port_set_priority(struct domain *d,
                                           struct evtchn *evtchn,
                                           unsigned int priority)
{
    if ( !d->evtchn_port_ops->set_priority )
        return -ENOSYS;
    if ( !evtchn_usable(evtchn) )
        return -EACCES;
    return d->evtchn_port_ops->set_priority(d, evtchn, priority);
}

static inline void evtchn_port_print_state(struct domain *d,
                                           const struct evtchn *evtchn)
{
    d->evtchn_port_ops->print_state(d, evtchn);
}

#endif /* __XEN_EVENT_H__ */