/*
 * xenbus.c: static, synchronous, read-only xenbus client for hvmloader.
 *
 * Copyright (c) 2009 Tim Deegan, Citrix Systems (R&D) Ltd.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
27
28 #include "util.h"
29 #include "hypercall.h"
30 #include <xen/sched.h>
31 #include <xen/event_channel.h>
32 #include <xen/hvm/params.h>
33 #include <xen/io/xs_wire.h>
34
static struct xenstore_domain_interface *rings; /* Shared ring with dom0 */
static evtchn_port_t event;                     /* Event-channel to dom0 */
/* Unmarshalling area; +1 so replies can always be nul-terminated. */
static char payload[XENSTORE_PAYLOAD_MAX + 1];
38
ring_wait(void)39 static void ring_wait(void)
40 {
41 struct shared_info *shinfo = get_shared_info();
42 struct sched_poll poll;
43
44 memset(&poll, 0, sizeof(poll));
45 set_xen_guest_handle(poll.ports, &event);
46 poll.nr_ports = 1;
47
48 while ( !test_and_clear_bit(event, shinfo->evtchn_pending) )
49 hypercall_sched_op(SCHEDOP_poll, &poll);
50 }
51
52 /* Connect our xenbus client to the backend.
53 * Call once, before any other xenbus actions. */
xenbus_setup(void)54 void xenbus_setup(void)
55 {
56 uint64_t val;
57
58 /* Ask Xen where the xenbus shared page is. */
59 if ( hvm_param_get(HVM_PARAM_STORE_PFN, &val) )
60 BUG();
61 rings = (void *) (unsigned long) (val << PAGE_SHIFT);
62
63 /* Ask Xen where the xenbus event channel is. */
64 if ( hvm_param_get(HVM_PARAM_STORE_EVTCHN, &val) )
65 BUG();
66 event = val;
67
68 printf("Xenbus rings @0x%lx, event channel %lu\n",
69 (unsigned long) rings, (unsigned long) event);
70 }
71
/* Reset the xenbus connection so the next kernel can start again.
 *
 * Two strategies, chosen by the backend's advertised features:
 *  - reconnection supported: request a reset and wait for the backend
 *    to acknowledge by clearing XENSTORE_RECONNECT;
 *  - otherwise: wipe the ring state ourselves (racy if the backend is
 *    reading concurrently -- see the comment below).
 * Afterwards the module-level 'rings' pointer is cleared, so no other
 * xenbus call may be made until xenbus_setup() runs again. */
void xenbus_shutdown(void)
{
    struct shared_info *shinfo = get_shared_info();
    evtchn_send_t send;

    /* Must have been connected by xenbus_setup() first. */
    ASSERT(rings != NULL);

    if (rings->server_features & XENSTORE_SERVER_FEATURE_RECONNECTION) {
        /* Order matters: set the flag, kick the backend, then poll the
         * flag until the backend clears it.  The volatile cast forces a
         * fresh read of 'connection' on every loop iteration. */
        rings->connection = XENSTORE_RECONNECT;
        send.port = event;
        hypercall_event_channel_op(EVTCHNOP_send, &send);
        while (*(volatile uint32_t*)&rings->connection == XENSTORE_RECONNECT)
            ring_wait ();
    } else {
        /* If the backend reads the state while we're erasing it then the
         * ring state will become corrupted, preventing guest frontends from
         * connecting. This is rare. To help diagnose the failure, we fill
         * the ring with XS_INVALID packets. */
        memset(rings->req, 0xff, XENSTORE_RING_SIZE);
        memset(rings->rsp, 0xff, XENSTORE_RING_SIZE);
        rings->req_cons = rings->req_prod = 0;
        rings->rsp_cons = rings->rsp_prod = 0;
    }
    /* Clear the event-channel state too. */
    memset(shinfo->vcpu_info, 0, sizeof(shinfo->vcpu_info));
    memset(shinfo->evtchn_pending, 0, sizeof(shinfo->evtchn_pending));
    memset(shinfo->evtchn_mask, 0, sizeof(shinfo->evtchn_mask));

    rings = NULL;
}
103
/* Helper functions: copy data in and out of the ring */

/*
 * Copy @len bytes from @data into the request ring, sleeping in
 * ring_wait() whenever no space is free.  Each pass writes the largest
 * chunk permitted by (a) free space -- the producer deliberately keeps
 * one byte unused, so req_prod - req_cons never reaches
 * XENSTORE_RING_SIZE -- (b) the wrap-around point of the buffer, and
 * (c) the number of bytes still outstanding.
 */
static void ring_write(const char *data, uint32_t len)
{
    uint32_t part, done = 0;

    /* A single message never exceeds the xenstore payload limit. */
    ASSERT(len <= XENSTORE_PAYLOAD_MAX);

    while ( len )
    {
        /* Don't overrun the consumer pointer */
        while ( (part = (XENSTORE_RING_SIZE - 1) -
                 MASK_XENSTORE_IDX(rings->req_prod - rings->req_cons)) == 0 )
            ring_wait();
        /* Don't overrun the end of the ring */
        if ( part > (XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(rings->req_prod)) )
            part = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(rings->req_prod);
        /* Don't write more than we were asked for */
        if ( part > len )
            part = len;

        memcpy(rings->req + MASK_XENSTORE_IDX(rings->req_prod),
               data + done, part);
        barrier(); /* = wmb before prod write, rmb before next cons read */
        rings->req_prod += part;
        len -= part;
        done += part;
    }
}
132
/*
 * Copy @len bytes out of the response ring into @data, sleeping in
 * ring_wait() until the backend has produced enough.  Chunking mirrors
 * ring_write(): limited by available data, by the wrap-around point,
 * and by the bytes still wanted.
 */
static void ring_read(char *data, uint32_t len)
{
    uint32_t part, done = 0;

    /* A single message never exceeds the xenstore payload limit. */
    ASSERT(len <= XENSTORE_PAYLOAD_MAX);

    while ( len )
    {
        /* Don't overrun the producer pointer */
        while ( (part = MASK_XENSTORE_IDX(rings->rsp_prod -
                                          rings->rsp_cons)) == 0 )
        {
            /*
             * Don't wait for producer to fill the ring if it is already full.
             * Condition happens when you write string > 1K into the ring.
             * eg case prod=1272 cons=248.
             * (prod - cons == XENSTORE_RING_SIZE masks to 0 above, which
             * would otherwise make us wait forever on a completely full
             * ring.)
             */
            if ( rings->rsp_prod - rings->rsp_cons == XENSTORE_RING_SIZE )
            {
                part = XENSTORE_RING_SIZE;
                break;
            }
            ring_wait();
        }
        /* Don't overrun the end of the ring */
        if ( part > (XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(rings->rsp_cons)) )
            part = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(rings->rsp_cons);
        /* Don't read more than we were asked for */
        if ( part > len )
            part = len;

        memcpy(data + done,
               rings->rsp + MASK_XENSTORE_IDX(rings->rsp_cons), part);
        barrier(); /* = wmb before cons write, rmb before next prod read */
        rings->rsp_cons += part;
        len -= part;
        done += part;
    }
}
172
173 #define MAX_SEGMENTS 4
174
175 /* Send a request. */
xenbus_send(uint32_t type,...)176 static void xenbus_send(uint32_t type, ...)
177 {
178 struct xsd_sockmsg hdr;
179 va_list ap;
180 struct {
181 const char *data;
182 uint32_t len;
183 } seg[MAX_SEGMENTS];
184 evtchn_send_t send;
185 int i, n;
186
187 /* Not acceptable to use xenbus before setting it up */
188 ASSERT(rings != NULL);
189
190 /* Put the request on the ring */
191 hdr.type = type;
192 hdr.req_id = 0; /* We only ever issue one request at a time */
193 hdr.tx_id = 0; /* We never use transactions */
194 hdr.len = 0;
195
196 va_start(ap, type);
197 for ( i = 0; ; i++ ) {
198 seg[i].data = va_arg(ap, const char *);
199 seg[i].len = va_arg(ap, uint32_t);
200
201 if ( seg[i].data == NULL )
202 break;
203
204 hdr.len += seg[i].len;
205 }
206 n = i;
207 va_end(ap);
208
209 ring_write((char *) &hdr, sizeof hdr);
210 for ( i = 0; i < n; i++ )
211 ring_write(seg[i].data, seg[i].len);
212
213 /* Tell the other end about the request */
214 send.port = event;
215 hypercall_event_channel_op(EVTCHNOP_send, &send);
216 }
217
218 /* Wait for the answer to a previous request.
219 * Returns 0 for success, or an errno for error.
220 * The answer is returned in a static buffer which is only
221 * valid until the next call of xenbus_send(). */
xenbus_recv(uint32_t * reply_len,const char ** reply_data,uint32_t * reply_type)222 static int xenbus_recv(uint32_t *reply_len, const char **reply_data,
223 uint32_t *reply_type)
224 {
225 struct xsd_sockmsg hdr;
226
227 do
228 {
229 /* Pull the reply off the ring */
230 ring_read((char *) &hdr, sizeof(hdr));
231 ring_read(payload, hdr.len);
232 /* For sanity's sake, nul-terminate the answer */
233 payload[hdr.len] = '\0';
234
235 } while ( hdr.type == XS_DEBUG );
236
237 if ( reply_type )
238 *reply_type = hdr.type;
239
240 /* Handle errors */
241 if ( hdr.type == XS_ERROR )
242 {
243 int i;
244
245 *reply_len = 0;
246 for ( i = 0; i < ((sizeof xsd_errors) / (sizeof xsd_errors[0])); i++ )
247 if ( !strcmp(xsd_errors[i].errstring, payload) )
248 return xsd_errors[i].errnum;
249 /* Default error value if we couldn't decode the ASCII error */
250 return EIO;
251 }
252
253 if ( reply_data )
254 *reply_data = payload;
255 if ( reply_len )
256 *reply_len = hdr.len;
257 return 0;
258 }
259
260
261 /* Read a xenstore key. Returns a nul-terminated string (even if the XS
262 * data wasn't nul-terminated) or NULL. The returned string is in a
263 * static buffer, so only valid until the next xenstore/xenbus operation.
264 * If @default_resp is specified, it is returned in preference to a NULL or
265 * empty string received from xenstore.
266 */
xenstore_read(const char * path,const char * default_resp)267 const char *xenstore_read(const char *path, const char *default_resp)
268 {
269 uint32_t len = 0, type = 0;
270 const char *answer = NULL;
271
272 xenbus_send(XS_READ,
273 path, strlen(path),
274 "", 1, /* nul separator */
275 NULL, 0);
276
277 if ( xenbus_recv(&len, &answer, &type) || (type != XS_READ) )
278 answer = NULL;
279
280 if ( (default_resp != NULL) && ((answer == NULL) || (*answer == '\0')) )
281 answer = default_resp;
282
283 /* We know xenbus_recv() nul-terminates its answer, so just pass it on. */
284 return answer;
285 }
286
287 /* Write a xenstore key. @value must be a nul-terminated string. Returns
288 * zero on success or a xenstore error code on failure.
289 */
xenstore_write(const char * path,const char * value)290 int xenstore_write(const char *path, const char *value)
291 {
292 uint32_t len = 0, type = 0;
293 const char *answer = NULL;
294 int ret;
295
296 xenbus_send(XS_WRITE,
297 path, strlen(path),
298 "", 1, /* nul separator */
299 value, strlen(value),
300 NULL, 0);
301
302 ret = xenbus_recv(&len, &answer, &type);
303
304 if ( ret == 0 && ((type != XS_WRITE) || (len != 3) ||
305 !answer || strcmp(answer, "OK")) )
306 ret = EIO;
307
308 return ret;
309 }
310
/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */
320