// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) 2007-2008 Samuel Thibault.
 * (C) Copyright 2020 EPAM Systems Inc.
 */
#include <blk.h>
#include <common.h>
#include <dm.h>
#include <dm/device-internal.h>
#include <malloc.h>
#include <part.h>

#include <asm/armv8/mmu.h>
#include <asm/global_data.h>
#include <asm/io.h>
#include <asm/xen/system.h>

#include <linux/bug.h>
#include <linux/compat.h>

#include <xen/events.h>
#include <xen/gnttab.h>
#include <xen/hvm.h>
#include <xen/xenbus.h>

#include <xen/interface/io/ring.h>
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/protocols.h>

#define DRV_NAME	"pvblock"
#define DRV_NAME_BLK	"pvblock_blk"

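/*
 * O_RDONLY/O_RDWR mirror the usual POSIX open() mode values and are defined
 * locally here; WAIT_RING_TO_MS is the interval, in ms, between polls while
 * waiting for a free slot in the shared ring.
 */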
#define O_RDONLY	00
#define O_RDWR		02
#define WAIT_RING_TO_MS	10

struct blkfront_info {
	u64 sectors;
	unsigned int sector_size;
	int mode;
	int info;
	int barrier;
	int flush;
};

/**
 * struct blkfront_dev - Struct representing blkfront device
 * @dom: Domain id
 * @ring: Front_ring structure
 * @ring_ref: The grant reference, allowing us to grant access
 *	      to the ring to the other end/domain
 * @evtchn: Event channel used to signal ring events
 * @handle: Virtual block device handle
 * @nodename: Device XenStore path in format "device/vbd/" + @devid
 * @backend: Backend XenStore path
 * @info: Private data
 * @devid: Device id
 * @bounce_buffer: Buffer used for unaligned transfers
 */
struct blkfront_dev {
	domid_t dom;

	struct blkif_front_ring ring;
	grant_ref_t ring_ref;
	evtchn_port_t evtchn;
	blkif_vdev_t handle;

	char *nodename;
	char *backend;
	struct blkfront_info info;
	unsigned int devid;
	u8 *bounce_buffer;
};

struct blkfront_plat {
	unsigned int devid;
};

/**
 * struct blkfront_aiocb - AIO control block
 * @aio_dev: Blkfront device
 * @aio_buf: Memory buffer, which must be aligned to the @aio_dev
 *	     sector size
 * @aio_nbytes: Size of the AIO, which must be a multiple of the
 *		@aio_dev sector size
 * @aio_offset: Offset, which must be aligned to the @aio_dev
 *		sector size
 * @data: Data used to receive the response from the ring
 * @gref: Array of grant references
 * @n: Number of segments
 * @aio_cb: Completion callback; represents one I/O request
 */
struct blkfront_aiocb {
	struct blkfront_dev *aio_dev;
	u8 *aio_buf;
	size_t aio_nbytes;
	off_t aio_offset;
	void *data;

	grant_ref_t gref[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	int n;

	void (*aio_cb)(struct blkfront_aiocb *aiocb, int ret);
};

static void blkfront_sync(struct blkfront_dev *dev);

static void free_blkfront(struct blkfront_dev *dev)
{
	mask_evtchn(dev->evtchn);
	free(dev->backend);

	gnttab_end_access(dev->ring_ref);
	free(dev->ring.sring);

	unbind_evtchn(dev->evtchn);

	free(dev->bounce_buffer);
	free(dev->nodename);
	free(dev);
}

static int init_blkfront(unsigned int devid, struct blkfront_dev *dev)
{
	xenbus_transaction_t xbt;
	char *err = NULL;
	char *message = NULL;
	struct blkif_sring *s;
	int retry = 0;
	char *msg = NULL;
	char *c;
	char nodename[32];
	char path[ARRAY_SIZE(nodename) + strlen("/backend-id") + 1];

	sprintf(nodename, "device/vbd/%d", devid);

	memset(dev, 0, sizeof(*dev));
	dev->nodename = strdup(nodename);
	dev->devid = devid;

	snprintf(path, sizeof(path), "%s/backend-id", nodename);
	dev->dom = xenbus_read_integer(path);
	evtchn_alloc_unbound(dev->dom, NULL, dev, &dev->evtchn);

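	/*
	 * The shared ring must occupy its own page-aligned page so that it
	 * can be granted to the backend domain.
	 */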
	s = (struct blkif_sring *)memalign(PAGE_SIZE, PAGE_SIZE);
	if (!s) {
		printf("Failed to allocate shared ring\n");
		goto error;
	}

	SHARED_RING_INIT(s);
	FRONT_RING_INIT(&dev->ring, s, PAGE_SIZE);

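	/* Grant the backend read/write access to the shared ring page */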
	dev->ring_ref = gnttab_grant_access(dev->dom, virt_to_pfn(s), 0);

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		printf("Error starting transaction\n");
		free(err);
	}

	err = xenbus_printf(xbt, nodename, "ring-ref", "%u", dev->ring_ref);
	if (err) {
		message = "writing ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, nodename, "event-channel", "%u", dev->evtchn);
	if (err) {
		message = "writing event-channel";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, nodename, "protocol", "%s",
			    XEN_IO_PROTO_ABI_NATIVE);
	if (err) {
		message = "writing protocol";
		goto abort_transaction;
	}

	snprintf(path, sizeof(path), "%s/state", nodename);
	err = xenbus_switch_state(xbt, path, XenbusStateConnected);
	if (err) {
		message = "switching state";
		goto abort_transaction;
	}

	err = xenbus_transaction_end(xbt, 0, &retry);
	free(err);
	if (retry) {
		printf("completing transaction\n");
		goto again;
	}

	goto done;

abort_transaction:
	free(err);
	err = xenbus_transaction_end(xbt, 1, &retry);
	printf("Abort transaction %s\n", message);
	goto error;

done:
	snprintf(path, sizeof(path), "%s/backend", nodename);
	msg = xenbus_read(XBT_NIL, path, &dev->backend);
	if (msg) {
		printf("Error %s when reading the backend path %s\n",
		       msg, path);
		goto error;
	}

	dev->handle = strtoul(strrchr(nodename, '/') + 1, NULL, 0);

	{
		XenbusState state;
		char path[strlen(dev->backend) +
			strlen("/feature-flush-cache") + 1];

		snprintf(path, sizeof(path), "%s/mode", dev->backend);
		msg = xenbus_read(XBT_NIL, path, &c);
		if (msg) {
			printf("Error %s when reading the mode\n", msg);
			goto error;
		}
		if (*c == 'w')
			dev->info.mode = O_RDWR;
		else
			dev->info.mode = O_RDONLY;
		free(c);

		snprintf(path, sizeof(path), "%s/state", dev->backend);

		msg = NULL;
		state = xenbus_read_integer(path);
		while (!msg && state < XenbusStateConnected)
			msg = xenbus_wait_for_state_change(path, &state);
		if (msg || state != XenbusStateConnected) {
			printf("backend not available, state=%d\n", state);
			goto error;
		}

		snprintf(path, sizeof(path), "%s/info", dev->backend);
		dev->info.info = xenbus_read_integer(path);

		snprintf(path, sizeof(path), "%s/sectors", dev->backend);
		/*
		 * FIXME: read_integer returns an int, so disk size
		 * limited to 1TB for now
		 */
		dev->info.sectors = xenbus_read_integer(path);

		snprintf(path, sizeof(path), "%s/sector-size", dev->backend);
		dev->info.sector_size = xenbus_read_integer(path);

		snprintf(path, sizeof(path), "%s/feature-barrier",
			 dev->backend);
		dev->info.barrier = xenbus_read_integer(path);

		snprintf(path, sizeof(path), "%s/feature-flush-cache",
			 dev->backend);
		dev->info.flush = xenbus_read_integer(path);
	}
	unmask_evtchn(dev->evtchn);

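	/*
	 * Single-sector bounce buffer used by pvblock_iop() when the caller's
	 * buffer is not aligned to the device sector size.
	 */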
	dev->bounce_buffer = memalign(dev->info.sector_size,
				      dev->info.sector_size);
	if (!dev->bounce_buffer) {
		printf("Failed to allocate bounce buffer\n");
		goto error;
	}

	debug("%llu sectors of %u bytes, bounce buffer at %p\n",
	      dev->info.sectors, dev->info.sector_size,
	      dev->bounce_buffer);

	return 0;

error:
	free(msg);
	free(err);
	free_blkfront(dev);
	return -ENODEV;
}

static void shutdown_blkfront(struct blkfront_dev *dev)
{
	char *err = NULL, *err2;
	XenbusState state;

	char path[strlen(dev->backend) + strlen("/state") + 1];
	char nodename[strlen(dev->nodename) + strlen("/event-channel") + 1];

	debug("Close " DRV_NAME ", device ID %d\n", dev->devid);

	blkfront_sync(dev);

	snprintf(path, sizeof(path), "%s/state", dev->backend);
	snprintf(nodename, sizeof(nodename), "%s/state", dev->nodename);

	err = xenbus_switch_state(XBT_NIL, nodename, XenbusStateClosing);
	if (err) {
		printf("%s: error changing state to %d: %s\n", __func__,
		       XenbusStateClosing, err);
		goto close;
	}

	state = xenbus_read_integer(path);
	while (!err && state < XenbusStateClosing)
		err = xenbus_wait_for_state_change(path, &state);
	free(err);

	err = xenbus_switch_state(XBT_NIL, nodename, XenbusStateClosed);
	if (err) {
		printf("%s: error changing state to %d: %s\n", __func__,
		       XenbusStateClosed, err);
		goto close;
	}

	state = xenbus_read_integer(path);
	while (state < XenbusStateClosed) {
		err = xenbus_wait_for_state_change(path, &state);
		free(err);
	}

	err = xenbus_switch_state(XBT_NIL, nodename, XenbusStateInitialising);
	if (err) {
		printf("%s: error changing state to %d: %s\n", __func__,
		       XenbusStateInitialising, err);
		goto close;
	}

	state = xenbus_read_integer(path);
	while (!err &&
	       (state < XenbusStateInitWait || state >= XenbusStateClosed))
		err = xenbus_wait_for_state_change(path, &state);

close:
	free(err);

	snprintf(nodename, sizeof(nodename), "%s/ring-ref", dev->nodename);
	err2 = xenbus_rm(XBT_NIL, nodename);
	free(err2);
	snprintf(nodename, sizeof(nodename), "%s/event-channel", dev->nodename);
	err2 = xenbus_rm(XBT_NIL, nodename);
	free(err2);

	if (!err)
		free_blkfront(dev);
}

/**
 * blkfront_aio_poll() - AIO polling function.
 * @dev: Blkfront device
 *
 * Receive responses from the ring and check their status. Responses are
 * processed from the consumer pointer up to the producer pointer, and the
 * consumer pointer is then advanced to mark the data as read.
 *
 * Return: Number of consumed responses.
 */
static int blkfront_aio_poll(struct blkfront_dev *dev)
{
	RING_IDX rp, cons;
	struct blkif_response *rsp;
	int more;
	int nr_consumed;

moretodo:
	rp = dev->ring.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */
	cons = dev->ring.rsp_cons;

	nr_consumed = 0;
	while (cons != rp) {
		struct blkfront_aiocb *aiocbp;
		int status;

		rsp = RING_GET_RESPONSE(&dev->ring, cons);
		nr_consumed++;

		aiocbp = (void *)(uintptr_t)rsp->id;
		status = rsp->status;

		switch (rsp->operation) {
		case BLKIF_OP_READ:
		case BLKIF_OP_WRITE:
		{
			int j;

			if (status != BLKIF_RSP_OKAY)
				printf("%s error %d on %s at offset %llu, num bytes %llu\n",
				       rsp->operation == BLKIF_OP_READ ?
				       "read" : "write",
				       status, aiocbp->aio_dev->nodename,
				       (unsigned long long)aiocbp->aio_offset,
				       (unsigned long long)aiocbp->aio_nbytes);

			for (j = 0; j < aiocbp->n; j++)
				gnttab_end_access(aiocbp->gref[j]);

			break;
		}

		case BLKIF_OP_WRITE_BARRIER:
			if (status != BLKIF_RSP_OKAY)
				printf("write barrier error %d\n", status);
			break;
		case BLKIF_OP_FLUSH_DISKCACHE:
			if (status != BLKIF_RSP_OKAY)
				printf("flush error %d\n", status);
			break;

		default:
			printf("unrecognized block operation %d response (status %d)\n",
			       rsp->operation, status);
			break;
		}

		dev->ring.rsp_cons = ++cons;
		/* Nota: callback frees aiocbp itself */
		if (aiocbp && aiocbp->aio_cb)
			aiocbp->aio_cb(aiocbp, status ? -EIO : 0);
		if (dev->ring.rsp_cons != cons)
			/* We reentered, we must not continue here */
			break;
	}

	RING_FINAL_CHECK_FOR_RESPONSES(&dev->ring, more);
	if (more)
		goto moretodo;

	return nr_consumed;
}

static void blkfront_wait_slot(struct blkfront_dev *dev)
{
	/* Wait for a slot */
	if (RING_FULL(&dev->ring)) {
		while (true) {
			blkfront_aio_poll(dev);
			if (!RING_FULL(&dev->ring))
				break;
			wait_event_timeout(NULL, !RING_FULL(&dev->ring),
					   WAIT_RING_TO_MS);
		}
	}
}

/**
 * blkfront_aio() - Issue an AIO.
 * @aiocbp: AIO control block structure
 * @write: Operation type:
 *	   0 - read
 *	   1 - write
 *
 * Check that the AIO parameters meet the requirements of the device, take a
 * free request slot from the ring and fill in its arguments, then grant the
 * backend access to the data pages. The last step is notifying the backend
 * about the AIO via the event channel.
 */
static void blkfront_aio(struct blkfront_aiocb *aiocbp, int write)
{
	struct blkfront_dev *dev = aiocbp->aio_dev;
	struct blkif_request *req;
	RING_IDX i;
	int notify;
	int n, j;
	uintptr_t start, end;

	/* Can't io at non-sector-aligned location */
	BUG_ON(aiocbp->aio_offset & (dev->info.sector_size - 1));
	/* Can't io non-sector-sized amounts */
	BUG_ON(aiocbp->aio_nbytes & (dev->info.sector_size - 1));
	/* Can't io non-sector-aligned buffer */
	BUG_ON(((uintptr_t)aiocbp->aio_buf & (dev->info.sector_size - 1)));

	start = (uintptr_t)aiocbp->aio_buf & PAGE_MASK;
	end = ((uintptr_t)aiocbp->aio_buf + aiocbp->aio_nbytes +
	       PAGE_SIZE - 1) & PAGE_MASK;
	n = (end - start) / PAGE_SIZE;
	aiocbp->n = n;

	BUG_ON(n > BLKIF_MAX_SEGMENTS_PER_REQUEST);

	blkfront_wait_slot(dev);
	i = dev->ring.req_prod_pvt;
	req = RING_GET_REQUEST(&dev->ring, i);

	req->operation = write ? BLKIF_OP_WRITE : BLKIF_OP_READ;
	req->nr_segments = n;
	req->handle = dev->handle;
	req->id = (uintptr_t)aiocbp;
	req->sector_number = aiocbp->aio_offset / dev->info.sector_size;

	for (j = 0; j < n; j++) {
		req->seg[j].first_sect = 0;
		req->seg[j].last_sect = PAGE_SIZE / dev->info.sector_size - 1;
	}
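	/*
	 * The buffer may start and end part-way through a page, so clamp the
	 * first and last segments to the sectors it actually covers.
	 */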
	req->seg[0].first_sect = ((uintptr_t)aiocbp->aio_buf & ~PAGE_MASK) /
		dev->info.sector_size;
	req->seg[n - 1].last_sect = (((uintptr_t)aiocbp->aio_buf +
		aiocbp->aio_nbytes - 1) & ~PAGE_MASK) / dev->info.sector_size;
	for (j = 0; j < n; j++) {
		uintptr_t data = start + j * PAGE_SIZE;

		if (!write) {
			/* Trigger CoW if needed */
			*(char *)(data + (req->seg[j].first_sect *
					  dev->info.sector_size)) = 0;
			barrier();
		}
		req->seg[j].gref = gnttab_grant_access(dev->dom,
						       virt_to_pfn((void *)data),
						       write);
		aiocbp->gref[j] = req->seg[j].gref;
	}

	dev->ring.req_prod_pvt = i + 1;

	wmb();
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->ring, notify);

	if (notify)
		notify_remote_via_evtchn(dev->evtchn);
}

static void blkfront_aio_cb(struct blkfront_aiocb *aiocbp, int ret)
{
	aiocbp->data = (void *)1;
	aiocbp->aio_cb = NULL;
}

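/*
 * Synchronous I/O: submit the request with blkfront_aio() and poll the ring
 * until the completion callback has marked the request as done.
 */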
static void blkfront_io(struct blkfront_aiocb *aiocbp, int write)
{
	aiocbp->aio_cb = blkfront_aio_cb;
	blkfront_aio(aiocbp, write);
	aiocbp->data = NULL;

	while (true) {
		blkfront_aio_poll(aiocbp->aio_dev);
		if (aiocbp->data)
			break;
		cpu_relax();
	}
}

static void blkfront_push_operation(struct blkfront_dev *dev, u8 op,
				    uint64_t id)
{
	struct blkif_request *req;
	int notify, i;

	blkfront_wait_slot(dev);
	i = dev->ring.req_prod_pvt;
	req = RING_GET_REQUEST(&dev->ring, i);
	req->operation = op;
	req->nr_segments = 0;
	req->handle = dev->handle;
	req->id = id;
	req->sector_number = 0;
	dev->ring.req_prod_pvt = i + 1;
	wmb();
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->ring, notify);
	if (notify)
		notify_remote_via_evtchn(dev->evtchn);
}

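/*
 * Flush outstanding writes: issue a write barrier and/or cache flush if the
 * backend advertises them, then poll until every in-flight request on the
 * ring has completed.
 */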
static void blkfront_sync(struct blkfront_dev *dev)
{
	if (dev->info.mode == O_RDWR) {
		if (dev->info.barrier == 1)
			blkfront_push_operation(dev,
						BLKIF_OP_WRITE_BARRIER, 0);

		if (dev->info.flush == 1)
			blkfront_push_operation(dev,
						BLKIF_OP_FLUSH_DISKCACHE, 0);
	}

	while (true) {
		blkfront_aio_poll(dev);
		if (RING_FREE_REQUESTS(&dev->ring) == RING_SIZE(&dev->ring))
			break;
		cpu_relax();
	}
}

/**
 * pvblock_iop() - Issue a block I/O operation.
 * @udev: Pvblock device
 * @blknr: Block number to read from / write to
 * @blkcnt: Amount of blocks to read / write
 * @buffer: Memory buffer with data to be read / written
 * @write: Operation type:
 *	   0 - read
 *	   1 - write
 *
 * Depending on the operation, data is transferred between the memory buffer
 * (@buffer) and the device blocks starting at @blknr.
 *
 * Return: Number of blocks transferred, or 0 on error.
 */
static ulong pvblock_iop(struct udevice *udev, lbaint_t blknr,
			 lbaint_t blkcnt, void *buffer, int write)
{
	struct blkfront_dev *blk_dev = dev_get_priv(udev);
	struct blk_desc *desc = dev_get_uclass_plat(udev);
	struct blkfront_aiocb aiocb;
	lbaint_t blocks_todo;
	bool unaligned;

	if (blkcnt == 0)
		return 0;

	if ((blknr + blkcnt) > desc->lba) {
		printf(DRV_NAME ": block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
		       blknr + blkcnt, desc->lba);
		return 0;
	}

	unaligned = (uintptr_t)buffer & (blk_dev->info.sector_size - 1);

	aiocb.aio_dev = blk_dev;
	aiocb.aio_offset = blknr * desc->blksz;
	aiocb.aio_cb = NULL;
	aiocb.data = NULL;
	blocks_todo = blkcnt;
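	/*
	 * Unaligned caller buffers go through the single-sector bounce buffer
	 * one block at a time; aligned buffers are transferred directly, up
	 * to BLKIF_MAX_SEGMENTS_PER_REQUEST pages per request.
	 */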
	do {
		aiocb.aio_buf = unaligned ? blk_dev->bounce_buffer : buffer;

		if (write && unaligned)
			memcpy(blk_dev->bounce_buffer, buffer, desc->blksz);

		aiocb.aio_nbytes = unaligned ? desc->blksz :
			min((size_t)(BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE),
			    (size_t)(blocks_todo * desc->blksz));

		blkfront_io(&aiocb, write);

		if (!write && unaligned)
			memcpy(buffer, blk_dev->bounce_buffer, desc->blksz);

		aiocb.aio_offset += aiocb.aio_nbytes;
		buffer += aiocb.aio_nbytes;
		blocks_todo -= aiocb.aio_nbytes / desc->blksz;
	} while (blocks_todo > 0);

	return blkcnt;
}

ulong pvblock_blk_read(struct udevice *udev, lbaint_t blknr, lbaint_t blkcnt,
		       void *buffer)
{
	return pvblock_iop(udev, blknr, blkcnt, buffer, 0);
}

ulong pvblock_blk_write(struct udevice *udev, lbaint_t blknr, lbaint_t blkcnt,
			const void *buffer)
{
	return pvblock_iop(udev, blknr, blkcnt, (void *)buffer, 1);
}

static int pvblock_blk_bind(struct udevice *udev)
{
	struct blk_desc *desc = dev_get_uclass_plat(udev);
	int devnum;

	desc->if_type = IF_TYPE_PVBLOCK;
	/*
	 * Initialize the devnum to -ENODEV. This is to make sure that
	 * blk_next_free_devnum() works as expected, since the default
	 * value 0 is a valid devnum.
	 */
	desc->devnum = -ENODEV;
	devnum = blk_next_free_devnum(IF_TYPE_PVBLOCK);
	if (devnum < 0)
		return devnum;
	desc->devnum = devnum;
	desc->part_type = PART_TYPE_UNKNOWN;
	desc->bdev = udev;

	strncpy(desc->vendor, "Xen", sizeof(desc->vendor));
	strncpy(desc->revision, "1", sizeof(desc->revision));
	strncpy(desc->product, "Virtual disk", sizeof(desc->product));

	return 0;
}

static int pvblock_blk_probe(struct udevice *udev)
{
	struct blkfront_dev *blk_dev = dev_get_priv(udev);
	struct blkfront_plat *plat = dev_get_plat(udev);
	struct blk_desc *desc = dev_get_uclass_plat(udev);
	int ret, devid;

	devid = plat->devid;
	free(plat);

	ret = init_blkfront(devid, blk_dev);
	if (ret < 0)
		return ret;

	desc->blksz = blk_dev->info.sector_size;
	desc->lba = blk_dev->info.sectors;
	desc->log2blksz = LOG2(blk_dev->info.sector_size);

	return 0;
}

static int pvblock_blk_remove(struct udevice *udev)
{
	struct blkfront_dev *blk_dev = dev_get_priv(udev);

	shutdown_blkfront(blk_dev);
	return 0;
}

static const struct blk_ops pvblock_blk_ops = {
	.read	= pvblock_blk_read,
	.write	= pvblock_blk_write,
};

U_BOOT_DRIVER(pvblock_blk) = {
	.name			= DRV_NAME_BLK,
	.id			= UCLASS_BLK,
	.ops			= &pvblock_blk_ops,
	.bind			= pvblock_blk_bind,
	.probe			= pvblock_blk_probe,
	.remove			= pvblock_blk_remove,
	.priv_auto	= sizeof(struct blkfront_dev),
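	/* Have remove() called before the OS boots so the backend is shut down cleanly */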
	.flags			= DM_FLAG_OS_PREPARE,
};

/*******************************************************************************
 * Para-virtual block device class
 *******************************************************************************/

typedef int (*enum_vbd_callback)(struct udevice *parent, unsigned int devid);

static int on_new_vbd(struct udevice *parent, unsigned int devid)
{
	struct driver_info info;
	struct udevice *udev;
	struct blkfront_plat *plat;
	int ret;

	debug("New " DRV_NAME_BLK ", device ID %d\n", devid);

	plat = malloc(sizeof(struct blkfront_plat));
	if (!plat) {
		printf("Failed to allocate platform data\n");
		return -ENOMEM;
	}

	plat->devid = devid;

	info.name = DRV_NAME_BLK;
	info.plat = plat;

	ret = device_bind_by_name(parent, false, &info, &udev);
	if (ret < 0) {
		printf("Failed to bind " DRV_NAME_BLK " to device with ID %d, ret: %d\n",
		       devid, ret);
		free(plat);
	}
	return ret;
}

static int xenbus_enumerate_vbd(struct udevice *udev, enum_vbd_callback clb)
{
	char **dirs, *msg;
	int i, ret;

	msg = xenbus_ls(XBT_NIL, "device/vbd", &dirs);
	if (msg) {
		printf("Failed to read device/vbd directory: %s\n", msg);
		free(msg);
		return -ENODEV;
	}

	for (i = 0; dirs[i]; i++) {
		int devid;

		sscanf(dirs[i], "%d", &devid);
		ret = clb(udev, devid);
		if (ret < 0)
			goto fail;

		free(dirs[i]);
	}
	ret = 0;

fail:
	for (; dirs[i]; i++)
		free(dirs[i]);
	free(dirs);
	return ret;
}

static void print_pvblock_devices(void)
{
	struct udevice *udev;
	bool first = true;
	const char *class_name;

	class_name = uclass_get_name(UCLASS_PVBLOCK);
	for (blk_first_device(IF_TYPE_PVBLOCK, &udev); udev;
	     blk_next_device(&udev), first = false) {
		struct blk_desc *desc = dev_get_uclass_plat(udev);

		if (!first)
			puts(", ");
		printf("%s: %d", class_name, desc->devnum);
	}
	printf("\n");
}

void pvblock_init(void)
{
	struct driver_info info;
	struct udevice *udev;
	struct uclass *uc;
	int ret;

	/*
	 * At this point Xen drivers have already initialized,
	 * so we can instantiate the class driver and enumerate
	 * virtual block devices.
	 */
	info.name = DRV_NAME;
	ret = device_bind_by_name(gd->dm_root, false, &info, &udev);
	if (ret < 0)
		printf("Failed to bind " DRV_NAME ", ret: %d\n", ret);

	/* Bootstrap virtual block devices class driver */
	ret = uclass_get(UCLASS_PVBLOCK, &uc);
	if (ret)
		return;
	uclass_foreach_dev_probe(UCLASS_PVBLOCK, udev);

	print_pvblock_devices();
}

static int pvblock_probe(struct udevice *udev)
{
	struct uclass *uc;
	int ret;

	if (xenbus_enumerate_vbd(udev, on_new_vbd) < 0)
		return -ENODEV;

	ret = uclass_get(UCLASS_BLK, &uc);
	if (ret)
		return ret;
	uclass_foreach_dev_probe(UCLASS_BLK, udev) {
		if (_ret)
			return _ret;
	};
	return 0;
}

U_BOOT_DRIVER(pvblock_drv) = {
	.name		= DRV_NAME,
	.id		= UCLASS_PVBLOCK,
	.probe		= pvblock_probe,
};

UCLASS_DRIVER(pvblock) = {
	.name		= DRV_NAME,
	.id		= UCLASS_PVBLOCK,
};