// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for s390 chsc subchannels
 *
 * Copyright IBM Corp. 2008, 2011
 *
 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 *
 */

#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/miscdevice.h>
#include <linux/kernel_stat.h>

#include <asm/cio.h>
#include <asm/chsc.h>
#include <asm/isc.h>

#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "chsc_sch.h"
#include "ioasm.h"

static debug_info_t *chsc_debug_msg_id;
static debug_info_t *chsc_debug_log_id;

static struct chsc_request *on_close_request;
static struct chsc_async_area *on_close_chsc_area;
static DEFINE_MUTEX(on_close_mutex);

#define CHSC_MSG(imp, args...) do {					\
		debug_sprintf_event(chsc_debug_msg_id, imp , ##args);	\
	} while (0)

#define CHSC_LOG(imp, txt) do {					\
		debug_text_event(chsc_debug_log_id, imp , txt);	\
	} while (0)

static void CHSC_LOG_HEX(int level, void *data, int length)
{
	debug_event(chsc_debug_log_id, level, data, length);
}

MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("driver for s390 chsc subchannels");
MODULE_LICENSE("GPL");

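/*
 * Interrupt handler for chsc subchannels: hand the IRB over to the
 * pending request (if any), update the schib and complete the waiter.
 */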
static void chsc_subchannel_irq(struct subchannel *sch)
{
	struct chsc_private *private = dev_get_drvdata(&sch->dev);
	struct chsc_request *request = private->request;
	struct irb *irb = this_cpu_ptr(&cio_irb);

	CHSC_LOG(4, "irb");
	CHSC_LOG_HEX(4, irb, sizeof(*irb));
	inc_irq_stat(IRQIO_CSC);

	/* Copy irb to provided request and set done. */
	if (!request) {
		CHSC_MSG(0, "Interrupt on sch 0.%x.%04x with no request\n",
			 sch->schid.ssid, sch->schid.sch_no);
		return;
	}
	private->request = NULL;
	memcpy(&request->irb, irb, sizeof(*irb));
	cio_update_schib(sch);
	complete(&request->completion);
	put_device(&sch->dev);
}

static int chsc_subchannel_probe(struct subchannel *sch)
{
	struct chsc_private *private;
	int ret;

	CHSC_MSG(6, "Detected chsc subchannel 0.%x.%04x\n",
		 sch->schid.ssid, sch->schid.sch_no);
	sch->isc = CHSC_SCH_ISC;
	private = kzalloc(sizeof(*private), GFP_KERNEL);
	if (!private)
		return -ENOMEM;
	dev_set_drvdata(&sch->dev, private);
	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
	if (ret) {
		CHSC_MSG(0, "Failed to enable 0.%x.%04x: %d\n",
			 sch->schid.ssid, sch->schid.sch_no, ret);
		dev_set_drvdata(&sch->dev, NULL);
		kfree(private);
	} else {
		if (dev_get_uevent_suppress(&sch->dev)) {
			dev_set_uevent_suppress(&sch->dev, 0);
			kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
		}
	}
	return ret;
}

static void chsc_subchannel_remove(struct subchannel *sch)
{
	struct chsc_private *private;

	cio_disable_subchannel(sch);
	private = dev_get_drvdata(&sch->dev);
	dev_set_drvdata(&sch->dev, NULL);
	if (private->request) {
		complete(&private->request->completion);
		put_device(&sch->dev);
	}
	kfree(private);
}

static void chsc_subchannel_shutdown(struct subchannel *sch)
{
	cio_disable_subchannel(sch);
}

static struct css_device_id chsc_subchannel_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_CHSC, },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(css, chsc_subchannel_ids);

static struct css_driver chsc_subchannel_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "chsc_subchannel",
	},
	.subchannel_type = chsc_subchannel_ids,
	.irq = chsc_subchannel_irq,
	.probe = chsc_subchannel_probe,
	.remove = chsc_subchannel_remove,
	.shutdown = chsc_subchannel_shutdown,
};

static int __init chsc_init_dbfs(void)
{
	chsc_debug_msg_id = debug_register("chsc_msg", 8, 1, 4 * sizeof(long));
	if (!chsc_debug_msg_id)
		goto out;
	debug_register_view(chsc_debug_msg_id, &debug_sprintf_view);
	debug_set_level(chsc_debug_msg_id, 2);
	chsc_debug_log_id = debug_register("chsc_log", 16, 1, 16);
	if (!chsc_debug_log_id)
		goto out;
	debug_register_view(chsc_debug_log_id, &debug_hex_ascii_view);
	debug_set_level(chsc_debug_log_id, 2);
	return 0;
out:
	debug_unregister(chsc_debug_msg_id);
	return -ENOMEM;
}

static void chsc_remove_dbfs(void)
{
	debug_unregister(chsc_debug_log_id);
	debug_unregister(chsc_debug_msg_id);
}

static int __init chsc_init_sch_driver(void)
{
	return css_driver_register(&chsc_subchannel_driver);
}

static void chsc_cleanup_sch_driver(void)
{
	css_driver_unregister(&chsc_subchannel_driver);
}

static DEFINE_SPINLOCK(chsc_lock);

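/* A subchannel can take a new request if it is enabled and idle. */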
static int chsc_subchannel_match_next_free(struct device *dev, const void *data)
{
	struct subchannel *sch = to_subchannel(dev);

	return sch->schib.pmcw.ena && !scsw_fctl(&sch->schib.scsw);
}

static struct subchannel *chsc_get_next_subchannel(struct subchannel *sch)
{
	struct device *dev;

	dev = driver_find_device(&chsc_subchannel_driver.drv,
				 sch ? &sch->dev : NULL, NULL,
				 chsc_subchannel_match_next_free);
	return dev ? to_subchannel(dev) : NULL;
}

/**
 * chsc_async() - try to start a chsc request asynchronously
 * @chsc_area: request to be started
 * @request: request structure to associate
 *
 * Tries to start a chsc request on one of the existing chsc subchannels.
 * Returns:
 *  %0 if the request was performed synchronously
 *  %-EINPROGRESS if the request was successfully started
 *  %-EBUSY if all chsc subchannels are busy
 *  %-ENODEV if no chsc subchannels are available
 * Context:
 *  interrupts disabled, chsc_lock held
 */
static int chsc_async(struct chsc_async_area *chsc_area,
		      struct chsc_request *request)
{
	int cc;
	struct chsc_private *private;
	struct subchannel *sch = NULL;
	int ret = -ENODEV;
	char dbf[10];

	chsc_area->header.key = PAGE_DEFAULT_KEY >> 4;
	while ((sch = chsc_get_next_subchannel(sch))) {
		spin_lock(sch->lock);
		private = dev_get_drvdata(&sch->dev);
		if (private->request) {
			spin_unlock(sch->lock);
			ret = -EBUSY;
			continue;
		}
		chsc_area->header.sid = sch->schid;
		CHSC_LOG(2, "schid");
		CHSC_LOG_HEX(2, &sch->schid, sizeof(sch->schid));
		cc = chsc(chsc_area);
		snprintf(dbf, sizeof(dbf), "cc:%d", cc);
		CHSC_LOG(2, dbf);
		switch (cc) {
		case 0:
			ret = 0;
			break;
		case 1:
			sch->schib.scsw.cmd.fctl |= SCSW_FCTL_START_FUNC;
			ret = -EINPROGRESS;
			private->request = request;
			break;
		case 2:
			ret = -EBUSY;
			break;
		default:
			ret = -ENODEV;
		}
		spin_unlock(sch->lock);
		CHSC_MSG(2, "chsc on 0.%x.%04x returned cc=%d\n",
			 sch->schid.ssid, sch->schid.sch_no, cc);
		if (ret == -EINPROGRESS)
			return -EINPROGRESS;
		put_device(&sch->dev);
		if (ret == 0)
			return 0;
	}
	return ret;
}

static void chsc_log_command(void *chsc_area)
{
	char dbf[10];

	snprintf(dbf, sizeof(dbf), "CHSC:%x", ((uint16_t *)chsc_area)[1]);
	CHSC_LOG(0, dbf);
	CHSC_LOG_HEX(0, chsc_area, 32);
}

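/*
 * Map the channel status in the request's saved IRB to an errno.
 * Additional channel status is only treated as an error if the request
 * was backed up by a chaining check.
 */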
static int chsc_examine_irb(struct chsc_request *request)
{
	int backed_up;

	if (!(scsw_stctl(&request->irb.scsw) & SCSW_STCTL_STATUS_PEND))
		return -EIO;
	backed_up = scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHAIN_CHECK;
	request->irb.scsw.cmd.cstat &= ~SCHN_STAT_CHAIN_CHECK;
	if (scsw_cstat(&request->irb.scsw) == 0)
		return 0;
	if (!backed_up)
		return 0;
	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROG_CHECK)
		return -EIO;
	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROT_CHECK)
		return -EPERM;
	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHN_DATA_CHK)
		return -EAGAIN;
	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHN_CTRL_CHK)
		return -EAGAIN;
	return -EIO;
}

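/*
 * CHSC_START: copy the page-sized command block from user space, start it
 * asynchronously on one of the chsc subchannels, wait for the interrupt
 * and copy the updated block back.
 */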
static int chsc_ioctl_start(void __user *user_area)
{
	struct chsc_request *request;
	struct chsc_async_area *chsc_area;
	int ret;
	char dbf[10];

	if (!css_general_characteristics.dynio)
		/* It makes no sense to try. */
		return -EOPNOTSUPP;
	chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL);
	if (!chsc_area)
		return -ENOMEM;
	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (!request) {
		ret = -ENOMEM;
		goto out_free;
	}
	init_completion(&request->completion);
	if (copy_from_user(chsc_area, user_area, PAGE_SIZE)) {
		ret = -EFAULT;
		goto out_free;
	}
	chsc_log_command(chsc_area);
	spin_lock_irq(&chsc_lock);
	ret = chsc_async(chsc_area, request);
	spin_unlock_irq(&chsc_lock);
	if (ret == -EINPROGRESS) {
		wait_for_completion(&request->completion);
		ret = chsc_examine_irb(request);
	}
	/* copy area back to user */
	if (!ret)
		if (copy_to_user(user_area, chsc_area, PAGE_SIZE))
			ret = -EFAULT;
out_free:
	snprintf(dbf, sizeof(dbf), "ret:%d", ret);
	CHSC_LOG(0, dbf);
	kfree(request);
	free_page((unsigned long)chsc_area);
	return ret;
}

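/*
 * CHSC_ON_CLOSE_SET: store a command block to be issued on the final close
 * of the device node (see chsc_release()); only one such block may be
 * pending at a time.
 */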
static int chsc_ioctl_on_close_set(void __user *user_area)
{
	char dbf[13];
	int ret;

	mutex_lock(&on_close_mutex);
	if (on_close_chsc_area) {
		ret = -EBUSY;
		goto out_unlock;
	}
	on_close_request = kzalloc(sizeof(*on_close_request), GFP_KERNEL);
	if (!on_close_request) {
		ret = -ENOMEM;
		goto out_unlock;
	}
	on_close_chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL);
	if (!on_close_chsc_area) {
		ret = -ENOMEM;
		goto out_free_request;
	}
	if (copy_from_user(on_close_chsc_area, user_area, PAGE_SIZE)) {
		ret = -EFAULT;
		goto out_free_chsc;
	}
	ret = 0;
	goto out_unlock;

out_free_chsc:
	free_page((unsigned long)on_close_chsc_area);
	on_close_chsc_area = NULL;
out_free_request:
	kfree(on_close_request);
	on_close_request = NULL;
out_unlock:
	mutex_unlock(&on_close_mutex);
	snprintf(dbf, sizeof(dbf), "ocsret:%d", ret);
	CHSC_LOG(0, dbf);
	return ret;
}

static int chsc_ioctl_on_close_remove(void)
{
	char dbf[13];
	int ret;

	mutex_lock(&on_close_mutex);
	if (!on_close_chsc_area) {
		ret = -ENOENT;
		goto out_unlock;
	}
	free_page((unsigned long)on_close_chsc_area);
	on_close_chsc_area = NULL;
	kfree(on_close_request);
	on_close_request = NULL;
	ret = 0;
out_unlock:
	mutex_unlock(&on_close_mutex);
	snprintf(dbf, sizeof(dbf), "ocrret:%d", ret);
	CHSC_LOG(0, dbf);
	return ret;
}

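/*
 * CHSC_START_SYNC: issue the caller's command block synchronously; command
 * codes with bit 0x4000 set are rejected.
 */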
static int chsc_ioctl_start_sync(void __user *user_area)
{
	struct chsc_sync_area *chsc_area;
	int ret, ccode;

	chsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!chsc_area)
		return -ENOMEM;
	if (copy_from_user(chsc_area, user_area, PAGE_SIZE)) {
		ret = -EFAULT;
		goto out_free;
	}
	if (chsc_area->header.code & 0x4000) {
		ret = -EINVAL;
		goto out_free;
	}
	chsc_log_command(chsc_area);
	ccode = chsc(chsc_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (copy_to_user(user_area, chsc_area, PAGE_SIZE))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	free_page((unsigned long)chsc_area);
	return ret;
}

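/*
 * The chsc_ioctl_info_*, conf and dcal helpers below share one pattern:
 * build the request block in a zeroed DMA page, issue the chsc instruction
 * and copy the response block back to the caller.
 */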
static int chsc_ioctl_info_channel_path(void __user *user_cd)
{
	struct chsc_chp_cd *cd;
	int ret, ccode;
	struct {
		struct chsc_header request;
		u32 : 2;
		u32 m : 1;
		u32 : 1;
		u32 fmt1 : 4;
		u32 cssid : 8;
		u32 : 8;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 : 32;
		struct chsc_header response;
		u8 data[PAGE_SIZE - 20];
	} __attribute__ ((packed)) *scpcd_area;

	scpcd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scpcd_area)
		return -ENOMEM;
	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
	if (!cd) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(cd, user_cd, sizeof(*cd))) {
		ret = -EFAULT;
		goto out_free;
	}
	scpcd_area->request.length = 0x0010;
	scpcd_area->request.code = 0x0028;
	scpcd_area->m = cd->m;
	scpcd_area->fmt1 = cd->fmt;
	scpcd_area->cssid = cd->chpid.cssid;
	scpcd_area->first_chpid = cd->chpid.id;
	scpcd_area->last_chpid = cd->chpid.id;

	ccode = chsc(scpcd_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (scpcd_area->response.code != 0x0001) {
		ret = -EIO;
		CHSC_MSG(0, "scpcd: response code=%x\n",
			 scpcd_area->response.code);
		goto out_free;
	}
	memcpy(&cd->cpcb, &scpcd_area->response, scpcd_area->response.length);
	if (copy_to_user(user_cd, cd, sizeof(*cd)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(cd);
	free_page((unsigned long)scpcd_area);
	return ret;
}

static int chsc_ioctl_info_cu(void __user *user_cd)
{
	struct chsc_cu_cd *cd;
	int ret, ccode;
	struct {
		struct chsc_header request;
		u32 : 2;
		u32 m : 1;
		u32 : 1;
		u32 fmt1 : 4;
		u32 cssid : 8;
		u32 : 8;
		u32 first_cun : 8;
		u32 : 24;
		u32 last_cun : 8;
		u32 : 32;
		struct chsc_header response;
		u8 data[PAGE_SIZE - 20];
	} __attribute__ ((packed)) *scucd_area;

	scucd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scucd_area)
		return -ENOMEM;
	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
	if (!cd) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(cd, user_cd, sizeof(*cd))) {
		ret = -EFAULT;
		goto out_free;
	}
	scucd_area->request.length = 0x0010;
	scucd_area->request.code = 0x0026;
	scucd_area->m = cd->m;
	scucd_area->fmt1 = cd->fmt;
	scucd_area->cssid = cd->cssid;
	scucd_area->first_cun = cd->cun;
	scucd_area->last_cun = cd->cun;

	ccode = chsc(scucd_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (scucd_area->response.code != 0x0001) {
		ret = -EIO;
		CHSC_MSG(0, "scucd: response code=%x\n",
			 scucd_area->response.code);
		goto out_free;
	}
	memcpy(&cd->cucb, &scucd_area->response, scucd_area->response.length);
	if (copy_to_user(user_cd, cd, sizeof(*cd)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(cd);
	free_page((unsigned long)scucd_area);
	return ret;
}

static int chsc_ioctl_info_sch_cu(void __user *user_cud)
{
	struct chsc_sch_cud *cud;
	int ret, ccode;
	struct {
		struct chsc_header request;
		u32 : 2;
		u32 m : 1;
		u32 : 5;
		u32 fmt1 : 4;
		u32 : 2;
		u32 ssid : 2;
		u32 first_sch : 16;
		u32 : 8;
		u32 cssid : 8;
		u32 last_sch : 16;
		u32 : 32;
		struct chsc_header response;
		u8 data[PAGE_SIZE - 20];
	} __attribute__ ((packed)) *sscud_area;

	sscud_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sscud_area)
		return -ENOMEM;
	cud = kzalloc(sizeof(*cud), GFP_KERNEL);
	if (!cud) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(cud, user_cud, sizeof(*cud))) {
		ret = -EFAULT;
		goto out_free;
	}
	sscud_area->request.length = 0x0010;
	sscud_area->request.code = 0x0006;
	sscud_area->m = cud->schid.m;
	sscud_area->fmt1 = cud->fmt;
	sscud_area->ssid = cud->schid.ssid;
	sscud_area->first_sch = cud->schid.sch_no;
	sscud_area->cssid = cud->schid.cssid;
	sscud_area->last_sch = cud->schid.sch_no;

	ccode = chsc(sscud_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (sscud_area->response.code != 0x0001) {
		ret = -EIO;
		CHSC_MSG(0, "sscud: response code=%x\n",
			 sscud_area->response.code);
		goto out_free;
	}
	memcpy(&cud->scub, &sscud_area->response, sscud_area->response.length);
	if (copy_to_user(user_cud, cud, sizeof(*cud)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(cud);
	free_page((unsigned long)sscud_area);
	return ret;
}

static int chsc_ioctl_conf_info(void __user *user_ci)
{
	struct chsc_conf_info *ci;
	int ret, ccode;
	struct {
		struct chsc_header request;
		u32 : 2;
		u32 m : 1;
		u32 : 1;
		u32 fmt1 : 4;
		u32 cssid : 8;
		u32 : 6;
		u32 ssid : 2;
		u32 : 8;
		u64 : 64;
		struct chsc_header response;
		u8 data[PAGE_SIZE - 20];
	} __attribute__ ((packed)) *sci_area;

	sci_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sci_area)
		return -ENOMEM;
	ci = kzalloc(sizeof(*ci), GFP_KERNEL);
	if (!ci) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(ci, user_ci, sizeof(*ci))) {
		ret = -EFAULT;
		goto out_free;
	}
	sci_area->request.length = 0x0010;
	sci_area->request.code = 0x0012;
	sci_area->m = ci->id.m;
	sci_area->fmt1 = ci->fmt;
	sci_area->cssid = ci->id.cssid;
	sci_area->ssid = ci->id.ssid;

	ccode = chsc(sci_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (sci_area->response.code != 0x0001) {
		ret = -EIO;
		CHSC_MSG(0, "sci: response code=%x\n",
			 sci_area->response.code);
		goto out_free;
	}
	memcpy(&ci->scid, &sci_area->response, sci_area->response.length);
	if (copy_to_user(user_ci, ci, sizeof(*ci)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(ci);
	free_page((unsigned long)sci_area);
	return ret;
}

static int chsc_ioctl_conf_comp_list(void __user *user_ccl)
{
	struct chsc_comp_list *ccl;
	int ret, ccode;
	struct {
		struct chsc_header request;
		u32 ctype : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
		u64 : 64;
		u32 list_parm[2];
		u64 : 64;
		struct chsc_header response;
		u8 data[PAGE_SIZE - 36];
	} __attribute__ ((packed)) *sccl_area;
	struct {
		u32 m : 1;
		u32 : 31;
		u32 cssid : 8;
		u32 : 16;
		u32 chpid : 8;
	} __attribute__ ((packed)) *chpid_parm;
	struct {
		u32 f_cssid : 8;
		u32 l_cssid : 8;
		u32 : 16;
		u32 res;
	} __attribute__ ((packed)) *cssids_parm;

	sccl_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccl_area)
		return -ENOMEM;
	ccl = kzalloc(sizeof(*ccl), GFP_KERNEL);
	if (!ccl) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(ccl, user_ccl, sizeof(*ccl))) {
		ret = -EFAULT;
		goto out_free;
	}
	sccl_area->request.length = 0x0020;
	sccl_area->request.code = 0x0030;
	sccl_area->fmt = ccl->req.fmt;
	sccl_area->ctype = ccl->req.ctype;
	switch (sccl_area->ctype) {
	case CCL_CU_ON_CHP:
	case CCL_IOP_CHP:
		chpid_parm = (void *)&sccl_area->list_parm;
		chpid_parm->m = ccl->req.chpid.m;
		chpid_parm->cssid = ccl->req.chpid.chp.cssid;
		chpid_parm->chpid = ccl->req.chpid.chp.id;
		break;
	case CCL_CSS_IMG:
	case CCL_CSS_IMG_CONF_CHAR:
		cssids_parm = (void *)&sccl_area->list_parm;
		cssids_parm->f_cssid = ccl->req.cssids.f_cssid;
		cssids_parm->l_cssid = ccl->req.cssids.l_cssid;
		break;
	}
	ccode = chsc(sccl_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (sccl_area->response.code != 0x0001) {
		ret = -EIO;
		CHSC_MSG(0, "sccl: response code=%x\n",
			 sccl_area->response.code);
		goto out_free;
	}
	memcpy(&ccl->sccl, &sccl_area->response, sccl_area->response.length);
	if (copy_to_user(user_ccl, ccl, sizeof(*ccl)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(ccl);
	free_page((unsigned long)sccl_area);
	return ret;
}

static int chsc_ioctl_chpd(void __user *user_chpd)
{
	struct chsc_scpd *scpd_area;
	struct chsc_cpd_info *chpd;
	int ret;

	chpd = kzalloc(sizeof(*chpd), GFP_KERNEL);
	scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scpd_area || !chpd) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(chpd, user_chpd, sizeof(*chpd))) {
		ret = -EFAULT;
		goto out_free;
	}
	ret = chsc_determine_channel_path_desc(chpd->chpid, chpd->fmt,
					       chpd->rfmt, chpd->c, chpd->m,
					       scpd_area);
	if (ret)
		goto out_free;
	memcpy(&chpd->chpdb, &scpd_area->response, scpd_area->response.length);
	if (copy_to_user(user_chpd, chpd, sizeof(*chpd)))
		ret = -EFAULT;
out_free:
	kfree(chpd);
	free_page((unsigned long)scpd_area);
	return ret;
}

static int chsc_ioctl_dcal(void __user *user_dcal)
{
	struct chsc_dcal *dcal;
	int ret, ccode;
	struct {
		struct chsc_header request;
		u32 atype : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
		u32 res0[2];
		u32 list_parm[2];
		u32 res1[2];
		struct chsc_header response;
		u8 data[PAGE_SIZE - 36];
	} __attribute__ ((packed)) *sdcal_area;

	sdcal_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sdcal_area)
		return -ENOMEM;
	dcal = kzalloc(sizeof(*dcal), GFP_KERNEL);
	if (!dcal) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(dcal, user_dcal, sizeof(*dcal))) {
		ret = -EFAULT;
		goto out_free;
	}
	sdcal_area->request.length = 0x0020;
	sdcal_area->request.code = 0x0034;
	sdcal_area->atype = dcal->req.atype;
	sdcal_area->fmt = dcal->req.fmt;
	memcpy(&sdcal_area->list_parm, &dcal->req.list_parm,
	       sizeof(sdcal_area->list_parm));

	ccode = chsc(sdcal_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (sdcal_area->response.code != 0x0001) {
		ret = -EIO;
		CHSC_MSG(0, "sdcal: response code=%x\n",
			 sdcal_area->response.code);
		goto out_free;
	}
	memcpy(&dcal->sdcal, &sdcal_area->response,
	       sdcal_area->response.length);
	if (copy_to_user(user_dcal, dcal, sizeof(*dcal)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(dcal);
	free_page((unsigned long)sdcal_area);
	return ret;
}

static long chsc_ioctl(struct file *filp, unsigned int cmd,
		       unsigned long arg)
{
	void __user *argp;

	CHSC_MSG(2, "chsc_ioctl called, cmd=%x\n", cmd);
	if (is_compat_task())
		argp = compat_ptr(arg);
	else
		argp = (void __user *)arg;
	switch (cmd) {
	case CHSC_START:
		return chsc_ioctl_start(argp);
	case CHSC_START_SYNC:
		return chsc_ioctl_start_sync(argp);
	case CHSC_INFO_CHANNEL_PATH:
		return chsc_ioctl_info_channel_path(argp);
	case CHSC_INFO_CU:
		return chsc_ioctl_info_cu(argp);
	case CHSC_INFO_SCH_CU:
		return chsc_ioctl_info_sch_cu(argp);
	case CHSC_INFO_CI:
		return chsc_ioctl_conf_info(argp);
	case CHSC_INFO_CCL:
		return chsc_ioctl_conf_comp_list(argp);
	case CHSC_INFO_CPD:
		return chsc_ioctl_chpd(argp);
	case CHSC_INFO_DCAL:
		return chsc_ioctl_dcal(argp);
	case CHSC_ON_CLOSE_SET:
		return chsc_ioctl_on_close_set(argp);
	case CHSC_ON_CLOSE_REMOVE:
		return chsc_ioctl_on_close_remove();
	default: /* unknown ioctl number */
		return -ENOIOCTLCMD;
	}
}
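
/*
 * Illustrative userspace sketch of the synchronous path (not part of this
 * driver). It assumes the CHSC_START_SYNC ioctl definition from the chsc
 * uapi header and a "/dev/chsc" node created for the misc device below:
 *
 *	fd = open("/dev/chsc", O_RDWR);
 *	posix_memalign(&buf, getpagesize(), getpagesize());
 *	memset(buf, 0, getpagesize());
 *	... fill in the CHSC request block (length, code, operands) ...
 *	rc = ioctl(fd, CHSC_START_SYNC, buf);
 *	... on rc == 0 the response block follows the request block in buf ...
 *	close(fd);
 */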

static atomic_t chsc_ready_for_use = ATOMIC_INIT(1);

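/* Allow only one concurrent user of the chsc device node. */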
static int chsc_open(struct inode *inode, struct file *file)
{
	if (!atomic_dec_and_test(&chsc_ready_for_use)) {
		atomic_inc(&chsc_ready_for_use);
		return -EBUSY;
	}
	return nonseekable_open(inode, file);
}

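/*
 * On close, issue the command block set up via CHSC_ON_CLOSE_SET (if any)
 * before releasing the device node for the next user.
 */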
static int chsc_release(struct inode *inode, struct file *filp)
{
	char dbf[13];
	int ret;

	mutex_lock(&on_close_mutex);
	if (!on_close_chsc_area)
		goto out_unlock;
	init_completion(&on_close_request->completion);
	CHSC_LOG(0, "on_close");
	chsc_log_command(on_close_chsc_area);
	spin_lock_irq(&chsc_lock);
	ret = chsc_async(on_close_chsc_area, on_close_request);
	spin_unlock_irq(&chsc_lock);
	if (ret == -EINPROGRESS) {
		wait_for_completion(&on_close_request->completion);
		ret = chsc_examine_irb(on_close_request);
	}
	snprintf(dbf, sizeof(dbf), "relret:%d", ret);
	CHSC_LOG(0, dbf);
	free_page((unsigned long)on_close_chsc_area);
	on_close_chsc_area = NULL;
	kfree(on_close_request);
	on_close_request = NULL;
out_unlock:
	mutex_unlock(&on_close_mutex);
	atomic_inc(&chsc_ready_for_use);
	return 0;
}

static const struct file_operations chsc_fops = {
	.owner = THIS_MODULE,
	.open = chsc_open,
	.release = chsc_release,
	.unlocked_ioctl = chsc_ioctl,
	.compat_ioctl = chsc_ioctl,
	.llseek = no_llseek,
};

static struct miscdevice chsc_misc_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "chsc",
	.fops = &chsc_fops,
};

static int __init chsc_misc_init(void)
{
	return misc_register(&chsc_misc_device);
}

static void chsc_misc_cleanup(void)
{
	misc_deregister(&chsc_misc_device);
}

static int __init chsc_sch_init(void)
{
	int ret;

	ret = chsc_init_dbfs();
	if (ret)
		return ret;
	isc_register(CHSC_SCH_ISC);
	ret = chsc_init_sch_driver();
	if (ret)
		goto out_dbf;
	ret = chsc_misc_init();
	if (ret)
		goto out_driver;
	return ret;
out_driver:
	chsc_cleanup_sch_driver();
out_dbf:
	isc_unregister(CHSC_SCH_ISC);
	chsc_remove_dbfs();
	return ret;
}

static void __exit chsc_sch_exit(void)
{
	chsc_misc_cleanup();
	chsc_cleanup_sch_driver();
	isc_unregister(CHSC_SCH_ISC);
	chsc_remove_dbfs();
}

module_init(chsc_sch_init);
module_exit(chsc_sch_exit);