// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/bitops.h>
#include <linux/pci.h>
#include <linux/cdev.h>
#include <linux/uaccess.h>
#include <linux/crypto.h>

#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_cfg.h"
#include "adf_cfg_common.h"
#include "adf_cfg_user.h"
18 
#define ADF_CFG_MAX_SECTION 512
#define ADF_CFG_MAX_KEY_VAL 256

#define DEVICE_NAME "qat_adf_ctl"
23 
24 static DEFINE_MUTEX(adf_ctl_lock);
25 static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg);
26 
27 static const struct file_operations adf_ctl_ops = {
28 	.owner = THIS_MODULE,
29 	.unlocked_ioctl = adf_ctl_ioctl,
30 	.compat_ioctl = compat_ptr_ioctl,
31 };
32 
33 struct adf_ctl_drv_info {
34 	unsigned int major;
35 	struct cdev drv_cdev;
36 	struct class *drv_class;
37 };
38 
39 static struct adf_ctl_drv_info adf_ctl_drv;
40 
adf_chr_drv_destroy(void)41 static void adf_chr_drv_destroy(void)
42 {
43 	device_destroy(adf_ctl_drv.drv_class, MKDEV(adf_ctl_drv.major, 0));
44 	cdev_del(&adf_ctl_drv.drv_cdev);
45 	class_destroy(adf_ctl_drv.drv_class);
46 	unregister_chrdev_region(MKDEV(adf_ctl_drv.major, 0), 1);
47 }
48 
adf_chr_drv_create(void)49 static int adf_chr_drv_create(void)
50 {
51 	dev_t dev_id;
52 	struct device *drv_device;
53 
54 	if (alloc_chrdev_region(&dev_id, 0, 1, DEVICE_NAME)) {
55 		pr_err("QAT: unable to allocate chrdev region\n");
56 		return -EFAULT;
57 	}
58 
59 	adf_ctl_drv.drv_class = class_create(THIS_MODULE, DEVICE_NAME);
60 	if (IS_ERR(adf_ctl_drv.drv_class)) {
61 		pr_err("QAT: class_create failed for adf_ctl\n");
62 		goto err_chrdev_unreg;
63 	}
64 	adf_ctl_drv.major = MAJOR(dev_id);
65 	cdev_init(&adf_ctl_drv.drv_cdev, &adf_ctl_ops);
66 	if (cdev_add(&adf_ctl_drv.drv_cdev, dev_id, 1)) {
67 		pr_err("QAT: cdev add failed\n");
68 		goto err_class_destr;
69 	}
70 
71 	drv_device = device_create(adf_ctl_drv.drv_class, NULL,
72 				   MKDEV(adf_ctl_drv.major, 0),
73 				   NULL, DEVICE_NAME);
74 	if (IS_ERR(drv_device)) {
75 		pr_err("QAT: failed to create device\n");
76 		goto err_cdev_del;
77 	}
78 	return 0;
79 err_cdev_del:
80 	cdev_del(&adf_ctl_drv.drv_cdev);
81 err_class_destr:
82 	class_destroy(adf_ctl_drv.drv_class);
83 err_chrdev_unreg:
84 	unregister_chrdev_region(dev_id, 1);
85 	return -EFAULT;
86 }
87 
adf_ctl_alloc_resources(struct adf_user_cfg_ctl_data ** ctl_data,unsigned long arg)88 static int adf_ctl_alloc_resources(struct adf_user_cfg_ctl_data **ctl_data,
89 				   unsigned long arg)
90 {
91 	struct adf_user_cfg_ctl_data *cfg_data;
92 
93 	cfg_data = kzalloc(sizeof(*cfg_data), GFP_KERNEL);
94 	if (!cfg_data)
95 		return -ENOMEM;
96 
97 	/* Initialize device id to NO DEVICE as 0 is a valid device id */
98 	cfg_data->device_id = ADF_CFG_NO_DEVICE;
99 
100 	if (copy_from_user(cfg_data, (void __user *)arg, sizeof(*cfg_data))) {
101 		pr_err("QAT: failed to copy from user cfg_data.\n");
102 		kfree(cfg_data);
103 		return -EIO;
104 	}
105 
106 	*ctl_data = cfg_data;
107 	return 0;
108 }
109 
adf_add_key_value_data(struct adf_accel_dev * accel_dev,const char * section,const struct adf_user_cfg_key_val * key_val)110 static int adf_add_key_value_data(struct adf_accel_dev *accel_dev,
111 				  const char *section,
112 				  const struct adf_user_cfg_key_val *key_val)
113 {
114 	if (key_val->type == ADF_HEX) {
115 		long *ptr = (long *)key_val->val;
116 		long val = *ptr;
117 
118 		if (adf_cfg_add_key_value_param(accel_dev, section,
119 						key_val->key, (void *)val,
120 						key_val->type)) {
121 			dev_err(&GET_DEV(accel_dev),
122 				"failed to add hex keyvalue.\n");
123 			return -EFAULT;
124 		}
125 	} else {
126 		if (adf_cfg_add_key_value_param(accel_dev, section,
127 						key_val->key, key_val->val,
128 						key_val->type)) {
129 			dev_err(&GET_DEV(accel_dev),
130 				"failed to add keyvalue.\n");
131 			return -EFAULT;
132 		}
133 	}
134 	return 0;
135 }
136 
adf_copy_key_value_data(struct adf_accel_dev * accel_dev,struct adf_user_cfg_ctl_data * ctl_data)137 static int adf_copy_key_value_data(struct adf_accel_dev *accel_dev,
138 				   struct adf_user_cfg_ctl_data *ctl_data)
139 {
140 	struct adf_user_cfg_key_val key_val;
141 	struct adf_user_cfg_key_val *params_head;
142 	struct adf_user_cfg_section section, *section_head;
143 	int i, j;
144 
145 	section_head = ctl_data->config_section;
146 
147 	for (i = 0; section_head && i < ADF_CFG_MAX_SECTION; i++) {
148 		if (copy_from_user(&section, (void __user *)section_head,
149 				   sizeof(*section_head))) {
150 			dev_err(&GET_DEV(accel_dev),
151 				"failed to copy section info\n");
152 			goto out_err;
153 		}
154 
155 		if (adf_cfg_section_add(accel_dev, section.name)) {
156 			dev_err(&GET_DEV(accel_dev),
157 				"failed to add section.\n");
158 			goto out_err;
159 		}
160 
161 		params_head = section.params;
162 
163 		for (j = 0; params_head && j < ADF_CFG_MAX_KEY_VAL; j++) {
164 			if (copy_from_user(&key_val, (void __user *)params_head,
165 					   sizeof(key_val))) {
166 				dev_err(&GET_DEV(accel_dev),
167 					"Failed to copy keyvalue.\n");
168 				goto out_err;
169 			}
170 			if (adf_add_key_value_data(accel_dev, section.name,
171 						   &key_val)) {
172 				goto out_err;
173 			}
174 			params_head = key_val.next;
175 		}
176 		section_head = section.next;
177 	}
178 	return 0;
179 out_err:
180 	adf_cfg_del_all(accel_dev);
181 	return -EFAULT;
182 }
183 
adf_ctl_ioctl_dev_config(struct file * fp,unsigned int cmd,unsigned long arg)184 static int adf_ctl_ioctl_dev_config(struct file *fp, unsigned int cmd,
185 				    unsigned long arg)
186 {
187 	int ret;
188 	struct adf_user_cfg_ctl_data *ctl_data;
189 	struct adf_accel_dev *accel_dev;
190 
191 	ret = adf_ctl_alloc_resources(&ctl_data, arg);
192 	if (ret)
193 		return ret;
194 
195 	accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
196 	if (!accel_dev) {
197 		ret = -EFAULT;
198 		goto out;
199 	}
200 
201 	if (adf_dev_started(accel_dev)) {
202 		ret = -EFAULT;
203 		goto out;
204 	}
205 
206 	if (adf_copy_key_value_data(accel_dev, ctl_data)) {
207 		ret = -EFAULT;
208 		goto out;
209 	}
210 	set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
211 out:
212 	kfree(ctl_data);
213 	return ret;
214 }
215 
adf_ctl_is_device_in_use(int id)216 static int adf_ctl_is_device_in_use(int id)
217 {
218 	struct adf_accel_dev *dev;
219 
220 	list_for_each_entry(dev, adf_devmgr_get_head(), list) {
221 		if (id == dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
222 			if (adf_devmgr_in_reset(dev) || adf_dev_in_use(dev)) {
223 				dev_info(&GET_DEV(dev),
224 					 "device qat_dev%d is busy\n",
225 					 dev->accel_id);
226 				return -EBUSY;
227 			}
228 		}
229 	}
230 	return 0;
231 }
232 
adf_ctl_stop_devices(u32 id)233 static void adf_ctl_stop_devices(u32 id)
234 {
235 	struct adf_accel_dev *accel_dev;
236 
237 	list_for_each_entry(accel_dev, adf_devmgr_get_head(), list) {
238 		if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
239 			if (!adf_dev_started(accel_dev))
240 				continue;
241 
242 			/* First stop all VFs */
243 			if (!accel_dev->is_vf)
244 				continue;
245 
246 			adf_dev_stop(accel_dev);
247 			adf_dev_shutdown(accel_dev);
248 		}
249 	}
250 
251 	list_for_each_entry(accel_dev, adf_devmgr_get_head(), list) {
252 		if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
253 			if (!adf_dev_started(accel_dev))
254 				continue;
255 
256 			adf_dev_stop(accel_dev);
257 			adf_dev_shutdown(accel_dev);
258 		}
259 	}
260 }
261 
adf_ctl_ioctl_dev_stop(struct file * fp,unsigned int cmd,unsigned long arg)262 static int adf_ctl_ioctl_dev_stop(struct file *fp, unsigned int cmd,
263 				  unsigned long arg)
264 {
265 	int ret;
266 	struct adf_user_cfg_ctl_data *ctl_data;
267 
268 	ret = adf_ctl_alloc_resources(&ctl_data, arg);
269 	if (ret)
270 		return ret;
271 
272 	if (adf_devmgr_verify_id(ctl_data->device_id)) {
273 		pr_err("QAT: Device %d not found\n", ctl_data->device_id);
274 		ret = -ENODEV;
275 		goto out;
276 	}
277 
278 	ret = adf_ctl_is_device_in_use(ctl_data->device_id);
279 	if (ret)
280 		goto out;
281 
282 	if (ctl_data->device_id == ADF_CFG_ALL_DEVICES)
283 		pr_info("QAT: Stopping all acceleration devices.\n");
284 	else
285 		pr_info("QAT: Stopping acceleration device qat_dev%d.\n",
286 			ctl_data->device_id);
287 
288 	adf_ctl_stop_devices(ctl_data->device_id);
289 
290 out:
291 	kfree(ctl_data);
292 	return ret;
293 }
294 
adf_ctl_ioctl_dev_start(struct file * fp,unsigned int cmd,unsigned long arg)295 static int adf_ctl_ioctl_dev_start(struct file *fp, unsigned int cmd,
296 				   unsigned long arg)
297 {
298 	int ret;
299 	struct adf_user_cfg_ctl_data *ctl_data;
300 	struct adf_accel_dev *accel_dev;
301 
302 	ret = adf_ctl_alloc_resources(&ctl_data, arg);
303 	if (ret)
304 		return ret;
305 
306 	ret = -ENODEV;
307 	accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
308 	if (!accel_dev)
309 		goto out;
310 
311 	if (!adf_dev_started(accel_dev)) {
312 		dev_info(&GET_DEV(accel_dev),
313 			 "Starting acceleration device qat_dev%d.\n",
314 			 ctl_data->device_id);
315 		ret = adf_dev_init(accel_dev);
316 		if (!ret)
317 			ret = adf_dev_start(accel_dev);
318 	} else {
319 		dev_info(&GET_DEV(accel_dev),
320 			 "Acceleration device qat_dev%d already started.\n",
321 			 ctl_data->device_id);
322 	}
323 	if (ret) {
324 		dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n",
325 			ctl_data->device_id);
326 		adf_dev_stop(accel_dev);
327 		adf_dev_shutdown(accel_dev);
328 	}
329 out:
330 	kfree(ctl_data);
331 	return ret;
332 }
333 
adf_ctl_ioctl_get_num_devices(struct file * fp,unsigned int cmd,unsigned long arg)334 static int adf_ctl_ioctl_get_num_devices(struct file *fp, unsigned int cmd,
335 					 unsigned long arg)
336 {
337 	u32 num_devices = 0;
338 
339 	adf_devmgr_get_num_dev(&num_devices);
340 	if (copy_to_user((void __user *)arg, &num_devices, sizeof(num_devices)))
341 		return -EFAULT;
342 
343 	return 0;
344 }
345 
adf_ctl_ioctl_get_status(struct file * fp,unsigned int cmd,unsigned long arg)346 static int adf_ctl_ioctl_get_status(struct file *fp, unsigned int cmd,
347 				    unsigned long arg)
348 {
349 	struct adf_hw_device_data *hw_data;
350 	struct adf_dev_status_info dev_info;
351 	struct adf_accel_dev *accel_dev;
352 
353 	if (copy_from_user(&dev_info, (void __user *)arg,
354 			   sizeof(struct adf_dev_status_info))) {
355 		pr_err("QAT: failed to copy from user.\n");
356 		return -EFAULT;
357 	}
358 
359 	accel_dev = adf_devmgr_get_dev_by_id(dev_info.accel_id);
360 	if (!accel_dev)
361 		return -ENODEV;
362 
363 	hw_data = accel_dev->hw_device;
364 	dev_info.state = adf_dev_started(accel_dev) ? DEV_UP : DEV_DOWN;
365 	dev_info.num_ae = hw_data->get_num_aes(hw_data);
366 	dev_info.num_accel = hw_data->get_num_accels(hw_data);
367 	dev_info.num_logical_accel = hw_data->num_logical_accel;
368 	dev_info.banks_per_accel = hw_data->num_banks
369 					/ hw_data->num_logical_accel;
370 	strscpy(dev_info.name, hw_data->dev_class->name, sizeof(dev_info.name));
371 	dev_info.instance_id = hw_data->instance_id;
372 	dev_info.type = hw_data->dev_class->type;
373 	dev_info.bus = accel_to_pci_dev(accel_dev)->bus->number;
374 	dev_info.dev = PCI_SLOT(accel_to_pci_dev(accel_dev)->devfn);
375 	dev_info.fun = PCI_FUNC(accel_to_pci_dev(accel_dev)->devfn);
376 
377 	if (copy_to_user((void __user *)arg, &dev_info,
378 			 sizeof(struct adf_dev_status_info))) {
379 		dev_err(&GET_DEV(accel_dev), "failed to copy status.\n");
380 		return -EFAULT;
381 	}
382 	return 0;
383 }
384 
adf_ctl_ioctl(struct file * fp,unsigned int cmd,unsigned long arg)385 static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
386 {
387 	int ret;
388 
389 	if (mutex_lock_interruptible(&adf_ctl_lock))
390 		return -EFAULT;
391 
392 	switch (cmd) {
393 	case IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS:
394 		ret = adf_ctl_ioctl_dev_config(fp, cmd, arg);
395 		break;
396 
397 	case IOCTL_STOP_ACCEL_DEV:
398 		ret = adf_ctl_ioctl_dev_stop(fp, cmd, arg);
399 		break;
400 
401 	case IOCTL_START_ACCEL_DEV:
402 		ret = adf_ctl_ioctl_dev_start(fp, cmd, arg);
403 		break;
404 
405 	case IOCTL_GET_NUM_DEVICES:
406 		ret = adf_ctl_ioctl_get_num_devices(fp, cmd, arg);
407 		break;
408 
409 	case IOCTL_STATUS_ACCEL_DEV:
410 		ret = adf_ctl_ioctl_get_status(fp, cmd, arg);
411 		break;
412 	default:
413 		pr_err_ratelimited("QAT: Invalid ioctl %d\n", cmd);
414 		ret = -EFAULT;
415 		break;
416 	}
417 	mutex_unlock(&adf_ctl_lock);
418 	return ret;
419 }
420 
adf_register_ctl_device_driver(void)421 static int __init adf_register_ctl_device_driver(void)
422 {
423 	if (adf_chr_drv_create())
424 		goto err_chr_dev;
425 
426 	if (adf_init_misc_wq())
427 		goto err_misc_wq;
428 
429 	if (adf_init_aer())
430 		goto err_aer;
431 
432 	if (adf_init_pf_wq())
433 		goto err_pf_wq;
434 
435 	if (adf_init_vf_wq())
436 		goto err_vf_wq;
437 
438 	if (qat_crypto_register())
439 		goto err_crypto_register;
440 
441 	if (qat_compression_register())
442 		goto err_compression_register;
443 
444 	return 0;
445 
446 err_compression_register:
447 	qat_crypto_unregister();
448 err_crypto_register:
449 	adf_exit_vf_wq();
450 err_vf_wq:
451 	adf_exit_pf_wq();
452 err_pf_wq:
453 	adf_exit_aer();
454 err_aer:
455 	adf_exit_misc_wq();
456 err_misc_wq:
457 	adf_chr_drv_destroy();
458 err_chr_dev:
459 	mutex_destroy(&adf_ctl_lock);
460 	return -EFAULT;
461 }
462 
adf_unregister_ctl_device_driver(void)463 static void __exit adf_unregister_ctl_device_driver(void)
464 {
465 	adf_chr_drv_destroy();
466 	adf_exit_misc_wq();
467 	adf_exit_aer();
468 	adf_exit_vf_wq();
469 	adf_exit_pf_wq();
470 	qat_crypto_unregister();
471 	qat_compression_unregister();
472 	adf_clean_vf_map(false);
473 	mutex_destroy(&adf_ctl_lock);
474 }
475 
476 module_init(adf_register_ctl_device_driver);
477 module_exit(adf_unregister_ctl_device_driver);
478 MODULE_LICENSE("Dual BSD/GPL");
479 MODULE_AUTHOR("Intel");
480 MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
481 MODULE_ALIAS_CRYPTO("intel_qat");
482 MODULE_VERSION(ADF_DRV_VERSION);
483 MODULE_IMPORT_NS(CRYPTO_INTERNAL);
484