// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2020 Intel Corporation */
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <adf_accel_devices.h>
#include <adf_cfg.h>
#include <adf_common_drv.h>

#include "adf_4xxx_hw_data.h"
#include "qat_compression.h"
#include "qat_crypto.h"
#include "adf_transport_access_macros.h"

static const struct pci_device_id adf_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, ADF_4XXX_PCI_DEVICE_ID), },
	{ PCI_VDEVICE(INTEL, ADF_401XX_PCI_DEVICE_ID), },
	{ }
};
MODULE_DEVICE_TABLE(pci, adf_pci_tbl);

enum configs {
	DEV_CFG_CY = 0,
	DEV_CFG_DC,
};

static const char * const services_operations[] = {
	ADF_CFG_CY,
	ADF_CFG_DC,
};

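/*
 * Undo the bookkeeping done in adf_probe(): detach and clean the hw_data
 * structure, remove the configuration table and debugfs directory, and take
 * the device out of the accel device manager table.
 */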
static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
{
	if (accel_dev->hw_device) {
		adf_clean_hw_data_4xxx(accel_dev->hw_device);
		accel_dev->hw_device = NULL;
	}
	adf_cfg_dev_remove(accel_dev);
	debugfs_remove(accel_dev->debugfs_dir);
	adf_devmgr_rm_dev(accel_dev, NULL);
}

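/*
 * Populate the "GENERAL" section with the default ServicesEnabled value:
 * crypto for even numbered devices, compression for odd numbered ones.
 */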
static int adf_cfg_dev_init(struct adf_accel_dev *accel_dev)
{
	const char *config;
	int ret;

	config = accel_dev->accel_id % 2 ? ADF_CFG_DC : ADF_CFG_CY;

	ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC);
	if (ret)
		return ret;

	/* Default configuration is crypto only for even devices
	 * and compression for odd devices
	 */
	ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
					  ADF_SERVICES_ENABLED, config,
					  ADF_STR);
	if (ret)
		return ret;

	return 0;
}

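/*
 * Build the kernel crypto instance configuration: one instance per online
 * CPU, capped at half the number of available ring banks, since each
 * instance uses one bank for asymmetric and one for symmetric rings.
 */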
static int adf_crypto_dev_config(struct adf_accel_dev *accel_dev)
{
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	int banks = GET_MAX_BANKS(accel_dev);
	int cpus = num_online_cpus();
	unsigned long bank, val;
	int instances;
	int ret;
	int i;

	if (adf_hw_dev_has_crypto(accel_dev))
		instances = min(cpus, banks / 2);
	else
		instances = 0;

	for (i = 0; i < instances; i++) {
		val = i;
		bank = i * 2;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &bank, ADF_DEC);
		if (ret)
			goto err;

		bank += 1;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_BANK_NUM, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &bank, ADF_DEC);
		if (ret)
			goto err;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY,
			 i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
		val = 128;
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		val = 512;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		val = 0;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		val = 0;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		val = 1;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		val = 1;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		val = ADF_COALESCING_DEF_TIME;
		snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
		ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
						  key, &val, ADF_DEC);
		if (ret)
			goto err;
	}

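	/* Report the number of crypto instances created and no DC instances */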
	val = i;
	ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
					  &val, ADF_DEC);
	if (ret)
		goto err;

	val = 0;
	ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
					  &val, ADF_DEC);
	if (ret)
		goto err;

	return 0;
err:
	dev_err(&GET_DEV(accel_dev), "Failed to add configuration for crypto\n");
	return ret;
}

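/*
 * Build the kernel compression instance configuration: one instance per
 * online CPU, capped at the number of available ring banks, with a single
 * TX/RX ring pair per instance.
 */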
static int adf_comp_dev_config(struct adf_accel_dev *accel_dev)
{
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	int banks = GET_MAX_BANKS(accel_dev);
	int cpus = num_online_cpus();
	unsigned long val;
	int instances;
	int ret;
	int i;

	if (adf_hw_dev_has_compression(accel_dev))
		instances = min(cpus, banks);
	else
		instances = 0;

	for (i = 0; i < instances; i++) {
		val = i;
		snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_BANK_NUM, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		val = 512;
		snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_SIZE, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		val = 0;
		snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_TX, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		val = 1;
		snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_RX, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		val = ADF_COALESCING_DEF_TIME;
		snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
		ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
						  key, &val, ADF_DEC);
		if (ret)
			goto err;
	}

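	/* Report the number of compression instances created and no CY instances */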
	val = i;
	ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
					  &val, ADF_DEC);
	if (ret)
		goto err;

	val = 0;
	ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
					  &val, ADF_DEC);
	if (ret)
		goto err;

	return 0;
err:
	dev_err(&GET_DEV(accel_dev), "Failed to add configuration for compression\n");
	return ret;
}

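/*
 * Top level device configuration entry point: create the "KERNEL" and
 * "Accelerator0" sections, then program either the crypto or the compression
 * instance layout based on the ServicesEnabled key in the "GENERAL" section.
 */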
int adf_gen4_dev_config(struct adf_accel_dev *accel_dev)
{
	char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
	int ret;

	ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC);
	if (ret)
		goto err;

	ret = adf_cfg_section_add(accel_dev, "Accelerator0");
	if (ret)
		goto err;

	ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
				      ADF_SERVICES_ENABLED, services);
	if (ret)
		goto err;

	ret = sysfs_match_string(services_operations, services);
	if (ret < 0)
		goto err;

	switch (ret) {
	case DEV_CFG_CY:
		ret = adf_crypto_dev_config(accel_dev);
		break;
	case DEV_CFG_DC:
		ret = adf_comp_dev_config(accel_dev);
		break;
	}

	if (ret)
		goto err;

	set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);

	return ret;

err:
	dev_err(&GET_DEV(accel_dev), "Failed to configure QAT driver\n");
	return ret;
}

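/*
 * PCI probe: register the device with the accel device manager, initialise
 * the 4xxx hardware data, map the BARs, apply the default configuration and
 * bring the device up.
 */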
static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct adf_accel_dev *accel_dev;
	struct adf_accel_pci *accel_pci_dev;
	struct adf_hw_device_data *hw_data;
	char name[ADF_DEVICE_NAME_LENGTH];
	unsigned int i, bar_nr;
	unsigned long bar_mask;
	struct adf_bar *bar;
	int ret;

	if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
		/*
		 * If the accelerator is connected to a node with no memory
		 * there is no point in using the accelerator since the remote
		 * memory transaction will be very slow.
		 */
		dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
		return -EINVAL;
	}

	accel_dev = devm_kzalloc(&pdev->dev, sizeof(*accel_dev), GFP_KERNEL);
	if (!accel_dev)
		return -ENOMEM;

	INIT_LIST_HEAD(&accel_dev->crypto_list);
	accel_pci_dev = &accel_dev->accel_pci_dev;
	accel_pci_dev->pci_dev = pdev;

	/*
	 * Add accel device to accel table
	 * This should be called before adf_cleanup_accel is called
	 */
	if (adf_devmgr_add_dev(accel_dev, NULL)) {
		dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
		return -EFAULT;
	}

	accel_dev->owner = THIS_MODULE;
	/* Allocate and initialise device hardware meta-data structure */
	hw_data = devm_kzalloc(&pdev->dev, sizeof(*hw_data), GFP_KERNEL);
	if (!hw_data) {
		ret = -ENOMEM;
		goto out_err;
	}

	accel_dev->hw_device = hw_data;
	adf_init_hw_data_4xxx(accel_dev->hw_device);

	pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
	pci_read_config_dword(pdev, ADF_4XXX_FUSECTL4_OFFSET, &hw_data->fuses);

	/* Get Accelerators and Accelerators Engines masks */
	hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
	hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
	accel_pci_dev->sku = hw_data->get_sku(hw_data);
	/* If the device has no acceleration engines then ignore it */
	if (!hw_data->accel_mask || !hw_data->ae_mask ||
	    (~hw_data->ae_mask & 0x01)) {
		dev_err(&pdev->dev, "No acceleration units found.\n");
		ret = -EFAULT;
		goto out_err;
	}

	/* Create dev top level debugfs entry */
	snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
		 hw_data->dev_class->name, pci_name(pdev));

	accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);

	/* Create device configuration table */
	ret = adf_cfg_dev_add(accel_dev);
	if (ret)
		goto out_err;

	/* Enable PCI device */
	ret = pcim_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "Can't enable PCI device.\n");
		goto out_err;
	}

	/* Set DMA identifier */
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "No usable DMA configuration.\n");
		goto out_err;
	}

	ret = adf_cfg_dev_init(accel_dev);
	if (ret) {
		dev_err(&pdev->dev, "Failed to initialize configuration.\n");
		goto out_err;
	}

	/* Get accelerator capabilities mask */
	hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
	if (!hw_data->accel_capabilities_mask) {
		dev_err(&pdev->dev, "Failed to get capabilities mask.\n");
		ret = -EINVAL;
		goto out_err;
	}

	/* Find and map all the device's BARS */
	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM) & ADF_4XXX_BAR_MASK;

	ret = pcim_iomap_regions_request_all(pdev, bar_mask, pci_name(pdev));
	if (ret) {
		dev_err(&pdev->dev, "Failed to map pci regions.\n");
		goto out_err;
	}

	i = 0;
	for_each_set_bit(bar_nr, &bar_mask, PCI_STD_NUM_BARS) {
		bar = &accel_pci_dev->pci_bars[i++];
		bar->virt_addr = pcim_iomap_table(pdev)[bar_nr];
	}

	pci_set_master(pdev);

	adf_enable_aer(accel_dev);

	if (pci_save_state(pdev)) {
		dev_err(&pdev->dev, "Failed to save pci state.\n");
		ret = -ENOMEM;
		goto out_err_disable_aer;
	}

	ret = adf_sysfs_init(accel_dev);
	if (ret)
		goto out_err_disable_aer;

	ret = hw_data->dev_config(accel_dev);
	if (ret)
		goto out_err_disable_aer;

	ret = adf_dev_init(accel_dev);
	if (ret)
		goto out_err_dev_shutdown;

	ret = adf_dev_start(accel_dev);
	if (ret)
		goto out_err_dev_stop;

	return ret;

out_err_dev_stop:
	adf_dev_stop(accel_dev);
out_err_dev_shutdown:
	adf_dev_shutdown(accel_dev);
out_err_disable_aer:
	adf_disable_aer(accel_dev);
out_err:
	adf_cleanup_accel(accel_dev);
	return ret;
}

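/* PCI remove: stop and shut down the device, then release driver resources */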
static void adf_remove(struct pci_dev *pdev)
{
	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);

	if (!accel_dev) {
		pr_err("QAT: Driver removal failed\n");
		return;
	}
	adf_dev_stop(accel_dev);
	adf_dev_shutdown(accel_dev);
	adf_disable_aer(accel_dev);
	adf_cleanup_accel(accel_dev);
}

static struct pci_driver adf_driver = {
	.id_table = adf_pci_tbl,
	.name = ADF_4XXX_DEVICE_NAME,
	.probe = adf_probe,
	.remove = adf_remove,
	.sriov_configure = adf_sriov_configure,
	.err_handler = &adf_err_handler,
};

module_pci_driver(adf_driver);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel");
MODULE_FIRMWARE(ADF_4XXX_FW);
MODULE_FIRMWARE(ADF_4XXX_MMP);
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_VERSION(ADF_DRV_VERSION);
MODULE_SOFTDEP("pre: crypto-intel_qat");