// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/delay.h>
#include <linux/nospec.h>
#include "adf_accel_devices.h"
#include "adf_transport_internal.h"
#include "adf_transport_access_macros.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"

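/* Fast modulo for power-of-two ring sizes: return data % (1 << shift) */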
static inline u32 adf_modulo(u32 data, u32 shift)
{
	u32 div = data >> shift;
	u32 mult = div << shift;

	return data - mult;
}

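/* The DMA address of a ring must be naturally aligned to its size */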
static inline int adf_check_ring_alignment(u64 addr, u64 size)
{
	if (((size - 1) & addr) != 0)
		return -EFAULT;
	return 0;
}

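/*
 * Find the ring size encoding whose byte size exactly matches
 * msg_size * msg_num, or fall back to the default ring size.
 */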
static int adf_verify_ring_size(u32 msg_size, u32 msg_num)
{
	int i = ADF_MIN_RING_SIZE;

	for (; i <= ADF_MAX_RING_SIZE; i++)
		if ((msg_size * msg_num) == ADF_SIZE_TO_RING_SIZE_IN_BYTES(i))
			return i;

	return ADF_DEFAULT_RING_SIZE;
}

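/* Mark a ring as in use in the bank's ring mask; fail if already reserved */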
static int adf_reserve_ring(struct adf_etr_bank_data *bank, u32 ring)
{
	spin_lock(&bank->lock);
	if (bank->ring_mask & (1 << ring)) {
		spin_unlock(&bank->lock);
		return -EFAULT;
	}
	bank->ring_mask |= (1 << ring);
	spin_unlock(&bank->lock);
	return 0;
}

static void adf_unreserve_ring(struct adf_etr_bank_data *bank, u32 ring)
{
	spin_lock(&bank->lock);
	bank->ring_mask &= ~(1 << ring);
	spin_unlock(&bank->lock);
}

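/*
 * Enable or disable response interrupts for a ring by updating the bank
 * IRQ mask and the interrupt column enable CSR; enabling also (re)programs
 * the interrupt coalescing timer.
 */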
static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, u32 ring)
{
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);

	spin_lock_bh(&bank->lock);
	bank->irq_mask |= (1 << ring);
	spin_unlock_bh(&bank->lock);
	csr_ops->write_csr_int_col_en(bank->csr_addr, bank->bank_number,
				      bank->irq_mask);
	csr_ops->write_csr_int_col_ctl(bank->csr_addr, bank->bank_number,
				       bank->irq_coalesc_timer);
}

static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, u32 ring)
{
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);

	spin_lock_bh(&bank->lock);
	bank->irq_mask &= ~(1 << ring);
	spin_unlock_bh(&bank->lock);
	csr_ops->write_csr_int_col_en(bank->csr_addr, bank->bank_number,
				      bank->irq_mask);
}

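/*
 * Put a request on a TX ring: claim an in-flight slot, copy the message at
 * the current tail, advance the tail and notify the device by writing the
 * ring tail CSR. Returns -EAGAIN if the ring is full.
 */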
int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg)
{
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);

	if (atomic_add_return(1, ring->inflights) >
	    ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size)) {
		atomic_dec(ring->inflights);
		return -EAGAIN;
	}
	spin_lock_bh(&ring->lock);
	memcpy((void *)((uintptr_t)ring->base_addr + ring->tail), msg,
	       ADF_MSG_SIZE_TO_BYTES(ring->msg_size));

	ring->tail = adf_modulo(ring->tail +
				ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
				ADF_RING_SIZE_MODULO(ring->ring_size));
	csr_ops->write_csr_ring_tail(ring->bank->csr_addr,
				     ring->bank->bank_number, ring->ring_number,
				     ring->tail);
	spin_unlock_bh(&ring->lock);

	return 0;
}

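/*
 * Drain completed responses from an RX ring: run the ring callback for each
 * message, mark the slot empty again, advance the head and, if anything was
 * processed, write the new head back to the ring head CSR.
 */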
static int adf_handle_response(struct adf_etr_ring_data *ring)
{
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
	u32 msg_counter = 0;
	u32 *msg = (u32 *)((uintptr_t)ring->base_addr + ring->head);

	while (*msg != ADF_RING_EMPTY_SIG) {
		ring->callback((u32 *)msg);
		atomic_dec(ring->inflights);
		*msg = ADF_RING_EMPTY_SIG;
		ring->head = adf_modulo(ring->head +
					ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
					ADF_RING_SIZE_MODULO(ring->ring_size));
		msg_counter++;
		msg = (u32 *)((uintptr_t)ring->base_addr + ring->head);
	}
	if (msg_counter > 0) {
		csr_ops->write_csr_ring_head(ring->bank->csr_addr,
					     ring->bank->bank_number,
					     ring->ring_number, ring->head);
	}
	return 0;
}

static void adf_configure_tx_ring(struct adf_etr_ring_data *ring)
{
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
	u32 ring_config = BUILD_RING_CONFIG(ring->ring_size);

	csr_ops->write_csr_ring_config(ring->bank->csr_addr,
				       ring->bank->bank_number,
				       ring->ring_number, ring_config);
}

static void adf_configure_rx_ring(struct adf_etr_ring_data *ring)
{
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
	u32 ring_config =
			BUILD_RESP_RING_CONFIG(ring->ring_size,
					       ADF_RING_NEAR_WATERMARK_512,
					       ADF_RING_NEAR_WATERMARK_0);

	csr_ops->write_csr_ring_config(ring->bank->csr_addr,
				       ring->bank->bank_number,
				       ring->ring_number, ring_config);
}

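/*
 * Allocate DMA-coherent memory for a ring, check its alignment and program
 * the ring configuration and ring base address CSRs.
 */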
static int adf_init_ring(struct adf_etr_ring_data *ring)
{
	struct adf_etr_bank_data *bank = ring->bank;
	struct adf_accel_dev *accel_dev = bank->accel_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
	u64 ring_base;
	u32 ring_size_bytes =
			ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);

	ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);
	ring->base_addr = dma_alloc_coherent(&GET_DEV(accel_dev),
					     ring_size_bytes, &ring->dma_addr,
					     GFP_KERNEL);
	if (!ring->base_addr)
		return -ENOMEM;

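	/* Fill the ring with the empty-message pattern (ADF_RING_EMPTY_SIG) */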
	memset(ring->base_addr, 0x7F, ring_size_bytes);
	/* The base_addr has to be aligned to the size of the buffer */
	if (adf_check_ring_alignment(ring->dma_addr, ring_size_bytes)) {
		dev_err(&GET_DEV(accel_dev), "Ring address not aligned\n");
		dma_free_coherent(&GET_DEV(accel_dev), ring_size_bytes,
				  ring->base_addr, ring->dma_addr);
		ring->base_addr = NULL;
		return -EFAULT;
	}

	if (hw_data->tx_rings_mask & (1 << ring->ring_number))
		adf_configure_tx_ring(ring);
	else
		adf_configure_rx_ring(ring);

	ring_base = csr_ops->build_csr_ring_base_addr(ring->dma_addr,
						      ring->ring_size);

	csr_ops->write_csr_ring_base(ring->bank->csr_addr,
				     ring->bank->bank_number, ring->ring_number,
				     ring_base);
	spin_lock_init(&ring->lock);
	return 0;
}

static void adf_cleanup_ring(struct adf_etr_ring_data *ring)
{
	u32 ring_size_bytes =
			ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);
	ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);

	if (ring->base_addr) {
		memset(ring->base_addr, 0x7F, ring_size_bytes);
		dma_free_coherent(&GET_DEV(ring->bank->accel_dev),
				  ring_size_bytes, ring->base_addr,
				  ring->dma_addr);
	}
}

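/*
 * Create a ring: look up the ring number for ring_name in the device
 * configuration, reserve it in the requested bank, initialise the ring
 * memory and CSRs, enable hardware arbitration, add a debugfs entry and,
 * if a callback is provided and the ring is not polled, enable its
 * response interrupt.
 */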
int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
		    u32 bank_num, u32 num_msgs,
		    u32 msg_size, const char *ring_name,
		    adf_callback_fn callback, int poll_mode,
		    struct adf_etr_ring_data **ring_ptr)
{
	struct adf_etr_data *transport_data = accel_dev->transport;
	u8 num_rings_per_bank = GET_NUM_RINGS_PER_BANK(accel_dev);
	struct adf_etr_bank_data *bank;
	struct adf_etr_ring_data *ring;
	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
	u32 ring_num;
	int ret;

	if (bank_num >= GET_MAX_BANKS(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Invalid bank number\n");
		return -EFAULT;
	}
	if (msg_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
		dev_err(&GET_DEV(accel_dev), "Invalid msg size\n");
		return -EFAULT;
	}
	if (ADF_MAX_INFLIGHTS(adf_verify_ring_size(msg_size, num_msgs),
			      ADF_BYTES_TO_MSG_SIZE(msg_size)) < 2) {
		dev_err(&GET_DEV(accel_dev),
			"Invalid ring size for given msg size\n");
		return -EFAULT;
	}
	if (adf_cfg_get_param_value(accel_dev, section, ring_name, val)) {
		dev_err(&GET_DEV(accel_dev), "Section %s, no such entry : %s\n",
			section, ring_name);
		return -EFAULT;
	}
	if (kstrtouint(val, 10, &ring_num)) {
		dev_err(&GET_DEV(accel_dev), "Can't get ring number\n");
		return -EFAULT;
	}
	if (ring_num >= num_rings_per_bank) {
		dev_err(&GET_DEV(accel_dev), "Invalid ring number\n");
		return -EFAULT;
	}

	ring_num = array_index_nospec(ring_num, num_rings_per_bank);
	bank = &transport_data->banks[bank_num];
	if (adf_reserve_ring(bank, ring_num)) {
		dev_err(&GET_DEV(accel_dev), "Ring %d, %s already exists.\n",
			ring_num, ring_name);
		return -EFAULT;
	}
	ring = &bank->rings[ring_num];
	ring->ring_number = ring_num;
	ring->bank = bank;
	ring->callback = callback;
	ring->msg_size = ADF_BYTES_TO_MSG_SIZE(msg_size);
	ring->ring_size = adf_verify_ring_size(msg_size, num_msgs);
	ring->head = 0;
	ring->tail = 0;
	atomic_set(ring->inflights, 0);
	ret = adf_init_ring(ring);
	if (ret)
		goto err;

	/* Enable HW arbitration for the given ring */
	adf_update_ring_arb(ring);

	if (adf_ring_debugfs_add(ring, ring_name)) {
		dev_err(&GET_DEV(accel_dev),
			"Couldn't add ring debugfs entry\n");
		ret = -EFAULT;
		goto err;
	}

	/* Enable interrupts if needed */
	if (callback && (!poll_mode))
		adf_enable_ring_irq(bank, ring->ring_number);
	*ring_ptr = ring;
	return 0;
err:
	adf_cleanup_ring(ring);
	adf_unreserve_ring(bank, ring_num);
	adf_update_ring_arb(ring);
	return ret;
}

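/*
 * Tear down a ring created by adf_create_ring(): disable its interrupt,
 * clear its CSRs, remove the debugfs entry, release the ring slot, disable
 * hardware arbitration and free the ring memory.
 */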
void adf_remove_ring(struct adf_etr_ring_data *ring)
{
	struct adf_etr_bank_data *bank = ring->bank;
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);

	/* Disable interrupts for the given ring */
	adf_disable_ring_irq(bank, ring->ring_number);

	/* Clear the ring config and ring base CSRs */
	csr_ops->write_csr_ring_config(bank->csr_addr, bank->bank_number,
				       ring->ring_number, 0);
	csr_ops->write_csr_ring_base(bank->csr_addr, bank->bank_number,
				     ring->ring_number, 0);
	adf_ring_debugfs_rm(ring);
	adf_unreserve_ring(bank, ring->ring_number);
	/* Disable HW arbitration for the given ring */
	adf_update_ring_arb(ring);
	adf_cleanup_ring(ring);
}

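/*
 * Read the empty-ring status CSR for a bank and handle responses on every
 * ring that is not empty and has its interrupt enabled.
 */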
static void adf_ring_response_handler(struct adf_etr_bank_data *bank)
{
	struct adf_accel_dev *accel_dev = bank->accel_dev;
	u8 num_rings_per_bank = GET_NUM_RINGS_PER_BANK(accel_dev);
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
	unsigned long empty_rings;
	int i;

	empty_rings = csr_ops->read_csr_e_stat(bank->csr_addr,
					       bank->bank_number);
	empty_rings = ~empty_rings & bank->irq_mask;

	for_each_set_bit(i, &empty_rings, num_rings_per_bank)
		adf_handle_response(&bank->rings[i]);
}

void adf_response_handler(uintptr_t bank_addr)
{
	struct adf_etr_bank_data *bank = (void *)bank_addr;
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);

	/* Handle all the responses and reenable IRQs */
	adf_ring_response_handler(bank);

	csr_ops->write_csr_int_flag_and_col(bank->csr_addr, bank->bank_number,
					    bank->irq_mask);
}

static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev,
				  const char *section, const char *format,
				  u32 key, u32 *value)
{
	char key_buf[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	char val_buf[ADF_CFG_MAX_VAL_LEN_IN_BYTES];

	snprintf(key_buf, ADF_CFG_MAX_KEY_LEN_IN_BYTES, format, key);

	if (adf_cfg_get_param_value(accel_dev, section, key_buf, val_buf))
		return -EFAULT;

	if (kstrtouint(val_buf, 10, value))
		return -EFAULT;
	return 0;
}

static void adf_get_coalesc_timer(struct adf_etr_bank_data *bank,
				  const char *section,
				  u32 bank_num_in_accel)
{
	if (adf_get_cfg_int(bank->accel_dev, section,
			    ADF_ETRMGR_COALESCE_TIMER_FORMAT,
			    bank_num_in_accel, &bank->irq_coalesc_timer))
		bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;

	if (ADF_COALESCING_MAX_TIME < bank->irq_coalesc_timer ||
	    ADF_COALESCING_MIN_TIME > bank->irq_coalesc_timer)
		bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;
}

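/*
 * Initialise a single ring bank: reset all ring CSRs in the bank, allocate
 * the per-ring data, read the interrupt coalescing settings from the device
 * configuration, allocate in-flight counters for TX rings (each RX ring
 * shares the counter of its paired TX ring), add the bank debugfs entry and
 * enable the bank interrupt sources.
 */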
static int adf_init_bank(struct adf_accel_dev *accel_dev,
			 struct adf_etr_bank_data *bank,
			 u32 bank_num, void __iomem *csr_addr)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u8 num_rings_per_bank = hw_data->num_rings_per_bank;
	struct adf_hw_csr_ops *csr_ops = &hw_data->csr_ops;
	u32 irq_mask = BIT(num_rings_per_bank) - 1;
	struct adf_etr_ring_data *ring;
	struct adf_etr_ring_data *tx_ring;
	u32 i, coalesc_enabled = 0;
	unsigned long ring_mask;
	int size;

	memset(bank, 0, sizeof(*bank));
	bank->bank_number = bank_num;
	bank->csr_addr = csr_addr;
	bank->accel_dev = accel_dev;
	spin_lock_init(&bank->lock);

	/* Allocate the rings in the bank */
	size = num_rings_per_bank * sizeof(struct adf_etr_ring_data);
	bank->rings = kzalloc_node(size, GFP_KERNEL,
				   dev_to_node(&GET_DEV(accel_dev)));
	if (!bank->rings)
		return -ENOMEM;

	/* Always enable IRQ coalescing, so that the optimised interrupt
	 * flag-and-coalesce register can be used. If coalescing is disabled
	 * in the config file, just use the minimum timer value.
	 */
	if ((adf_get_cfg_int(accel_dev, "Accelerator0",
			     ADF_ETRMGR_COALESCING_ENABLED_FORMAT, bank_num,
			     &coalesc_enabled) == 0) && coalesc_enabled)
		adf_get_coalesc_timer(bank, "Accelerator0", bank_num);
	else
		bank->irq_coalesc_timer = ADF_COALESCING_MIN_TIME;

	for (i = 0; i < num_rings_per_bank; i++) {
		csr_ops->write_csr_ring_config(csr_addr, bank_num, i, 0);
		csr_ops->write_csr_ring_base(csr_addr, bank_num, i, 0);

		ring = &bank->rings[i];
		if (hw_data->tx_rings_mask & (1 << i)) {
			ring->inflights =
				kzalloc_node(sizeof(atomic_t),
					     GFP_KERNEL,
					     dev_to_node(&GET_DEV(accel_dev)));
			if (!ring->inflights)
				goto err;
		} else {
			if (i < hw_data->tx_rx_gap) {
				dev_err(&GET_DEV(accel_dev),
					"Invalid tx rings mask config\n");
				goto err;
			}
			tx_ring = &bank->rings[i - hw_data->tx_rx_gap];
			ring->inflights = tx_ring->inflights;
		}
	}
	if (adf_bank_debugfs_add(bank)) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to add bank debugfs entry\n");
		goto err;
	}

	csr_ops->write_csr_int_flag(csr_addr, bank_num, irq_mask);
	csr_ops->write_csr_int_srcsel(csr_addr, bank_num);

	return 0;
err:
	ring_mask = hw_data->tx_rings_mask;
	for_each_set_bit(i, &ring_mask, num_rings_per_bank) {
		ring = &bank->rings[i];
		kfree(ring->inflights);
		ring->inflights = NULL;
	}
	kfree(bank->rings);
	return -ENOMEM;
}

/**
 * adf_init_etr_data() - Initialize transport rings for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function initializes the communication channels (rings) to the
 * acceleration device accel_dev.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_init_etr_data(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *etr_data;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	void __iomem *csr_addr;
	u32 size;
	u32 num_banks = 0;
	int i, ret;

	etr_data = kzalloc_node(sizeof(*etr_data), GFP_KERNEL,
				dev_to_node(&GET_DEV(accel_dev)));
	if (!etr_data)
		return -ENOMEM;

	num_banks = GET_MAX_BANKS(accel_dev);
	size = num_banks * sizeof(struct adf_etr_bank_data);
	etr_data->banks = kzalloc_node(size, GFP_KERNEL,
				       dev_to_node(&GET_DEV(accel_dev)));
	if (!etr_data->banks) {
		ret = -ENOMEM;
		goto err_bank;
	}

	accel_dev->transport = etr_data;
	i = hw_data->get_etr_bar_id(hw_data);
	csr_addr = accel_dev->accel_pci_dev.pci_bars[i].virt_addr;

	/* accel_dev->debugfs_dir should always be non-NULL here */
	etr_data->debug = debugfs_create_dir("transport",
					     accel_dev->debugfs_dir);

	for (i = 0; i < num_banks; i++) {
		ret = adf_init_bank(accel_dev, &etr_data->banks[i], i,
				    csr_addr);
		if (ret)
			goto err_bank_all;
	}

	return 0;

err_bank_all:
	debugfs_remove(etr_data->debug);
	kfree(etr_data->banks);
err_bank:
	kfree(etr_data);
	accel_dev->transport = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(adf_init_etr_data);

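/* Free a bank's ring data and in-flight counters and remove its debugfs entries */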
static void cleanup_bank(struct adf_etr_bank_data *bank)
{
	struct adf_accel_dev *accel_dev = bank->accel_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u8 num_rings_per_bank = hw_data->num_rings_per_bank;
	u32 i;

	for (i = 0; i < num_rings_per_bank; i++) {
		struct adf_etr_ring_data *ring = &bank->rings[i];

		if (bank->ring_mask & (1 << i))
			adf_cleanup_ring(ring);

		if (hw_data->tx_rings_mask & (1 << i))
			kfree(ring->inflights);
	}
	kfree(bank->rings);
	adf_bank_debugfs_rm(bank);
	memset(bank, 0, sizeof(*bank));
}

static void adf_cleanup_etr_handles(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *etr_data = accel_dev->transport;
	u32 i, num_banks = GET_MAX_BANKS(accel_dev);

	for (i = 0; i < num_banks; i++)
		cleanup_bank(&etr_data->banks[i]);
}

/**
 * adf_cleanup_etr_data() - Clear transport rings for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function clears the communication channels (rings) of the
 * acceleration device accel_dev.
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *etr_data = accel_dev->transport;

	if (etr_data) {
		adf_cleanup_etr_handles(accel_dev);
		debugfs_remove(etr_data->debug);
		kfree(etr_data->banks->rings);
		kfree(etr_data->banks);
		kfree(etr_data);
		accel_dev->transport = NULL;
	}
}
EXPORT_SYMBOL_GPL(adf_cleanup_etr_data);