// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <adf_accel_devices.h>
#include <adf_pf2vf_msg.h>
#include <adf_common_drv.h>
#include <adf_gen2_hw_data.h>
#include "adf_dh895xcc_hw_data.h"
#include "icp_qat_hw.h"

/* Worker-thread to service-arbiter mappings, one word per accel engine */
static const u32 thrd_to_arb_map[ADF_DH895XCC_MAX_ACCELENGINES] = {
	0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
	0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
	0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222
};

static struct adf_hw_device_class dh895xcc_class = {
	.name = ADF_DH895XCC_DEVICE_NAME,
	.type = DEV_DH895XCC,
	.instances = 0
};

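/*
 * Fuse bits are set for disabled units, so the enabled-accelerator mask is
 * the inverted fuse word, shifted and masked down to the accelerator field.
 */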
static u32 get_accel_mask(struct adf_hw_device_data *self)
{
	u32 fuses = self->fuses;

	return ~fuses >> ADF_DH895XCC_ACCELERATORS_REG_OFFSET &
		ADF_DH895XCC_ACCELERATORS_MASK;
}

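/* As above: invert the fuses to obtain the mask of enabled accel engines */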
static u32 get_ae_mask(struct adf_hw_device_data *self)
{
	u32 fuses = self->fuses;

	return ~fuses & ADF_DH895XCC_ACCELENGINES_MASK;
}

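/*
 * PCI BAR indexes for this device generation, exposed through hw_data so
 * that device-independent code can locate the PMISC, eTR and SRAM regions.
 */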
static u32 get_misc_bar_id(struct adf_hw_device_data *self)
{
	return ADF_DH895XCC_PMISC_BAR;
}

static u32 get_etr_bar_id(struct adf_hw_device_data *self)
{
	return ADF_DH895XCC_ETR_BAR;
}

static u32 get_sram_bar_id(struct adf_hw_device_data *self)
{
	return ADF_DH895XCC_SRAM_BAR;
}

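/*
 * Build the capability mask for this device: start from everything this
 * hardware family supports, then clear each capability whose slice is
 * reported as disabled in the legacy fuse word read from PCI config space.
 */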
static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
{
	struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev;
	u32 capabilities;
	u32 legfuses;

	capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
		       ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
		       ICP_ACCEL_CAPABILITIES_AUTHENTICATION;

	/* Read accelerator capabilities mask */
	pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET, &legfuses);

	if (legfuses & ICP_ACCEL_MASK_CIPHER_SLICE)
		capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
	if (legfuses & ICP_ACCEL_MASK_PKE_SLICE)
		capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
	if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE)
		capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;

	return capabilities;
}

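/* Map the fuse-encoded SKU field onto the generic dev_sku_info enumeration */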
static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
{
	int sku = (self->fuses & ADF_DH895XCC_FUSECTL_SKU_MASK)
	    >> ADF_DH895XCC_FUSECTL_SKU_SHIFT;

	switch (sku) {
	case ADF_DH895XCC_FUSECTL_SKU_1:
		return DEV_SKU_1;
	case ADF_DH895XCC_FUSECTL_SKU_2:
		return DEV_SKU_2;
	case ADF_DH895XCC_FUSECTL_SKU_3:
		return DEV_SKU_3;
	case ADF_DH895XCC_FUSECTL_SKU_4:
		return DEV_SKU_4;
	default:
		return DEV_SKU_UNKNOWN;
	}
}

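/* Hand the static per-engine arbiter map above to the common arbiter code */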
static const u32 *adf_get_arbiter_mapping(void)
{
	return thrd_to_arb_map;
}

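/*
 * Unmask bundle (ring bank) and miscellaneous interrupts in the SMIA
 * registers. When SR-IOV is active (pf.vf_info is set), no bundle
 * interrupts are enabled on the PF, as the banks are serviced by the VFs.
 */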
static void adf_enable_ints(struct adf_accel_dev *accel_dev)
{
	void __iomem *addr;

	addr = (&GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR])->virt_addr;

	/* Enable bundle and misc interrupts */
	ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF0_MASK_OFFSET,
		   accel_dev->pf.vf_info ? 0 :
			BIT_ULL(GET_MAX_BANKS(accel_dev)) - 1);
	ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF1_MASK_OFFSET,
		   ADF_DH895XCC_SMIA1_MASK);
}

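/*
 * DH895xCC supports up to 32 VFs: sources for VFs 0-15 come from the
 * generic GEN2 helper, while VFs 16-31 are reported in ERRSOU5 and must
 * be merged in by this device-specific wrapper.
 */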
static u32 get_vf2pf_sources(void __iomem *pmisc_bar)
{
	u32 errsou5, errmsk5, vf_int_mask;

	vf_int_mask = adf_gen2_get_vf2pf_sources(pmisc_bar);

	/* Get the interrupt sources triggered by VFs, but to avoid duplicates
	 * in the work queue, clear the vf_int_mask bits that are already
	 * masked in the ERRMSK register.
	 */
	errsou5 = ADF_CSR_RD(pmisc_bar, ADF_GEN2_ERRSOU5);
	errmsk5 = ADF_CSR_RD(pmisc_bar, ADF_GEN2_ERRMSK5);
	vf_int_mask |= ADF_DH895XCC_ERR_REG_VF2PF_U(errsou5);
	vf_int_mask &= ~ADF_DH895XCC_ERR_REG_VF2PF_U(errmsk5);

	return vf_int_mask;
}

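/*
 * In the ERRMSK registers a set bit masks (disables) the corresponding
 * VF2PF interrupt, so enabling means clearing bits and disabling means
 * setting them. VFs 16-31 are handled here through ERRMSK5; VFs 0-15 go
 * through the generic GEN2 helpers.
 */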
static void enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
{
	/* Enable VF2PF Messaging Ints - VFs 0 through 15 per vf_mask[15:0] */
	adf_gen2_enable_vf2pf_interrupts(pmisc_addr, vf_mask);

	/* Enable VF2PF Messaging Ints - VFs 16 through 31 per vf_mask[31:16] */
	if (vf_mask >> 16) {
		u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK5)
			  & ~ADF_DH895XCC_ERR_MSK_VF2PF_U(vf_mask);

		ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, val);
	}
}

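/* Mirror image of enable_vf2pf_interrupts(): set the mask bits to disable */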
static void disable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
{
	/* Disable VF2PF interrupts for VFs 0 through 15 per vf_mask[15:0] */
	adf_gen2_disable_vf2pf_interrupts(pmisc_addr, vf_mask);

	/* Disable VF2PF interrupts for VFs 16 through 31 per vf_mask[31:16] */
	if (vf_mask >> 16) {
		u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK5)
			  | ADF_DH895XCC_ERR_MSK_VF2PF_U(vf_mask);

		ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, val);
	}
}

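/*
 * Program the AE-to-function map registers for SR-IOV; only the number of
 * group A/B registers differs per device, so this wraps the GEN2 helper.
 */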
static void configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable)
{
	adf_gen2_cfg_iov_thds(accel_dev, enable,
			      ADF_DH895XCC_AE2FUNC_MAP_GRP_A_NUM_REGS,
			      ADF_DH895XCC_AE2FUNC_MAP_GRP_B_NUM_REGS);
}

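/**
 * adf_init_hw_data_dh895xcc() - Populate the device-specific hw_data ops
 * @hw_data: uninitialized hw_data structure owned by the accel device
 *
 * Fills in the constants and function pointers that the device-independent
 * QAT core uses to drive a DH895xCC device. A typical caller is the PCI
 * probe path, roughly (a sketch; names may differ by kernel version):
 *
 *	hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
 *			       dev_to_node(&pdev->dev));
 *	if (!hw_data)
 *		return -ENOMEM;
 *	accel_dev->hw_device = hw_data;
 *	adf_init_hw_data_dh895xcc(accel_dev->hw_device);
 */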
void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
{
	hw_data->dev_class = &dh895xcc_class;
	hw_data->instance_id = dh895xcc_class.instances++;
	hw_data->num_banks = ADF_DH895XCC_ETR_MAX_BANKS;
	hw_data->num_rings_per_bank = ADF_ETR_MAX_RINGS_PER_BANK;
	hw_data->num_accel = ADF_DH895XCC_MAX_ACCELERATORS;
	hw_data->num_logical_accel = 1;
	hw_data->num_engines = ADF_DH895XCC_MAX_ACCELENGINES;
	hw_data->tx_rx_gap = ADF_GEN2_RX_RINGS_OFFSET;
	hw_data->tx_rings_mask = ADF_GEN2_TX_RINGS_MASK;
	hw_data->alloc_irq = adf_isr_resource_alloc;
	hw_data->free_irq = adf_isr_resource_free;
	hw_data->enable_error_correction = adf_gen2_enable_error_correction;
	hw_data->get_accel_mask = get_accel_mask;
	hw_data->get_ae_mask = get_ae_mask;
	hw_data->get_accel_cap = get_accel_cap;
	hw_data->get_num_accels = adf_gen2_get_num_accels;
	hw_data->get_num_aes = adf_gen2_get_num_aes;
	hw_data->get_etr_bar_id = get_etr_bar_id;
	hw_data->get_misc_bar_id = get_misc_bar_id;
	hw_data->get_admin_info = adf_gen2_get_admin_info;
	hw_data->get_arb_info = adf_gen2_get_arb_info;
	hw_data->get_sram_bar_id = get_sram_bar_id;
	hw_data->get_sku = get_sku;
	hw_data->fw_name = ADF_DH895XCC_FW;
	hw_data->fw_mmp_name = ADF_DH895XCC_MMP;
	hw_data->init_admin_comms = adf_init_admin_comms;
	hw_data->exit_admin_comms = adf_exit_admin_comms;
	hw_data->configure_iov_threads = configure_iov_threads;
	hw_data->send_admin_init = adf_send_admin_init;
	hw_data->init_arb = adf_init_arb;
	hw_data->exit_arb = adf_exit_arb;
	hw_data->get_arb_mapping = adf_get_arbiter_mapping;
	hw_data->enable_ints = adf_enable_ints;
	hw_data->reset_device = adf_reset_sbr;
	hw_data->get_pf2vf_offset = adf_gen2_get_pf2vf_offset;
	hw_data->get_vf2pf_sources = get_vf2pf_sources;
	hw_data->enable_vf2pf_interrupts = enable_vf2pf_interrupts;
	hw_data->disable_vf2pf_interrupts = disable_vf2pf_interrupts;
	hw_data->enable_pfvf_comms = adf_enable_pf2vf_comms;
	hw_data->disable_iov = adf_disable_sriov;
	hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;

	adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
}

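/* Undo the class instance bookkeeping done in adf_init_hw_data_dh895xcc() */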
void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
{
	hw_data->dev_class->instances--;
}