// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2017-2019 NXP */

#include "enetc.h"

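/* The control BD ring (CBDR) is the mailbox used below to send commands to
 * the ENETC hardware: the driver writes a command descriptor at next_to_use
 * and advances the producer index register (PIR); the hardware executes
 * commands in order and advances the consumer index register (CIR);
 * completed descriptors are then error-checked and recycled by
 * enetc_clean_cbdr().
 */
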
int enetc_setup_cbdr(struct device *dev, struct enetc_hw *hw, int bd_count,
		     struct enetc_cbdr *cbdr)
{
	int size = bd_count * sizeof(struct enetc_cbd);

	cbdr->bd_base = dma_alloc_coherent(dev, size, &cbdr->bd_dma_base,
					   GFP_KERNEL);
	if (!cbdr->bd_base)
		return -ENOMEM;

	/* h/w requires 128B alignment */
	if (!IS_ALIGNED(cbdr->bd_dma_base, 128)) {
		dma_free_coherent(dev, size, cbdr->bd_base,
				  cbdr->bd_dma_base);
		return -EINVAL;
	}

	cbdr->next_to_clean = 0;
	cbdr->next_to_use = 0;
	cbdr->dma_dev = dev;
	cbdr->bd_count = bd_count;

	cbdr->pir = hw->reg + ENETC_SICBDRPIR;
	cbdr->cir = hw->reg + ENETC_SICBDRCIR;
	cbdr->mr = hw->reg + ENETC_SICBDRMR;

	/* set CBDR cache attributes */
	enetc_wr(hw, ENETC_SICAR2,
		 ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);

	enetc_wr(hw, ENETC_SICBDRBAR0, lower_32_bits(cbdr->bd_dma_base));
	enetc_wr(hw, ENETC_SICBDRBAR1, upper_32_bits(cbdr->bd_dma_base));
	enetc_wr(hw, ENETC_SICBDRLENR, ENETC_RTBLENR_LEN(cbdr->bd_count));

	enetc_wr_reg(cbdr->pir, cbdr->next_to_clean);
	enetc_wr_reg(cbdr->cir, cbdr->next_to_use);
	/* enable ring */
	enetc_wr_reg(cbdr->mr, BIT(31));

	return 0;
}
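
/* Example (a sketch, not taken from this file): a probe/remove path would
 * typically pair enetc_setup_cbdr() with enetc_teardown_cbdr() below; the
 * 64-entry ring size is an assumed value, not one mandated here:
 *
 *	err = enetc_setup_cbdr(dev, &si->hw, 64, &si->cbd_ring);
 *	if (err)
 *		return err;
 *	...
 *	enetc_teardown_cbdr(&si->cbd_ring);
 */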

void enetc_teardown_cbdr(struct enetc_cbdr *cbdr)
{
	int size = cbdr->bd_count * sizeof(struct enetc_cbd);

	/* disable ring */
	enetc_wr_reg(cbdr->mr, 0);

	dma_free_coherent(cbdr->dma_dev, size, cbdr->bd_base,
			  cbdr->bd_dma_base);
	cbdr->bd_base = NULL;
	cbdr->dma_dev = NULL;
}

static void enetc_clean_cbdr(struct enetc_cbdr *ring)
{
	struct enetc_cbd *dest_cbd;
	int i, status;

	i = ring->next_to_clean;

	while (enetc_rd_reg(ring->cir) != i) {
		dest_cbd = ENETC_CBD(*ring, i);
		status = dest_cbd->status_flags & ENETC_CBD_STATUS_MASK;
		if (status)
			dev_warn(ring->dma_dev, "CMD err %04x for cmd %04x\n",
				 status, dest_cbd->cmd);

		memset(dest_cbd, 0, sizeof(*dest_cbd));

		i = (i + 1) % ring->bd_count;
	}

	ring->next_to_clean = i;
}

static int enetc_cbd_unused(struct enetc_cbdr *r)
{
	return (r->next_to_clean - r->next_to_use - 1 + r->bd_count) %
	       r->bd_count;
}
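
/* Worked example for the arithmetic above: with bd_count = 8,
 * next_to_clean = 2 and next_to_use = 5, (2 - 5 - 1 + 8) % 8 = 4 BDs are
 * free. One slot is deliberately kept unused so that a full ring can be
 * told apart from an empty one.
 */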

int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd)
{
	struct enetc_cbdr *ring = &si->cbd_ring;
	int timeout = ENETC_CBDR_TIMEOUT;
	struct enetc_cbd *dest_cbd;
	int i;

	if (unlikely(!ring->bd_base))
		return -EIO;

	if (unlikely(!enetc_cbd_unused(ring)))
		enetc_clean_cbdr(ring);

	i = ring->next_to_use;
	dest_cbd = ENETC_CBD(*ring, i);

	/* copy command to the ring */
	*dest_cbd = *cbd;
	i = (i + 1) % ring->bd_count;

	ring->next_to_use = i;
	/* let H/W know BD ring has been updated */
	enetc_wr_reg(ring->pir, i);

	do {
		if (enetc_rd_reg(ring->cir) == i)
			break;
		udelay(10); /* cannot sleep, rtnl_lock() */
		timeout -= 10;
	} while (timeout);

	if (!timeout)
		return -EBUSY;

	/* the CBD may have been written back by H/W; copy it to the caller */
	*cbd = *dest_cbd;

	enetc_clean_cbdr(ring);

	return 0;
}
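
/* Illustrative caller pattern (a sketch mirroring the helpers below; the
 * class and index values are placeholders): build the command BD on the
 * stack, send it, and read any written-back fields from the same struct:
 *
 *	struct enetc_cbd cbd;
 *
 *	memset(&cbd, 0, sizeof(cbd));
 *	cbd.cls = 1;
 *	cbd.index = cpu_to_le16(idx);
 *	err = enetc_send_cmd(si, &cbd);
 */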

int enetc_clear_mac_flt_entry(struct enetc_si *si, int index)
{
	struct enetc_cbd cbd;

	memset(&cbd, 0, sizeof(cbd));

	cbd.cls = 1;
	cbd.status_flags = ENETC_CBD_FLAGS_SF;
	cbd.index = cpu_to_le16(index);

	return enetc_send_cmd(si, &cbd);
}

int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
			    char *mac_addr, int si_map)
{
	struct enetc_cbd cbd;
	u32 upper;
	u16 lower;

	memset(&cbd, 0, sizeof(cbd));

	/* fill up the "set" descriptor */
	cbd.cls = 1;
	cbd.status_flags = ENETC_CBD_FLAGS_SF;
	cbd.index = cpu_to_le16(index);
	cbd.opt[3] = cpu_to_le32(si_map);
	/* enable entry */
	cbd.opt[0] = cpu_to_le32(BIT(31));

	/* the 6-byte MAC address is split: bytes 0-3 go in addr[0],
	 * bytes 4-5 in addr[1]
	 */
	upper = *(const u32 *)mac_addr;
	lower = *(const u16 *)(mac_addr + 4);
	cbd.addr[0] = cpu_to_le32(upper);
	cbd.addr[1] = cpu_to_le32(lower);

	return enetc_send_cmd(si, &cbd);
}
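
/* Example (illustrative only; index 0 and the BIT(0) si_map value are
 * assumptions for the sketch, not values defined in this file):
 *
 *	err = enetc_set_mac_flt_entry(si, 0, addr, BIT(0));
 */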

#define RFSE_ALIGN 64
/* Set entry in RFS table */
int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
		       int index)
{
	struct enetc_cbdr *ring = &si->cbd_ring;
	struct enetc_cbd cbd = {.cmd = 0};
	dma_addr_t dma, dma_align;
	void *tmp, *tmp_align;
	int err;

	/* fill up the "set" descriptor */
	cbd.cmd = 0;
	cbd.cls = 4;
	cbd.index = cpu_to_le16(index);
	cbd.length = cpu_to_le16(sizeof(*rfse));
	cbd.opt[3] = cpu_to_le32(0); /* SI */

	tmp = dma_alloc_coherent(ring->dma_dev, sizeof(*rfse) + RFSE_ALIGN,
				 &dma, GFP_KERNEL);
	if (!tmp) {
		dev_err(ring->dma_dev, "DMA mapping of RFS entry failed!\n");
		return -ENOMEM;
	}

	dma_align = ALIGN(dma, RFSE_ALIGN);
	tmp_align = PTR_ALIGN(tmp, RFSE_ALIGN);
	memcpy(tmp_align, rfse, sizeof(*rfse));

	cbd.addr[0] = cpu_to_le32(lower_32_bits(dma_align));
	cbd.addr[1] = cpu_to_le32(upper_32_bits(dma_align));

	err = enetc_send_cmd(si, &cbd);
	if (err)
		dev_err(ring->dma_dev, "FS entry add failed (%d)!", err);

	dma_free_coherent(ring->dma_dev, sizeof(*rfse) + RFSE_ALIGN,
			  tmp, dma);

	return err;
}
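
/* Note on the bounce-buffer pattern above (also used by
 * enetc_cmd_rss_table() below): the buffer is over-allocated by the
 * alignment amount so that ALIGN()/PTR_ALIGN() can always carve a
 * 64B-aligned DMA address and CPU pointer out of it.
 */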

#define RSSE_ALIGN 64
static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count,
			       bool read)
{
	struct enetc_cbdr *ring = &si->cbd_ring;
	struct enetc_cbd cbd = {.cmd = 0};
	dma_addr_t dma, dma_align;
	u8 *tmp, *tmp_align;
	int err, i;

	if (count < RSSE_ALIGN)
		/* the HW only accepts a full 64-entry table */
		return -EINVAL;

	tmp = dma_alloc_coherent(ring->dma_dev, count + RSSE_ALIGN,
				 &dma, GFP_KERNEL);
	if (!tmp) {
		dev_err(ring->dma_dev, "DMA mapping of RSS table failed!\n");
		return -ENOMEM;
	}
	dma_align = ALIGN(dma, RSSE_ALIGN);
	tmp_align = PTR_ALIGN(tmp, RSSE_ALIGN);

	if (!read)
		for (i = 0; i < count; i++)
			tmp_align[i] = (u8)(table[i]);

	/* fill up the descriptor */
	cbd.cmd = read ? 2 : 1;
	cbd.cls = 3;
	cbd.length = cpu_to_le16(count);

	cbd.addr[0] = cpu_to_le32(lower_32_bits(dma_align));
	cbd.addr[1] = cpu_to_le32(upper_32_bits(dma_align));

	err = enetc_send_cmd(si, &cbd);
	if (err)
		dev_err(ring->dma_dev, "RSS cmd failed (%d)!", err);

	if (read)
		for (i = 0; i < count; i++)
			table[i] = tmp_align[i];

	dma_free_coherent(ring->dma_dev, count + RSSE_ALIGN, tmp, dma);

	return err;
}

/* Get RSS table */
int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count)
{
	return enetc_cmd_rss_table(si, table, count, true);
}

/* Set RSS table */
int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count)
{
	return enetc_cmd_rss_table(si, (u32 *)table, count, false);
}
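
/* Example (illustrative): reading the indirection table into a
 * caller-provided buffer; the 64-entry size matches the RSSE_ALIGN minimum
 * enforced above:
 *
 *	u32 rss_table[64];
 *
 *	err = enetc_get_rss_table(si, rss_table, ARRAY_SIZE(rss_table));
 */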